This one is more complete, but a lot of tests are for MPI functions not supported in SMPI
Lots of tests are disabled, and some folders not (yet) included.
if(enable_smpi)
if(HAVE_RAWCTX)
- ADD_TEST(smpi-mpich-env-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/env ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/pt2pt ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-context-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/context ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-profile-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/profile ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-coll-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-coll-selector-mpich-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C} -selector=mpich)
- ADD_TEST(smpi-mpich-coll-selector-ompi-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C} -selector=ompi)
- set_tests_properties(smpi-mpich-env-raw smpi-mpich-context-raw smpi-mpich-pt2pt-raw smpi-mpich-coll-raw smpi-mpich-coll-selector-ompi-raw smpi-mpich-coll-selector-mpich-raw smpi-mpich-profile-raw PROPERTIES PASS_REGULAR_EXPRESSION "-- No differences found; test successful")
+ ADD_TEST(smpi-mpich3-attr-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/attr ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/attr -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-coll-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread)
+ ADD_TEST(smpi-mpich3-coll-ompi-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0)
+ ADD_TEST(smpi-mpich3-coll-mpich-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:mpich)
+ ADD_TEST(smpi-mpich3-comm-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/comm ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/comm -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-init-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/init ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/init -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-datatype-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/datatype ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-group-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/group ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/group -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/pt2pt ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ set_tests_properties(smpi-mpich3-attr-raw smpi-mpich3-coll-thread smpi-mpich3-coll-ompi-thread smpi-mpich3-coll-mpich-thread smpi-mpich3-comm-raw smpi-mpich3-init-raw smpi-mpich3-datatype-raw smpi-mpich3-pt2pt-raw smpi-mpich3-group-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
endif()
teshsuite/simdag/partask/CMakeLists.txt
teshsuite/simdag/platforms/CMakeLists.txt
teshsuite/smpi/CMakeLists.txt
- teshsuite/smpi/mpich-test/CMakeLists.txt
- teshsuite/smpi/mpich-test/coll/CMakeLists.txt
- teshsuite/smpi/mpich-test/context/CMakeLists.txt
- teshsuite/smpi/mpich-test/env/CMakeLists.txt
- teshsuite/smpi/mpich-test/profile/CMakeLists.txt
- teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/coll/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/context/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/env/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/profile/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/attr/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/comm/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/coll/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/datatype/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/group/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/init/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt
teshsuite/xbt/CMakeLists.txt
)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/partask)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/attr)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/comm)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/group)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/init)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/xbt)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/testsuite/surf)
--- /dev/null
+set(tesh_files
+ ${tesh_files}
+
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+
+#set(stdo_std_smpi
+# ${CMAKE_CURRENT_SOURCE_DIR}/topol/cartmap.std
+# ${CMAKE_CURRENT_SOURCE_DIR}/topol/graphtest.std
+# ${CMAKE_CURRENT_SOURCE_DIR}/topol/cartf.std
+#)
+
+if("${CMAKE_BINARY_DIR}" STREQUAL "${CMAKE_HOME_DIRECTORY}")
+else()
+ foreach(srcfile ${stdo_std_smpi})
+ set(dstfile ${srcfile})
+ string(REPLACE "${CMAKE_HOME_DIRECTORY}" "${CMAKE_BINARY_DIR}" dstfile "${dstfile}")
+ #message("copy ${srcfile} to ${dstfile}")
+ configure_file("${srcfile}" "${dstfile}" COPYONLY)
+ endforeach()
+endif()
+
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/README
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtest
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ ${CMAKE_CURRENT_SOURCE_DIR}/checktest
+ PARENT_SCOPE)
--- /dev/null
+MPICH Test Suite
+
+This test suite is a *supplement* to other test suites, including the
+original MPICH testsuite, the Intel testsuite, and the IBM MPI test suite
+(or test suites derived from that test, including the MPI C++ tests).
+
+Building the Test Suite
+=======================
+In many cases, configure will find the MPI implementation
+automatically. In some cases, it will need some help. For example:
+
+For IBM MPI, where the compilation commands are not mpicc and mpif77 etc.:
+
+./configure CC=xlc MPICC=mpcc F77=xlf MPIF77=mpxlf CXX=xlC \
+ MPICXX="mpCC -cpp" F90=xlf90 MPIF90=mpxlf90 \
+ --disable-spawn \
+ --enable-strictmpi
+
+(or the _r versions of the compilers)
+
+If mpicc and friends are not in your default path (and you do not want to
+add them), you can specify the path with --with-mpi=<path>. For example,
+if they are in /usr/local/mympi/bin, use
+
+./configure --with-mpi=/usr/local/mympi
+
+(configure will append the bin to the path that you give).
+
+You may need to add MPI_SIZEOF_OFFSET=8 .
+
+The option "-cpp" is needed for at least some versions of mpCC to define the
+C++ bindings of the MPI routines.
+
+For implementations that do not implement all of MPI-2, there are --disable
+options, including --disable-spawn and --disable-cxx. To restrict tests to
+just what is defined in the MPI specification, use --enable-strictmpi .
+
+The script that runs the tests assumes that the MPI implementation
+supports mpiexec; you should consider this the first test of the implementation.
+
+Setting Options
+===============
+The following environment variables will modify the behavior of the tests
+
+MPITEST_DEBUG - if set, output information for debugging the test suite
+MPITEST_VERBOSE - if set to an integer value, output messages whose
+ level is at least that value (0 is a good choice here)
+MPITEST_RETURN_WITH_CODE - Set the return code from the test programs based on
+ success or failure, with a zero for success and one
+ for failure (value must be yes, YES, true, or TRUE to
+ turn this on)
+MPITEST_THREADLEVEL_DEFAULT - Set the default thread level. Values are
+ multiple, serialized, funneled, and single.
+
+Batch Systems
+=============
+For systems that run applications through a batch system, the option "-batch"
+to the runtests script will create a script file that can be edited and
+submitted to the batch system. The script checktests can be run to
+summarize the results.
+
+Specifically, (assuming the bash shell, and that the directory "btest", a
+subdirectory of the test suite directory, is used for running the tests):
+
+export MPITEST_BATCHDIR=`pwd`/btest
+runtests -batch -tests=testlist
+... edit btest/runtests.batch to make it a value batch submissions script
+... run that script and wait for the batch job to complete
+cd btest && ../checktests
+
+If a program other than mpiexec is used in the batch form to run programs, then
+specify that to runtests:
+
+ runtests -batch -mpiexec=aprun -tests=testlist
+
+(Here, aprun is the command used on Cray XE6 systems.)
+
+Note that some programs that are used to run MPI programs add extra output,
+which can confuse any tool that depends on clean output in STDOUT. Since
+such unfortunate behavior is common, the option -ignorebogus can be given
+to checktests:
+
+cd btest && ../checktests --ignorebogus
+
+Controlling the Tests that are Run
+==================================
+The tests are actually built and run by the script "runtests". This script
+can be given a file that contains a list of the tests to run. This file has
+two primary types of entries:
+
+ directories: Enter directory and look for the file "testlist".
+ Recursively run the contents of that file
+ program names: Build and run that program
+
+Lines may also be commented out with "#".
+
+The simplest program line contains the name of the program and the number of
+MPI processes to use. For example, the following will build the
+program sendrecv1 and run it with 4 processes:
+
+sendrecv1 4
+
+In addition, the program line can contain key=value pairs that provide
+special information about running the test. For example,
+
+sendflood 8 timeLimit=600
+
+says to build and run the program sendflood with 8 MPI processes and
+permit the test to run for 600 seconds (by default, at least for
+MPICH, the default timelimit is 180 seconds). Other key=value pairs
+can be used to select whether a program should be run at all,
+depending on the abilities of the MPI implementation (this is
+particularly important for Fortran programs, since preprocessor
+support for Fortran is a non-standard extension to the Fortran
+language, and there are some compilers that would not accept Fortran
+programs that used the preprocessor).
+
+The most important key=value pairs are:
+
+
+timeLimit=n : Use a timelimit of n seconds
+
+arg=string : Run the program with string as an argument to the program
+
+mpiexecarg=string : Run the program with string as an argument to mpiexec
+
+env=name=value : Run the program with environment variable "name" given the
+ value "value"
+
+mpiversion=x.y : Build and run the program only if the MPI version is at
+ least x.y. For example,
+
+ distgraph1 4 mpiversion=2.2
+
+ will build and run distgraph1 with 4 MPI processes only
+ if the MPI version is at least 2.2.
+
+strict=bool : If bool is false, only build and run the program if
+ --enable-strictmpi was not used in configuring the test suite.
+ That is, a line such as
+
+ neighb_coll 4 strict=false
+
+ Says that this test is not valid for a strict MPI implementation;
+ it contains extensions to the standard, or in the case of some
+ MPICH development, MPIX routines
+
+resultTest=proc : This is used to change the way in which the success or
+ failure of a test is evaluated. proc is one of several
+ Perl subroutines defined within the runtest program. These
+ are primarily used within the testsuite for tests programs
+ exit with expected status values or that timeouts are
+ in fact handled.
+
+
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1")
+
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+ add_executable(attr2type attr2type.c ../util/mtest.c)
+ add_executable(attrend2 attrend2.c ../util/mtest.c)
+ add_executable(attrend attrend.c ../util/mtest.c)
+ add_executable(attrerr attrerr.c ../util/mtest.c)
+ add_executable(attrerrcomm attrerrcomm.c ../util/mtest.c)
+ add_executable(attrerrtype attrerrtype.c ../util/mtest.c)
+ add_executable(attric attric.c ../util/mtest.c)
+ add_executable(attrorder attrorder.c ../util/mtest.c)
+ add_executable(attrordercomm attrordercomm.c ../util/mtest.c)
+ add_executable(attrordertype attrordertype.c ../util/mtest.c)
+ add_executable(attrt attrt.c ../util/mtest.c)
+ add_executable(baseattr2 baseattr2.c ../util/mtest.c)
+ add_executable(baseattrcomm baseattrcomm.c ../util/mtest.c)
+ add_executable(fkeyval fkeyval.c ../util/mtest.c)
+ add_executable(fkeyvalcomm fkeyvalcomm.c ../util/mtest.c)
+ add_executable(fkeyvaltype fkeyvaltype.c ../util/mtest.c)
+ add_executable(keyval_double_free keyval_double_free.c ../util/mtest.c)
+
+
+ target_link_libraries(attr2type simgrid)
+ target_link_libraries(attrend2 simgrid)
+ target_link_libraries(attrend simgrid)
+ target_link_libraries(attrerr simgrid)
+ target_link_libraries(attrerrcomm simgrid)
+ target_link_libraries(attrerrtype simgrid)
+ target_link_libraries(attric simgrid)
+ target_link_libraries(attrorder simgrid)
+ target_link_libraries(attrordercomm simgrid)
+ target_link_libraries(attrordertype simgrid)
+ target_link_libraries(attrt simgrid)
+ target_link_libraries(baseattr2 simgrid)
+ target_link_libraries(baseattrcomm simgrid)
+ target_link_libraries(fkeyval simgrid)
+ target_link_libraries(fkeyvalcomm simgrid)
+ target_link_libraries(fkeyvaltype simgrid)
+ target_link_libraries(keyval_double_free simgrid)
+
+
+ set_target_properties(attr2type PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrend2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrend PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrerr PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrerrcomm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrerrtype PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attric PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrorder PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrordercomm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrordertype PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(attrt PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(baseattr2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(baseattrcomm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(fkeyval PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(fkeyvalcomm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(fkeyvaltype PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(keyval_double_free PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/attr2type.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrend2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrend.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrerr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrerrcomm.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrerrtype.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attric.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrorder.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrordercomm.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrordertype.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrt.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/baseattr2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/baseattrcomm.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/fkeyval.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/fkeyvalcomm.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/fkeyvaltype.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/keyval_double_free.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2007 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static int foo_keyval = MPI_KEYVAL_INVALID;
+
+int foo_initialize(void);
+void foo_finalize(void);
+
+int foo_copy_attr_function(MPI_Datatype type, int type_keyval,
+ void *extra_state, void *attribute_val_in,
+ void *attribute_val_out, int *flag);
+int foo_delete_attr_function(MPI_Datatype type, int type_keyval,
+ void *attribute_val, void *extra_state);
+static const char *my_func = 0;
+static int verbose = 0;
+static int delete_called = 0;
+static int copy_called = 0;
+
+int main(int argc, char *argv[])
+{
+ int mpi_errno;
+ MPI_Datatype type, duptype;
+ int rank;
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ foo_initialize();
+
+ mpi_errno = MPI_Type_contiguous(2, MPI_INT, &type);
+
+ mpi_errno = MPI_Type_set_attr(type, foo_keyval, NULL);
+
+ mpi_errno = MPI_Type_dup(type, &duptype);
+
+ my_func = "Free of type";
+ mpi_errno = MPI_Type_free(&type);
+
+ my_func = "free of duptype";
+ mpi_errno = MPI_Type_free(&duptype);
+
+ foo_finalize();
+
+ if (rank == 0) {
+ int errs = 0;
+ if (copy_called != 1) {
+ printf( "Copy called %d times; expected once\n", copy_called );
+ errs++;
+ }
+ if (delete_called != 2) {
+ printf( "Delete called %d times; expected twice\n", delete_called );
+ errs++;
+ }
+ if (errs == 0) {
+ printf( " No Errors\n" );
+ }else if(mpi_errno!=MPI_SUCCESS){
+ printf( " Output fail - Found %d errors\n", errs );
+ }else {
+ printf( " Found %d errors\n", errs );
+ }
+ fflush(stdout);
+ }
+
+ MPI_Finalize();
+ return 0;
+}
+
+int foo_copy_attr_function(MPI_Datatype type,
+ int type_keyval,
+ void *extra_state,
+ void *attribute_val_in,
+ void *attribute_val_out,
+ int *flag)
+{
+ if (verbose) printf("copy fn. called\n");
+ copy_called ++;
+ * (char **) attribute_val_out = NULL;
+ *flag = 1;
+
+ return MPI_SUCCESS;
+}
+
+int foo_delete_attr_function(MPI_Datatype type,
+ int type_keyval,
+ void *attribute_val,
+ void *extra_state)
+{
+ if (verbose) printf("delete fn. called in %s\n", my_func );
+ delete_called ++;
+
+ return MPI_SUCCESS;
+}
+
+int foo_initialize(void)
+{
+ int mpi_errno;
+
+ /* create keyval for use later */
+ mpi_errno = MPI_Type_create_keyval(foo_copy_attr_function,
+ foo_delete_attr_function,
+ &foo_keyval,
+ NULL);
+ if (mpi_errno==MPI_SUCCESS && verbose) printf("created keyval\n");
+
+ return 0;
+}
+
+void foo_finalize(void)
+{
+ int mpi_errno;
+
+ /* remove keyval */
+ mpi_errno = MPI_Type_free_keyval(&foo_keyval);
+
+ if (mpi_errno==MPI_SUCCESS && verbose) printf("freed keyval\n");
+
+ return;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2008 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ The MPI-2 specification makes it clear that delect attributes are
+ called on MPI_COMM_WORLD and MPI_COMM_SELF at the very beginning of
+ MPI_Finalize. This is useful for tools that want to perform the MPI
+ equivalent of an "at_exit" action.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int exit_key = MPI_KEYVAL_INVALID;
+int wasCalled = 0;
+int foundError = 0;
+/* #define DEBUG */
+int delete_fn ( MPI_Comm, int, void *, void * );
+#ifdef DEBUG
+#define FFLUSH fflush(stdout);
+#else
+#define FFLUSH
+#endif
+
+int main( int argc, char **argv )
+{
+ int errs = 0, wrank;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ /* create the keyval for the exit handler */
+ MPI_Keyval_create( MPI_NULL_COPY_FN, delete_fn, &exit_key, (void *)0 );
+
+ /* Attach to comm_self */
+ MPI_Attr_put( MPI_COMM_SELF, exit_key, (void*)0 );
+ /* We can free the key now */
+ MPI_Keyval_free( &exit_key );
+
+ /* Now, exit MPI */
+ /* MTest_Finalize( errs ); */
+ MPI_Finalize();
+
+ /* Check that the exit handler was called, and without error */
+ if (wrank == 0) {
+ /* In case more than one process exits MPI_Finalize */
+ if (wasCalled != 1) {
+ errs++;
+ printf( "Attribute delete function on MPI_COMM_SELF was not called\n" );
+ }
+ if (foundError != 0) {
+ errs++;
+ printf( "Found %d errors while executing delete function in MPI_COMM_SELF\n", foundError );
+ }
+ if (errs == 0) {
+ printf( " No Errors\n" );
+ }
+ else {
+ printf( " Found %d errors\n", errs );
+ }
+ fflush(stdout );
+ }
+
+ return 0;
+}
+
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ int flag;
+ wasCalled++;
+ MPI_Finalized( &flag );
+ if (flag) {
+ foundError++;
+ }
+ return MPI_SUCCESS;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2008 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ The MPI-2.2 specification makes it clear that attributes are called on
+ MPI_COMM_WORLD and MPI_COMM_SELF at the very beginning of MPI_Finalize in
+ LIFO order with respect to the order in which they are set. This is
+ useful for tools that want to perform the MPI equivalent of an "at_exit"
+ action.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* 20 ought to be enough attributes to ensure that hash-table based MPI
+ * implementations do not accidentally pass the test except by being extremely
+ * "lucky". There are (20!) possible permutations which means that there is
+ * about a 1 in 2.43e18 chance of getting LIFO ordering out of a hash table,
+ * assuming a decent hash function is used. */
+#define NUM_TEST_ATTRS (20)
+
+static __attribute__((unused)) int exit_keys[NUM_TEST_ATTRS]; /* init to MPI_KEYVAL_INVALID */
+static __attribute__((unused)) int was_called[NUM_TEST_ATTRS];
+int foundError = 0;
+int delete_fn (MPI_Comm, int, void *, void *);
+
+int main(int argc, char **argv)
+{
+ int wrank;
+
+ MTest_Init(&argc, &argv);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ int errs = 0, wrank;
+ int i;
+ for (i = 0; i < NUM_TEST_ATTRS; ++i) {
+ exit_keys[i] = MPI_KEYVAL_INVALID;
+ was_called[i] = 0;
+
+ /* create the keyval for the exit handler */
+ MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, delete_fn, &exit_keys[i], NULL);
+ /* attach to comm_self */
+ MPI_Comm_set_attr(MPI_COMM_SELF, exit_keys[i], (void*)(long)i);
+ }
+
+ /* we can free the keys now */
+ for (i = 0; i < NUM_TEST_ATTRS; ++i) {
+ MPI_Comm_free_keyval(&exit_keys[i]);
+ }
+
+ /* now, exit MPI */
+ MPI_Finalize();
+
+ /* check that the exit handlers were called in LIFO order, and without error */
+ if (wrank == 0) {
+ /* In case more than one process exits MPI_Finalize */
+ for (i = 0; i < NUM_TEST_ATTRS; ++i) {
+ if (was_called[i] < 1) {
+ errs++;
+ printf("Attribute delete function on MPI_COMM_SELF was not called for idx=%d\n", i);
+ }
+ else if (was_called[i] > 1) {
+ errs++;
+ printf("Attribute delete function on MPI_COMM_SELF was called multiple times for idx=%d\n", i);
+ }
+ }
+ if (foundError != 0) {
+ errs++;
+ printf("Found %d errors while executing delete function in MPI_COMM_SELF\n", foundError);
+ }
+ if (errs == 0) {
+ printf(" No Errors\n");
+ }
+ else {
+ printf(" Found %d errors\n", errs);
+ }
+ fflush(stdout);
+ }
+#else /* this is a pre-MPI-2.2 implementation, ordering is not defined */
+ MPI_Finalize();
+ if (wrank == 0)
+ printf(" No Errors\n");
+#endif
+
+ return 0;
+}
+
+int delete_fn(MPI_Comm comm, int keyval, void *attribute_val, void *extra_state)
+{
+ int flag;
+ int i;
+ int my_idx = (int)(long)attribute_val;
+
+ if (my_idx < 0 || my_idx > NUM_TEST_ATTRS) {
+ printf("internal error, my_idx=%d is invalid!\n", my_idx);
+ fflush(stdout);
+ }
+
+ was_called[my_idx]++;
+
+ MPI_Finalized(&flag);
+ if (flag) {
+ printf("my_idx=%d, MPI_Finalized returned %d, should have been 0", my_idx, flag);
+ foundError++;
+ }
+
+ /* since attributes were added in 0..(NUM_TEST_ATTRS-1) order, they will be
+ * called in (NUM_TEST_ATTRS-1)..0 order */
+ for (i = 0; i < my_idx; ++i) {
+ if (was_called[i] != 0) {
+ printf("my_idx=%d, was_called[%d]=%d but should be 0\n", my_idx, i, was_called[i]);
+ foundError++;
+ }
+ }
+ for (i = my_idx; i < NUM_TEST_ATTRS; ++i) {
+ if (was_called[i] != 1) {
+ printf("my_idx=%d, was_called[%d]=%d but should be 1\n", my_idx, i, was_called[i]);
+ foundError++;
+ }
+ }
+
+ return MPI_SUCCESS;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise attribute routines.
+ This version checks for correct behavior of the copy and delete functions
+ on an attribute, particularly the correct behavior when the routine returns
+ failure.
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int test_communicators ( void );
+void abort_msg ( const char *, int );
+int copybomb_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int deletebomb_fn ( MPI_Comm, int, void *, void * );
+
+int main( int argc, char **argv )
+{
+ int errs;
+ MTest_Init( &argc, &argv );
+ errs = test_communicators();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/*
+ * MPI 1.2 Clarification: Clarification of Error Behavior of
+ * Attribute Callback Functions
+ * Any return value other than MPI_SUCCESS is erroneous. The specific value
+ * returned to the user is undefined (other than it can't be MPI_SUCCESS).
+ * Proposals to specify particular values (e.g., user's value) failed.
+ */
+/* Return an error as the value */
+int copybomb_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *flag = 1;
+ return MPI_ERR_OTHER;
+}
+
+/* Set delete flag to 1 to allow the attribute to be deleted */
+static int delete_flag = 0;
+
+int deletebomb_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ if (delete_flag) return MPI_SUCCESS;
+ return MPI_ERR_OTHER;
+}
+
+void abort_msg( const char *str, int code )
+{
+ fprintf( stderr, "%s, err = %d\n", str, code );
+ MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+int test_communicators( void )
+{
+ MPI_Comm dup_comm_world, d2;
+ int world_rank, world_size, key_1;
+ int err, errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Attribute copy/delete return codes ***\n" );
+ }
+#endif
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
+ MPI_Barrier( dup_comm_world );
+
+ MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN );
+
+ value = - 11;
+ if ((err=MPI_Keyval_create( copybomb_fn, deletebomb_fn, &key_1, &value )))
+ abort_msg( "Keyval_create", err );
+
+ err = MPI_Attr_put( dup_comm_world, key_1, (void *) (MPI_Aint) world_rank );
+ if (err) {
+ errs++;
+ printf( "Error with first put\n" );
+ }
+
+ err = MPI_Attr_put( dup_comm_world, key_1,
+ (void *) (MPI_Aint) (2*world_rank) );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in put\n" );
+ }
+
+ /* Because the attribute delete function should fail, the attribute
+ should *not be removed* */
+ err = MPI_Attr_delete( dup_comm_world, key_1 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in delete\n" );
+ }
+
+ err = MPI_Comm_dup( dup_comm_world, &d2 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "copy function return code was MPI_SUCCESS in dup\n" );
+ }
+#ifndef USE_STRICT_MPI
+ /* Another interpretation is to leave d2 unchanged on error */
+ if (err && d2 != MPI_COMM_NULL) {
+ errs++;
+ printf( "dup did not return MPI_COMM_NULL on error\n" );
+ }
+#endif
+
+ delete_flag = 1;
+ MPI_Comm_free( &dup_comm_world );
+ MPI_Keyval_free( &key_1 );
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise attribute routines.
+ This version checks for correct behavior of the copy and delete functions
+ on an attribute, particularly the correct behavior when the routine returns
+ failure.
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int test_communicators ( void );
+void abort_msg ( const char *, int );
+int copybomb_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int deletebomb_fn ( MPI_Comm, int, void *, void * );
+
+int main( int argc, char **argv )
+{
+ int errs;
+ MTest_Init( &argc, &argv );
+ errs = test_communicators();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/*
+ * MPI 1.2 Clarification: Clarification of Error Behavior of
+ * Attribute Callback Functions
+ * Any return value other than MPI_SUCCESS is erroneous. The specific value
+ * returned to the user is undefined (other than it can't be MPI_SUCCESS).
+ * Proposals to specify particular values (e.g., user's value) failed.
+ */
+/* Return an error as the value */
+int copybomb_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *flag = 1;
+ return MPI_ERR_OTHER;
+}
+
+/* Set delete flag to 1 to allow the attribute to be deleted */
+static int delete_flag = 0;
+
+int deletebomb_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ if (delete_flag) return MPI_SUCCESS;
+ return MPI_ERR_OTHER;
+}
+
+void abort_msg( const char *str, int code )
+{
+ fprintf( stderr, "%s, err = %d\n", str, code );
+ MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+int test_communicators( void )
+{
+ MPI_Comm dup_comm_world, d2;
+ int world_rank, world_size, key_1;
+ int err, errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Attribute copy/delete return codes ***\n" );
+ }
+#endif
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
+ MPI_Barrier( dup_comm_world );
+
+ MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN );
+
+ value = - 11;
+ if ((err=MPI_Comm_create_keyval( copybomb_fn, deletebomb_fn, &key_1, &value )))
+ abort_msg( "Keyval_create", err );
+
+ err = MPI_Comm_set_attr( dup_comm_world, key_1, (void *) (MPI_Aint) world_rank );
+ if (err) {
+ errs++;
+ printf( "Error with first put\n" );
+ }
+
+ err = MPI_Comm_set_attr( dup_comm_world, key_1, (void *) (MPI_Aint) (2*world_rank) );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in put\n" );
+ }
+
+ /* Because the attribute delete function should fail, the attribute
+ should *not be removed* */
+ err = MPI_Comm_delete_attr( dup_comm_world, key_1 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in delete\n" );
+ }
+
+ err = MPI_Comm_dup( dup_comm_world, &d2 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "copy function return code was MPI_SUCCESS in dup\n" );
+ }
+ if (err != MPI_ERR_OTHER) {
+ int lerrclass;
+ MPI_Error_class( err, &lerrclass );
+ if (lerrclass != MPI_ERR_OTHER) {
+ errs++;
+ printf( "dup did not return an error code of class ERR_OTHER; " );
+ printf( "err = %d, class = %d\n", err, lerrclass );
+ }
+ }
+#ifndef USE_STRICT_MPI
+ /* Another interpretation is to leave d2 unchanged on error */
+ if (err && d2 != MPI_COMM_NULL) {
+ errs++;
+ printf( "dup did not return MPI_COMM_NULL on error\n" );
+ }
+#endif
+
+ delete_flag = 1;
+ MPI_Comm_free( &dup_comm_world );
+
+ MPI_Comm_free_keyval( &key_1 );
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise attribute routines.
+ This version checks for correct behavior of the copy and delete functions
+ on an attribute, particularly the correct behavior when the routine returns
+ failure.
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int test_attrs ( void );
+void abort_msg ( const char *, int );
+int copybomb_fn ( MPI_Datatype, int, void *, void *, void *, int * );
+int deletebomb_fn ( MPI_Datatype, int, void *, void * );
+
+int main( int argc, char **argv )
+{
+ int errs;
+ MTest_Init( &argc, &argv );
+ errs = test_attrs();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/*
+ * MPI 1.2 Clarification: Clarification of Error Behavior of
+ * Attribute Callback Functions
+ * Any return value other than MPI_SUCCESS is erroneous. The specific value
+ * returned to the user is undefined (other than it can't be MPI_SUCCESS).
+ * Proposals to specify particular values (e.g., user's value) failed.
+ */
+/* Return an error as the value */
+int copybomb_fn( MPI_Datatype oldtype, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *flag = 1;
+ return MPI_ERR_OTHER;
+}
+
+/* Set delete flag to 1 to allow the attribute to be deleted */
+static int delete_flag = 0;
+static int deleteCalled = 0;
+
+int deletebomb_fn( MPI_Datatype type, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ deleteCalled ++;
+ if (delete_flag) return MPI_SUCCESS;
+ return MPI_ERR_OTHER;
+}
+
+void abort_msg( const char *str, int code )
+{
+ fprintf( stderr, "%s, err = %d\n", str, code );
+ MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+int test_attrs( void )
+{
+ MPI_Datatype dup_type, d2;
+ int world_rank, world_size, key_1;
+ int err, errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Attribute copy/delete return codes ***\n" );
+ }
+#endif
+
+
+ MPI_Type_dup( MPI_DOUBLE, &dup_type );
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ value = - 11;
+ if ((err=MPI_Type_create_keyval( copybomb_fn, deletebomb_fn, &key_1, &value )))
+ abort_msg( "Keyval_create", err );
+
+ err = MPI_Type_set_attr( dup_type, key_1, (void *) (MPI_Aint) world_rank );
+ if (err) {
+ errs++;
+ printf( "Error with first put\n" );
+ }
+
+ err = MPI_Type_set_attr( dup_type, key_1, (void *) (MPI_Aint) (2*world_rank) );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in put\n" );
+ }
+
+ /* Because the attribute delete function should fail, the attribute
+ should *not be removed* */
+ err = MPI_Type_delete_attr( dup_type, key_1 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in delete\n" );
+ }
+
+ err = MPI_Type_dup( dup_type, &d2 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "copy function return code was MPI_SUCCESS in dup\n" );
+ }
+#ifndef USE_STRICT_MPI
+ /* Another interpretation is to leave d2 unchanged on error */
+ if (err && d2 != MPI_DATATYPE_NULL) {
+ errs++;
+ printf( "dup did not return MPI_DATATYPE_NULL on error\n" );
+ }
+#endif
+
+ delete_flag = 1;
+ deleteCalled = 0;
+ if (d2 != MPI_DATATYPE_NULL)
+ MPI_Type_free(&d2);
+ MPI_Type_free( &dup_type );
+ if (deleteCalled == 0) {
+ errs++;
+ printf( "Free of a datatype did not invoke the attribute delete routine\n" );
+ }
+ MPI_Type_free_keyval( &key_1 );
+
+ return errs;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise communicator routines for intercommunicators
+
+ This C version derived from attrt, which in turn was
+ derived from a Fortran test program from ...
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* #define DEBUG */
+int test_communicators ( void );
+int copy_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int delete_fn ( MPI_Comm, int, void *, void * );
+#ifdef DEBUG
+#define FFLUSH fflush(stdout);
+#else
+#define FFLUSH
+#endif
+
+int main( int argc, char **argv )
+{
+ int errs = 0;
+ MTest_Init( &argc, &argv );
+
+ errs = test_communicators();
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *(MPI_Aint *)attribute_val_out = (MPI_Aint)attribute_val_in;
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ int world_rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ if ((MPI_Aint)attribute_val != (MPI_Aint)world_rank) {
+ printf( "incorrect attribute value %d\n", *(int*)attribute_val );
+ MPI_Abort(MPI_COMM_WORLD, 1005 );
+ }
+ return MPI_SUCCESS;
+}
+
+int test_communicators( void )
+{
+ MPI_Comm dup_comm, comm;
+ void *vvalue;
+ int flag, world_rank, world_size, key_1, key_3;
+ int errs = 0;
+ MPI_Aint value;
+ int isLeft;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Communicators ***\n" ); fflush(stdout);
+ }
+#endif
+
+ while (MTestGetIntercomm( &comm, &isLeft, 2 )) {
+ MTestPrintfMsg(1, "start while loop, isLeft=%s\n", (isLeft ? "TRUE" : "FALSE"));
+
+ if (comm == MPI_COMM_NULL) {
+ MTestPrintfMsg(1, "got COMM_NULL, skipping\n");
+ continue;
+ }
+
+ /*
+ Check Comm_dup by adding attributes to comm & duplicating
+ */
+
+ value = 9;
+ MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value );
+ MTestPrintfMsg(1, "Keyval_create key=%#x value=%d\n", key_1, value);
+ value = 7;
+ MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key_3, &value );
+ MTestPrintfMsg(1, "Keyval_create key=%#x value=%d\n", key_3, value);
+
+ /* This may generate a compilation warning; it is, however, an
+ easy way to cache a value instead of a pointer */
+ /* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
+ MPI_Attr_put(comm, key_1, (void *) (MPI_Aint) world_rank );
+ MPI_Attr_put(comm, key_3, (void *)0 );
+
+ MTestPrintfMsg(1, "Comm_dup\n" );
+ MPI_Comm_dup(comm, &dup_comm );
+
+ /* Note that if sizeof(int) < sizeof(void *), we can't use
+ (void **)&value to get the value we passed into Attr_put. To avoid
+ problems (e.g., alignment errors), we recover the value into
+ a (void *) and cast to int. Note that this may generate warning
+ messages from the compiler. */
+ MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+
+ if (! flag) {
+ errs++;
+ printf( "dup_comm key_1 not found on %d\n", world_rank );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3004 );
+ }
+
+ if (value != world_rank) {
+ errs++;
+ printf( "dup_comm key_1 value incorrect: %ld\n", (long)value );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3005 );
+ }
+
+ MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+ if (flag) {
+ errs++;
+ printf( "dup_comm key_3 found!\n" );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3008 );
+ }
+ MTestPrintfMsg(1, "Keyval_free key=%#x\n", key_1);
+ MPI_Keyval_free(&key_1 );
+ MTestPrintfMsg(1, "Keyval_free key=%#x\n", key_3);
+ MPI_Keyval_free(&key_3 );
+ /*
+ Free all communicators created
+ */
+ MTestPrintfMsg(1, "Comm_free comm\n");
+ MPI_Comm_free( &comm );
+ MTestPrintfMsg(1, "Comm_free dup_comm\n");
+ MPI_Comm_free( &dup_comm );
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] );
+int checkNoAttrs( MPI_Comm comm, int n, int key[] );
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int key[3], attrval[3];
+ int i;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ {
+ comm = MPI_COMM_WORLD;
+ /* Create key values */
+ for (i=0; i<3; i++) {
+ MPI_Keyval_create( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ attrval[i] = 1024 * i;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Attr_put( comm, key[2], &attrval[2] );
+ MPI_Attr_put( comm, key[1], &attrval[1] );
+ MPI_Attr_put( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Attr_delete( comm, key[0] );
+ MPI_Attr_delete( comm, key[1] );
+ MPI_Attr_delete( comm, key[2] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Attr_put( comm, key[1], &attrval[1] );
+ MPI_Attr_put( comm, key[2], &attrval[2] );
+ MPI_Attr_put( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Attr_delete( comm, key[2] );
+ MPI_Attr_delete( comm, key[1] );
+ MPI_Attr_delete( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Attr_put( comm, key[0], &attrval[0] );
+ MPI_Attr_put( comm, key[1], &attrval[1] );
+ MPI_Attr_put( comm, key[2], &attrval[2] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Attr_delete( comm, key[1] );
+ MPI_Attr_delete( comm, key[2] );
+ MPI_Attr_delete( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ for (i=0; i<3; i++) {
+ MPI_Keyval_free( &key[i] );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
+
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Attr_get( comm, key[i], &val_p, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ errs++;
+ fprintf( stderr, "Atribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return errs;
+}
+
+int checkNoAttrs( MPI_Comm comm, int n, int key[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Attr_get( comm, key[i], &val_p, &flag );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] );
+int checkNoAttrs( MPI_Comm comm, int n, int key[] );
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int key[3], attrval[3];
+ int i;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ {
+ comm = MPI_COMM_WORLD;
+ /* Create key values */
+ for (i=0; i<3; i++) {
+ MPI_Comm_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ attrval[i] = 1024 * i;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] );
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] );
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Comm_delete_attr( comm, key[0] );
+ MPI_Comm_delete_attr( comm, key[1] );
+ MPI_Comm_delete_attr( comm, key[2] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] );
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] );
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Comm_delete_attr( comm, key[2] );
+ MPI_Comm_delete_attr( comm, key[1] );
+ MPI_Comm_delete_attr( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] );
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] );
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Comm_delete_attr( comm, key[1] );
+ MPI_Comm_delete_attr( comm, key[2] );
+ MPI_Comm_delete_attr( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ for (i=0; i<3; i++) {
+ MPI_Comm_free_keyval( &key[i] );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
+
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Comm_get_attr( comm, key[i], &val_p, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ errs++;
+ fprintf( stderr, "Atribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return errs;
+}
+
+int checkNoAttrs( MPI_Comm comm, int n, int key[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Comm_get_attr( comm, key[i], &val_p, &flag );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Datatype type, int n, int key[], int attrval[] );
+int checkNoAttrs( MPI_Datatype type, int n, int key[] );
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int key[3], attrval[3];
+ int i;
+ MPI_Datatype type;
+
+ MTest_Init( &argc, &argv );
+
+ {
+ type = MPI_INT;
+ /* Create key values */
+ for (i=0; i<3; i++) {
+ MPI_Type_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ attrval[i] = 1024 * i;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Type_set_attr( type, key[2], &attrval[2] );
+ MPI_Type_set_attr( type, key[1], &attrval[1] );
+ MPI_Type_set_attr( type, key[0], &attrval[0] );
+
+ errs += checkAttrs( type, 3, key, attrval );
+
+ MPI_Type_delete_attr( type, key[0] );
+ MPI_Type_delete_attr( type, key[1] );
+ MPI_Type_delete_attr( type, key[2] );
+
+ errs += checkNoAttrs( type, 3, key );
+
+ MPI_Type_set_attr( type, key[1], &attrval[1] );
+ MPI_Type_set_attr( type, key[2], &attrval[2] );
+ MPI_Type_set_attr( type, key[0], &attrval[0] );
+
+ errs += checkAttrs( type, 3, key, attrval );
+
+ MPI_Type_delete_attr( type, key[2] );
+ MPI_Type_delete_attr( type, key[1] );
+ MPI_Type_delete_attr( type, key[0] );
+
+ errs += checkNoAttrs( type, 3, key );
+
+ MPI_Type_set_attr( type, key[0], &attrval[0] );
+ MPI_Type_set_attr( type, key[1], &attrval[1] );
+ MPI_Type_set_attr( type, key[2], &attrval[2] );
+
+ errs += checkAttrs( type, 3, key, attrval );
+
+ MPI_Type_delete_attr( type, key[1] );
+ MPI_Type_delete_attr( type, key[2] );
+ MPI_Type_delete_attr( type, key[0] );
+
+ errs += checkNoAttrs( type, 3, key );
+
+ for (i=0; i<3; i++) {
+ MPI_Type_free_keyval( &key[i] );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
+
+int checkAttrs( MPI_Datatype type, int n, int key[], int attrval[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Type_get_attr( type, key[i], &val_p, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ errs++;
+ fprintf( stderr, "Atribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return errs;
+}
+
+int checkNoAttrs( MPI_Datatype type, int n, int key[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Type_get_attr( type, key[i], &val_p, &flag );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise communicator routines.
+
+ This C version derived from a Fortran test program from ....
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+//#define DEBUG
+int test_communicators ( void );
+int copy_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int delete_fn ( MPI_Comm, int, void *, void * );
+#ifdef DEBUG
+#define FFLUSH fflush(stdout);
+#else
+#define FFLUSH
+#endif
+
+int main( int argc, char **argv )
+{
+ int errs = 0;
+ MTest_Init( &argc, &argv );
+
+ errs = test_communicators();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *(MPI_Aint *)attribute_val_out = (MPI_Aint)attribute_val_in;
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ int world_rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ if ((MPI_Aint)attribute_val != (MPI_Aint)world_rank) {
+ printf( "incorrect attribute value %d\n", *(int*)attribute_val );
+ MPI_Abort(MPI_COMM_WORLD, 1005 );
+ }
+ return MPI_SUCCESS;
+}
+
+int test_communicators( void )
+{
+ MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm,
+ split_comm, world_comm;
+ MPI_Group world_group, lo_group, rev_group;
+ void *vvalue;
+ int ranges[1][3];
+ int flag, world_rank, world_size, rank, size, n, key_1, key_3;
+ int color, key, result;
+ int errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Communicators ***\n" ); fflush(stdout);
+ }
+#endif
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
+
+ /*
+ Exercise Comm_create by creating an equivalent to dup_comm_world
+ (sans attributes) and a half-world communicator.
+ */
+
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_create\n" ); fflush(stdout);
+ }
+#endif
+
+ MPI_Comm_group( dup_comm_world, &world_group );
+ MPI_Comm_create( dup_comm_world, world_group, &world_comm );
+ MPI_Comm_rank( world_comm, &rank );
+ if (rank != world_rank) {
+ errs++;
+ printf( "incorrect rank in world comm: %d\n", rank );
+ MPI_Abort(MPI_COMM_WORLD, 3001 );
+ }
+
+ n = world_size / 2;
+
+ ranges[0][0] = 0;
+ ranges[0][1] = (world_size - n) - 1;
+ ranges[0][2] = 1;
+
+#ifdef DEBUG
+ printf( "world rank = %d before range incl\n", world_rank );FFLUSH;
+#endif
+ MPI_Group_range_incl(world_group, 1, ranges, &lo_group );
+#ifdef DEBUG
+ printf( "world rank = %d after range incl\n", world_rank );FFLUSH;
+#endif
+ MPI_Comm_create(world_comm, lo_group, &lo_comm );
+#ifdef DEBUG
+ printf( "world rank = %d before group free\n", world_rank );FFLUSH;
+#endif
+ MPI_Group_free( &lo_group );
+
+#ifdef DEBUG
+ printf( "world rank = %d after group free\n", world_rank );FFLUSH;
+#endif
+
+ if (world_rank < (world_size - n)) {
+ MPI_Comm_rank(lo_comm, &rank );
+ if (rank == MPI_UNDEFINED) {
+ errs++;
+ printf( "incorrect lo group rank: %d\n", rank ); fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, 3002 );
+ }
+ else {
+ /* printf( "lo in\n" );FFLUSH; */
+ MPI_Barrier(lo_comm );
+ /* printf( "lo out\n" );FFLUSH; */
+ }
+ }
+ else {
+ if (lo_comm != MPI_COMM_NULL) {
+ errs++;
+ printf( "rank : %d incorrect lo comm:\n", rank ); fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, 3003 );
+ }
+ }
+
+#ifdef DEBUG
+ printf( "worldrank = %d\n", world_rank );FFLUSH;
+#endif
+ MPI_Barrier(world_comm);
+
+#ifdef DEBUG
+ printf( "bar!\n" );FFLUSH;
+#endif
+ /*
+ Check Comm_dup by adding attributes to lo_comm & duplicating
+ */
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_dup\n" );
+ fflush(stdout);
+ }
+#endif
+
+ if (lo_comm != MPI_COMM_NULL) {
+ value = 9;
+ MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value );
+ value = 8;
+ value = 7;
+ MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key_3, &value );
+
+ /* This may generate a compilation warning; it is, however, an
+ easy way to cache a value instead of a pointer */
+ /* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
+ MPI_Attr_put(lo_comm, key_1, (void *) (MPI_Aint) world_rank );
+ MPI_Attr_put(lo_comm, key_3, (void *)0 );
+
+ MPI_Comm_dup(lo_comm, &dup_comm );
+
+ /* Note that if sizeof(int) < sizeof(void *), we can't use
+ (void **)&value to get the value we passed into Attr_put. To avoid
+ problems (e.g., alignment errors), we recover the value into
+ a (void *) and cast to int. Note that this may generate warning
+ messages from the compiler. */
+ MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+
+ if (! flag) {
+ errs++;
+ printf( "dup_comm key_1 not found on %d\n", world_rank );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3004 );
+ }
+
+ if (value != world_rank) {
+ errs++;
+ printf( "dup_comm key_1 value incorrect: %ld, expected %d\n",
+ (long)value, world_rank );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3005 );
+ }
+
+ MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+ if (flag) {
+ errs++;
+ printf( "dup_comm key_3 found!\n" );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3008 );
+ }
+ MPI_Keyval_free(&key_1 );
+ MPI_Keyval_free(&key_3 );
+ }
+ /*
+ Split the world into even & odd communicators with reversed ranks.
+ */
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_split\n" );
+ fflush(stdout);
+ }
+#endif
+
+ color = world_rank % 2;
+ key = world_size - world_rank;
+
+ MPI_Comm_split(dup_comm_world, color, key, &split_comm );
+ MPI_Comm_size(split_comm, &size );
+ MPI_Comm_rank(split_comm, &rank );
+ if (rank != ((size - world_rank/2) - 1)) {
+ errs++;
+ printf( "incorrect split rank: %d\n", rank ); fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, 3009 );
+ }
+
+ MPI_Barrier(split_comm );
+ /*
+ Test each possible Comm_compare result
+ */
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_compare\n" );
+ fflush(stdout);
+ }
+#endif
+
+ MPI_Comm_compare(world_comm, world_comm, &result );
+ if (result != MPI_IDENT) {
+ errs++;
+ printf( "incorrect ident result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3010 );
+ }
+
+ if (lo_comm != MPI_COMM_NULL) {
+ MPI_Comm_compare(lo_comm, dup_comm, &result );
+ if (result != MPI_CONGRUENT) {
+ errs++;
+ printf( "incorrect congruent result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3011 );
+ }
+ }
+
+ ranges[0][0] = world_size - 1;
+ ranges[0][1] = 0;
+ ranges[0][2] = -1;
+
+ MPI_Group_range_incl(world_group, 1, ranges, &rev_group );
+ MPI_Comm_create(world_comm, rev_group, &rev_comm );
+
+ MPI_Comm_compare(world_comm, rev_comm, &result );
+ if (result != MPI_SIMILAR && world_size != 1) {
+ errs++;
+ printf( "incorrect similar result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3012 );
+ }
+
+ if (lo_comm != MPI_COMM_NULL) {
+ MPI_Comm_compare(world_comm, lo_comm, &result );
+ if (result != MPI_UNEQUAL && world_size != 1) {
+ errs++;
+ printf( "incorrect unequal result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3013 );
+ }
+ }
+ /*
+ Free all communicators created
+ */
+#ifdef DEBUG
+ if (world_rank == 0)
+ printf( " Comm_free\n" );
+#endif
+
+ MPI_Comm_free( &world_comm );
+ MPI_Comm_free( &dup_comm_world );
+
+ MPI_Comm_free( &rev_comm );
+ MPI_Comm_free( &split_comm );
+
+ MPI_Group_free( &world_group );
+ MPI_Group_free( &rev_group );
+
+ if (lo_comm != MPI_COMM_NULL) {
+ MPI_Comm_free( &lo_comm );
+ MPI_Comm_free( &dup_comm );
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+void MissingKeyval( int rc, const char keyname[] );
+
+int main( int argc, char **argv)
+{
+ int errs = 0;
+ int rc;
+ void *v;
+ int flag;
+ int vval;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the attribute values */
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_TAG_UB" );
+ errs++;
+ }
+ else {
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get TAG_UB\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if (vval < 32767) {
+ errs++;
+ fprintf( stderr, "Got too-small value (%d) for TAG_UB\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_HOST, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_HOST" );
+ errs++;
+ }
+ else {
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get HOST\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for HOST\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_IO, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_IO" );
+ errs++;
+ }
+ else {
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get IO\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_ANY_SOURCE &&
+ vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for IO\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_WTIME_IS_GLOBAL" );
+ errs++;
+ }
+ else {
+ if (flag) {
+ /* Wtime need not be set */
+ vval = *(int*)v;
+ if (vval < 0 || vval > 1) {
+ errs++;
+ fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n",
+ vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_APPNUM, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_APPNUM" );
+ errs++;
+ }
+ else {
+ /* appnum need not be set */
+ if (flag) {
+ vval = *(int *)v;
+ if (vval < 0) {
+ errs++;
+ fprintf( stderr, "MPI_APPNUM is defined as %d but must be nonnegative\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_UNIVERSE_SIZE" );
+ errs++;
+ }
+ else {
+ /* MPI_UNIVERSE_SIZE need not be set */
+ if (flag) {
+ vval = *(int *)v;
+ if (vval < size) {
+ errs++;
+ fprintf( stderr, "MPI_UNIVERSE_SIZE = %d, less than comm world (%d)\n", vval, size );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_LASTUSEDCODE, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_LASTUSEDCODE" );
+ errs++;
+ }
+ else {
+ /* Last used code must be defined and >= MPI_ERR_LASTCODE */
+ if (flag) {
+ vval = *(int*)v;
+ if (vval < MPI_ERR_LASTCODE) {
+ errs++;
+ fprintf( stderr, "MPI_LASTUSEDCODE points to an integer (%d) smaller than MPI_ERR_LASTCODE (%d)\n", vval, MPI_ERR_LASTCODE );
+ }
+ }
+ else {
+ errs++;
+ fprintf( stderr, "MPI_LASTUSECODE is not defined\n" );
+ }
+ }
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+
+ MTest_Finalize( errs );
+ MPI_Finalize( );
+
+ return 0;
+}
+
+void MissingKeyval( int errcode, const char keyname[] )
+{
+ int errclass, slen;
+ char string[MPI_MAX_ERROR_STRING];
+
+ MPI_Error_class( errcode, &errclass );
+ MPI_Error_string( errcode, string, &slen );
+ printf( "For key %s: Error class %d (%s)\n", keyname, errclass, string );
+ fflush( stdout );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int main( int argc, char **argv)
+{
+ int errs = 0;
+ void *v;
+ int flag;
+ int vval;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_TAG_UB, &v, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get TAG_UB\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if (vval < 32767) {
+ errs++;
+ fprintf( stderr, "Got too-small value (%d) for TAG_UB\n", vval );
+ }
+ }
+
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_HOST, &v, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get HOST\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for HOST\n", vval );
+ }
+ }
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_IO, &v, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get IO\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_ANY_SOURCE &&
+ vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for IO\n", vval );
+ }
+ }
+
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &v, &flag );
+ if (flag) {
+ /* Wtime need not be set */
+ vval = *(int*)v;
+ if (vval < 0 || vval > 1) {
+ errs++;
+ fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n",
+ vval );
+ }
+ }
+
+ /* MPI 2.0, section 5.5.3 - MPI_APPNUM should be set if the program is
+ started with more than one executable name (e.g., in MPMD instead
+ of SPMD mode). This is independent of the dynamic process routines,
+ and should be supported even if MPI_COMM_SPAWN and friends are not. */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_APPNUM, &v, &flag );
+ /* appnum need not be set */
+ if (flag) {
+ vval = *(int *)v;
+ if (vval < 0) {
+ errs++;
+ fprintf( stderr, "MPI_APPNUM is defined as %d but must be nonnegative\n", vval );
+ }
+ }
+
+ /* MPI 2.0 section 5.5.1. MPI_UNIVERSE_SIZE need not be set, but
+ should be present. */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag );
+ /* MPI_UNIVERSE_SIZE need not be set */
+ if (flag) {
+ /* But if it is set, it must be at least the size of comm_world */
+ vval = *(int *)v;
+ if (vval < size) {
+ errs++;
+ fprintf( stderr, "MPI_UNIVERSE_SIZE = %d, less than comm world (%d)\n", vval, size );
+ }
+ }
+
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_LASTUSEDCODE, &v, &flag );
+ /* Last used code must be defined and >= MPI_ERR_LASTCODE */
+ if (flag) {
+ vval = *(int*)v;
+ if (vval < MPI_ERR_LASTCODE) {
+ errs++;
+ fprintf( stderr, "MPI_LASTUSEDCODE points to an integer (%d) smaller than MPI_ERR_LASTCODE (%d)\n", vval, MPI_ERR_LASTCODE );
+ }
+ }
+ else {
+ errs++;
+ fprintf( stderr, "MPI_LASTUSECODE is not defined\n" );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize( );
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test freeing keyvals while still attached to \
+a communicator, then make sure that the keyval delete and copy code are still \
+executed";
+*/
+
+/* Function prototypes to keep compilers happy */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag);
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state);
+
+/* Copy increments the attribute value */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag)
+{
+ /* Copy the address of the attribute */
+ *(void **)attribute_val_out = attribute_val_in;
+ /* Change the value */
+ *(int *)attribute_val_in = *(int *)attribute_val_in + 1;
+ /* set flag to 1 to tell comm dup to insert this attribute
+ into the new communicator */
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Delete decrements the attribute value */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ *(int *)attribute_val = *(int *)attribute_val - 1;
+ return MPI_SUCCESS;
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int attrval;
+ int i, key[32], keyval, saveKeyval;
+ MPI_Comm comm, dupcomm;
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracomm( &comm, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Keyval_create( copy_fn, delete_fn, &keyval, (void *)0 );
+ saveKeyval = keyval; /* in case we need to free explicitly */
+ attrval = 1;
+ MPI_Attr_put( comm, keyval, (void*)&attrval );
+ /* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
+ is in use in an attribute */
+ MPI_Keyval_free( &keyval );
+
+ /* We create some dummy keyvals here in case the same keyval
+ is reused */
+ for (i=0; i<32; i++) {
+ MPI_Keyval_create( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ }
+
+ MPI_Comm_dup( comm, &dupcomm );
+ /* Check that the attribute was copied */
+ if (attrval != 2) {
+ errs++;
+ printf( "Attribute not incremented when comm dup'ed (%s)\n",
+ MTestGetIntracommName() );
+ }
+ MPI_Comm_free( &dupcomm );
+ if (attrval != 1) {
+ errs++;
+ printf( "Attribute not decremented when dupcomm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ /* Check that the attribute was freed in the dupcomm */
+
+ if (comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF) {
+ MPI_Comm_free( &comm );
+ /* Check that the original attribute was freed */
+ if (attrval != 0) {
+ errs++;
+ printf( "Attribute not decremented when comm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ }
+ else {
+ /* Explicitly delete the attributes from world and self */
+ MPI_Attr_delete( comm, saveKeyval );
+ }
+ /* Free those other keyvals */
+ for (i=0; i<32; i++) {
+ MPI_Keyval_free( &key[i] );
+ }
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ /* The attributes on comm self and world were deleted by finalize
+ (see separate test) */
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test freeing keyvals while still attached to \
+a communicator, then make sure that the keyval delete and copy code are still \
+executed";
+*/
+
+/* Function prototypes to keep compilers happy */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag);
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state);
+
+/* Copy increments the attribute value */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag)
+{
+ /* Copy the address of the attribute */
+ *(void **)attribute_val_out = attribute_val_in;
+ /* Change the value */
+ *(int *)attribute_val_in = *(int *)attribute_val_in + 1;
+ /* set flag to 1 to tell comm dup to insert this attribute
+ into the new communicator */
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Delete decrements the attribute value */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ *(int *)attribute_val = *(int *)attribute_val - 1;
+ return MPI_SUCCESS;
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int attrval;
+ int i, key[32], keyval, saveKeyval;
+ MPI_Comm comm, dupcomm;
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracomm( &comm, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_create_keyval( copy_fn, delete_fn, &keyval, (void *)0 );
+ saveKeyval = keyval; /* in case we need to free explicitly */
+ attrval = 1;
+ MPI_Comm_set_attr( comm, keyval, (void*)&attrval );
+ /* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
+ is in use in an attribute */
+ MPI_Comm_free_keyval( &keyval );
+
+ /* We create some dummy keyvals here in case the same keyval
+ is reused */
+ for (i=0; i<32; i++) {
+ MPI_Comm_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ }
+
+ MPI_Comm_dup( comm, &dupcomm );
+ /* Check that the attribute was copied */
+ if (attrval != 2) {
+ errs++;
+ printf( "Attribute not incremented when comm dup'ed (%s)\n",
+ MTestGetIntracommName() );
+ }
+ MPI_Comm_free( &dupcomm );
+ if (attrval != 1) {
+ errs++;
+ printf( "Attribute not decremented when dupcomm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ /* Check that the attribute was freed in the dupcomm */
+
+ if (comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF) {
+ MPI_Comm_free( &comm );
+ /* Check that the original attribute was freed */
+ if (attrval != 0) {
+ errs++;
+ printf( "Attribute not decremented when comm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ }
+ else {
+ /* Explicitly delete the attributes from world and self */
+ MPI_Comm_delete_attr( comm, saveKeyval );
+ }
+ /* Free those other keyvals */
+ for (i=0; i<32; i++) {
+ MPI_Comm_free_keyval( &key[i] );
+ }
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ /* The attributes on comm self and world were deleted by finalize
+ (see separate test) */
+
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+#include "stdlib.h"
+
+/*
+static char MTestDescrip[] = "Test freeing keyvals while still attached to \
+a datatype, then make sure that the keyval delete and copy code are still \
+executed";
+*/
+
+/* Copy increments the attribute value */
+int copy_fn( MPI_Datatype oldtype, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag);
+int copy_fn( MPI_Datatype oldtype, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag)
+{
+ /* Copy the address of the attribute */
+ *(void **)attribute_val_out = attribute_val_in;
+ /* Change the value */
+ *(int *)attribute_val_in = *(int *)attribute_val_in + 1;
+ /* set flag to 1 to tell comm dup to insert this attribute
+ into the new communicator */
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Delete decrements the attribute value */
+int delete_fn( MPI_Datatype type, int keyval, void *attribute_val,
+ void *extra_state);
+int delete_fn( MPI_Datatype type, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ *(int *)attribute_val = *(int *)attribute_val - 1;
+ return MPI_SUCCESS;
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int attrval;
+ int i, key[32], keyval, saveKeyval;
+ MPI_Datatype type, duptype;
+ MTestDatatype mstype, mrtype;
+ char typename[MPI_MAX_OBJECT_NAME];
+ int tnlen;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetDatatypes( &mstype, &mrtype, 1 )) {
+ type = mstype.datatype;
+ MPI_Type_create_keyval( copy_fn, delete_fn, &keyval, (void *)0 );
+ saveKeyval = keyval; /* in case we need to free explicitly */
+ attrval = 1;
+ MPI_Type_set_attr( type, keyval, (void*)&attrval );
+ /* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
+ is in use in an attribute */
+ MPI_Type_free_keyval( &keyval );
+
+ /* We create some dummy keyvals here in case the same keyval
+ is reused */
+ for (i=0; i<32; i++) {
+ MPI_Type_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ }
+
+ if (attrval != 1) {
+ errs++;
+ MPI_Type_get_name( type, typename, &tnlen );
+ printf( "attrval is %d, should be 1, before dup in type %s\n",
+ attrval, typename );
+ }
+ MPI_Type_dup( type, &duptype );
+ /* Check that the attribute was copied */
+ if (attrval != 2) {
+ errs++;
+ MPI_Type_get_name( type, typename, &tnlen );
+ printf( "Attribute not incremented when type dup'ed (%s)\n",
+ typename );
+ }
+ MPI_Type_free( &duptype );
+ if (attrval != 1) {
+ errs++;
+ MPI_Type_get_name( type, typename, &tnlen );
+ printf( "Attribute not decremented when duptype %s freed\n",
+ typename );
+ }
+ /* Check that the attribute was freed in the duptype */
+
+ if (!mstype.isBasic) {
+ MPI_Type_get_name( type, typename, &tnlen );
+ MTestFreeDatatype(&mstype);
+ /* Check that the original attribute was freed */
+ if (attrval != 0) {
+ errs++;
+ printf( "Attribute not decremented when type %s freed\n",
+ typename );
+ }
+ }
+ else {
+ /* Explicitly delete the attributes from world and self */
+ MPI_Type_delete_attr( type, saveKeyval );
+ if (mstype.buf) {
+ free(mstype.buf);
+ mstype.buf = 0;
+ }
+ }
+ /* Free those other keyvals */
+ for (i=0; i<32; i++) {
+ MPI_Type_free_keyval( &key[i] );
+ }
+ MTestFreeDatatype(&mrtype);
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <mpi.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "mpitest.h"
+
+/* tests multiple invocations of Keyval_free on the same keyval */
+
+int delete_fn(MPI_Comm comm, int keyval, void *attr, void *extra);
+int delete_fn(MPI_Comm comm, int keyval, void *attr, void *extra) {
+ MPI_Keyval_free(&keyval);
+ return MPI_SUCCESS;
+}
+
+int main (int argc, char **argv)
+{
+ MPI_Comm duped;
+ int keyval = MPI_KEYVAL_INVALID;
+ int keyval_copy = MPI_KEYVAL_INVALID;
+ int errs=0;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_dup(MPI_COMM_SELF, &duped);
+
+ MPI_Keyval_create(MPI_NULL_COPY_FN, delete_fn, &keyval, NULL);
+ keyval_copy = keyval;
+
+ MPI_Attr_put(MPI_COMM_SELF, keyval, NULL);
+ MPI_Attr_put(duped, keyval, NULL);
+
+ MPI_Comm_free(&duped); /* first MPI_Keyval_free */
+ MPI_Keyval_free(&keyval); /* second MPI_Keyval_free */
+ MPI_Keyval_free(&keyval_copy); /* third MPI_Keyval_free */
+ MTest_Finalize( errs );
+ MPI_Finalize(); /* fourth MPI_Keyval_free */
+ return 0;
+}
--- /dev/null
+#needs MPI_Keyval_create and MPI_Attr_get
+#attrt 2
+#needs MPI_Intercomm_create
+#attric 4
+#needs MPI_Errhandler_set, MPI_Keyval_create, MPI_Keyval_free, MPI_Attr_put
+#attrerr 1
+#needs MPI_Keyval_create, MPI_Keyval_free, MPI_Attr_put
+#attrend 1
+#attrend 4
+attrend2 1
+attrend2 5
+#needs MPI_Errhandler_set, MPI_Comm_create_keyval, MPI_Comm_free_keyval, MPI_Comm_set_attr, MPI_Comm_delete_attr
+#attrerrcomm 1
+#needs MPI_Errhandler_set, MPI_Type_create_keyval, MPI_Type_dup, MPI_Type_set_attr, MPI_Type_delete_attr
+#attrerrtype 1
+#needs MPI_Type_create_keyval, MPI_Type_dup, MPI_Type_set_attr
+#attr2type 1
+#needs MPI_Keyval_create, MPI_Keyval_free, MPI_Attr_put, MPI_Attr_get, MPI_Attr_delete
+#attrorder 1
+#needs MPI_Comm_create_keyval, MPI_Comm_free_keyval, MPI_Comm_get_attr, MPI_Comm_set_attr, MPI_Comm_delete_attr
+#attrordercomm 1
+#needs MPI_Type_create_keyval, MPI_Type_delete_keyval, MPI_Type_set_attr, MPI_Type_delete_attr
+#attrordertype 1
+#needs MPI_Errhandler_set, MPI_Attr_get
+#baseattr2 1
+#needs MPI_Comm_get_attr
+#baseattrcomm 1
+#MPI_Keyval_create, MPI_Keyval_free for type and comm also
+#fkeyval 1
+#fkeyvalcomm 1
+#fkeyvaltype 1
+#keyval_double_free 1
--- /dev/null
+#! /usr/local/bin/perl
+
+$debug = 1;
+$verbose = 1;
+$ignoreBogusOutput = 0;
+$filePattern = "runtests.*.status";
+
+$testsPassed = 0;
+$testsFailed = 0;
+
+foreach $_ (@ARGV) {
+ if (/^--?ignorebogus/) {
+ $ignoreBogusOutput = 1;
+ }
+ else {
+ print STDERR "checktests [ -ignorebogus ]\n";
+ exit(1);
+ }
+}
+
+open( RESULTS, "ls -1 $filePattern |" ) || die "Cannot list directory using ls -1 $filePattern\n";
+
+while (<RESULTS>) {
+ chop;
+ $statusFile = $_;
+ $resultsFile = $statusFile;
+ $resultsFile =~ s/\.status/.out/;
+
+ if ($resultsFile =~ /runtests\.([0-9]+)\.out/) {
+ $count = $1;
+ }
+ else {
+ $count = -1;
+ print STDERR "Unable to determine test number from $resultsFile!\n";
+ $testsFailed ++;
+ next;
+ }
+ open (SFD, "<$statusFile" );
+ while (<SFD>) {
+ chop;
+ $testStatus = $_;
+ }
+ close (SFD);
+
+ if (-s $resultsFile) {
+ open (RFD, "<$resultsFile");
+ $runLine = <RFD>;
+ $sawNoerrors = 0;
+ # Successful output should contain ONLY the line No Errors
+ while (<RFD>) {
+ chop;
+ $outLine = $_;
+ if ($outLine =~ /^\s+No [Ee]rrors\s*$/) {
+ $sawNoerrors = 1;
+ }
+ else {
+ # To filter out output that may be added to STDOUT
+ # by a badly behaved runtime system, you can either
+ # add a specific filter here (preferred) or set the
+ # -ignorebogus option (considered a workaround)
+ # The following is an example that accepts certain
+ # kinds of output once "No Errors" is seen.
+ if ($sawNoerrors) {
+ if ( /^Application [0-9]+ resources: utime .*/) {
+ last;
+ }
+ }
+ if (!$ignoreBogusOutput) {
+ # Any extraneous output is an error
+ $sawNoerrors = 0;
+ }
+ }
+ }
+ close (RFD);
+ if ($sawNoerrors == 1 && $testStatus == 0) {
+ $testsPassed ++;
+ }
+ else {
+ # Test wrote No Errors but then exited with a non-zero status
+ $testsFailed ++;
+ # Output the errors
+ if ($verbose) {
+ print STDOUT "Test $count failed:\n";
+ print STDOUT "Test status: $testStatus\n";
+ print STDOUT "Test output:\n";
+ system ("cat $resultsFile" );
+ }
+ }
+ }
+ else {
+ print STDERR "No $resultsFile\n" if $debug;
+ $testsFailed ++;
+ }
+}
+
+print "Tests passed: $testsPassed; test failed: $testsFailed\n";
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1 -Wno-error=unused-variable")
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+
+ add_executable(allgather2 allgather2.c ../util/mtest.c)
+ add_executable(allgather3 allgather3.c ../util/mtest.c)
+ add_executable(allgatherv2 allgatherv2.c ../util/mtest.c)
+ add_executable(allgatherv3 allgatherv3.c ../util/mtest.c)
+ add_executable(allgatherv4 allgatherv4.c ../util/mtest.c)
+ add_executable(allred2 allred2.c ../util/mtest.c)
+ add_executable(allred3 allred3.c ../util/mtest.c)
+ add_executable(allred4 allred4.c ../util/mtest.c)
+ add_executable(allred5 allred5.c ../util/mtest.c)
+ add_executable(allred6 allred6.c ../util/mtest.c)
+ add_executable(allred allred.c ../util/mtest.c)
+ add_executable(allredmany allredmany.c ../util/mtest.c)
+ add_executable(alltoall1 alltoall1.c ../util/mtest.c)
+ add_executable(alltoallv0 alltoallv0.c ../util/mtest.c)
+ add_executable(alltoallv alltoallv.c ../util/mtest.c)
+ add_executable(alltoallw1 alltoallw1.c ../util/mtest.c)
+ add_executable(alltoallw2 alltoallw2.c ../util/mtest.c)
+ add_executable(alltoallw_zeros alltoallw_zeros.c ../util/mtest.c)
+ add_executable(bcast2 bcast2.c ../util/mtest.c)
+ add_executable(bcast3 bcast3.c ../util/mtest.c)
+ add_executable(bcasttest bcasttest.c ../util/mtest.c)
+ add_executable(bcastzerotype bcastzerotype.c ../util/mtest.c)
+ add_executable(coll10 coll10.c ../util/mtest.c)
+ add_executable(coll11 coll11.c ../util/mtest.c)
+ add_executable(coll12 coll12.c ../util/mtest.c)
+ add_executable(coll13 coll13.c ../util/mtest.c)
+ add_executable(coll2 coll2.c ../util/mtest.c)
+ add_executable(coll3 coll3.c ../util/mtest.c)
+ add_executable(coll4 coll4.c ../util/mtest.c)
+ add_executable(coll5 coll5.c ../util/mtest.c)
+ add_executable(coll6 coll6.c ../util/mtest.c)
+ add_executable(coll7 coll7.c ../util/mtest.c)
+ add_executable(coll8 coll8.c ../util/mtest.c)
+ add_executable(coll9 coll9.c ../util/mtest.c)
+ add_executable(exscan2 exscan2.c ../util/mtest.c)
+ add_executable(exscan exscan.c ../util/mtest.c)
+ add_executable(gather2 gather2.c ../util/mtest.c)
+ add_executable(gather2_save gather2_save.c ../util/mtest.c)
+ add_executable(gather gather.c ../util/mtest.c)
+ add_executable(iallred iallred.c ../util/mtest.c)
+ add_executable(ibarrier ibarrier.c ../util/mtest.c)
+ add_executable(icallgather icallgather.c ../util/mtest.c)
+ add_executable(icallgatherv icallgatherv.c ../util/mtest.c)
+ add_executable(icallreduce icallreduce.c ../util/mtest.c)
+ add_executable(icalltoall icalltoall.c ../util/mtest.c)
+ add_executable(icalltoallv icalltoallv.c ../util/mtest.c)
+ add_executable(icalltoallw icalltoallw.c ../util/mtest.c)
+ add_executable(icbarrier icbarrier.c ../util/mtest.c)
+ add_executable(icbcast icbcast.c ../util/mtest.c)
+ add_executable(icgather icgather.c ../util/mtest.c)
+ add_executable(icgatherv icgatherv.c ../util/mtest.c)
+ add_executable(icreduce icreduce.c ../util/mtest.c)
+ add_executable(icscatter icscatter.c ../util/mtest.c)
+ add_executable(icscatterv icscatterv.c ../util/mtest.c)
+ add_executable(longuser longuser.c ../util/mtest.c)
+ add_executable(nonblocking2 nonblocking2.c ../util/mtest.c)
+ add_executable(nonblocking3 nonblocking3.c ../util/mtest.c)
+ add_executable(nonblocking nonblocking.c ../util/mtest.c)
+ add_executable(opband opband.c ../util/mtest.c)
+ add_executable(opbor opbor.c ../util/mtest.c)
+ add_executable(opbxor opbxor.c ../util/mtest.c)
+ add_executable(op_commutative op_commutative.c ../util/mtest.c)
+ add_executable(opland opland.c ../util/mtest.c)
+ add_executable(oplor oplor.c ../util/mtest.c)
+ add_executable(oplxor oplxor.c ../util/mtest.c)
+ add_executable(opmax opmax.c ../util/mtest.c)
+ add_executable(opmaxloc opmaxloc.c ../util/mtest.c)
+ add_executable(opmin opmin.c ../util/mtest.c)
+ add_executable(opminloc opminloc.c ../util/mtest.c)
+ add_executable(opprod opprod.c ../util/mtest.c)
+ add_executable(opsum opsum.c ../util/mtest.c)
+ add_executable(red3 red3.c ../util/mtest.c)
+ add_executable(red4 red4.c ../util/mtest.c)
+ add_executable(redscat2 redscat2.c ../util/mtest.c)
+ add_executable(redscat3 redscat3.c ../util/mtest.c)
+ add_executable(redscatbkinter redscatbkinter.c ../util/mtest.c)
+ add_executable(redscatblk3 redscatblk3.c ../util/mtest.c)
+ add_executable(red_scat_block2 red_scat_block2.c ../util/mtest.c)
+ add_executable(red_scat_block red_scat_block.c ../util/mtest.c)
+ add_executable(redscat redscat.c ../util/mtest.c)
+ add_executable(redscatinter redscatinter.c ../util/mtest.c)
+ add_executable(reduce_mpich reduce.c ../util/mtest.c)
+ add_executable(reduce_local reduce_local.c ../util/mtest.c)
+ add_executable(scantst scantst.c ../util/mtest.c)
+ add_executable(scatter2 scatter2.c ../util/mtest.c)
+ add_executable(scatter3 scatter3.c ../util/mtest.c)
+ add_executable(scattern scattern.c ../util/mtest.c)
+ add_executable(scatterv scatterv.c ../util/mtest.c)
+ add_executable(uoplong uoplong.c ../util/mtest.c)
+
+
+
+ target_link_libraries(allgather2 simgrid)
+ target_link_libraries(allgather3 simgrid)
+ target_link_libraries(allgatherv2 simgrid)
+ target_link_libraries(allgatherv3 simgrid)
+ target_link_libraries(allgatherv4 simgrid)
+ target_link_libraries(allred2 simgrid)
+ target_link_libraries(allred3 simgrid)
+ target_link_libraries(allred4 simgrid)
+ target_link_libraries(allred5 simgrid)
+ target_link_libraries(allred6 simgrid)
+ target_link_libraries(allred simgrid)
+ target_link_libraries(allredmany simgrid)
+ target_link_libraries(alltoall1 simgrid)
+ target_link_libraries(alltoallv0 simgrid)
+ target_link_libraries(alltoallv simgrid)
+ target_link_libraries(alltoallw1 simgrid)
+ target_link_libraries(alltoallw2 simgrid)
+ target_link_libraries(alltoallw_zeros simgrid)
+ target_link_libraries(bcast2 simgrid)
+ target_link_libraries(bcast3 simgrid)
+ target_link_libraries(bcasttest simgrid)
+ target_link_libraries(bcastzerotype simgrid)
+ target_link_libraries(coll10 simgrid)
+ target_link_libraries(coll11 simgrid)
+ target_link_libraries(coll12 simgrid)
+ target_link_libraries(coll13 simgrid)
+ target_link_libraries(coll2 simgrid)
+ target_link_libraries(coll3 simgrid)
+ target_link_libraries(coll4 simgrid)
+ target_link_libraries(coll5 simgrid)
+ target_link_libraries(coll6 simgrid)
+ target_link_libraries(coll7 simgrid)
+ target_link_libraries(coll8 simgrid)
+ target_link_libraries(coll9 simgrid)
+ target_link_libraries(exscan2 simgrid)
+ target_link_libraries(exscan simgrid)
+ target_link_libraries(gather2 simgrid)
+ target_link_libraries(gather2_save simgrid)
+ target_link_libraries(gather simgrid)
+ target_link_libraries(iallred simgrid)
+ target_link_libraries(ibarrier simgrid)
+ target_link_libraries(icallgather simgrid)
+ target_link_libraries(icallgatherv simgrid)
+ target_link_libraries(icallreduce simgrid)
+ target_link_libraries(icalltoall simgrid)
+ target_link_libraries(icalltoallv simgrid)
+ target_link_libraries(icalltoallw simgrid)
+ target_link_libraries(icbarrier simgrid)
+ target_link_libraries(icbcast simgrid)
+ target_link_libraries(icgather simgrid)
+ target_link_libraries(icgatherv simgrid)
+ target_link_libraries(icreduce simgrid)
+ target_link_libraries(icscatter simgrid)
+ target_link_libraries(icscatterv simgrid)
+ target_link_libraries(longuser simgrid)
+ target_link_libraries(nonblocking2 simgrid)
+ target_link_libraries(nonblocking3 simgrid)
+ target_link_libraries(nonblocking simgrid)
+ target_link_libraries(opband simgrid)
+ target_link_libraries(opbor simgrid)
+ target_link_libraries(opbxor simgrid)
+ target_link_libraries(op_commutative simgrid)
+ target_link_libraries(opland simgrid)
+ target_link_libraries(oplor simgrid)
+ target_link_libraries(oplxor simgrid)
+ target_link_libraries(opmax simgrid)
+ target_link_libraries(opmaxloc simgrid)
+ target_link_libraries(opmin simgrid)
+ target_link_libraries(opminloc simgrid)
+ target_link_libraries(opprod simgrid)
+ target_link_libraries(opsum simgrid)
+ target_link_libraries(red3 simgrid)
+ target_link_libraries(red4 simgrid)
+ target_link_libraries(redscat2 simgrid)
+ target_link_libraries(redscat3 simgrid)
+ target_link_libraries(redscatbkinter simgrid)
+ target_link_libraries(redscatblk3 simgrid)
+ target_link_libraries(red_scat_block2 simgrid)
+ target_link_libraries(red_scat_block simgrid)
+ target_link_libraries(redscat simgrid)
+ target_link_libraries(redscatinter simgrid)
+ target_link_libraries(reduce_mpich simgrid)
+ target_link_libraries(reduce_local simgrid)
+ target_link_libraries(scantst simgrid)
+ target_link_libraries(scatter2 simgrid)
+ target_link_libraries(scatter3 simgrid)
+ target_link_libraries(scattern simgrid)
+ target_link_libraries(scatterv simgrid)
+ target_link_libraries(uoplong simgrid)
+
+
+
+ set_target_properties(allgather2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgather3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgatherv2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgatherv3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgatherv4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred5 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred6 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allredmany PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoall1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallv0 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallw1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallw2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallw_zeros PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcast2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcast3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcasttest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcastzerotype PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll10 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll11 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll12 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll13 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll5 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll6 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll7 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll8 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll9 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(exscan2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(exscan PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gather2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gather2_save PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gather PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(iallred PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(ibarrier PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icallgather PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icallgatherv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icallreduce PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icalltoall PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icalltoallv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icalltoallw PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icbarrier PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icbcast PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icgather PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icgatherv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icreduce PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icscatter PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icscatterv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(longuser PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(nonblocking2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(nonblocking3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(nonblocking PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opband PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opbor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opbxor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(op_commutative PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opland PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(oplor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(oplxor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opmax PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opmaxloc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opmin PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opminloc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opprod PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opsum PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscat2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscat3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscatbkinter PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscatblk3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red_scat_block2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red_scat_block PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscat PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscatinter PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(reduce_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(reduce_local PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scantst PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scatter2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scatter3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scattern PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scatterv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(uoplong PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgather2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgather3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgatherv2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgatherv3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgatherv4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred5.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred6.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allredmany.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoall1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv0.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallw1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallw2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallw_zeros.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcasttest.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcastzerotype.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll10.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll11.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll12.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll13.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll5.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll6.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll7.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll8.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll9.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exscan2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exscan.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gather2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gather2_save.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gather.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/iallred.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ibarrier.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icallgather.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icallgatherv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icallreduce.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icalltoall.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icalltoallv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icalltoallw.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icbarrier.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icbcast.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icgather.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icgatherv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icreduce.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icscatter.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icscatterv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/longuser.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opband.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opbor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opbxor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/op_commutative.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opland.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/oplor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/oplxor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opmax.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opmaxloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opmin.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opminloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opprod.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opsum.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscat2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscat3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscatbkinter.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscatblk3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red_scat_block2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red_scat_block.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscat.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscatinter.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/reduce.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/reduce_local.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scantst.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scatter2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scatter3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scattern.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scatterv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/uoplong.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE */
+
+int main( int argc, char **argv )
+{
+ double *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (count = 1; count < 9000; count = count * 2) {
+ vecout = (double *)malloc( size * count * sizeof(double) );
+
+ for (i=0; i<count; i++) {
+ vecout[rank*count+i] = rank*count+i;
+ }
+ MPI_Allgather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, count, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( vecout );
+ }
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. */
+
+int main( int argc, char **argv )
+{
+ double *vecout, *invec;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (count = 1; count < 9000; count = count * 2) {
+ invec = (double *)malloc( count * sizeof(double) );
+ vecout = (double *)malloc( size * count * sizeof(double) );
+
+ for (i=0; i<count; i++) {
+ invec[i] = rank*count+i;
+ }
+ MPI_Allgather( invec, count, MPI_DOUBLE,
+ vecout, count, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( invec );
+ free( vecout );
+ }
+
+ MTestFreeComm( &comm );
+ }
+
+ /* Do a zero byte gather */
+ MPI_Allgather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, MPI_COMM_WORLD );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE. This is
+ the trivial version based on the allgather test (allgatherv but with
+ constant data sizes) */
+
+int main( int argc, char **argv )
+{
+ double *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int *displs, *recvcounts;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ displs = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+
+ for (count = 1; count < 9000; count = count * 2) {
+ vecout = (double *)malloc( size * count * sizeof(double) );
+
+ for (i=0; i<count; i++) {
+ vecout[rank*count+i] = rank*count+i;
+ }
+ for (i=0; i<size; i++) {
+ recvcounts[i] = count;
+ displs[i] = i * count;
+ }
+ MPI_Allgatherv( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, recvcounts, displs, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( vecout );
+ }
+ free( displs );
+ free( recvcounts );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. This is
+ the trivial version based on the allgather test (allgatherv but with
+ constant data sizes) */
+
+int main( int argc, char **argv )
+{
+ double *vecout, *invec;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int *displs, *recvcounts;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ displs = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+
+ for (count = 1; count < 9000; count = count * 2) {
+ invec = (double *)malloc( count * sizeof(double) );
+ vecout = (double *)malloc( size * count * sizeof(double) );
+
+ for (i=0; i<count; i++) {
+ invec[i] = rank*count+i;
+ }
+ for (i=0; i<size; i++) {
+ recvcounts[i] = count;
+ displs[i] = i * count;
+ }
+ MPI_Allgatherv( invec, count, MPI_DOUBLE,
+ vecout, recvcounts, displs, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( invec );
+ free( vecout );
+ }
+ free( displs );
+ free( recvcounts );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <time.h>
+#include <math.h>
+#include <assert.h>
+
+/* FIXME: What is this test supposed to accomplish? */
+
+#define START_BUF (1)
+#define LARGE_BUF (256 * 1024)
+
+/* FIXME: MAX_BUF is too large */
+#define MAX_BUF (128 * 1024 * 1024)
+#define LOOPS 10
+
+__thread char * sbuf, * rbuf;
+__thread int * recvcounts, * displs;
+int errs = 0;
+
+/* #define dprintf printf */
+#define dprintf(...)
+
+typedef enum {
+ REGULAR,
+ BCAST,
+ SPIKE,
+ HALF_FULL,
+ LINEAR_DECREASE,
+ BELL_CURVE
+} test_t;
+
+void comm_tests(MPI_Comm comm);
+double run_test(long long msg_size, MPI_Comm comm, test_t test_type, double * max_time);
+
+int main(int argc, char ** argv)
+{
+ int comm_size, comm_rank;
+ MPI_Comm comm;
+
+ MTest_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
+
+ if (LARGE_BUF * comm_size > MAX_BUF)
+ goto fn_exit;
+
+ sbuf = (void *) calloc(MAX_BUF, 1);
+ rbuf = (void *) calloc(MAX_BUF, 1);
+
+ srand(time(NULL));
+
+ recvcounts = (void *) malloc(comm_size * sizeof(int));
+ displs = (void *) malloc(comm_size * sizeof(int));
+ if (!recvcounts || !displs || !sbuf || !rbuf) {
+ fprintf(stderr, "Unable to allocate memory:\n");
+ if (!sbuf) fprintf(stderr,"\tsbuf of %d bytes\n", MAX_BUF );
+ if (!rbuf) fprintf(stderr,"\trbuf of %d bytes\n", MAX_BUF );
+ if (!recvcounts) fprintf(stderr,"\trecvcounts of %zd bytes\n", comm_size * sizeof(int) );
+ if (!displs) fprintf(stderr,"\tdispls of %zd bytes\n", comm_size * sizeof(int) );
+ fflush(stderr);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ if (!comm_rank) {
+ dprintf("Message Range: (%d, %d); System size: %d\n", START_BUF, LARGE_BUF, comm_size);
+ fflush(stdout);
+ }
+
+
+ /* COMM_WORLD tests */
+ if (!comm_rank) {
+ dprintf("\n\n==========================================================\n");
+ dprintf(" MPI_COMM_WORLD\n");
+ dprintf("==========================================================\n");
+ }
+ comm_tests(MPI_COMM_WORLD);
+
+ /* non-COMM_WORLD tests */
+ if (!comm_rank) {
+ dprintf("\n\n==========================================================\n");
+ dprintf(" non-COMM_WORLD\n");
+ dprintf("==========================================================\n");
+ }
+ MPI_Comm_split(MPI_COMM_WORLD, (comm_rank == comm_size - 1) ? 0 : 1, 0, &comm);
+ if (comm_rank < comm_size - 1)
+ comm_tests(comm);
+ MPI_Comm_free(&comm);
+
+ /* Randomized communicator tests */
+ if (!comm_rank) {
+ dprintf("\n\n==========================================================\n");
+ dprintf(" Randomized Communicator\n");
+ dprintf("==========================================================\n");
+ }
+ MPI_Comm_split(MPI_COMM_WORLD, 0, rand(), &comm);
+ comm_tests(comm);
+ MPI_Comm_free(&comm);
+
+ //free(sbuf);
+ //free(rbuf);
+ free(recvcounts);
+ free(displs);
+
+fn_exit:
+ MTest_Finalize(errs);
+ MPI_Finalize();
+
+ return 0;
+}
+
+void comm_tests(MPI_Comm comm)
+{
+ int comm_size, comm_rank;
+ double rtime, max_time;
+ long long msg_size;
+
+ MPI_Comm_size(comm, &comm_size);
+ MPI_Comm_rank(comm, &comm_rank);
+
+ for (msg_size = START_BUF; msg_size <= LARGE_BUF; msg_size *= 2) {
+ if (!comm_rank) {
+ dprintf("\n====> MSG_SIZE: %d\n", (int) msg_size);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, REGULAR, &max_time);
+ if (!comm_rank) {
+ dprintf("REGULAR:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, BCAST, &max_time);
+ if (!comm_rank) {
+ dprintf("BCAST:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, SPIKE, &max_time);
+ if (!comm_rank) {
+ dprintf("SPIKE:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, HALF_FULL, &max_time);
+ if (!comm_rank) {
+ dprintf("HALF_FULL:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, LINEAR_DECREASE, &max_time);
+ if (!comm_rank) {
+ dprintf("LINEAR_DECREASE:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, BELL_CURVE, &max_time);
+ if (!comm_rank) {
+ dprintf("BELL_CURVE:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+ }
+}
+
+double run_test(long long msg_size, MPI_Comm comm, test_t test_type,
+ double * max_time)
+{
+ int i, j;
+ int comm_size, comm_rank;
+ double start, end;
+ double total_time, avg_time;
+ MPI_Aint tmp;
+
+ MPI_Comm_size(comm, &comm_size);
+ MPI_Comm_rank(comm, &comm_rank);
+
+ displs[0] = 0;
+ for (i = 0; i < comm_size; i++) {
+ if (test_type == REGULAR)
+ recvcounts[i] = msg_size;
+ else if (test_type == BCAST)
+ recvcounts[i] = (!i) ? msg_size : 0;
+ else if (test_type == SPIKE)
+ recvcounts[i] = (!i) ? (msg_size / 2) : (msg_size / (2 * (comm_size - 1)));
+ else if (test_type == HALF_FULL)
+ recvcounts[i] = (i < (comm_size / 2)) ? (2 * msg_size) : 0;
+ else if (test_type == LINEAR_DECREASE) {
+ tmp = 2 * msg_size * (comm_size - 1 - i) / (comm_size - 1);
+ if (tmp != (int)tmp) {
+ fprintf( stderr, "Integer overflow in variable tmp\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ recvcounts[i] = (int) tmp;
+
+ /* If the maximum message size is too large, don't run */
+ if (tmp > MAX_BUF) return 0;
+ }
+ else if (test_type == BELL_CURVE) {
+ for (j = 0; j < i; j++) {
+ if (i - 1 + j >= comm_size) continue;
+ tmp = msg_size * comm_size / (log(comm_size) * i);
+ recvcounts[i - 1 + j] = (int) tmp;
+ displs[i - 1 + j] = 0;
+
+ /* If the maximum message size is too large, don't run */
+ if (tmp > MAX_BUF) return 0;
+ }
+ }
+
+ if (i < comm_size - 1)
+ displs[i+1] = displs[i] + recvcounts[i];
+ }
+
+ /* Test that:
+ 1: sbuf is large enough
+ 2: rbuf is large enough
+ 3: There were no failures (e.g., tmp nowhere > rbuf size
+ */
+ MPI_Barrier(comm);
+ start = MPI_Wtime();
+ for (i = 0; i < LOOPS; i++) {
+ MPI_Allgatherv(sbuf, recvcounts[comm_rank], MPI_CHAR,
+ rbuf, recvcounts, displs, MPI_CHAR, comm);
+ }
+ end = MPI_Wtime();
+ MPI_Barrier(comm);
+
+ /* Convert to microseconds (why?) */
+ total_time = 1.0e6 * (end - start);
+ MPI_Reduce(&total_time, &avg_time, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
+ MPI_Reduce(&total_time, max_time, 1, MPI_DOUBLE, MPI_MAX, 0, comm);
+
+ return (avg_time / (LOOPS * comm_size));
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Warning - this test will fail for MPI_PROD & maybe MPI_SUM
+ * if more than 10 MPI processes are used. Loss of precision
+ * will occur as the number of processors is increased.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+
+int count, size, rank;
+int cerrcnt;
+
+struct int_test { int a; int b; };
+struct long_test { long a; int b; };
+struct short_test { short a; int b; };
+struct float_test { float a; int b; };
+struct double_test { double a; int b; };
+
+#define mpi_op2str(op) \
+ ((op == MPI_SUM) ? "MPI_SUM" : \
+ (op == MPI_PROD) ? "MPI_PROD" : \
+ (op == MPI_MAX) ? "MPI_MAX" : \
+ (op == MPI_MIN) ? "MPI_MIN" : \
+ (op == MPI_LOR) ? "MPI_LOR" : \
+ (op == MPI_LXOR) ? "MPI_LXOR" : \
+ (op == MPI_LAND) ? "MPI_LAND" : \
+ (op == MPI_BOR) ? "MPI_BOR" : \
+ (op == MPI_BAND) ? "MPI_BAND" : \
+ (op == MPI_BXOR) ? "MPI_BXOR" : \
+ (op == MPI_MAXLOC) ? "MPI_MAXLOC" : \
+ (op == MPI_MINLOC) ? "MPI_MINLOC" : \
+ "MPI_NO_OP")
+
+/* calloc to avoid spurious valgrind warnings when "type" has padding bytes */
+#define DECL_MALLOC_IN_OUT_SOL(type) \
+ type *in, *out, *sol; \
+ in = (type *) calloc(count, sizeof(type)); \
+ out = (type *) calloc(count, sizeof(type)); \
+ sol = (type *) calloc(count, sizeof(type));
+
+#define SET_INDEX_CONST(arr, val) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i] = val; \
+ }
+
+#define SET_INDEX_SUM(arr, val) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i] = i + val; \
+ }
+
+#define SET_INDEX_FACTOR(arr, val) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i] = i * (val); \
+ }
+
+#define SET_INDEX_POWER(arr, val) \
+ { \
+ int i, j; \
+ for (i = 0; i < count; i++) { \
+ (arr)[i] = 1; \
+ for (j = 0; j < (val); j++) \
+ arr[i] *= i; \
+ } \
+ }
+
+#define ERROR_CHECK_AND_FREE(lerrcnt, mpi_type, mpi_op) \
+ do { \
+ char name[MPI_MAX_OBJECT_NAME] = {0}; \
+ int len = 0; \
+ if (lerrcnt) { \
+ MPI_Type_get_name(mpi_type, name, &len); \
+ fprintf(stderr, "(%d) Error for type %s and op %s\n", \
+ rank, name, mpi_op2str(mpi_op)); \
+ } \
+ free(in); free(out); free(sol); \
+ } while(0)
+
+/* The logic on the error check on MPI_Allreduce assumes that all
+ MPI_Allreduce routines return a failure if any do - this is sufficient
+ for MPI implementations that reject some of the valid op/datatype pairs
+ (and motivated this addition, as some versions of the IBM MPI
+ failed in just this way).
+*/
+#define ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol) \
+ { \
+ int i, rc, lerrcnt = 0; \
+ rc = MPI_Allreduce(in, out, count, mpi_type, mpi_op, MPI_COMM_WORLD); \
+ if (rc) { lerrcnt++; cerrcnt++; MTestPrintError( rc ); } \
+ else { \
+ for (i = 0; i < count; i++) { \
+ if (out[i] != sol[i]) { \
+ cerrcnt++; \
+ lerrcnt++; \
+ } \
+ } \
+ } \
+ ERROR_CHECK_AND_FREE(lerrcnt, mpi_type, mpi_op); \
+ }
+
+#define STRUCT_ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol) \
+ { \
+ int i, rc, lerrcnt = 0; \
+ rc = MPI_Allreduce(in, out, count, mpi_type, mpi_op, MPI_COMM_WORLD); \
+ if (rc) { lerrcnt++; cerrcnt++; MTestPrintError( rc ); } \
+ else { \
+ for (i = 0; i < count; i++) { \
+ if ((out[i].a != sol[i].a) || (out[i].b != sol[i].b)) { \
+ cerrcnt++; \
+ lerrcnt++; \
+ } \
+ } \
+ } \
+ ERROR_CHECK_AND_FREE(lerrcnt, mpi_type, mpi_op); \
+ }
+
+#define SET_INDEX_STRUCT_CONST(arr, val, el) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i].el = val; \
+ }
+
+#define SET_INDEX_STRUCT_SUM(arr, val, el) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i].el = i + (val); \
+ }
+
+#define sum_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, 0); \
+ SET_INDEX_FACTOR(sol, size); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_SUM, in, out, sol); \
+ }
+
+#define prod_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, 0); \
+ SET_INDEX_POWER(sol, size); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_PROD, in, out, sol); \
+ }
+
+#define max_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, rank); \
+ SET_INDEX_SUM(sol, size - 1); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_MAX, in, out, sol); \
+ }
+
+#define min_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, rank); \
+ SET_INDEX_SUM(sol, 0); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_MIN, in, out, sol); \
+ }
+
+#define const_test(type, mpi_type, mpi_op, val1, val2, val3) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_CONST(in, (val1)); \
+ SET_INDEX_CONST(sol, (val2)); \
+ SET_INDEX_CONST(out, (val3)); \
+ ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol); \
+ }
+
+#define lor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LOR, (rank & 0x1), (size > 1), 0)
+#define lor_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LOR, 0, 0, 0)
+#define lxor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LXOR, (rank == 1), (size > 1), 0)
+#define lxor_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LXOR, 0, 0, 0)
+#define lxor_test3(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LXOR, 1, (size & 0x1), 0)
+#define land_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LAND, (rank & 0x1), 0, 0)
+#define land_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LAND, 1, 1, 0)
+#define bor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BOR, (rank & 0x3), ((size < 3) ? size - 1 : 0x3), 0)
+#define bxor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BXOR, (rank == 1) * 0xf0, (size > 1) * 0xf0, 0)
+#define bxor_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BXOR, 0, 0, 0)
+#define bxor_test3(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BXOR, ~0, (size &0x1) ? ~0 : 0, 0)
+
+#define band_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ if (rank == size-1) { \
+ SET_INDEX_SUM(in, 0); \
+ } \
+ else { \
+ SET_INDEX_CONST(in, ~0); \
+ } \
+ SET_INDEX_SUM(sol, 0); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_BAND, in, out, sol); \
+ }
+
+#define band_test2(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ if (rank == size-1) { \
+ SET_INDEX_SUM(in, 0); \
+ } \
+ else { \
+ SET_INDEX_CONST(in, 0); \
+ } \
+ SET_INDEX_CONST(sol, 0); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_BAND, in, out, sol); \
+ }
+
+#define maxloc_test(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_STRUCT_SUM(in, rank, a); \
+ SET_INDEX_STRUCT_CONST(in, rank, b); \
+ SET_INDEX_STRUCT_SUM(sol, size - 1, a); \
+ SET_INDEX_STRUCT_CONST(sol, size - 1, b); \
+ SET_INDEX_STRUCT_CONST(out, 0, a); \
+ SET_INDEX_STRUCT_CONST(out, -1, b); \
+ STRUCT_ALLREDUCE_AND_FREE(mpi_type, MPI_MAXLOC, in, out, sol); \
+ }
+
+#define minloc_test(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_STRUCT_SUM(in, rank, a); \
+ SET_INDEX_STRUCT_CONST(in, rank, b); \
+ SET_INDEX_STRUCT_SUM(sol, 0, a); \
+ SET_INDEX_STRUCT_CONST(sol, 0, b); \
+ SET_INDEX_STRUCT_CONST(out, 0, a); \
+ SET_INDEX_STRUCT_CONST(out, -1, b); \
+ STRUCT_ALLREDUCE_AND_FREE(mpi_type, MPI_MINLOC, in, out, sol); \
+ }
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+#define test_types_set_mpi_2_2_integer(op,post) do { \
+ op##_test##post(int8_t, MPI_INT8_T); \
+ op##_test##post(int16_t, MPI_INT16_T); \
+ op##_test##post(int32_t, MPI_INT32_T); \
+ op##_test##post(int64_t, MPI_INT64_T); \
+ op##_test##post(uint8_t, MPI_UINT8_T); \
+ op##_test##post(uint16_t, MPI_UINT16_T); \
+ op##_test##post(uint32_t, MPI_UINT32_T); \
+ op##_test##post(uint64_t, MPI_UINT64_T); \
+ op##_test##post(MPI_Aint, MPI_AINT); \
+ op##_test##post(MPI_Offset, MPI_OFFSET); \
+ } while (0)
+#else
+#define test_types_set_mpi_2_2_integer(op,post) do { } while (0)
+#endif
+
+#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
+#define test_types_set_mpi_3_0_integer(op,post) do { \
+ op##_test##post(MPI_Count, MPI_COUNT); \
+ } while (0)
+#else
+#define test_types_set_mpi_3_0_integer(op,post) do { } while (0)
+#endif
+
+#define test_types_set1(op, post) \
+ { \
+ op##_test##post(int, MPI_INT); \
+ op##_test##post(long, MPI_LONG); \
+ op##_test##post(short, MPI_SHORT); \
+ op##_test##post(unsigned short, MPI_UNSIGNED_SHORT); \
+ op##_test##post(unsigned, MPI_UNSIGNED); \
+ op##_test##post(unsigned long, MPI_UNSIGNED_LONG); \
+ op##_test##post(unsigned char, MPI_UNSIGNED_CHAR); \
+ test_types_set_mpi_2_2_integer(op,post); \
+ test_types_set_mpi_3_0_integer(op,post); \
+ }
+
+#define test_types_set2(op, post) \
+ { \
+ test_types_set1(op, post); \
+ op##_test##post(float, MPI_FLOAT); \
+ op##_test##post(double, MPI_DOUBLE); \
+ }
+
+#define test_types_set3(op, post) \
+ { \
+ op##_test##post(unsigned char, MPI_BYTE); \
+ }
+
+/* Make sure that we test complex and double complex, even if long
+ double complex is not available */
+#if defined(USE_LONG_DOUBLE_COMPLEX)
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
+ && defined(HAVE_DOUBLE__COMPLEX) \
+ && defined(HAVE_LONG_DOUBLE__COMPLEX)
+#define test_types_set4(op, post) \
+ do { \
+ op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
+ op##_test##post(double _Complex, MPI_C_DOUBLE_COMPLEX); \
+ if (MPI_C_LONG_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) { \
+ op##_test##post(long double _Complex, MPI_C_LONG_DOUBLE_COMPLEX); \
+ } \
+ } while (0)
+
+#else
+#define test_types_set4(op, post) do { } while (0)
+#endif
+#else
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
+ && defined(HAVE_DOUBLE__COMPLEX)
+#define test_types_set4(op, post) \
+ do { \
+ op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
+ op##_test##post(double _Complex, MPI_C_DOUBLE_COMPLEX); \
+ } while (0)
+
+#else
+#define test_types_set4(op, post) do { } while (0)
+#endif
+
+#endif /* defined(USE_LONG_DOUBLE_COMPLEX) */
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE__BOOL)
+#define test_types_set5(op, post) \
+ do { \
+ op##_test##post(_Bool, MPI_C_BOOL); \
+ } while (0)
+
+#else
+#define test_types_set5(op, post) do { } while (0)
+#endif
+
+int main( int argc, char **argv )
+{
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if (size < 2) {
+ fprintf( stderr, "At least 2 processes required\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ count = 10;
+ /* Allow an argument to override the count.
+ Note that the product tests may fail if the count is very large.
+ */
+ if (argc >= 2) {
+ count = atoi( argv[1] );
+ if (count <= 0) {
+ fprintf( stderr, "Invalid count argument %s\n", argv[1] );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ }
+
+ test_types_set2(sum, 1);
+ test_types_set2(prod, 1);
+ test_types_set2(max, 1);
+ test_types_set2(min, 1);
+
+ test_types_set1(lor, 1);
+ test_types_set1(lor, 2);
+
+ test_types_set1(lxor, 1);
+ test_types_set1(lxor, 2);
+ test_types_set1(lxor, 3);
+
+ test_types_set1(land, 1);
+ test_types_set1(land, 2);
+
+ test_types_set1(bor, 1);
+ test_types_set1(band, 1);
+ test_types_set1(band, 2);
+
+ test_types_set1(bxor, 1);
+ test_types_set1(bxor, 2);
+ test_types_set1(bxor, 3);
+
+ test_types_set3(bor, 1);
+ test_types_set3(band, 1);
+ test_types_set3(band, 2);
+
+ test_types_set3(bxor, 1);
+ test_types_set3(bxor, 2);
+ test_types_set3(bxor, 3);
+
+ test_types_set4(sum, 1);
+ test_types_set4(prod, 1);
+
+ test_types_set5(lor, 1);
+ test_types_set5(lor, 2);
+ test_types_set5(lxor, 1);
+ test_types_set5(lxor, 2);
+ test_types_set5(lxor, 3);
+ test_types_set5(land, 1);
+ test_types_set5(land, 2);
+
+ maxloc_test(struct int_test, MPI_2INT);
+ maxloc_test(struct long_test, MPI_LONG_INT);
+ maxloc_test(struct short_test, MPI_SHORT_INT);
+ maxloc_test(struct float_test, MPI_FLOAT_INT);
+ maxloc_test(struct double_test, MPI_DOUBLE_INT);
+
+ minloc_test(struct int_test, MPI_2INT);
+ minloc_test(struct long_test, MPI_LONG_INT);
+ minloc_test(struct short_test, MPI_SHORT_INT);
+ minloc_test(struct float_test, MPI_FLOAT_INT);
+ minloc_test(struct double_test, MPI_DOUBLE_INT);
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( cerrcnt );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with MPI_IN_PLACE";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *buf, i;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ for (count = 1; count < 65000; count = count * 2) {
+ /* Contiguous data */
+ buf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count; i++) buf[i] = rank + i;
+ MPI_Allreduce( MPI_IN_PLACE, buf, count, MPI_INT, MPI_SUM, comm );
+ /* Check the results */
+ for (i=0; i<count; i++) {
+ int result = i * size + (size*(size-1))/2;
+ if (buf[i] != result) {
+ errs ++;
+ if (errs < 10) {
+ fprintf( stderr, "buf[%d] = %d expected %d\n",
+ i, buf[i], result );
+ }
+ }
+ }
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <assert.h>
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with non-commutative user-defined operations";
+*/
+
+/* We make the error count global so that we can easily control the output
+ of error information (in particular, limiting it after the first 10
+ errors */
+int errs = 0;
+
+/* This implements a simple matrix-matrix multiply. This is an associative
+ but not commutative operation. The matrix size is set in matSize;
+ the number of matrices is the count argument. The matrix is stored
+ in C order, so that
+ c(i,j) is cin[j+i*matSize]
+ */
+#define MAXCOL 256
+static int matSize = 0; /* Must be < MAXCOL */
+static int max_offset = 0;
+void uop( void *, void *, int *, MPI_Datatype * );
+void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+ const int *cin = (const int *)cinPtr;
+ int *cout = (int *)coutPtr;
+ int i, j, k, nmat;
+ int tempcol[MAXCOL];
+ int offset1, offset2;
+ int matsize2 = matSize*matSize;
+
+ for (nmat = 0; nmat < *count; nmat++) {
+ for (j=0; j<matSize; j++) {
+ for (i=0; i<matSize; i++) {
+ tempcol[i] = 0;
+ for (k=0; k<matSize; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ offset1 = k+i*matSize;
+ offset2 = j+k*matSize;
+ assert(offset1 < max_offset);
+ assert(offset2 < max_offset);
+ tempcol[i] += cin[offset1] * cout[offset2];
+ }
+ }
+ for (i=0; i<matSize; i++) {
+ offset1 = j+i*matSize;
+ assert(offset1 < max_offset);
+ cout[offset1] = tempcol[i];
+ }
+ }
+ cin += matsize2;
+ cout += matsize2;
+ }
+}
+
+/* Initialize the integer matrix as a permutation of rank with rank+1.
+ If we call this matrix P_r, we know that product of P_0 P_1 ... P_{size-2}
+ is the the matrix representing the permutation that shifts left by one.
+ As the final matrix (in the size-1 position), we use the matrix that
+ shifts RIGHT by one
+*/
+static void initMat( MPI_Comm comm, int mat[] )
+{
+ int i, j, size, rank;
+ int offset;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (i=0; i<size*size; i++) {
+ assert(i < max_offset);
+ mat[i] = 0;
+ }
+
+ if (rank < size-1) {
+ /* Create the permutation matrix that exchanges r with r+1 */
+ for (i=0; i<size; i++) {
+ if (i == rank) {
+ offset = ((i+1)%size) + i * size;
+ assert(offset < max_offset);
+ mat[offset] = 1;
+ }
+ else if (i == ((rank + 1)%size)) {
+ offset = ((i+size-1)%size) + i * size;
+ assert(offset < max_offset);
+ mat[offset] = 1;
+ }
+ else {
+ offset = i+i*size;
+ assert(offset < max_offset);
+ mat[offset] = 1;
+ }
+ }
+ }
+ else {
+ /* Create the permutation matrix that shifts right by one */
+ for (i=0; i<size; i++) {
+ for (j=0; j<size; j++) {
+ offset = j + i * size; /* location of c(i,j) */
+ mat[offset] = 0;
+ if ( ((j-i+size)%size) == 1 ) mat[offset] = 1;
+ }
+ }
+
+ }
+}
+
+/* Compare a matrix with the identity matrix */
+static int isIdentity( MPI_Comm comm, int mat[] )
+{
+ int i, j, size, rank, lerrs = 0;
+ int offset;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<size; j++) {
+ if (i == j) {
+ offset = j+i*size;
+ assert(offset < max_offset);
+ if (mat[offset] != 1) {
+ lerrs++;
+ if (errs + lerrs< 10) {
+ printf( "[%d] mat[%d,%d] = %d, expected 1 for comm %s\n",
+ rank, i,j, mat[offset], MTestGetIntracommName() );
+ }
+ }
+ }
+ else {
+ offset = j+i*size;
+ assert(offset < max_offset);
+ if (mat[offset] != 0) {
+ lerrs++;
+ if (errs + lerrs< 10) {
+ printf( "[%d] mat[%d,%d] = %d, expected 0 for comm %s\n",
+ rank, i,j, mat[offset], MTestGetIntracommName() );
+ }
+ }
+ }
+ }
+ }
+ return lerrs;
+}
+
+int main( int argc, char *argv[] )
+{
+ int size;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *buf, *bufout;
+ MPI_Op op;
+ MPI_Datatype mattype;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Op_create( uop, 0, &op );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) {
+ continue;
+ }
+ MPI_Comm_size( comm, &size );
+ matSize = size;
+
+ /* Only one matrix for now */
+ count = 1;
+
+ /* A single matrix, the size of the communicator */
+ MPI_Type_contiguous( size*size, MPI_INT, &mattype );
+ MPI_Type_commit( &mattype );
+
+ max_offset = count * size * size;
+ buf = (int *)malloc( max_offset * sizeof(int) );
+ if (!buf) {
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ bufout = (int *)malloc( max_offset * sizeof(int) );
+ if (!bufout) {
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ initMat( comm, buf );
+ MPI_Allreduce( buf, bufout, count, mattype, op, comm );
+ errs += isIdentity( comm, bufout );
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ initMat( comm, bufout );
+ MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
+ errs += isIdentity( comm, bufout );
+
+ free( buf );
+ free( bufout );
+
+ //MPI_Type_free( &mattype );
+ MTestFreeComm( &comm );
+ }
+
+ // MPI_Op_free( &op );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2004 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <assert.h>
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with non-commutative user-defined operations using matrix rotations";
+*/
+
+/* This example is similar to allred3.c, but uses only 3x3 matrics with
+ integer-valued entries. This is an associative but not commutative
+ operation.
+ The number of matrices is the count argument. The matrix is stored
+ in C order, so that
+ c(i,j) is cin[j+i*3]
+
+ Three different matrices are used:
+ I = identity matrix
+ A = (1 0 0 B = (0 1 0
+ 0 0 1 1 0 0
+ 0 1 0) 0 0 1)
+
+ The product
+
+ I^k A I^(p-2-k-j) B I^j
+
+ is
+
+ ( 0 1 0
+ 0 0 1
+ 1 0 0 )
+
+ for all values of k, p, and j.
+ */
+
+void matmult( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+
+void matmult( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+ const int *cin = (const int *)cinPtr;
+ int *cout = (int *)coutPtr;
+ int i, j, k, nmat;
+ int tempcol[3];
+ int offset1, offset2;
+
+ for (nmat = 0; nmat < *count; nmat++) {
+ for (j=0; j<3; j++) {
+ for (i=0; i<3; i++) {
+ tempcol[i] = 0;
+ for (k=0; k<3; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ offset1 = k+i*3;
+ offset2 = j+k*3;
+ tempcol[i] += cin[offset1] * cout[offset2];
+ }
+ }
+ for (i=0; i<3; i++) {
+ offset1 = j+i*3;
+ cout[offset1] = tempcol[i];
+ }
+ }
+ /* Advance to the next matrix */
+ cin += 9;
+ cout += 9;
+ }
+}
+
+/* Initialize the integer matrix as one of the
+ above matrix entries, as a function of count.
+ We guarantee that both the A and B matrices are included.
+*/
+static void initMat( int rank, int size, int nmat, int mat[] )
+{
+ int i, kind;
+
+ /* Zero the matrix */
+ for (i=0; i<9; i++) {
+ mat[i] = 0;
+ }
+
+ /* Decide which matrix to create (I, A, or B) */
+ if ( size == 2) {
+ /* rank 0 is A, 1 is B */
+ kind = 1 + rank;
+ }
+ else {
+ int tmpA, tmpB;
+ /* Most ranks are identity matrices */
+ kind = 0;
+ /* Make sure exactly one rank gets the A matrix
+ and one the B matrix */
+ tmpA = size / 4;
+ tmpB = (3 * size) / 4;
+
+ if (rank == tmpA) kind = 1;
+ if (rank == tmpB) kind = 2;
+ }
+
+ switch (kind) {
+ case 0: /* Identity */
+ mat[0] = 1;
+ mat[4] = 1;
+ mat[8] = 1;
+ break;
+ case 1: /* A */
+ mat[0] = 1;
+ mat[5] = 1;
+ mat[7] = 1;
+ break;
+ case 2: /* B */
+ mat[1] = 1;
+ mat[3] = 1;
+ mat[8] = 1;
+ break;
+ }
+}
+
+/* Compare a matrix with the known result */
+static int checkResult( int nmat, int mat[], const char *msg )
+{
+ int n, k, errs = 0, wrank;
+ static int solution[9] = { 0, 1, 0,
+ 0, 0, 1,
+ 1, 0, 0 };
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ for (n=0; n<nmat; n++) {
+ for (k=0; k<9; k++) {
+ if (mat[k] != solution[k]) {
+ errs ++;
+ if (errs == 1) {
+ printf( "Errors for communicators %s\n",
+ MTestGetIntracommName() ); fflush(stdout);
+
+ }
+ if (errs < 10) {
+ printf( "[%d]matrix #%d(%s): Expected mat[%d,%d] = %d, got %d\n",
+ wrank, n, msg, k / 3, k % 3, solution[k], mat[k] );
+ fflush(stdout);
+ }
+ }
+ }
+ /* Advance to the next matrix */
+ mat += 9;
+ }
+ return errs;
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *buf, *bufout;
+ MPI_Op op;
+ MPI_Datatype mattype;
+ int i;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Op_create( matmult, 0, &op );
+
+ /* A single rotation matrix (3x3, stored as 9 consequetive elements) */
+ MPI_Type_contiguous( 9, MPI_INT, &mattype );
+ MPI_Type_commit( &mattype );
+
+ /* Sanity check: test that our routines work properly */
+ { int one = 1;
+ buf = (int *)malloc( 4*9 * sizeof(int) );
+ initMat( 0, 4, 0, &buf[0] );
+ initMat( 1, 4, 0, &buf[9] );
+ initMat( 2, 4, 0, &buf[18] );
+ initMat( 3, 4, 0, &buf[27] );
+ matmult( &buf[0], &buf[9], &one, &mattype );
+ matmult( &buf[9], &buf[18], &one, &mattype );
+ matmult( &buf[18], &buf[27], &one, &mattype );
+ checkResult( 1, &buf[27], "Sanity Check" );
+ free(buf);
+ }
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ for (count = 1; count < size; count ++ ) {
+
+ /* Allocate the matrices */
+ buf = (int *)malloc( count * 9 * sizeof(int) );
+ if (!buf) {
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ bufout = (int *)malloc( count * 9 * sizeof(int) );
+ if (!bufout) {
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ for (i=0; i < count; i++) {
+ initMat( rank, size, i, &buf[i*9] );
+ }
+
+ MPI_Allreduce( buf, bufout, count, mattype, op, comm );
+ errs += checkResult( count, bufout, "" );
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ for (i=0; i < count; i++) {
+ initMat( rank, size, i, &bufout[i*9] );
+ }
+ MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
+ errs += checkResult( count, bufout, "IN_PLACE" );
+
+ free( buf );
+ free( bufout );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MPI_Op_free( &op );
+ MPI_Type_free( &mattype );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <assert.h>
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with count greater than the number of processes";
+*/
+
+/* We make the error count global so that we can easily control the output
+ of error information (in particular, limiting it after the first 10
+ errors */
+int errs = 0;
+
+int main( int argc, char *argv[] )
+{
+ MPI_Comm comm;
+ MPI_Datatype dtype;
+ int count, *bufin, *bufout, size, i, minsize=1;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) {
+ continue;
+ }
+ MPI_Comm_size( comm, &size );
+ count = size * 2;
+ bufin = (int *)malloc( count * sizeof(int) );
+ bufout = (int *)malloc( count * sizeof(int) );
+ if (!bufin || !bufout) {
+ fprintf( stderr, "Unable to allocated space for buffers (%d)\n",
+ count );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<count; i++) {
+ bufin[i] = i;
+ bufout[i] = -1;
+ }
+
+ dtype = MPI_INT;
+ MPI_Allreduce( bufin, bufout, count, dtype, MPI_SUM, comm );
+ /* Check output */
+ for (i=0; i<count; i++) {
+ if (bufout[i] != i * size) {
+ fprintf( stderr, "Expected bufout[%d] = %d but found %d\n",
+ i, i * size, bufout[i] );
+ errs++;
+ }
+ }
+ free( bufin );
+ free( bufout );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with apparent non-commutative operators";
+*/
+/* While the operator is in fact commutative, this forces the MPI code to
+ run the code that is used for non-commutative operators, and for
+ various message lengths. Other tests check truly non-commutative
+ operators */
+
+void mysum( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+
+void mysum( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+ const int *cin = (const int *)cinPtr;
+ int *cout = (int *)coutPtr;
+ int i, n = *count;
+ for (i=0; i<n; i++)
+ cout[i] += cin[i];
+}
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MPI_Op op;
+ int *buf, i;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Op_create( mysum, 0, &op );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ for (count = 1; count < 65000; count = count * 2) {
+ /* Contiguous data */
+ buf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count; i++) buf[i] = rank + i;
+ MPI_Allreduce( MPI_IN_PLACE, buf, count, MPI_INT, op, comm );
+ /* Check the results */
+ for (i=0; i<count; i++) {
+ int result = i * size + (size*(size-1))/2;
+ if (buf[i] != result) {
+ errs ++;
+ if (errs < 10) {
+ fprintf( stderr, "buf[%d] = %d expected %d\n",
+ i, buf[i], result );
+ }
+ }
+ }
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+ MPI_Op_free( &op );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+
+/*
+ * This example should be run with 2 processes and tests the ability of the
+ * implementation to handle a flood of one-way messages.
+ */
+
+int main( int argc, char **argv )
+{
+ double wscale = 10.0, scale;
+ int numprocs, myid,i;
+
+ MPI_Init(&argc,&argv);
+ MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
+ MPI_Comm_rank(MPI_COMM_WORLD,&myid);
+
+ for ( i=0; i<10000; i++) {
+ MPI_Allreduce(&wscale,&scale,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
+ }
+
+ if (myid == 0) {
+ /* If we get here at all, we're ok */
+ printf( " No Errors\n" );
+ }
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+#include <stdlib.h>
+
+/*
+static char MTEST_Descrip[] = "";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *sendbuf, *recvbuf, *p;
+ int sendcount, recvcount;
+ int i, j;
+ MPI_Datatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* printf( "Size of comm = %d\n", size ); */
+ for (count = 1; count < 65000; count = count * 2) {
+
+ /* Create a send buf and a receive buf suitable for testing
+ all to all. */
+ sendcount = count;
+ recvcount = count;
+ sendbuf = (int *)malloc( count * size * sizeof(int) );
+ recvbuf = (int *)malloc( count * size * sizeof(int) );
+ sendtype = MPI_INT;
+ recvtype = MPI_INT;
+
+ if (!sendbuf || !recvbuf) {
+ errs++;
+ fprintf( stderr, "Failed to allocate sendbuf and/or recvbuf\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<count*size; i++)
+ recvbuf[i] = -1;
+ p = sendbuf;
+ for (j=0; j<size; j++) {
+ for (i=0; i<count; i++) {
+ *p++ = j * size + rank + i;
+ }
+ }
+
+ MPI_Alltoall( sendbuf, sendcount, sendtype,
+ recvbuf, recvcount, recvtype, comm );
+
+ p = recvbuf;
+ for (j=0; j<size; j++) {
+ for (i=0; i<count; i++) {
+ if (*p != rank * size + j + i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Error with communicator %s and size=%d count=%d\n",
+ MTestGetIntracommName(), size, count );
+ fprintf( stderr, "recvbuf[%d,%d] = %d, should %d\n",
+ j,i, *p, rank * size + j + i );
+ }
+ }
+ p++;
+ }
+ }
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* check MPI_IN_PLACE, added in MPI-2.2 */
+ p = recvbuf;
+ for (j=0; j<size; j++) {
+ for (i=0; i<count; i++) {
+ *p++ = j * size + rank + i;
+ }
+ }
+ MPI_Alltoall( MPI_IN_PLACE, -1/*ignored*/, MPI_DATATYPE_NULL/*ignored*/,
+ recvbuf, recvcount, recvtype, comm );
+ p = recvbuf;
+ for (j=0; j<size; j++) {
+ for (i=0; i<count; i++) {
+ if (*p != rank * size + j + i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Error (MPI_IN_PLACE) with communicator %s and size=%d count=%d\n",
+ MTestGetIntracommName(), size, count );
+ fprintf(stderr, "recvbuf[%d,%d] = %d, should be %d\n",
+ j,i, *p, rank * size + j + i );
+ }
+ }
+ p++;
+ }
+ }
+#endif
+
+ free( recvbuf );
+ free( sendbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/*
+ This program tests MPI_Alltoallv by having processor i send different
+ amounts of data to each processor.
+
+ Because there are separate send and receive types to alltoallv,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ The first test sends i items to processor i from all processors.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Create the buffer */
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ sbuf = (int *)malloc( size * size * sizeof(int) );
+ rbuf = (int *)malloc( size * size * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocated buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<size*size; i++) {
+ sbuf[i] = i + 100*rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+ for (i=0; i<size; i++) {
+ sendcounts[i] = i;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank;
+ sdispls[i] = (i * (i+1))/2;
+ }
+ MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i];
+ for (j=0; j<rank; j++) {
+ if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+ fprintf( stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j],(i*(i+1))/2 + j, j );
+ err++;
+ }
+ }
+ }
+
+ free( sdispls );
+ free( sendcounts );
+ free( sbuf );
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* check MPI_IN_PLACE, added in MPI-2.2 */
+ free( rbuf );
+ rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
+ if (!rbuf) {
+ fprintf( stderr, "Could not reallocate rbuf!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < size; i++) {
+ recvcounts[i] = i + rank;
+ rdispls[i] = i * (2 * size);
+ }
+ memset(rbuf, -1, size * (2 * size) * sizeof(int));
+ for (i=0; i < size; i++) {
+ p = rbuf + rdispls[i];
+ for (j = 0; j < recvcounts[i]; ++j) {
+ p[j] = 100 * rank + 10 * i + j;
+ }
+ }
+ MPI_Alltoallv( MPI_IN_PLACE, NULL, NULL, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm );
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i];
+ for (j=0; j<recvcounts[i]; j++) {
+ int expected = 100 * i + 10 * rank + j;
+ if (p[j] != expected) {
+ fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
+ rank, p[j], expected, i, j);
+ ++err;
+ }
+ }
+ }
+#endif
+
+ free( rdispls );
+ free( recvcounts );
+ free( rbuf );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/*
+ This program tests MPI_Alltoallv by having processor each process
+ send data to two neighbors only, using counts of 0 for the other processes.
+ This idiom is sometimes used for halo exchange operations.
+
+ Because there are separate send and receive types to alltoallv,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, *p, err;
+ int left, right, length;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ if (size < 3) continue;
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Get the neighbors */
+ left = (rank - 1 + size) % size;
+ right = (rank + 1) % size;
+
+ /* Set the defaults */
+ for (i=0; i<size; i++) {
+ sendcounts[i] = 0;
+ recvcounts[i] = 0;
+ rdispls[i] = 0;
+ sdispls[i] = 0;
+ }
+
+ for (length=1; length < 66000; length = length*2+1 ) {
+ /* Get the buffers */
+ sbuf = (int *)malloc( 2 * length * sizeof(int) );
+ rbuf = (int *)malloc( 2 * length * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocate buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<length; i++) {
+ sbuf[i] = i + 100000*rank;
+ sbuf[i+length] = i + 100000*rank;
+ rbuf[i] = -i;
+ rbuf[i+length] = -i-length;
+ }
+ sendcounts[left] = length;
+ sendcounts[right] = length;
+ recvcounts[left] = length;
+ recvcounts[right] = length;
+ rdispls[left] = 0;
+ rdispls[right] = length;
+ sdispls[left] = 0;
+ sdispls[right] = length;
+
+ MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm );
+
+ /* Check rbuf */
+ p = rbuf; /* left */
+
+ for (i=0; i<length; i++) {
+ if (p[i] != i + 100000 * left) {
+ if (err < 10) {
+ fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n",
+ rank, left, p[i], i + 100000 * left, i );
+ }
+ err++;
+ }
+ }
+
+ p = rbuf + length; /* right */
+ for (i=0; i<length; i++) {
+ if (p[i] != i + 100000 * right) {
+ if (err < 10) {
+ fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n",
+ rank, right, p[i], i + 100000 * right, i );
+ }
+ err++;
+ }
+ }
+
+ free( rbuf );
+ free( sbuf );
+ }
+
+ free( sdispls );
+ free( rdispls );
+ free( recvcounts );
+ free( sendcounts );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * Changes to this example
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/*
+ * This example is taken from MPI-The complete reference, Vol 1,
+ * pages 222-224.
+ *
+ * Lines after the "--CUT HERE--" were added to make this into a complete
+ * test program.
+ */
+
+/* Specify the maximum number of errors to report. */
+#define MAX_ERRORS 10
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+#define MAX_SIZE 64
+
+MPI_Datatype transpose_type(int M, int m, int n, MPI_Datatype type);
+MPI_Datatype submatrix_type(int N, int m, int n, MPI_Datatype type);
+void Transpose(float *localA, float *localB, int M, int N, MPI_Comm comm);
+void Transpose(float *localA, float *localB, int M, int N, MPI_Comm comm)
+/* transpose MxN matrix A that is block distributed (1-D) on
+ processes of comm onto block distributed matrix B */
+{
+ int i, j, extent, myrank, p, n[2], m[2];
+ int lasti, lastj;
+ int *sendcounts, *recvcounts;
+ int *sdispls, *rdispls;
+ MPI_Datatype xtype[2][2], stype[2][2], *sendtypes, *recvtypes;
+
+ MTestPrintfMsg( 2, "M = %d, N = %d\n", M, N );
+
+ /* compute parameters */
+ MPI_Comm_size(comm, &p);
+ MPI_Comm_rank(comm, &myrank);
+ extent = sizeof(float);
+
+ /* allocate arrays */
+ sendcounts = (int *)malloc(p*sizeof(int));
+ recvcounts = (int *)malloc(p*sizeof(int));
+ sdispls = (int *)malloc(p*sizeof(int));
+ rdispls = (int *)malloc(p*sizeof(int));
+ sendtypes = (MPI_Datatype *)malloc(p*sizeof(MPI_Datatype));
+ recvtypes = (MPI_Datatype *)malloc(p*sizeof(MPI_Datatype));
+
+ /* compute block sizes */
+ m[0] = M/p;
+ m[1] = M - (p-1)*(M/p);
+ n[0] = N/p;
+ n[1] = N - (p-1)*(N/p);
+
+ /* compute types */
+ for (i=0; i <= 1; i++)
+ for (j=0; j <= 1; j++) {
+ xtype[i][j] = transpose_type(N, m[i], n[j], MPI_FLOAT);
+ stype[i][j] = submatrix_type(M, m[i], n[j], MPI_FLOAT);
+ }
+
+ /* prepare collective operation arguments */
+ lasti = myrank == p-1;
+ for (j=0; j < p; j++) {
+ lastj = j == p-1;
+ sendcounts[j] = 1;
+ sdispls[j] = j*n[0]*extent;
+ sendtypes[j] = xtype[lasti][lastj];
+ recvcounts[j] = 1;
+ rdispls[j] = j*m[0]*extent;
+ recvtypes[j] = stype[lastj][lasti];
+ }
+
+ /* communicate */
+ MTestPrintfMsg( 2, "Begin Alltoallw...\n" );
+ /* -- Note that the book incorrectly uses &localA and &localB
+ as arguments to MPI_Alltoallw */
+ MPI_Alltoallw(localA, sendcounts, sdispls, sendtypes,
+ localB, recvcounts, rdispls, recvtypes, comm);
+ MTestPrintfMsg( 2, "Done with Alltoallw\n" );
+
+ /* Free buffers */
+ free( sendcounts );
+ free( recvcounts );
+ free( sdispls );
+ free( rdispls );
+ free( sendtypes );
+ free( recvtypes );
+
+ /* Free datatypes */
+ for (i=0; i <= 1; i++)
+ for (j=0; j <= 1; j++) {
+ MPI_Type_free( &xtype[i][j] );
+ MPI_Type_free( &stype[i][j] );
+ }
+}
+
+
+/* Define an n x m submatrix in a n x M local matrix (this is the
+ destination in the transpose matrix */
+MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
+/* computes a datatype for an mxn submatrix within an MxN matrix
+ with entries of type type */
+{
+ /* MPI_Datatype subrow; */
+ MPI_Datatype submatrix;
+
+ /* The book, MPI: The Complete Reference, has the wrong type constructor
+ here. Since the stride in the vector type is relative to the input
+ type, the stride in the book's code is n times as long as is intended.
+ Since n may not exactly divide N, it is better to simply use the
+ blocklength argument in Type_vector */
+ /*
+ MPI_Type_contiguous(n, type, &subrow);
+ MPI_Type_vector(m, 1, N, subrow, &submatrix);
+ */
+ MPI_Type_vector(n, m, M, type, &submatrix );
+ MPI_Type_commit(&submatrix);
+
+ /* Add a consistency test: the size of submatrix should be
+ n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
+ {
+ int tsize;
+ MPI_Aint textent, lb;
+ MPI_Type_size( type, &tsize );
+ MPI_Type_get_extent( submatrix, &lb, &textent );
+
+ if (textent != tsize * (M * (n-1)+m)) {
+ fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
+ (long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
+ }
+ }
+ return(submatrix);
+}
+
+/* Extract an m x n submatrix within an m x N matrix and transpose it.
+ Assume storage by rows; the defined datatype accesses by columns */
+MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
+/* computes a datatype for the transpose of an mxn matrix
+ with entries of type type */
+{
+ MPI_Datatype subrow, subrow1, submatrix;
+ MPI_Aint lb, extent;
+
+ MPI_Type_vector(m, 1, N, type, &subrow);
+ MPI_Type_get_extent(type, &lb, &extent);
+ MPI_Type_create_resized(subrow, 0, extent, &subrow1);
+ MPI_Type_contiguous(n, subrow1, &submatrix);
+ MPI_Type_commit(&submatrix);
+ MPI_Type_free( &subrow );
+ MPI_Type_free( &subrow1 );
+
+ /* Add a consistency test: the size of submatrix should be
+ n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
+ {
+ int tsize;
+ MPI_Aint textent, llb;
+ MPI_Type_size( type, &tsize );
+ MPI_Type_get_true_extent( submatrix, &llb, &textent );
+
+ if (textent != tsize * (N * (m-1)+n)) {
+ fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
+ (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
+ }
+ }
+
+ return(submatrix);
+}
+
+/* -- CUT HERE -- */
+
+int main( int argc, char *argv[] )
+{
+ int gM, gN, lm, lmlast, ln, lnlast, i, j, errs = 0;
+ int size, rank;
+ float *localA, *localB;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ gM = 20;
+ gN = 30;
+
+ /* Each block is lm x ln in size, except for the last process,
+ which has lmlast x lnlast */
+ lm = gM/size;
+ lmlast = gM - (size - 1)*lm;
+ ln = gN/size;
+ lnlast = gN - (size - 1)*ln;
+
+ /* Create the local matrices.
+ Initialize the input matrix so that the entries are
+ consequtive integers, by row, starting at 0.
+ */
+ if (rank == size - 1) {
+ localA = (float *)malloc( gN * lmlast * sizeof(float) );
+ localB = (float *)malloc( gM * lnlast * sizeof(float) );
+ for (i=0; i<lmlast; i++) {
+ for (j=0; j<gN; j++) {
+ localA[i*gN+j] = (float)(i*gN+j + rank * gN * lm);
+ }
+ }
+
+ }
+ else {
+ localA = (float *)malloc( gN * lm * sizeof(float) );
+ localB = (float *)malloc( gM * ln * sizeof(float) );
+ for (i=0; i<lm; i++) {
+ for (j=0; j<gN; j++) {
+ localA[i*gN+j] = (float)(i*gN+j + rank * gN * lm);
+ }
+ }
+ }
+
+ MTestPrintfMsg( 2, "Allocated local arrays\n" );
+ /* Transpose */
+ Transpose( localA, localB, gM, gN, comm );
+
+ /* check the transposed matrix
+ In the global matrix, the transpose has consequtive integers,
+ organized by columns.
+ */
+ if (rank == size - 1) {
+ for (i=0; i<lnlast; i++) {
+ for (j=0; j<gM; j++) {
+ int expected = i+gN*j + rank * ln;
+ if ((int)localB[i*gM+j] != expected) {
+ if (errs < MAX_ERRORS)
+ printf( "Found %d but expected %d\n",
+ (int)localB[i*gM+j], expected );
+ errs++;
+ }
+ }
+ }
+
+ }
+ else {
+ for (i=0; i<ln; i++) {
+ for (j=0; j<gM; j++) {
+ int expected = i+gN*j + rank * ln;
+ if ((int)localB[i*gM+j] != expected) {
+ if (errs < MAX_ERRORS)
+ printf( "Found %d but expected %d\n",
+ (int)localB[i*gM+j], expected );
+ errs++;
+ }
+ }
+ }
+ }
+
+ /* Free storage */
+ free( localA );
+ free( localB );
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/*
+ This program tests MPI_Alltoallw by having processor i send different
+ amounts of data to each processor. This is just the MPI_Alltoallv test,
+ but with displacements in bytes rather than units of the datatype.
+
+ Because there are separate send and receive types to alltoallw,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ The first test sends i items to processor i from all processors.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+ MPI_Datatype *sendtypes, *recvtypes;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Create the buffer */
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ sbuf = (int *)malloc( size * size * sizeof(int) );
+ rbuf = (int *)malloc( size * size * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocated buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<size*size; i++) {
+ sbuf[i] = i + 100*rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ sendtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
+ recvtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+ /* Note that process 0 sends no data (sendcounts[0] = 0) */
+ for (i=0; i<size; i++) {
+ sendcounts[i] = i;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank * sizeof(int);
+ sdispls[i] = (((i+1) * (i))/2) * sizeof(int);
+ sendtypes[i] = recvtypes[i] = MPI_INT;
+ }
+ MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
+ rbuf, recvcounts, rdispls, recvtypes, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i]/sizeof(int);
+ for (j=0; j<rank; j++) {
+ if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+ fprintf( stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j],(i*(i+1))/2 + j, j );
+ err++;
+ }
+ }
+ }
+
+ free(sendtypes);
+ free(sdispls);
+ free(sendcounts);
+ free(sbuf);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* check MPI_IN_PLACE, added in MPI-2.2 */
+ free( rbuf );
+ rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
+ if (!rbuf) {
+ fprintf( stderr, "Could not reallocate rbuf!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < size; i++) {
+ /* alltoallw displs are in bytes, not in type extents */
+ rdispls[i] = i * (2 * size) * sizeof(int);
+ recvtypes[i] = MPI_INT;
+ recvcounts[i] = i + rank;
+ }
+ memset(rbuf, -1, size * (2 * size) * sizeof(int));
+ for (i=0; i < size; i++) {
+ p = rbuf + (rdispls[i] / sizeof(int));
+ for (j = 0; j < recvcounts[i]; ++j) {
+ p[j] = 100 * rank + 10 * i + j;
+ }
+ }
+
+ MPI_Alltoallw( MPI_IN_PLACE, NULL, NULL, NULL,
+ rbuf, recvcounts, rdispls, recvtypes, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + (rdispls[i] / sizeof(int));
+ for (j=0; j<recvcounts[i]; j++) {
+ int expected = 100 * i + 10 * rank + j;
+ if (p[j] != expected) {
+ fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
+ rank, p[j], expected, i, j);
+ ++err;
+ }
+ }
+ }
+#endif
+
+ free(recvtypes);
+ free(rdispls);
+ free(recvcounts);
+ free(rbuf);
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Based on a test case contributed by Michael Hofmann.
+ *
+ * This test makes sure that zero counts with non-zero-sized types on the
+ * send (recv) side match and don't cause a problem with non-zero counts and
+ * zero-sized types on the recv (send) side when using MPI_Alltoallw and
+ * MPI_Alltoallv. */
+
+/* TODO test intercommunicators as well */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <mpi.h>
+
+#include "mpitest.h"
+
+int main(int argc, char *argv[])
+{
+ int sendbuf, recvbuf;
+ int *sendcounts;
+ int *recvcounts;
+ int *sdispls;
+ int *rdispls;
+ MPI_Datatype sendtype;
+ MPI_Datatype *sendtypes;
+ MPI_Datatype *recvtypes;
+ int rank = -1;
+ int size = -1;
+ int i;
+
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ sendtypes = malloc(size * sizeof(MPI_Datatype));
+ recvtypes = malloc(size * sizeof(MPI_Datatype));
+ sendcounts = malloc(size * sizeof(int));
+ recvcounts = malloc(size * sizeof(int));
+ sdispls = malloc(size * sizeof(int));
+ rdispls = malloc(size * sizeof(int));
+ if (!sendtypes || !recvtypes ||
+ !sendcounts || !recvcounts ||
+ !sdispls || !rdispls)
+ {
+ printf("error, unable to allocate memory\n");
+ goto fn_exit;
+ }
+
+ MPI_Type_contiguous(0, MPI_INT, &sendtype);
+ MPI_Type_commit(&sendtype);
+
+ for (i = 0; i < size; ++i) {
+ sendtypes[i] = sendtype;
+ sendcounts[i] = 1;
+ sdispls[i] = 0;
+
+ recvtypes[i] = MPI_INT;
+ recvcounts[i] = 0;
+ rdispls[i] = 0;
+ }
+
+
+ /* try zero-counts on both the send and recv side in case only one direction is broken for some reason */
+ MPI_Alltoallw(&sendbuf, sendcounts, sdispls, sendtypes, &recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD);
+ MPI_Alltoallw(&sendbuf, recvcounts, rdispls, recvtypes, &recvbuf, sendcounts, sdispls, sendtypes, MPI_COMM_WORLD);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* pass MPI_IN_PLACE and different but compatible types rank is even/odd */
+ if (rank % 2)
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD);
+ else
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, sendcounts, sdispls, sendtypes, MPI_COMM_WORLD);
+#endif
+
+ /* now the same for Alltoallv instead of Alltoallw */
+ MPI_Alltoallv(&sendbuf, sendcounts, sdispls, sendtypes[0], &recvbuf, recvcounts, rdispls, recvtypes[0], MPI_COMM_WORLD);
+ MPI_Alltoallv(&sendbuf, recvcounts, rdispls, recvtypes[0], &recvbuf, sendcounts, sdispls, sendtypes[0], MPI_COMM_WORLD);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ if (rank % 2)
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, recvcounts, rdispls, recvtypes[0], MPI_COMM_WORLD);
+ else
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, sendcounts, sdispls, sendtypes[0], MPI_COMM_WORLD);
+#endif
+
+ MPI_Type_free(&sendtype);
+
+ if (rank == 0)
+ printf(" No Errors\n");
+
+fn_exit:
+ if (rdispls) free(rdispls);
+ if (sdispls) free(sdispls);
+ if (recvcounts) free(recvcounts);
+ if (sendcounts) free(sendcounts);
+ if (recvtypes) free(recvtypes);
+ if (sendtypes) free(sendtypes);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ /* The max value of count must be very large to ensure that we
+ reach the long message algorithms */
+ for (count = 1; count < 280000; count = count * 4) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
+ for (root=0; root<size; root++) {
+ if (rank == root) {
+ sendtype.InitBuf( &sendtype );
+ err = MPI_Bcast( sendtype.buf, sendtype.count,
+ sendtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ recvtype.InitBuf( &recvtype );
+ err = MPI_Bcast( recvtype.buf, recvtype.count,
+ recvtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ fprintf( stderr, "Error with communicator %s and datatype %s\n",
+ MTestGetIntracommName(),
+ MTestGetDatatypeName( &recvtype ) );
+ MTestPrintError( err );
+ }
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ errs += errs;
+ }
+ }
+ }
+ MTestFreeDatatype( &recvtype );
+ MTestFreeDatatype( &sendtype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes and sizes that are not powers of two";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ count = 1;
+ /* This must be very large to ensure that we reach the long message
+ algorithms */
+ for (count = 4; count < 66000; count = count * 4) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count-1 )) {
+ for (root=0; root<size; root++) {
+ if (rank == root) {
+ sendtype.InitBuf( &sendtype );
+ err = MPI_Bcast( sendtype.buf, sendtype.count,
+ sendtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ recvtype.InitBuf( &recvtype );
+ err = MPI_Bcast( recvtype.buf, recvtype.count,
+ recvtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ fprintf( stderr, "Error with communicator %s and datatype %s\n",
+ MTestGetIntracommName(),
+ MTestGetDatatypeName( &recvtype ) );
+ MTestPrintError( err );
+ }
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ errs += errs;
+ }
+ }
+ }
+ MTestFreeDatatype( &recvtype );
+ MTestFreeDatatype( &sendtype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "mpitest.h"
+
+#define ROOT 0
+#define NUM_REPS 5
+#define NUM_SIZES 4
+
+int main( int argc, char **argv)
+{
+ int *buf;
+ int i, rank, reps, n;
+ int bVerify = 1;
+ int sizes[NUM_SIZES] = { 100, 64*1024, 128*1024, 1024*1024 };
+ int num_errors=0;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if (argc > 1)
+ {
+ if (strcmp(argv[1], "-novalidate") == 0 || strcmp(argv[1], "-noverify") == 0)
+ bVerify = 0;
+ }
+
+ buf = (int *) malloc(sizes[NUM_SIZES-1]*sizeof(int));
+ memset(buf, 0, sizes[NUM_SIZES-1]*sizeof(int));
+
+ for (n=0; n<NUM_SIZES; n++)
+ {
+#ifdef DEBUG
+ if (rank == ROOT)
+ {
+ printf("bcasting %d MPI_INTs %d times\n", sizes[n], NUM_REPS);
+ fflush(stdout);
+ }
+#endif
+ for (reps=0; reps < NUM_REPS; reps++)
+ {
+ if (bVerify)
+ {
+ if (rank == ROOT)
+ {
+ for (i=0; i<sizes[n]; i++)
+ {
+ buf[i] = 1000000 * (n * NUM_REPS + reps) + i;
+ }
+ }
+ else
+ {
+ for (i=0; i<sizes[n]; i++)
+ {
+ buf[i] = -1 - (n * NUM_REPS + reps);
+ }
+ }
+ }
+
+# ifdef DEBUG
+ {
+ printf("rank=%d, n=%d, reps=%d\n", rank, n, reps);
+ }
+# endif
+
+ MPI_Bcast(buf, sizes[n], MPI_INT, ROOT, MPI_COMM_WORLD);
+
+ if (bVerify)
+ {
+ num_errors = 0;
+ for (i=0; i<sizes[n]; i++)
+ {
+ if (buf[i] != 1000000 * (n * NUM_REPS + reps) + i)
+ {
+ num_errors++;
+ if (num_errors < 10)
+ {
+ printf("Error: Rank=%d, n=%d, reps=%d, i=%d, buf[i]=%d expected=%d\n", rank, n, reps, i, buf[i],
+ 1000000 * (n * NUM_REPS + reps) +i);
+ fflush(stdout);
+ }
+ }
+ }
+ if (num_errors >= 10)
+ {
+ printf("Error: Rank=%d, num_errors = %d\n", rank, num_errors);
+ fflush(stdout);
+ }
+ }
+ }
+ }
+
+ free(buf);
+
+ MTest_Finalize( num_errors );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include <mpi.h>
+
+/* test broadcast behavior with non-zero counts but zero-sized types */
+
+int main(int argc, char *argv[])
+{
+ int i, type_size;
+ MPI_Datatype type = MPI_DATATYPE_NULL;
+ char *buf = NULL;
+ int wrank, wsize;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+
+ /* a random non-zero sized buffer */
+#define NELEM (10)
+ buf = malloc(NELEM*sizeof(int));
+ assert(buf);
+
+ for (i = 0; i < NELEM; i++) {
+ buf[i] = wrank * NELEM + i;
+ }
+
+ /* create a zero-size type */
+ MPI_Type_contiguous(0, MPI_INT, &type);
+ MPI_Type_commit(&type);
+ MPI_Type_size(type, &type_size);
+ assert(type_size == 0);
+
+ /* do the broadcast, which will break on some MPI implementations */
+ MPI_Bcast(buf, NELEM, type, 0, MPI_COMM_WORLD);
+
+ /* check that the buffer remains unmolested */
+ for (i = 0; i < NELEM; i++) {
+ assert(buf[i] == wrank * NELEM + i);
+ }
+
+ MPI_Type_free(&type);
+ MPI_Finalize();
+
+ if (wrank == 0) {
+ printf(" No errors\n");
+ }
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define BAD_ANSWER 100000
+
+int assoc ( int *, int *, int *, MPI_Datatype * );
+
+/*
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ (see 4.9.4). The order is important.
+
+ Note that the computation is in process rank (in the communicator)
+ order, independant of the root.
+ */
+int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ ) {
+ if (inoutvec[i] <= invec[i] ) {
+ int rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
+ rank, inoutvec[0], invec[0] );
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
+ return (1);
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size;
+ int data;
+ int errors=0;
+ int result = -100;
+ MPI_Op op;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+
+ MPI_Op_create( (MPI_User_function*)assoc, 0, &op );
+ MPI_Reduce ( &data, &result, 1, MPI_INT, op, size-1, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, size-1, MPI_COMM_WORLD );
+ MPI_Op_free( &op );
+ if (result == BAD_ANSWER) errors++;
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+void addem ( int *, int *, int *, MPI_Datatype * );
+void assoc ( int *, int *, int *, MPI_Datatype * );
+
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ )
+ inoutvec[i] += invec[i];
+}
+
+#define BAD_ANSWER 100000
+
+/*
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ (see 4.9.4). The order is important.
+
+ Note that the computation is in process rank (in the communicator)
+ order, independant of the root.
+ */
+void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ ) {
+ if (inoutvec[i] <= invec[i] ) {
+ int rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
+ rank, inoutvec[0], invec[0] );
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op_assoc, op_addem;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+
+ correct_result = 0;
+ for (i=0;i<=rank;i++)
+ correct_result += i;
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error suming ints with scan\n", rank );
+ errors++;
+ }
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank );
+ errors++;
+ }
+
+ data = rank;
+ result = -100;
+ MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc );
+ MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem );
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (userop)\n",
+ rank );
+ errors++;
+ }
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n",
+ rank );
+ errors++;
+ }
+ result = -100;
+ data = rank;
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, MPI_COMM_WORLD );
+ if (result == BAD_ANSWER) {
+ fprintf( stderr, "[%d] Error scanning with non-commutative op\n",
+ rank );
+ errors++;
+ }
+
+ MPI_Op_free( &op_assoc );
+ MPI_Op_free( &op_addem );
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define TABLE_SIZE 2
+
+int main( int argc, char **argv )
+{
+ int rank, size;
+ double a[TABLE_SIZE];
+ struct { double a; int b; } in[TABLE_SIZE], out[TABLE_SIZE];
+ int i;
+ int errors = 0;
+
+ /* Initialize the environment and some variables */
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* Initialize the maxloc data */
+ for ( i=0; i<TABLE_SIZE; i++ ) a[i] = 0;
+ for ( i=rank; i<TABLE_SIZE; i++ ) a[i] = (double)rank + 1.0;
+
+ /* Copy data to the "in" buffer */
+ for (i=0; i<TABLE_SIZE; i++) {
+ in[i].a = a[i];
+ in[i].b = rank;
+ }
+
+ /* Reduce it! */
+ MPI_Reduce( in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MAXLOC, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( out, TABLE_SIZE, MPI_DOUBLE_INT, 0, MPI_COMM_WORLD );
+
+ /* Check to see that we got the right answers */
+ for (i=0; i<TABLE_SIZE; i++)
+ if (i % size == rank)
+ if (out[i].b != rank) {
+ printf("MAX (ranks[%d] = %d != %d\n", i, out[i].b, rank );
+ errors++;
+ }
+
+ /* Initialize the minloc data */
+ for ( i=0; i<TABLE_SIZE; i++ ) a[i] = 0;
+ for ( i=rank; i<TABLE_SIZE; i++ ) a[i] = -(double)rank - 1.0;
+
+ /* Copy data to the "in" buffer */
+ for (i=0; i<TABLE_SIZE; i++) {
+ in[i].a = a[i];
+ in[i].b = rank;
+ }
+
+ /* Reduce it! */
+ MPI_Allreduce( in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD );
+
+ /* Check to see that we got the right answers */
+ for (i=0; i<TABLE_SIZE; i++)
+ if (i % size == rank)
+ if (out[i].b != rank) {
+ printf("MIN (ranks[%d] = %d != %d\n", i, out[i].b, rank );
+ errors++;
+ }
+
+ /* Finish up! */
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * Changes to the original code
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+
+/*
+From: hook@nas.nasa.gov (Edward C. Hook)
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "mpitest.h"
+
+#include <string.h>
+#include <errno.h>
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+#endif
+
+int main( int argc, char *argv[] )
+{
+ int rank, size;
+ int chunk = 128;
+ int i;
+ int *sb;
+ int *rb;
+ int status;
+
+ MTest_Init(&argc,&argv);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+ for ( i=1 ; i < argc ; ++i ) {
+ if ( argv[i][0] != '-' )
+ continue;
+ switch(argv[i][1]) {
+ case 'm':
+ chunk = atoi(argv[++i]);
+ break;
+ default:
+ fprintf(stderr,"Unrecognized argument %s\n",
+ argv[i]);
+ MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
+ }
+ }
+
+ sb = (int *)malloc(size*chunk*sizeof(int));
+ if ( !sb ) {
+ perror( "can't allocate send buffer" );
+ MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
+ }
+ rb = (int *)malloc(size*chunk*sizeof(int));
+ if ( !rb ) {
+ perror( "can't allocate recv buffer");
+ free(sb);
+ MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
+ }
+ for ( i=0 ; i < size*chunk ; ++i ) {
+ sb[i] = rank + 1;
+ rb[i] = 0;
+ }
+
+ /* fputs("Before MPI_Alltoall\n",stdout); */
+
+ /* This should really send MPI_CHAR, but since sb and rb were allocated
+ as chunk*size*sizeof(int), the buffers are large enough */
+ status = MPI_Alltoall(sb,chunk,MPI_INT,rb,chunk,MPI_INT,
+ MPI_COMM_WORLD);
+
+ /* fputs("Before MPI_Allreduce\n",stdout); */
+
+ MTest_Finalize( status );
+
+ free(sb);
+ free(rb);
+
+ MPI_Finalize();
+
+ return MTestReturnValue( status );
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ if ( (rank < participants) ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+ int recv_count = send_count;
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Gather everybody's result together - sort of like an */
+ /* inefficient allgather */
+ for (i=0; i<participants; i++) {
+ void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
+ MPI_Gather(sendbuf, send_count, MPI_INT,
+ &table[0][0], recv_count, MPI_INT, i,
+ MPI_COMM_WORLD );
+ }
+
+ /* Everybody should have the same table now, */
+ /* This test does not in any way guarantee there are no errors */
+ /* Print out a table or devise a smart test to make sure it's correct */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ }
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int recv_counts[MAX_PROCESSES];
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+ /* while (MAX_PROCESSES % participants) participants--; */
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ if ( (rank < participants) ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+
+ /* Fill in the displacements and recv_counts */
+ for (i=0; i<participants; i++) {
+ displs[i] = i * block_size * MAX_PROCESSES;
+ recv_counts[i] = send_count;
+ }
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Gather everybody's result together - sort of like an */
+ /* inefficient allgather */
+ for (i=0; i<participants; i++) {
+ void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
+ MPI_Gatherv(sendbuf, send_count, MPI_INT,
+ &table[0][0], recv_counts, displs, MPI_INT,
+ i, MPI_COMM_WORLD);
+ }
+
+
+ /* Everybody should have the same table now.
+
+ The entries are:
+ Table[i][j] = (i/block_size) + 10;
+ */
+ for (i=0; i<MAX_PROCESSES;i++)
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ for (i=0; i<MAX_PROCESSES;i++) {
+ for (j=0; j<MAX_PROCESSES;j++) {
+ if (table[i][j] != (i/block_size) + 10) errors++;
+ }
+ }
+ if (errors) {
+ /* Print out table if there are any errors */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ printf("\n");
+ for (j=0; j<MAX_PROCESSES; j++)
+ printf(" %d",table[i][j]);
+ }
+ printf("\n");
+ }
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int row[MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ comm = MPI_COMM_WORLD;
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) {
+ participants = MAX_PROCESSES;
+ MPI_Comm_split( MPI_COMM_WORLD, rank < MAX_PROCESSES, rank, &comm );
+ }
+ else {
+ participants = size;
+ MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+ }
+ if ( (rank < participants) ) {
+ int send_count = MAX_PROCESSES;
+ int recv_count = MAX_PROCESSES;
+
+ /* If I'm the root (process 0), then fill out the big table */
+ if (rank == 0)
+ for ( i=0; i<participants; i++)
+ for ( j=0; j<MAX_PROCESSES; j++ )
+ table[i][j] = i+j;
+
+ /* Scatter the big table to everybody's little table */
+ MPI_Scatter(&table[0][0], send_count, MPI_INT,
+ &row[0] , recv_count, MPI_INT, 0, comm );
+
+ /* Now see if our row looks right */
+ for (i=0; i<MAX_PROCESSES; i++)
+ if ( row[i] != i+rank ) errors++;
+ }
+
+ MPI_Comm_free( &comm );
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int row[MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int send_counts[MAX_PROCESSES];
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+ if ( (rank < participants) ) {
+ int recv_count = MAX_PROCESSES;
+
+ /* If I'm the root (process 0), then fill out the big table */
+ /* and setup send_counts and displs arrays */
+ if (rank == 0)
+ for ( i=0; i<participants; i++) {
+ send_counts[i] = recv_count;
+ displs[i] = i * MAX_PROCESSES;
+ for ( j=0; j<MAX_PROCESSES; j++ )
+ table[i][j] = i+j;
+ }
+
+ /* Scatter the big table to everybody's little table */
+ MPI_Scatterv(&table[0][0], send_counts, displs, MPI_INT,
+ &row[0] , recv_count, MPI_INT, 0, MPI_COMM_WORLD);
+
+ /* Now see if our row looks right */
+ for (i=0; i<MAX_PROCESSES; i++)
+ if ( row[i] != i+rank ) errors++;
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int recv_counts[MAX_PROCESSES];
+ MPI_Comm test_comm;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ participants = ( size > MAX_PROCESSES ) ? MAX_PROCESSES : size;
+
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Comm_split(MPI_COMM_WORLD, rank<participants, rank, &test_comm);
+
+ if ( rank < participants ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+
+ /* Fill in the displacements and recv_counts */
+ for (i=0; i<participants; i++) {
+ displs[i] = i * block_size * MAX_PROCESSES;
+ recv_counts[i] = send_count;
+ }
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Everybody gets the gathered data */
+ MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
+ &table[0][0], recv_counts, displs,
+ MPI_INT, test_comm);
+
+ /* Everybody should have the same table now.
+
+ The entries are:
+ Table[i][j] = (i/block_size) + 10;
+ */
+ for (i=0; i<MAX_PROCESSES;i++)
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ for (i=0; i<MAX_PROCESSES;i++) {
+ for (j=0; j<MAX_PROCESSES;j++) {
+ if (table[i][j] != (i/block_size) + 10) errors++;
+ }
+ }
+ if (errors) {
+ /* Print out table if there are any errors */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ printf("\n");
+ for (j=0; j<MAX_PROCESSES; j++)
+ printf(" %d",table[i][j]);
+ }
+ printf("\n");
+ }
+ }
+
+ MTest_Finalize( errors );
+
+ MPI_Comm_free(&test_comm);
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ /* while (MAX_PROCESSES % participants) participants--; */
+ if ( (rank < participants) ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+ int recv_count = send_count;
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Everybody gets the gathered table */
+ MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
+ &table[0][0], recv_count, MPI_INT, MPI_COMM_WORLD);
+
+ /* Everybody should have the same table now, */
+ /* This test does not in any way guarantee there are no errors */
+ /* Print out a table or devise a smart test to make sure it's correct */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ }
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+
+ MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ correct_result = 0;
+ for(i=0;i<size;i++)
+ correct_result += i;
+ if (result != correct_result) errors++;
+
+ MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ if (result != 0) errors++;
+
+ MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ if (result != (size-1)) errors++;
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+void addem ( int *, int *, int *, MPI_Datatype * );
+
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ )
+ inoutvec[i] += invec[i];
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+ MPI_Op_create( (MPI_User_function *)addem, 1, &op );
+ MPI_Reduce ( &data, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ MPI_Op_free( &op );
+ correct_result = 0;
+ for(i=0;i<size;i++)
+ correct_result += i;
+ if (result != correct_result) errors++;
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Exscan";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
+ int *sendbuf, *recvbuf, i;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (count = 1; count < 65000; count = count * 2) {
+
+ sendbuf = (int *)malloc( count * sizeof(int) );
+ recvbuf = (int *)malloc( count * sizeof(int) );
+
+ for (i=0; i<count; i++) {
+ sendbuf[i] = rank + i * size;
+ recvbuf[i] = -1;
+ }
+
+ MPI_Exscan( sendbuf, recvbuf, count, MPI_INT, MPI_SUM, comm );
+
+ /* Check the results. rank 0 has no data */
+ if (rank > 0) {
+ int result;
+ for (i=0; i<count; i++) {
+ result = rank * i * size + ((rank) * (rank-1))/2;
+ if (recvbuf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
+ i, recvbuf[i], rank, result );
+ }
+ }
+ }
+ }
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* now try the MPI_IN_PLACE flavor */
+ for (i=0; i<count; i++) {
+ sendbuf[i] = -1; /* unused */
+ recvbuf[i] = rank + i * size;
+ }
+
+ MPI_Exscan( MPI_IN_PLACE, recvbuf, count, MPI_INT, MPI_SUM, comm );
+
+ /* Check the results. rank 0's data must remain unchanged */
+ for (i=0; i<count; i++) {
+ int result;
+ if (rank == 0)
+ result = rank + i * size;
+ else
+ result = rank * i * size + ((rank) * (rank-1))/2;
+ if (recvbuf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
+ i, recvbuf[i], rank, result );
+ }
+ }
+ }
+#endif
+
+ free( sendbuf );
+ free( recvbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Exscan (simple test)";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int sendbuf[1], recvbuf[1];
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ sendbuf[0] = rank;
+ recvbuf[0] = -2;
+
+ MPI_Exscan( sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm );
+
+ /* Check the results. rank 0 has no data. Input is
+ 0 1 2 3 4 5 6 7 8 ...
+ Output is
+ - 0 1 3 6 10 15 21 28 36
+ (scan, not counting the contribution from the calling process)
+ */
+ if (rank > 0) {
+ int result = (((rank) * (rank-1))/2);
+ /* printf( "%d: %d\n", rank, result ); */
+ if (recvbuf[0] != result) {
+ errs++;
+ fprintf( stderr, "Error in recvbuf = %d on %d, expected %d\n",
+ recvbuf[0], rank, result );
+ }
+ }
+ else if (recvbuf[0] != -2) {
+ errs++;
+ fprintf( stderr, "Error in recvbuf on zero, is %d\n", recvbuf[0] );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous */
+
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ MPI_Comm comm;
+ double *vecin, *vecout;
+ int minsize = 2, count;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (root=0; root<size; root++) {
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( size * n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ for (i=0; i<n*stride; i++) vecin[i] =-2;
+ for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
+
+ MPI_Gather( vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm );
+
+ if (rank == root) {
+ for (i=0; i<n*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ }
+ MPI_Type_free( &vec );
+ free( vecin );
+ free( vecout );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ /* do a zero length gather */
+ MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE */
+
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (root=0; root<size; root++) {
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( size * n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ for (i=0; i<n*stride; i++) vecin[i] =-2;
+ for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
+ int errorcode = MPI_SUCCESS;
+ if (rank == root) {
+ for (i=0; i<n; i++) {
+ vecout[rank*n+i] = rank*n+i;
+ }
+ errorcode = MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, n, MPI_DOUBLE, root, comm );
+ }
+ else {
+ errorcode = MPI_Gather( vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL,
+ root, comm );
+ }
+
+ if (rank == root) {
+ for (i=0; i<n*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d, err=%d\n",
+ i, (int)vecout[i], errorcode );
+ }
+ }
+ }
+ }
+ MPI_Type_free( &vec );
+ free( vecin );
+ free( vecout );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ /* do a zero length gather */
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if ( rank == 0 ) {
+ MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0,
+ MPI_COMM_WORLD );
+ } else {
+ MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE */
+
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (root=0; root<size; root++) {
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( size * n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ for (i=0; i<n*stride; i++) vecin[i] =-2;
+ for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
+ int errorcode = MPI_SUCCESS;
+ if (rank == root) {
+ for (i=0; i<n; i++) {
+ vecout[rank*n+i] = rank*n+i;
+ }
+ errorcode = MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, n, MPI_DOUBLE, root, comm );
+ }
+ else {
+ errorcode = MPI_Gather( vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL,
+ root, comm );
+ }
+
+ if (rank == root) {
+ for (i=0; i<n*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d, err=%d\n",
+ i, (int)vecout[i], errorcode );
+ }
+ }
+ }
+ }
+ MPI_Type_free( &vec );
+ free( vecin );
+ free( vecout );
+ }
+ }
+ printf("end with comm size : %d\n", size);
+ MTestFreeComm( &comm );
+ }
+
+ /* do a zero length gather */
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if ( rank == 0 ) {
+ MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0,
+ MPI_COMM_WORLD );
+ } else {
+ MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+#include <stdio.h>
+#include <assert.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* Since MPICH is currently the only NBC implementation in existence, just use
+ * this quick-and-dirty #ifdef to decide whether to test the nonblocking
+ * collectives. Eventually we can add a configure option or configure test, or
+ * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
+ * check */
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NBC_ROUTINES 1
+#endif
+
+int main(int argc, char *argv[])
+{
+ MPI_Request request;
+ int size, rank;
+ int one = 1, two = 2, isum, sum;
+ MPI_Init(&argc,&argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ assert(size == 2);
+#if defined(TEST_NBC_ROUTINES)
+ MPI_Iallreduce(&one,&isum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD,&request);
+ MPI_Allreduce(&two,&sum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
+ MPI_Wait(&request,MPI_STATUS_IGNORE);
+
+ assert(isum == 2);
+ assert(sum == 4);
+ if (rank == 0)
+ printf(" No errors\n");
+#endif
+
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2013 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* Regression test for ticket #1785, contributed by Jed Brown. The test was
+ * hanging indefinitely under a buggy version of ch3:sock. */
+
+#include <mpi.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NBC_ROUTINES 1
+#endif
+
+int main(int argc, char *argv[])
+{
+ MPI_Request barrier;
+ int rank,i,done;
+
+ MPI_Init(&argc,&argv);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ #if defined(TEST_NBC_ROUTINES)
+ MPI_Ibarrier(MPI_COMM_WORLD,&barrier);
+ for (i=0,done=0; !done; i++) {
+ usleep(1000);
+ /*printf("[%d] MPI_Test: %d\n",rank,i);*/
+ MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);
+ }
+ #endif
+ if (rank == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm allgather test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *rbuf = 0, *sbuf = 0;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* The left group will send rank to the right group;
+ The right group will send -rank to the left group */
+ rbuf = (int *)malloc( count * rsize * sizeof(int) );
+ sbuf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ if (leftGroup) {
+ for (i=0; i<count; i++) sbuf[i] = i + rank*count;
+ }
+ else {
+ for (i=0; i<count; i++) sbuf[i] = -(i + rank*count);
+ }
+ err = MPI_Allgather( sbuf, count, datatype,
+ rbuf, count, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ if (leftGroup) {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != i) {
+ errs++;
+ }
+ }
+ }
+
+ /* Use Allgather in a unidirectional way */
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ if (leftGroup) {
+ err = MPI_Allgather( sbuf, 0, datatype,
+ rbuf, count, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ err = MPI_Allgather( sbuf, count, datatype,
+ rbuf, 0, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free( rbuf );
+ free( sbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm allgatherv test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *rbuf = 0, *sbuf = 0;
+ int *recvcounts, *recvdispls;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* The left group will send rank to the right group;
+ The right group will send -rank to the left group */
+ rbuf = (int *)malloc( count * rsize * sizeof(int) );
+ sbuf = (int *)malloc( count * sizeof(int) );
+ recvcounts = (int *) malloc( rsize * sizeof(int) );
+ recvdispls = (int *) malloc( rsize * sizeof(int) );
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ for (i=0; i<rsize; i++) {
+ recvcounts[i] = count;
+ recvdispls[i] = i * count;
+ }
+ if (leftGroup) {
+ for (i=0; i<count; i++) sbuf[i] = i + rank*count;
+ }
+ else {
+ for (i=0; i<count; i++) sbuf[i] = -(i + rank*count);
+ }
+ err = MPI_Allgatherv( sbuf, count, datatype,
+ rbuf, recvcounts, recvdispls, datatype,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ if (leftGroup) {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != i) {
+ errs++;
+ }
+ }
+ }
+
+ /* Use Allgather in a unidirectional way */
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ if (leftGroup) {
+ err = MPI_Allgatherv( sbuf, 0, datatype,
+ rbuf, recvcounts, recvdispls, datatype,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i=0; i<rsize; i++) {
+ recvcounts[i] = 0;
+ recvdispls[i] = 0;
+ }
+ err = MPI_Allgatherv( sbuf, count, datatype,
+ rbuf, recvcounts, recvdispls, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free( rbuf );
+ free( sbuf );
+ free( recvcounts );
+ free( recvdispls );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm allreduce test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *sendbuf = 0, *recvbuf = 0;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* printf( "rank = %d(%d)\n", rank, leftGroup ); fflush(stdout); */
+ sendbuf = (int *)malloc( count * sizeof(int) );
+ recvbuf = (int *)malloc( count * sizeof(int) );
+ if (leftGroup) {
+ for (i=0; i<count; i++) sendbuf[i] = i;
+ }
+ else {
+ for (i=0; i<count; i++) sendbuf[i] = -i;
+ }
+ for (i=0; i<count; i++) recvbuf[i] = 0;
+ err = MPI_Allreduce( sendbuf, recvbuf, count, datatype,
+ MPI_SUM, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* In each process should be the sum of the values from the
+ other process */
+ if (leftGroup) {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != -i * rsize) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "recvbuf[%d] = %d\n", i, recvbuf[i] );
+ }
+ }
+ }
+ }
+ else {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != i * rsize) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "recvbuf[%d] = %d\n", i, recvbuf[i] );
+ }
+ }
+ }
+ }
+ free( sendbuf );
+ free( recvbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm alltoall test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *sendbuf = 0, *recvbuf = 0;
+ int leftGroup, i, j, idx, count, rrank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ for (count = 1; count < 66000; count = 2 * count) {
+ /* Get an intercommunicator */
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_rank( comm, &rrank );
+ sendbuf = (int *)malloc( rsize * count * sizeof(int) );
+ recvbuf = (int *)malloc( rsize * count * sizeof(int) );
+ for (i=0; i<rsize*count; i++) recvbuf[i] = -1;
+ if (leftGroup) {
+ idx = 0;
+ for (j=0; j<rsize; j++) {
+ for (i=0; i<count; i++) {
+ sendbuf[idx++] = i + rrank;
+ }
+ }
+ err = MPI_Alltoall( sendbuf, count, datatype,
+ NULL, 0, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ int rank, size;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* In the right group */
+ err = MPI_Alltoall( NULL, 0, datatype,
+ recvbuf, count, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Check that we have received the correct data */
+ idx = 0;
+ for (j=0; j<rsize; j++) {
+ for (i=0; i<count; i++) {
+ if (recvbuf[idx++] != i + j) {
+ errs++;
+ if (errs < 10)
+ fprintf( stderr, "buf[%d] = %d on %d\n",
+ i, recvbuf[i], rank );
+ }
+ }
+ }
+ }
+ free( recvbuf );
+ free( sendbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/*
+ This program tests MPI_Alltoallv by having processor i send different
+ amounts of data to each processor.
+
+ Because there are separate send and receive types to alltoallv,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ The first test sends i items to processor i from all processors.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size, lsize, asize;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+ int leftGroup;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Create the buffer */
+ MPI_Comm_size( comm, &lsize );
+ MPI_Comm_remote_size( comm, &size );
+ asize = (lsize > size) ? lsize : size;
+ MPI_Comm_rank( comm, &rank );
+ sbuf = (int *)malloc( size * size * sizeof(int) );
+ rbuf = (int *)malloc( asize * asize * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocated buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<size*size; i++) {
+ sbuf[i] = i + 100*rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+ for (i=0; i<size; i++) {
+ sendcounts[i] = i;
+ sdispls[i] = (i * (i+1))/2;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank;
+ }
+ MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i];
+ for (j=0; j<rank; j++) {
+ if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+ fprintf( stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j],(i*(i+1))/2 + j, j );
+ err++;
+ }
+ }
+ }
+
+ free( sdispls );
+ free( rdispls );
+ free( recvcounts );
+ free( sendcounts );
+ free( rbuf );
+ free( sbuf );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/*
+ This program tests MPI_Alltoallw by having processor i send different
+ amounts of data to each processor. This is just the MPI_Alltoallv test,
+ but with displacements in bytes rather than units of the datatype.
+
+ Because there are separate send and receive types to alltoallw,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ The first test sends i items to processor i from all processors.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size, lsize, asize;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+ MPI_Datatype *sendtypes, *recvtypes;
+ int leftGroup;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Create the buffer */
+ MPI_Comm_size( comm, &lsize );
+ MPI_Comm_remote_size( comm, &size );
+ asize = (lsize > size) ? lsize : size;
+ MPI_Comm_rank( comm, &rank );
+ sbuf = (int *)malloc( size * size * sizeof(int) );
+ rbuf = (int *)malloc( asize * asize * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocated buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<size*size; i++) {
+ sbuf[i] = i + 100*rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ sendtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
+ recvtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+ /* Note that process 0 sends no data (sendcounts[0] = 0) */
+ for (i=0; i<size; i++) {
+ sendcounts[i] = i;
+ sdispls[i] = (((i+1) * (i))/2) * sizeof(int);
+ sendtypes[i] = MPI_INT;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank * sizeof(int);
+ recvtypes[i] = MPI_INT;
+ }
+ MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
+ rbuf, recvcounts, rdispls, recvtypes, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i]/sizeof(int);
+ for (j=0; j<rank; j++) {
+ if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+ fprintf( stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j],(i*(i+1))/2 + j, j );
+ err++;
+ }
+ }
+ }
+
+ free(sendtypes);
+ free(recvtypes);
+ free( sdispls );
+ free( rdispls );
+ free( recvcounts );
+ free( sendcounts );
+ free( rbuf );
+ free( sbuf );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm barrier test";
+*/
+
+/* This only checks that the Barrier operation accepts intercommunicators.
+ It does not check for the semantics of a intercomm barrier (all processes
+ in the local group can exit when (but not before) all processes in the
+ remote group enter the barrier */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int leftGroup;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ if (leftGroup) {
+ err = MPI_Barrier( comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ /* In the right group */
+ err = MPI_Barrier( comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm broadcast test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *buf = 0;
+ int leftGroup, i, count, rank;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_rank( comm, &rank );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ buf = (int *)malloc( count * sizeof(int) );
+ if (leftGroup) {
+ if (rank == 0) {
+ for (i=0; i<count; i++) buf[i] = i;
+ }
+ else {
+ for (i=0; i<count; i++) buf[i] = -1;
+ }
+ err = MPI_Bcast( buf, count, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Test that no other process in this group received the
+ broadcast */
+ if (rank != 0) {
+ for (i=0; i<count; i++) {
+ if (buf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ for (i=0; i<count; i++) buf[i] = -1;
+ err = MPI_Bcast( buf, count, datatype, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Check that we have received the correct data */
+ for (i=0; i<count; i++) {
+ if (buf[i] != i) {
+ errs++;
+ }
+ }
+ }
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm gather test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *buf = 0;
+ int leftGroup, i, count, rank, rsize, size;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_size( comm, &size );
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ for (count = 1; count < 65000; count = 2 * count) {
+ if (leftGroup) {
+ buf = (int *)malloc( count * rsize * sizeof(int) );
+ for (i=0; i<count*rsize; i++) buf[i] = -1;
+
+ err = MPI_Gather( NULL, 0, datatype,
+ buf, count, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Test that no other process in this group received the
+ broadcast */
+ if (rank != 0) {
+ for (i=0; i<count; i++) {
+ if (buf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ else {
+ /* Check for the correct data */
+ for (i=0; i<count*rsize; i++) {
+ if (buf[i] != i) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ buf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count; i++) buf[i] = rank * count + i;
+ err = MPI_Gather( buf, count, datatype,
+ NULL, 0, datatype, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm gatherv test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *buf = 0;
+ int *recvcounts;
+ int *recvdispls;
+ int leftGroup, i, count, rank, rsize, size;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_size( comm, &size );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* Get an intercommunicator */
+ recvcounts = (int *)malloc( rsize * sizeof(int) );
+ recvdispls = (int *)malloc( rsize * sizeof(int) );
+ /* This simple test duplicates the Gather test,
+ using the same lengths for all messages */
+ for (i=0; i<rsize; i++) {
+ recvcounts[i] = count;
+ recvdispls[i] = count * i;
+ }
+ if (leftGroup) {
+ buf = (int *)malloc( count * rsize * sizeof(int) );
+ for (i=0; i<count*rsize; i++) buf[i] = -1;
+
+ err = MPI_Gatherv( NULL, 0, datatype,
+ buf, recvcounts, recvdispls, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Test that no other process in this group received the
+ broadcast */
+ if (rank != 0) {
+ for (i=0; i<count; i++) {
+ if (buf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ else {
+ /* Check for the correct data */
+ for (i=0; i<count*rsize; i++) {
+ if (buf[i] != i) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ buf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count; i++) buf[i] = rank * count + i;
+ err = MPI_Gatherv( buf, count, datatype,
+ NULL, 0, 0, datatype, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ free( buf );
+ free( recvcounts );
+ free( recvdispls );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm reduce test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *sendbuf = 0, *recvbuf=0;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ sendbuf = (int *)malloc( count * sizeof(int) );
+ recvbuf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count; i++) {
+ sendbuf[i] = -1;
+ recvbuf[i] = -1;
+ }
+ if (leftGroup) {
+ err = MPI_Reduce( sendbuf, recvbuf, count, datatype, MPI_SUM,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Test that no other process in this group received the
+ broadcast, and that we got the right answers */
+ if (rank == 0) {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != i * rsize) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ for (i=0; i<count; i++) sendbuf[i] = i;
+ err = MPI_Reduce( sendbuf, recvbuf, count, datatype, MPI_SUM,
+ 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Check that we have received no data */
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free( sendbuf );
+ free( recvbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm scatter test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *buf = 0;
+ int leftGroup, i, count, rank, size, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ buf = 0;
+ if (leftGroup) {
+ buf = (int *)malloc( count * rsize * sizeof(int) );
+ if (rank == 0) {
+ for (i=0; i<count*rsize; i++) buf[i] = i;
+ }
+ else {
+ for (i=0; i<count*rsize; i++) buf[i] = -1;
+ }
+ err = MPI_Scatter( buf, count, datatype,
+ NULL, 0, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Test that no other process in this group received the
+ scatter */
+ if (rank != 0) {
+ for (i=0; i<count*rsize; i++) {
+ if (buf[i] != -1) {
+ if (errs < 10) {
+ fprintf( stderr, "Received data on root group!\n" );
+ }
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ buf = (int *)malloc( count * sizeof(int) );
+ /* In the right group */
+ for (i=0; i<count; i++) buf[i] = -1;
+ err = MPI_Scatter( NULL, 0, datatype,
+ buf, count, datatype, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Check that we have received the correct data */
+ for (i=0; i<count; i++) {
+ if (buf[i] != i + rank * count) {
+ if (errs < 10)
+ fprintf( stderr, "buf[%d] = %d on %d\n",
+ i, buf[i], rank );
+ errs++;
+ }
+ }
+ }
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm scatterv test";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *buf = 0;
+ int *sendcounts;
+ int *senddispls;
+ int leftGroup, i, count, rank, rsize, size;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ buf = 0;
+ sendcounts = (int *)malloc( rsize * sizeof(int) );
+ senddispls = (int *)malloc( rsize * sizeof(int) );
+ for (i=0; i<rsize; i++) {
+ sendcounts[i] = count;
+ senddispls[i] = count * i;
+ }
+ if (leftGroup) {
+ buf = (int *)malloc( count * rsize * sizeof(int) );
+ if (rank == 0) {
+ for (i=0; i<count*rsize; i++) buf[i] = i;
+ }
+ else {
+ for (i=0; i<count*rsize; i++) buf[i] = -1;
+ }
+ err = MPI_Scatterv( buf, sendcounts, senddispls, datatype,
+ NULL, 0, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Test that no other process in this group received the
+ scatter */
+ if (rank != 0) {
+ for (i=0; i<count*rsize; i++) {
+ if (buf[i] != -1) {
+ if (errs < 10) {
+ fprintf( stderr, "Received data on root group!\n" );
+ }
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ buf = (int *)malloc( count * sizeof(int) );
+ /* In the right group */
+ for (i=0; i<count; i++) buf[i] = -1;
+ err = MPI_Scatterv( NULL, 0, 0, datatype,
+ buf, count, datatype, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Check that we have received the correct data */
+ for (i=0; i<count; i++) {
+ if (buf[i] != i + rank * count) {
+ if (errs < 10)
+ fprintf( stderr, "buf[%d] = %d on %d\n",
+ i, buf[i], rank );
+ errs++;
+ }
+ }
+ }
+ free( sendcounts );
+ free( senddispls );
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+File smpi_simgrid.trace
+
+Errors :
+84 : the following event doesn't match with any event known:
+85 : expected %EventDef
+86 : expected %EventDef
+87 : expected %EventDef
+
+Warnings :
+88 : expected %EventDef
+1 : the definition is not identified
+2 : the definition is not identified
+2739 : missing field value(s) in an event
+
+Your trace has 4 errors and 4 warnings.
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int add ( double *, double *, int *, MPI_Datatype * );
+/*
+ * User-defined operation on a long value (tests proper handling of
+ * possible pipelining in the implementation of reductions with user-defined
+ * operations).
+ */
+int add( double *invec, double *inoutvec, int *len, MPI_Datatype *dtype )
+{
+ int i, n = *len;
+ for (i=0; i<n; i++) {
+ inoutvec[i] = invec[i] + inoutvec[i];
+ }
+ return 0;
+}
+
+int main( int argc, char **argv )
+{
+ MPI_Op op;
+ int i, rank, size, bufsize, errcnt = 0, toterr;
+ double *inbuf, *outbuf, value;
+
+ MPI_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Op_create( (MPI_User_function *)add, 1, &op );
+
+ bufsize = 1;
+ while (bufsize < 100000) {
+ inbuf = (double *)malloc( bufsize * sizeof(double) );
+ outbuf = (double *)malloc( bufsize * sizeof(double) );
+ if (! inbuf || ! outbuf) {
+ fprintf( stderr, "Could not allocate buffers for size %d\n",
+ bufsize );
+ errcnt++;
+ break;
+ }
+
+ value = (rank & 0x1) ? 1.0 : -1.0;
+ for (i=0; i<bufsize; i++) {
+ inbuf[i] = value;
+ outbuf[i] = 100.0;
+ }
+ MPI_Allreduce( inbuf, outbuf, bufsize, MPI_DOUBLE, op,
+ MPI_COMM_WORLD );
+ /* Check values */
+ value = (size & 0x1) ? -1.0 : 0.0;
+ for (i=0; i<bufsize; i++) {
+ if (outbuf[i] != value) {
+ if (errcnt < 10)
+ printf( "outbuf[%d] = %f, should = %f\n", i, outbuf[i],
+ value );
+ errcnt ++;
+ }
+ }
+ free( inbuf );
+ free( outbuf );
+ bufsize *= 2;
+ }
+
+ MPI_Allreduce( &errcnt, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if (rank == 0) {
+ if (toterr == 0)
+ printf( " No Errors\n" );
+ else
+ printf( "*! %d errors!\n", toterr );
+ }
+
+ MPI_Op_free( &op );
+ MPI_Finalize( );
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This is a very weak sanity test that all nonblocking collectives specified by
+ * MPI-3 are present in the library and take arguments as expected. This test
+ * does not check for progress, matching issues, or sensible output buffer
+ * values. */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+
+#define NUM_INTS (2)
+
+#define my_assert(cond_) \
+ do { \
+ if (!(cond_)) { \
+ fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
+ MPI_Abort(MPI_COMM_WORLD, 1); \
+ } \
+ } while (0)
+
+int main(int argc, char **argv)
+{
+ int errs = 0;
+ int i;
+ int rank, size;
+ int *sbuf = NULL;
+ int *rbuf = NULL;
+ int *scounts = NULL;
+ int *rcounts = NULL;
+ int *sdispls = NULL;
+ int *rdispls = NULL;
+ int *types = NULL;
+ MPI_Comm comm;
+ MPI_Request req;
+
+ /* intentionally not using MTest_Init/MTest_Finalize in order to make it
+ * easy to take this test and use it as an NBC sanity test outside of the
+ * MPICH test suite */
+ MPI_Init(&argc, &argv);
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+ /* enough space for every process to contribute at least NUM_INTS ints to any
+ * collective operation */
+ sbuf = malloc(NUM_INTS*size*sizeof(int));
+ my_assert(sbuf);
+ rbuf = malloc(NUM_INTS*size*sizeof(int));
+ my_assert(rbuf);
+ scounts = malloc(size*sizeof(int));
+ my_assert(scounts);
+ rcounts = malloc(size*sizeof(int));
+ my_assert(rcounts);
+ sdispls = malloc(size*sizeof(int));
+ my_assert(sdispls);
+ rdispls = malloc(size*sizeof(int));
+ my_assert(rdispls);
+ types = malloc(size*sizeof(int));
+ my_assert(types);
+
+ for (i = 0; i < size; ++i) {
+ sbuf[2*i] = i;
+ sbuf[2*i+1] = i;
+ rbuf[2*i] = i;
+ rbuf[2*i+1] = i;
+ scounts[i] = NUM_INTS;
+ rcounts[i] = NUM_INTS;
+ sdispls[i] = i * NUM_INTS;
+ rdispls[i] = i * NUM_INTS;
+ types[i] = MPI_INT;
+ }
+
+ MPI_Ibarrier(comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ibcast(sbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Igather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iallgather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iallgatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ialltoall(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ialltoallv(sbuf, scounts, sdispls, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ialltoallw(sbuf, scounts, sdispls, types, rbuf, rcounts, rdispls, types, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ireduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iallreduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ireduce_scatter(sbuf, rbuf, rcounts, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Ireduce_scatter_block(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ MPI_Iexscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+#endif
+
+ if (sbuf) free(sbuf);
+ if (rbuf) free(rbuf);
+ if (scounts) free(scounts);
+ if (rcounts) free(rcounts);
+ if (sdispls) free(sdispls);
+ if (rdispls) free(rdispls);
+
+ if (rank == 0) {
+ if (errs)
+ fprintf(stderr, "Found %d errors\n", errs);
+ else
+ printf(" No errors\n");
+ }
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* A basic test of all 17 nonblocking collective operations specified by the
+ * draft MPI-3 standard. It only exercises the intracommunicator functionality,
+ * does not use MPI_IN_PLACE, and only transmits/receives simple integer types
+ * with relatively small counts. It does check a few fancier issues, such as
+ * ensuring that "premature user releases" of MPI_Op and MPI_Datatype objects
+ * does not result in an error or segfault. */
+
+#include "mpi.h"
+#include <stdlib.h>
+#include <stdio.h>
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+
+#define COUNT (10)
+#define PRIME (17)
+
+#define my_assert(cond_) \
+ do { \
+ if (!(cond_)) { \
+ fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
+ MPI_Abort(MPI_COMM_WORLD, 1); \
+ } \
+ } while (0)
+
+/* Since MPICH is currently the only NBC implementation in existence, just use
+ * this quick-and-dirty #ifdef to decide whether to test the nonblocking
+ * collectives. Eventually we can add a configure option or configure test, or
+ * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
+ * check */
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NBC_ROUTINES 1
+#endif
+
+static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+{
+ int i;
+ int *in = invec;
+ int *inout = inoutvec;
+ for (i = 0; i < *len; ++i) {
+ inout[i] = in[i] + inout[i];
+ }
+}
+
+
+int main(int argc, char **argv)
+{
+ int i, j;
+ int rank, size;
+ int *buf = NULL;
+ int *recvbuf = NULL;
+ int *sendcounts = NULL;
+ int *recvcounts = NULL;
+ int *sdispls = NULL;
+ int *rdispls = NULL;
+ int *sendtypes = NULL;
+ int *recvtypes = NULL;
+ char *buf_alias = NULL;
+ MPI_Request req;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+#if defined(TEST_NBC_ROUTINES)
+
+ buf = malloc(COUNT*size*sizeof(int));
+ recvbuf = malloc(COUNT*size*sizeof(int));
+ sendcounts = malloc(size*sizeof(int));
+ recvcounts = malloc(size*sizeof(int));
+ sdispls = malloc(size*sizeof(int));
+ rdispls = malloc(size*sizeof(int));
+ sendtypes = malloc(size*sizeof(MPI_Datatype));
+ recvtypes = malloc(size*sizeof(MPI_Datatype));
+
+ /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0) {
+ buf[i] = i;
+ }
+ else {
+ buf[i] = 0xdeadbeef;
+ }
+ }
+ MPI_Ibcast(buf, COUNT, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ for (i = 0; i < COUNT; ++i) {
+ if (buf[i] != i)
+ printf("buf[%d]=%d i=%d\n", i, buf[i], i);
+ my_assert(buf[i] == i);
+ }
+
+ /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ buf_alias = (char *)buf;
+ my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
+ for (i = 0; i < PRIME; ++i) {
+ if (rank == 0)
+ buf_alias[i] = i;
+ else
+ buf_alias[i] = 0xdb;
+ }
+ for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
+ buf_alias[i] = 0xbf;
+ }
+ MPI_Ibcast(buf, PRIME, MPI_SIGNED_CHAR, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < PRIME; ++i) {
+ if (buf_alias[i] != i)
+ printf("buf_alias[%d]=%d i=%d\n", i, buf_alias[i], i);
+ my_assert(buf_alias[i] == i);
+ }
+
+ /* MPI_Ibarrier */
+ MPI_Ibarrier(MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ /* MPI_Ireduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ }
+ }
+
+ /* same again, use a user op and free it before the wait */
+ {
+ MPI_Op op = MPI_OP_NULL;
+ MPI_Op_create(sum_fn, /*commute=*/1, &op);
+
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, MPI_COMM_WORLD, &req);
+ MPI_Op_free(&op);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ }
+ }
+ }
+
+ /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ }
+
+ /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i;
+ rdispls[i] = COUNT * i;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + (i * j);
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
+ my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ }
+ }
+
+ /* MPI_Igather */
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ }
+ else {
+ for (i = 0; i < size*COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ }
+ }
+
+ /* same test again, just use a dup'ed datatype and free it before the wait */
+ {
+ MPI_Datatype type = MPI_DATATYPE_NULL;
+ MPI_Type_dup(MPI_INT, &type);
+
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, MPI_COMM_WORLD, &req);
+ MPI_Type_free(&type); /* should cause implementations that don't refcount
+ correctly to blow up or hang in the wait */
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ }
+ else {
+ for (i = 0; i < size*COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ }
+ }
+ }
+
+ /* MPI_Iscatter */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i*COUNT+j] = i + j;
+ else
+ buf[i*COUNT+j] = 0xdeadbeef;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size*COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
+ }
+ }
+
+ /* MPI_Iscatterv */
+ for (i = 0; i < size; ++i) {
+ /* weak test, just test the regular case where all counts are equal */
+ sendcounts[i] = COUNT;
+ sdispls[i] = i * COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i*COUNT+j] = i + j;
+ else
+ buf[i*COUNT+j] = 0xdeadbeef;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size*COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
+ }
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ }
+ }
+
+ /* MPI_Ireduce_scatter */
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + i;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ }
+ }
+
+ /* MPI_Ireduce_scatter_block */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + i;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ }
+ }
+
+ /* MPI_Igatherv */
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = 0xdeadbeef;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ }
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ }
+ else {
+ for (i = 0; i < size*COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ }
+ }
+
+ /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + (i * j);
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j)));*/
+ my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ }
+ }
+
+ /* MPI_Iallgather */
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+
+ /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ for (i = 0; i < COUNT; ++i)
+ buf[i] = rank + i;
+ MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+
+ /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < COUNT; ++i) {
+ my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1))));
+ }
+
+ /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0)
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ else
+ my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1)) - (rank + i)));
+ }
+
+ /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i * sizeof(int);
+ rdispls[i] = COUNT * i * sizeof(int);
+ sendtypes[i] = MPI_INT;
+ recvtypes[i] = MPI_INT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + (i * j);
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
+ my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ }
+ }
+
+#endif /* defined(TEST_NBC_ROUTINES) */
+
+ if (rank == 0)
+ printf(" No Errors\n");
+
+
+ MPI_Finalize();
+ free(buf);
+ free(recvbuf);
+ free(sendcounts);
+ free(recvcounts);
+ free(rdispls);
+ free(sdispls);
+ free(recvtypes);
+ free(sendtypes);
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This test attempts to execute multiple simultaneous nonblocking collective
+ * (NBC) MPI routines at the same time, and manages their completion with a
+ * variety of routines (MPI_{Wait,Test}{,_all,_any,_some}). It also throws a
+ * few point-to-point operations into the mix.
+ *
+ * Possible improvements:
+ * - post operations on multiple comms from multiple threads
+ */
+
+#include "mpi.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static int errs = 0;
+
+/* Constants that control the high level test harness behavior. */
+/* MAIN_ITERATIONS is how many NBC ops the test will attempt to issue. */
+#define MAIN_ITERATIONS (100000)
+/* WINDOW is the maximum number of outstanding NBC requests at any given time */
+#define WINDOW (20)
+/* we sleep with probability 1/CHANCE_OF_SLEEP */
+#define CHANCE_OF_SLEEP (1000)
+/* JITTER_DELAY is denominated in microseconds (us) */
+#define JITTER_DELAY (50000) /* 0.05 seconds */
+/* NUM_COMMS is the number of communicators on which ops will be posted */
+#define NUM_COMMS (4)
+
+/* Constants that control behavior of the individual testing operations.
+ * Altering these can help to explore the testing space, but increasing them too
+ * much can consume too much memory (often O(n^2) usage). */
+/* FIXME is COUNT==10 too limiting? should we try a larger count too (~500)? */
+#define COUNT (10)
+#define PRIME (17)
+
+#define my_assert(cond_) \
+ do { \
+ if (!(cond_)) { \
+ ++errs; \
+ if (errs < 10) { \
+ fprintf(stderr, "assertion (%s) failed on line %d\n", #cond_, __LINE__); \
+ } \
+ } \
+ } while (0)
+
+/* Since MPICH is currently the only NBC implementation in existence, just use
+ * this quick-and-dirty #ifdef to decide whether to test the nonblocking
+ * collectives. Eventually we can add a configure option or configure test, or
+ * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
+ * check */
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NBC_ROUTINES 1
+#endif
+
+#if defined(TEST_NBC_ROUTINES)
+/* Intended to act like "rand_r", but we can be sure that it will exist and be
+ * consistent across all of comm world. Returns a number in the range
+ * [0,GEN_PRN_MAX] */
+#define GEN_PRN_MAX (4294967291-1)
+static unsigned int gen_prn(unsigned int x)
+{
+ /* a simple "multiplicative congruential method" PRNG, with parameters:
+ * m=4294967291, largest 32-bit prime
+ * a=279470273, good primitive root of m from "TABLES OF LINEAR
+ * CONGRUENTIAL GENERATORS OF DIFFERENT SIZES AND GOOD
+ * LATTICE STRUCTURE", by Pierre L’Ecuyer */
+ return (279470273UL * (unsigned long)x) % 4294967291UL;
+}
+
+/* given a random unsigned int value "rndval_" from gen_prn, this evaluates to a
+ * value in the range [min_,max_) */
+#define rand_range(rndval_,min_,max_) \
+ ((unsigned int)((min_) + ((rndval_) * (1.0 / (GEN_PRN_MAX+1.0)) * ((max_) - (min_)))))
+
+
+static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+{
+ int i;
+ int *in = invec;
+ int *inout = inoutvec;
+ for (i = 0; i < *len; ++i) {
+ inout[i] = in[i] + inout[i];
+ }
+}
+
+/* used to keep track of buffers that should be freed after the corresponding
+ * operation has completed */
+struct laundry {
+ int case_num; /* which test case initiated this req/laundry */
+ MPI_Comm comm;
+ int *buf;
+ int *recvbuf;
+ int *sendcounts;
+ int *recvcounts;
+ int *sdispls;
+ int *rdispls;
+ int *sendtypes;
+ int *recvtypes;
+};
+
+static void cleanup_laundry(struct laundry *l)
+{
+ l->case_num = -1;
+ l->comm = MPI_COMM_NULL;
+ if (l->buf) free(l->buf);
+ if (l->recvbuf) free(l->recvbuf);
+ if (l->sendcounts) free(l->sendcounts);
+ if (l->recvcounts) free(l->recvcounts);
+ if (l->sdispls) free(l->sdispls);
+ if (l->rdispls) free(l->rdispls);
+ if (l->sendtypes) free(l->sendtypes);
+ if (l->recvtypes) free(l->recvtypes);
+}
+
+/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
+ * in (*req) a request handle corresonding to that operation. This call should
+ * be considered collective over comm (with a consistent value for "rndnum"),
+ * even though the operation may only be a point-to-point request. */
+static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request *req, struct laundry *l)
+{
+ int i, j;
+ int rank, size;
+ int *buf = NULL;
+ int *recvbuf = NULL;
+ int *sendcounts = NULL;
+ int *recvcounts = NULL;
+ int *sdispls = NULL;
+ int *rdispls = NULL;
+ int *sendtypes = NULL;
+ int *recvtypes = NULL;
+ char *buf_alias = NULL;
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ *req = MPI_REQUEST_NULL;
+
+ l->case_num = -1;
+ l->comm = comm;
+
+ l->buf = buf = malloc(COUNT*size*sizeof(int));
+ l->recvbuf = recvbuf = malloc(COUNT*size*sizeof(int));
+ l->sendcounts = sendcounts = malloc(size*sizeof(int));
+ l->recvcounts = recvcounts = malloc(size*sizeof(int));
+ l->sdispls = sdispls = malloc(size*sizeof(int));
+ l->rdispls = rdispls = malloc(size*sizeof(int));
+ l->sendtypes = sendtypes = malloc(size*sizeof(MPI_Datatype));
+ l->recvtypes = recvtypes = malloc(size*sizeof(MPI_Datatype));
+
+#define NUM_CASES (21)
+ l->case_num = rand_range(rndnum, 0, NUM_CASES);
+ switch (l->case_num) {
+ case 0: /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0) {
+ buf[i] = i;
+ }
+ else {
+ buf[i] = 0xdeadbeef;
+ }
+ }
+ MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
+ buf_alias = (char *)buf;
+ my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
+ for (i = 0; i < PRIME; ++i) {
+ if (rank == 0)
+ buf_alias[i] = i;
+ else
+ buf_alias[i] = 0xdb;
+ }
+ for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
+ buf_alias[i] = 0xbf;
+ }
+ MPI_Ibcast(buf, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
+ break;
+
+ case 2: /* MPI_Ibarrier */
+ MPI_Ibarrier(comm, req);
+ break;
+
+ case 3: /* MPI_Ireduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
+ break;
+
+ case 4: /* same again, use a user op and free it before the wait */
+ {
+ MPI_Op op = MPI_OP_NULL;
+ MPI_Op_create(sum_fn, /*commute=*/1, &op);
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
+ MPI_Op_free(&op);
+ }
+ break;
+
+ case 5: /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i;
+ rdispls[i] = COUNT * i;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + (i * j);
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
+ break;
+
+ case 7: /* MPI_Igather */
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
+ {
+ MPI_Datatype type = MPI_DATATYPE_NULL;
+ MPI_Type_dup(MPI_INT, &type);
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
+ MPI_Type_free(&type); /* should cause implementations that don't refcount
+ correctly to blow up or hang in the wait */
+ }
+ break;
+
+ case 9: /* MPI_Iscatter */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i*COUNT+j] = i + j;
+ else
+ buf[i*COUNT+j] = 0xdeadbeef;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 10: /* MPI_Iscatterv */
+ for (i = 0; i < size; ++i) {
+ /* weak test, just test the regular case where all counts are equal */
+ sendcounts[i] = COUNT;
+ sdispls[i] = i * COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i*COUNT+j] = i + j;
+ else
+ buf[i*COUNT+j] = 0xdeadbeef;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 11: /* MPI_Ireduce_scatter */
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + i;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 12: /* MPI_Ireduce_scatter_block */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + i;
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 13: /* MPI_Igatherv */
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = 0xdeadbeef;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ }
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
+ break;
+
+ case 14: /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + (i * j);
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
+ break;
+
+ case 15: /* MPI_Iallgather */
+ for (i = 0; i < size*COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
+ break;
+
+ case 16: /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ for (i = 0; i < COUNT; ++i)
+ buf[i] = rank + i;
+ MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
+ break;
+
+ case 17: /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 18: /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i * sizeof(int);
+ rdispls[i] = COUNT * i * sizeof(int);
+ sendtypes[i] = MPI_INT;
+ recvtypes[i] = MPI_INT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i*COUNT+j] = rank + (i * j);
+ recvbuf[i*COUNT+j] = 0xdeadbeef;
+ }
+ }
+ MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, req);
+ break;
+
+ case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
+ /* even ranks send to odd ranks, but only if we have a full pair */
+ if ((rank % 2 != 0) || (rank != size-1)) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[j] = j;
+ recvbuf[j] = 0xdeadbeef;
+ }
+ if (rank % 2 == 0)
+ MPI_Isend(buf, COUNT, MPI_INT, rank+1, 5, comm, req);
+ else
+ MPI_Irecv(recvbuf, COUNT, MPI_INT, rank-1, 5, comm, req);
+ }
+ break;
+
+ default:
+ fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
+ MPI_Abort(comm, 1);
+ break;
+ }
+}
+
+static void check_after_completion(struct laundry *l)
+{
+ int i, j;
+ int rank, size;
+ MPI_Comm comm = l->comm;
+ int *buf = l->buf;
+ int *recvbuf = l->recvbuf;
+ int *sendcounts = l->sendcounts;
+ int *recvcounts = l->recvcounts;
+ int *sdispls = l->sdispls;
+ int *rdispls = l->rdispls;
+ int *sendtypes = l->sendtypes;
+ int *recvtypes = l->recvtypes;
+ char *buf_alias = (char *)buf;
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ /* these cases all correspond to cases in start_random_nonblocking */
+ switch (l->case_num) {
+ case 0: /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (buf[i] != i)
+ printf("buf[%d]=%d i=%d\n", i, buf[i], i);
+ my_assert(buf[i] == i);
+ }
+ break;
+
+ case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ for (i = 0; i < PRIME; ++i) {
+ if (buf_alias[i] != i)
+ printf("buf_alias[%d]=%d i=%d\n", i, buf_alias[i], i);
+ my_assert(buf_alias[i] == i);
+ }
+ break;
+
+ case 2: /* MPI_Ibarrier */
+ /* nothing to check */
+ break;
+
+ case 3: /* MPI_Ireduce */
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ }
+ }
+ break;
+
+ case 4: /* same again, use a user op and free it before the wait */
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ }
+ }
+ break;
+
+ case 5: /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ }
+ break;
+
+ case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
+ my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ }
+ }
+ break;
+
+ case 7: /* MPI_Igather */
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ }
+ else {
+ for (i = 0; i < size*COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ }
+ else {
+ for (i = 0; i < size*COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 9: /* MPI_Iscatter */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size*COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 10: /* MPI_Iscatterv */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size*COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
+ }
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 11: /* MPI_Ireduce_scatter */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 12: /* MPI_Ireduce_scatter_block */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 13: /* MPI_Igatherv */
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ }
+ else {
+ for (i = 0; i < size*COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ }
+ }
+ break;
+
+ case 14: /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j)));*/
+ my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ }
+ }
+ break;
+
+ case 15: /* MPI_Iallgather */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ break;
+
+ case 16: /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i*COUNT+j] == i + j);
+ }
+ }
+ break;
+
+ case 17: /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1))));
+ }
+ break;
+
+ case 18: /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0)
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ else
+ my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1)) - (rank + i)));
+ }
+ break;
+
+ case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
+ my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ }
+ }
+ break;
+
+ case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
+ /* even ranks send to odd ranks, but only if we have a full pair */
+ if ((rank % 2 != 0) || (rank != size-1)) {
+ for (j = 0; j < COUNT; ++j) {
+ /* only odd procs did a recv */
+ if (rank % 2 == 0) {
+ my_assert(recvbuf[j] == 0xdeadbeef);
+ }
+ else {
+ if (recvbuf[j] != j) printf("recvbuf[%d]=%d j=%d\n", j, recvbuf[j], j);
+ my_assert(recvbuf[j] == j);
+ }
+ }
+ }
+ break;
+
+ default:
+ printf("invalid case_num (%d) detected\n", l->case_num);
+ assert(0);
+ break;
+ }
+}
+#undef NUM_CASES
+
+static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[], int *outcount, int indices[])
+{
+ int i, idx, flag;
+
+#define COMPLETION_CASES (8)
+ switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
+ case 0:
+ MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
+ *outcount = numreqs;
+ for (i = 0; i < numreqs; ++i) {
+ indices[i] = i;
+ }
+ break;
+
+ case 1:
+ MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
+ if (*outcount == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ break;
+
+ case 2:
+ MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
+ if (*outcount == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ break;
+
+ case 3:
+ MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
+ if (idx == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ else {
+ *outcount = 1;
+ indices[0] = idx;
+ }
+ break;
+
+ case 4:
+ MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
+ if (idx == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ else {
+ *outcount = 1;
+ indices[0] = idx;
+ }
+ break;
+
+ case 5:
+ MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
+ if (flag) {
+ *outcount = numreqs;
+ for (i = 0; i < numreqs; ++i) {
+ indices[i] = i;
+ }
+ }
+ else {
+ *outcount = 0;
+ }
+ break;
+
+ case 6:
+ /* select a new random index and wait on it */
+ rndnum = gen_prn(rndnum);
+ idx = rand_range(rndnum, 0, numreqs);
+ MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
+ *outcount = 1;
+ indices[0] = idx;
+ break;
+
+ case 7:
+ /* select a new random index and wait on it */
+ rndnum = gen_prn(rndnum);
+ idx = rand_range(rndnum, 0, numreqs);
+ MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
+ *outcount = (flag ? 1 : 0);
+ indices[0] = idx;
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+#undef COMPLETION_CASES
+}
+#endif /* defined(TEST_NBC_ROUTINES) */
+
+int main(int argc, char **argv)
+{
+ int i, num_posted, num_completed;
+ int wrank, wsize;
+ unsigned int seed = 0x10bc;
+ unsigned int post_seq, complete_seq;
+#if defined(TEST_NBC_ROUTINES)
+ struct laundry larr[WINDOW];
+#endif
+ MPI_Request reqs[WINDOW];
+ int outcount;
+ int indices[WINDOW];
+ MPI_Comm comms[NUM_COMMS];
+ MPI_Comm comm;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+
+#if defined(TEST_NBC_ROUTINES)
+
+ /* it is critical that all processes in the communicator start with a
+ * consistent value for "post_seq" */
+ post_seq = complete_seq = gen_prn(seed);
+
+ num_completed = 0;
+ num_posted = 0;
+
+ /* construct all of the communicators, just dups of comm world for now */
+ for (i = 0; i < NUM_COMMS; ++i) {
+ MPI_Comm_dup(MPI_COMM_WORLD, &comms[i]);
+ }
+
+ /* fill the entire window of ops */
+ for (i = 0; i < WINDOW; ++i) {
+ reqs[i] = MPI_REQUEST_NULL;
+ memset(&larr[i], 0, sizeof(struct laundry));
+ larr[i].case_num = -1;
+
+ /* randomly select a comm, using a new seed to avoid correlating
+ * particular kinds of NBC ops with particular communicators */
+ comm = comms[rand_range(gen_prn(post_seq), 0, NUM_COMMS)];
+
+ start_random_nonblocking(comm, post_seq, &reqs[i], &larr[i]);
+ ++num_posted;
+ post_seq = gen_prn(post_seq);
+ }
+
+ /* now loop repeatedly, completing ops with "random" completion functions,
+ * until we've posted and completed MAIN_ITERATIONS ops */
+ while (num_completed < MAIN_ITERATIONS) {
+ complete_something_somehow(complete_seq, WINDOW, reqs, &outcount, indices);
+ complete_seq = gen_prn(complete_seq);
+ for (i = 0; i < outcount; ++i) {
+ int idx = indices[i];
+ assert(reqs[idx] == MPI_REQUEST_NULL);
+ if (larr[idx].case_num != -1) {
+ check_after_completion(&larr[idx]);
+ cleanup_laundry(&larr[idx]);
+ ++num_completed;
+ if (num_posted < MAIN_ITERATIONS) {
+ comm = comms[rand_range(gen_prn(post_seq), 0, NUM_COMMS)];
+ start_random_nonblocking(comm, post_seq, &reqs[idx], &larr[idx]);
+ ++num_posted;
+ post_seq = gen_prn(post_seq);
+ }
+ }
+ }
+
+ /* "randomly" and infrequently introduce some jitter into the system */
+ if (0 == rand_range(gen_prn(complete_seq + wrank), 0, CHANCE_OF_SLEEP)) {
+ usleep(JITTER_DELAY); /* take a short nap */
+ }
+ }
+
+ for (i = 0; i < NUM_COMMS; ++i) {
+ MPI_Comm_free(&comms[i]);
+ }
+
+#endif /* defined(TEST_NBC_ROUTINES) */
+
+ if (wrank == 0) {
+ if (errs)
+ printf("found %d errors\n", errs);
+ else
+ printf(" No errors\n");
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "A simple test of MPI_Op_create/commute/free";
+*/
+
+static int errs = 0;
+
+/*
+static void comm_user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+{
+ user_op(invec, inoutvec, len, datatype);
+}
+*/
+
+/*
+static void noncomm_user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+{
+ user_op(invec, inoutvec, len, datatype);
+}
+*/
+
+static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+{
+ int i;
+ int *invec_int = (int *)invec;
+ int *inoutvec_int = (int *)inoutvec;
+
+ if (*datatype != MPI_INT) {
+ ++errs;
+ printf("invalid datatype passed to user_op");
+ return;
+ }
+
+ for (i = 0; i < *len; ++i) {
+ inoutvec_int[i] = invec_int[i] * 2 + inoutvec_int[i];
+ }
+}
+
+
+int main( int argc, char *argv[] )
+{
+ MPI_Op c_uop = MPI_OP_NULL;
+ MPI_Op nc_uop = MPI_OP_NULL;
+ int is_commutative = 0;
+
+ MTest_Init(&argc, &argv);
+
+ /* make sure that user-define ops work too */
+ MPI_Op_create(&user_op, 1/*commute*/, &c_uop);
+ MPI_Op_create(&user_op, 0/*!commute*/, &nc_uop);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* this function was added in MPI-2.2 */
+
+#define CHECK_COMMUTATIVE(op_) \
+ do { \
+ MPI_Op_commutative((op_), &is_commutative); \
+ if (!is_commutative) { ++errs; } \
+ } while (0)
+
+ /* Check all predefined reduction operations for commutivity.
+ * This list is from section 5.9.2 of the MPI-2.1 standard */
+ CHECK_COMMUTATIVE(MPI_MAX);
+ CHECK_COMMUTATIVE(MPI_MIN);
+ CHECK_COMMUTATIVE(MPI_SUM);
+ CHECK_COMMUTATIVE(MPI_PROD);
+ CHECK_COMMUTATIVE(MPI_LAND);
+ CHECK_COMMUTATIVE(MPI_BAND);
+ CHECK_COMMUTATIVE(MPI_LOR);
+ CHECK_COMMUTATIVE(MPI_BOR);
+ CHECK_COMMUTATIVE(MPI_LXOR);
+ CHECK_COMMUTATIVE(MPI_BXOR);
+ CHECK_COMMUTATIVE(MPI_MAXLOC);
+ CHECK_COMMUTATIVE(MPI_MINLOC);
+
+#undef CHECK_COMMUTATIVE
+
+ MPI_Op_commutative(c_uop, &is_commutative);
+ if (!is_commutative) {
+ ++errs;
+ }
+
+ /* also check our non-commutative user defined operation */
+ MPI_Op_commutative(nc_uop, &is_commutative);
+ if (is_commutative) {
+ ++errs;
+ }
+#endif
+
+ MPI_Op_free(&nc_uop);
+ MPI_Op_free(&c_uop);
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_BAND operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rc;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ short sinbuf[3], soutbuf[3];
+ unsigned short usinbuf[3], usoutbuf[3];
+ long linbuf[3], loutbuf[3];
+ unsigned long ulinbuf[3], uloutbuf[3];
+ unsigned uinbuf[3], uoutbuf[3];
+
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 0xff;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0) ? 0xff : 0xf0;
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != (char)0xff) {
+ errs++;
+ fprintf( stderr, "char BAND(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char BAND(0) test failed\n" );
+ }
+ if (coutbuf[2] != (char)0xf0 && size > 1) {
+ errs++;
+ fprintf( stderr, "char BAND(>) test failed\n" );
+ }
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 0xff;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0) ? 0xff : 0xf0;
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_SIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (scoutbuf[0] != (signed char)0xff) {
+ errs++;
+ fprintf( stderr, "signed char BAND(1) test failed\n" );
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char BAND(0) test failed\n" );
+ }
+ if (scoutbuf[2] != (signed char)0xf0 && size > 1) {
+ errs++;
+ fprintf( stderr, "signed char BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 0xff;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0) ? 0xff : 0xf0;
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ucoutbuf[0] != 0xff) {
+ errs++;
+ fprintf( stderr, "unsigned char BAND(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char BAND(0) test failed\n" );
+ }
+ if (ucoutbuf[2] != 0xf0 && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned char BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ /* bytes */
+ MTestPrintfMsg( 10, "Reduce of MPI_BYTE\n" );
+ cinbuf[0] = 0xff;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0) ? 0xff : 0xf0;
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_BYTE, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_BYTE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != (char)0xff) {
+ errs++;
+ fprintf( stderr, "byte BAND(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "byte BAND(0) test failed\n" );
+ }
+ if (coutbuf[2] != (char)0xf0 && size > 1) {
+ errs++;
+ fprintf( stderr, "byte BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ /* short */
+ MTestPrintfMsg( 10, "Reduce of MPI_SHORT\n" );
+ sinbuf[0] = 0xffff;
+ sinbuf[1] = 0;
+ sinbuf[2] = (rank > 0) ? 0xffff : 0xf0f0;
+
+ soutbuf[0] = 0;
+ soutbuf[1] = 1;
+ soutbuf[2] = 1;
+ rc = MPI_Reduce( sinbuf, soutbuf, 3, MPI_SHORT, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_SHORT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (soutbuf[0] != (short)0xffff) {
+ errs++;
+ fprintf( stderr, "short BAND(1) test failed\n" );
+ }
+ if (soutbuf[1]) {
+ errs++;
+ fprintf( stderr, "short BAND(0) test failed\n" );
+ }
+ if (soutbuf[2] != (short)0xf0f0 && size > 1) {
+ errs++;
+ fprintf( stderr, "short BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_SHORT\n" );
+ /* unsigned short */
+ usinbuf[0] = 0xffff;
+ usinbuf[1] = 0;
+ usinbuf[2] = (rank > 0) ? 0xffff : 0xf0f0;
+
+ usoutbuf[0] = 0;
+ usoutbuf[1] = 1;
+ usoutbuf[2] = 1;
+ rc = MPI_Reduce( usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED_SHORT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (usoutbuf[0] != 0xffff) {
+ errs++;
+ fprintf( stderr, "short BAND(1) test failed\n" );
+ }
+ if (usoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "short BAND(0) test failed\n" );
+ }
+ if (usoutbuf[2] != 0xf0f0 && size > 1) {
+ errs++;
+ fprintf( stderr, "short BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED\n" );
+ uinbuf[0] = 0xffffffff;
+ uinbuf[1] = 0;
+ uinbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
+
+ uoutbuf[0] = 0;
+ uoutbuf[1] = 1;
+ uoutbuf[2] = 1;
+ rc = MPI_Reduce( uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (uoutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "unsigned BAND(1) test failed\n" );
+ }
+ if (uoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned BAND(0) test failed\n" );
+ }
+ if (uoutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ /* long */
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG\n" );
+ linbuf[0] = 0xffffffff;
+ linbuf[1] = 0;
+ linbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
+
+ loutbuf[0] = 0;
+ loutbuf[1] = 1;
+ loutbuf[2] = 1;
+ rc = MPI_Reduce( linbuf, loutbuf, 3, MPI_LONG, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (loutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "long BAND(1) test failed\n" );
+ }
+ if (loutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long BAND(0) test failed\n" );
+ }
+ if (loutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf( stderr, "long BAND(>) test failed\n" );
+ }
+ }
+ }
+
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_LONG\n" );
+ /* unsigned long */
+ ulinbuf[0] = 0xffffffff;
+ ulinbuf[1] = 0;
+ ulinbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
+
+ uloutbuf[0] = 0;
+ uloutbuf[1] = 1;
+ uloutbuf[2] = 1;
+ rc = MPI_Reduce( ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (uloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "unsigned long BAND(1) test failed\n" );
+ }
+ if (uloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned long BAND(0) test failed\n" );
+ }
+ if (uloutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned long BAND(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 0xffffffff;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BAND and MPI_LONG_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "long long BAND(1) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long BAND(0) test failed\n" );
+ }
+ if (lloutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf( stderr, "long long BAND(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_BOR operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rc;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ short sinbuf[3], soutbuf[3];
+ unsigned short usinbuf[3], usoutbuf[3];
+ long linbuf[3], loutbuf[3];
+ unsigned long ulinbuf[3], uloutbuf[3];
+ unsigned uinbuf[3], uoutbuf[3];
+ int iinbuf[3], ioutbuf[3];
+
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 0xff;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != (char)0xff) {
+ errs++;
+ fprintf( stderr, "char BOR(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char BOR(0) test failed\n" );
+ }
+ if (coutbuf[2] != (char)0xff && size > 1) {
+ errs++;
+ fprintf( stderr, "char BOR(>) test failed\n" );
+ }
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 0xff;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_SIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (scoutbuf[0] != (signed char)0xff) {
+ errs++;
+ fprintf( stderr, "signed char BOR(1) test failed\n" );
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char BOR(0) test failed\n" );
+ }
+ if (scoutbuf[2] != (signed char)0xff && size > 1) {
+ errs++;
+ fprintf( stderr, "signed char BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 0xff;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ucoutbuf[0] != 0xff) {
+ errs++;
+ fprintf( stderr, "unsigned char BOR(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char BOR(0) test failed\n" );
+ }
+ if (ucoutbuf[2] != 0xff && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned char BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* bytes */
+ MTestPrintfMsg( 10, "Reduce of MPI_BYTE\n" );
+ cinbuf[0] = 0xff;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_BYTE, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_BYTE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != (char)0xff) {
+ errs++;
+ fprintf( stderr, "byte BOR(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "byte BOR(0) test failed\n" );
+ }
+ if (coutbuf[2] != (char)0xff && size > 1) {
+ errs++;
+ fprintf( stderr, "byte BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* short */
+ MTestPrintfMsg( 10, "Reduce of MPI_SHORT\n" );
+ sinbuf[0] = 0xffff;
+ sinbuf[1] = 0;
+ sinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
+
+ soutbuf[0] = 0;
+ soutbuf[1] = 1;
+ soutbuf[2] = 1;
+ rc = MPI_Reduce( sinbuf, soutbuf, 3, MPI_SHORT, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_SHORT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (soutbuf[0] != (short)0xffff) {
+ errs++;
+ fprintf( stderr, "short BOR(1) test failed\n" );
+ }
+ if (soutbuf[1]) {
+ errs++;
+ fprintf( stderr, "short BOR(0) test failed\n" );
+ }
+ if (soutbuf[2] != (short)0xffff && size > 1) {
+ errs++;
+ fprintf( stderr, "short BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned short */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_SHORT\n" );
+ usinbuf[0] = 0xffff;
+ usinbuf[1] = 0;
+ usinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
+
+ usoutbuf[0] = 0;
+ usoutbuf[1] = 1;
+ usoutbuf[2] = 1;
+ rc = MPI_Reduce( usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED_SHORT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (usoutbuf[0] != 0xffff) {
+ errs++;
+ fprintf( stderr, "short BOR(1) test failed\n" );
+ }
+ if (usoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "short BOR(0) test failed\n" );
+ }
+ if (usoutbuf[2] != 0xffff && size > 1) {
+ errs++;
+ fprintf( stderr, "short BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED\n" );
+ uinbuf[0] = 0xffffffff;
+ uinbuf[1] = 0;
+ uinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ uoutbuf[0] = 0;
+ uoutbuf[1] = 1;
+ uoutbuf[2] = 1;
+ rc = MPI_Reduce( uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (uoutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "unsigned BOR(1) test failed\n" );
+ }
+ if (uoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned BOR(0) test failed\n" );
+ }
+ if (uoutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* int */
+ MTestPrintfMsg( 10, "Reduce of MPI_INT\n" );
+ iinbuf[0] = 0xffffffff;
+ iinbuf[1] = 0;
+ iinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ ioutbuf[0] = 0;
+ ioutbuf[1] = 1;
+ ioutbuf[2] = 1;
+ rc = MPI_Reduce( iinbuf, ioutbuf, 3, MPI_INT, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_INT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ioutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "int BOR(1) test failed\n" );
+ }
+ if (ioutbuf[1]) {
+ errs++;
+ fprintf( stderr, "int BOR(0) test failed\n" );
+ }
+ if (ioutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf( stderr, "int BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* long */
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG\n" );
+ linbuf[0] = 0xffffffff;
+ linbuf[1] = 0;
+ linbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ loutbuf[0] = 0;
+ loutbuf[1] = 1;
+ loutbuf[2] = 1;
+ rc = MPI_Reduce( linbuf, loutbuf, 3, MPI_LONG, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (loutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "long BOR(1) test failed\n" );
+ }
+ if (loutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long BOR(0) test failed\n" );
+ }
+ if (loutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf( stderr, "long BOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned long */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_LONG\n" );
+ ulinbuf[0] = 0xffffffff;
+ ulinbuf[1] = 0;
+ ulinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ uloutbuf[0] = 0;
+ uloutbuf[1] = 1;
+ uloutbuf[2] = 1;
+ rc = MPI_Reduce( ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (uloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "unsigned long BOR(1) test failed\n" );
+ }
+ if (uloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned long BOR(0) test failed\n" );
+ }
+ if (uloutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned long BOR(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 0xffffffff;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BOR and MPI_LONG_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf( stderr, "long long BOR(1) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long BOR(0) test failed\n" );
+ }
+ if (lloutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf( stderr, "long long BOR(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_BXOR operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rc;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ short sinbuf[3], soutbuf[3];
+ unsigned short usinbuf[3], usoutbuf[3];
+ long linbuf[3], loutbuf[3];
+ unsigned long ulinbuf[3], uloutbuf[3];
+ unsigned uinbuf[3], uoutbuf[3];
+ int iinbuf[3], ioutbuf[3];
+
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 0xff;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ coutbuf[0] = 0xf;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != ((size % 2) ? (char)0xff : (char)0) ) {
+ errs++;
+ fprintf( stderr, "char BXOR(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char BXOR(0) test failed\n" );
+ }
+ if (coutbuf[2] != ((size % 2) ? (char)0xc3 : (char)0xff)) {
+ errs++;
+ fprintf( stderr, "char BXOR(>) test failed\n" );
+ }
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 0xff;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ scoutbuf[0] = 0xf;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_SIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (scoutbuf[0] != ((size % 2) ? (signed char)0xff : (signed char)0) ) {
+ errs++;
+ fprintf( stderr, "signed char BXOR(1) test failed\n" );
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char BXOR(0) test failed\n" );
+ }
+ if (scoutbuf[2] != ((size % 2) ? (signed char)0xc3 : (signed char)0xff)) {
+ errs++;
+ fprintf( stderr, "signed char BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 0xff;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ucoutbuf[0] != ((size % 2) ? 0xff : 0)) {
+ errs++;
+ fprintf( stderr, "unsigned char BXOR(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char BXOR(0) test failed\n" );
+ }
+ if (ucoutbuf[2] != ((size % 2) ? (unsigned char)0xc3 : (unsigned char)0xff)) {
+ errs++;
+ fprintf( stderr, "unsigned char BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* bytes */
+ MTestPrintfMsg( 10, "Reduce of MPI_BYTE\n" );
+ cinbuf[0] = 0xff;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_BYTE, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_BYTE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != ((size % 2) ? (char)0xff : 0)) {
+ errs++;
+ fprintf( stderr, "byte BXOR(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "byte BXOR(0) test failed\n" );
+ }
+ if (coutbuf[2] != ((size % 2) ? (char)0xc3 : (char)0xff)) {
+ errs++;
+ fprintf( stderr, "byte BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* short */
+ MTestPrintfMsg( 10, "Reduce of MPI_SHORT\n" );
+ sinbuf[0] = 0xffff;
+ sinbuf[1] = 0;
+ sinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
+
+ soutbuf[0] = 0;
+ soutbuf[1] = 1;
+ soutbuf[2] = 1;
+ rc = MPI_Reduce( sinbuf, soutbuf, 3, MPI_SHORT, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_SHORT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (soutbuf[0] != ((size % 2) ? (short)0xffff : 0)) {
+ errs++;
+ fprintf( stderr, "short BXOR(1) test failed\n" );
+ }
+ if (soutbuf[1]) {
+ errs++;
+ fprintf( stderr, "short BXOR(0) test failed\n" );
+ }
+ if (soutbuf[2] != ((size % 2) ? (short)0xc3c3 : (short)0xffff)) {
+ errs++;
+ fprintf( stderr, "short BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned short */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_SHORT\n" );
+ usinbuf[0] = 0xffff;
+ usinbuf[1] = 0;
+ usinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
+
+ usoutbuf[0] = 0;
+ usoutbuf[1] = 1;
+ usoutbuf[2] = 1;
+ rc = MPI_Reduce( usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED_SHORT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (usoutbuf[0] != ((size % 2) ? 0xffff : 0)) {
+ errs++;
+ fprintf( stderr, "short BXOR(1) test failed\n" );
+ }
+ if (usoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "short BXOR(0) test failed\n" );
+ }
+ if (usoutbuf[2] != ((size % 2) ? 0xc3c3 : 0xffff)) {
+ errs++;
+ fprintf( stderr, "short BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED\n" );
+ uinbuf[0] = 0xffffffff;
+ uinbuf[1] = 0;
+ uinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ uoutbuf[0] = 0;
+ uoutbuf[1] = 1;
+ uoutbuf[2] = 1;
+ rc = MPI_Reduce( uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (uoutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf( stderr, "unsigned BXOR(1) test failed\n" );
+ }
+ if (uoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned BXOR(0) test failed\n" );
+ }
+ if (uoutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf( stderr, "unsigned BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* int */
+ MTestPrintfMsg( 10, "Reduce of MPI_INT\n" );
+ iinbuf[0] = 0xffffffff;
+ iinbuf[1] = 0;
+ iinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ ioutbuf[0] = 0;
+ ioutbuf[1] = 1;
+ ioutbuf[2] = 1;
+ rc = MPI_Reduce( iinbuf, ioutbuf, 3, MPI_INT, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_INT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ioutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf( stderr, "int BXOR(1) test failed\n" );
+ }
+ if (ioutbuf[1]) {
+ errs++;
+ fprintf( stderr, "int BXOR(0) test failed\n" );
+ }
+ if (ioutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf( stderr, "int BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* long */
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG\n" );
+ linbuf[0] = 0xffffffff;
+ linbuf[1] = 0;
+ linbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ loutbuf[0] = 0;
+ loutbuf[1] = 1;
+ loutbuf[2] = 1;
+ rc = MPI_Reduce( linbuf, loutbuf, 3, MPI_LONG, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (loutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf( stderr, "long BXOR(1) test failed\n" );
+ }
+ if (loutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long BXOR(0) test failed\n" );
+ }
+ if (loutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf( stderr, "long BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned long */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_LONG\n" );
+ ulinbuf[0] = 0xffffffff;
+ ulinbuf[1] = 0;
+ ulinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ uloutbuf[0] = 0;
+ uloutbuf[1] = 1;
+ uloutbuf[2] = 1;
+ rc = MPI_Reduce( ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (uloutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf( stderr, "unsigned long BXOR(1) test failed\n" );
+ }
+ if (uloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned long BXOR(0) test failed\n" );
+ }
+ if (uloutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf( stderr, "unsigned long BXOR(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 0xffffffff;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_BXOR and MPI_LONG_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf( stderr, "long long BXOR(1) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long BXOR(0) test failed\n" );
+ }
+ if (lloutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf( stderr, "long long BXOR(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_LAND operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rc;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ float finbuf[3], foutbuf[3];
+ double dinbuf[3], doutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0);
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!coutbuf[0]) {
+ errs++;
+ fprintf( stderr, "char AND(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char AND(0) test failed\n" );
+ }
+ if (coutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "char AND(>) test failed\n" );
+ }
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0);
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_SIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!scoutbuf[0]) {
+ errs++;
+ fprintf( stderr, "signed char AND(1) test failed\n" );
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char AND(0) test failed\n" );
+ }
+ if (scoutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "signed char AND(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0);
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_UNSIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!ucoutbuf[0]) {
+ errs++;
+ fprintf( stderr, "unsigned char AND(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char AND(0) test failed\n" );
+ }
+ if (ucoutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned char AND(>) test failed\n" );
+ }
+ }
+ }
+
+#ifndef USE_STRICT_MPI
+ /* float */
+ MTestPrintfMsg( 10, "Reduce of MPI_FLOAT\n" );
+ finbuf[0] = 1;
+ finbuf[1] = 0;
+ finbuf[2] = (rank > 0);
+
+ foutbuf[0] = 0;
+ foutbuf[1] = 1;
+ foutbuf[2] = 1;
+ rc = MPI_Reduce( finbuf, foutbuf, 3, MPI_FLOAT, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_FLOAT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!foutbuf[0]) {
+ errs++;
+ fprintf( stderr, "float AND(1) test failed\n" );
+ }
+ if (foutbuf[1]) {
+ errs++;
+ fprintf( stderr, "float AND(0) test failed\n" );
+ }
+ if (foutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "float AND(>) test failed\n" );
+ }
+ }
+ }
+
+ MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE\n" );
+ /* double */
+ dinbuf[0] = 1;
+ dinbuf[1] = 0;
+ dinbuf[2] = (rank > 0);
+
+ doutbuf[0] = 0;
+ doutbuf[1] = 1;
+ doutbuf[2] = 1;
+ rc = MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_DOUBLE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!doutbuf[0]) {
+ errs++;
+ fprintf( stderr, "double AND(1) test failed\n" );
+ }
+ if (doutbuf[1]) {
+ errs++;
+ fprintf( stderr, "double AND(0) test failed\n" );
+ }
+ if (doutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "double AND(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
+ rc = MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_LONG_DOUBLE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!ldoutbuf[0]) {
+ errs++;
+ fprintf( stderr, "long double AND(1) test failed\n" );
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long double AND(0) test failed\n" );
+ }
+ if (ldoutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "long double AND(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+#endif /* USE_STRICT_MPI */
+
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LAND, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LAND and MPI_LONG_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!lloutbuf[0]) {
+ errs++;
+ fprintf( stderr, "long long AND(1) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long AND(0) test failed\n" );
+ }
+ if (lloutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "long long AND(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_LOR operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ float finbuf[3], foutbuf[3];
+ double dinbuf[3], doutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* Some MPI implementations do not implement all of the required
+ (datatype,operations) combinations, and further, they do not
+ always provide clear and specific error messages. By catching
+ the error, we can provide a higher quality, more specific message.
+ */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0);
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ err = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_CHAR", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!coutbuf[0]) {
+ errs++;
+ fprintf( stderr, "char OR(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char OR(0) test failed\n" );
+ }
+ if (!coutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "char OR(>) test failed\n" );
+ }
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0);
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ err = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_SIGNED_CHAR", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!scoutbuf[0]) {
+ errs++;
+ fprintf( stderr, "signed char OR(1) test failed\n" );
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char OR(0) test failed\n" );
+ }
+ if (!scoutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "signed char OR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0);
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ err = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_UNSIGNED_CHAR", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!ucoutbuf[0]) {
+ errs++;
+ fprintf( stderr, "unsigned char OR(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char OR(0) test failed\n" );
+ }
+ if (!ucoutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned char OR(>) test failed\n" );
+ }
+ }
+ }
+
+#ifndef USE_STRICT_MPI
+ /* float */
+ MTestPrintfMsg( 10, "Reduce of MPI_FLOAT\n" );
+ finbuf[0] = 1;
+ finbuf[1] = 0;
+ finbuf[2] = (rank > 0);
+
+ foutbuf[0] = 0;
+ foutbuf[1] = 1;
+ foutbuf[2] = 1;
+ err = MPI_Reduce( finbuf, foutbuf, 3, MPI_FLOAT, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_FLOAT", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!foutbuf[0]) {
+ errs++;
+ fprintf( stderr, "float OR(1) test failed\n" );
+ }
+ if (foutbuf[1]) {
+ errs++;
+ fprintf( stderr, "float OR(0) test failed\n" );
+ }
+ if (!foutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "float OR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* double */
+ MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE\n" );
+ dinbuf[0] = 1;
+ dinbuf[1] = 0;
+ dinbuf[2] = (rank > 0);
+
+ doutbuf[0] = 0;
+ doutbuf[1] = 1;
+ doutbuf[2] = 1;
+ err = MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_DOUBLE", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!doutbuf[0]) {
+ errs++;
+ fprintf( stderr, "double OR(1) test failed\n" );
+ }
+ if (doutbuf[1]) {
+ errs++;
+ fprintf( stderr, "double OR(0) test failed\n" );
+ }
+ if (!doutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "double OR(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
+ err = MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_LONG_DOUBLE", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!ldoutbuf[0]) {
+ errs++;
+ fprintf( stderr, "long double OR(1) test failed\n" );
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long double OR(0) test failed\n" );
+ }
+ if (!ldoutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "long double OR(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+#endif /* USE_STRICT_MPI */
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ err = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LOR, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg( "MPI_LOR and MPI_LONG_LONG", err );
+ }
+ else {
+ if (rank == 0) {
+ if (!lloutbuf[0]) {
+ errs++;
+ fprintf( stderr, "long long OR(1) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long OR(0) test failed\n" );
+ }
+ if (!lloutbuf[2] && size > 1) {
+ errs++;
+ fprintf( stderr, "long long OR(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_LXOR operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rc;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ float finbuf[3], foutbuf[3];
+ double dinbuf[3], doutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0);
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (coutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "char XOR(1) test failed\n" );
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char XOR(0) test failed\n" );
+ }
+ if (coutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "char XOR(>) test failed\n" );
+ }
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0);
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_SIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (scoutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "signed char XOR(1) test failed\n" );
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char XOR(0) test failed\n" );
+ }
+ if (scoutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "signed char XOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0);
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_UNSIGNED_CHAR", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ucoutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "unsigned char XOR(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char XOR(0) test failed\n" );
+ }
+ if (ucoutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "unsigned char XOR(>) test failed\n" );
+ }
+ }
+ }
+
+#ifndef USE_STRICT_MPI
+ /* float */
+ MTestPrintfMsg( 10, "Reduce of MPI_FLOAT\n" );
+ finbuf[0] = 1;
+ finbuf[1] = 0;
+ finbuf[2] = (rank > 0);
+
+ foutbuf[0] = 0;
+ foutbuf[1] = 1;
+ foutbuf[2] = 1;
+ rc = MPI_Reduce( finbuf, foutbuf, 3, MPI_FLOAT, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_FLOAT", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (foutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "float XOR(1) test failed\n" );
+ }
+ if (foutbuf[1]) {
+ errs++;
+ fprintf( stderr, "float XOR(0) test failed\n" );
+ }
+ if (foutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "float XOR(>) test failed\n" );
+ }
+ }
+ }
+
+ /* double */
+ MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE\n" );
+ dinbuf[0] = 1;
+ dinbuf[1] = 0;
+ dinbuf[2] = (rank > 0);
+
+ doutbuf[0] = 0;
+ doutbuf[1] = 1;
+ doutbuf[2] = 1;
+ rc = MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_DOUBLE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (doutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "double XOR(1) test failed\n" );
+ }
+ if (doutbuf[1]) {
+ errs++;
+ fprintf( stderr, "double XOR(0) test failed\n" );
+ }
+ if (doutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "double XOR(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
+ rc = MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_LONG_DOUBLE", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ldoutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "long double XOR(1) test failed\n" );
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long double XOR(0) test failed\n" );
+ }
+ if (ldoutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "long double XOR(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+#endif /* USE_STRICT_MPI */
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LXOR, 0, comm );
+ if (rc) {
+ MTestPrintErrorMsg( "MPI_LXOR and MPI_LONG_LONG", rc );
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf( stderr, "long long XOR(1) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long XOR(0) test failed\n" );
+ }
+ if (lloutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf( stderr, "long long XOR(>) test failed\n" );
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_MAX operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of char and types that are not required
+ * integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = rank;
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_MAX, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "char MAX(1) test failed\n" );
+ }
+ if (coutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "char MAX(0) test failed\n" );
+ }
+ if (size < 128 && coutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "char MAX(>) test failed\n" );
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = rank;
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_MAX, 0, comm );
+ if (rank == 0) {
+ if (scoutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "signed char MAX(1) test failed\n" );
+ }
+ if (scoutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "signed char MAX(0) test failed\n" );
+ }
+ if (size < 128 && scoutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "signed char MAX(>) test failed\n" );
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = rank;
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_MAX, 0, comm );
+ if (rank == 0) {
+ if (ucoutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "unsigned char MAX(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char MAX(0) test failed\n" );
+ }
+ if (size < 256 && ucoutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "unsigned char MAX(>) test failed\n" );
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = rank;
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
+ MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_MAX, 0, comm );
+ if (rank == 0) {
+ if (ldoutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "long double MAX(1) test failed\n" );
+ }
+ if (ldoutbuf[1] != 0.0) {
+ errs++;
+ fprintf( stderr, "long double MAX(0) test failed\n" );
+ }
+ if (ldoutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "long double MAX(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = rank;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_MAX, 0, comm );
+ if (rank == 0) {
+ if (lloutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "long long MAX(1) test failed\n" );
+ }
+ if (lloutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "long long MAX(0) test failed\n" );
+ }
+ if (lloutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "long long MAX(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_LONG */
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include <string.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_MAXLOC operations on datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of char and types that are not required
+ * integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ *
+ * The rule on max loc is that if there is a tie in the value, the minimum
+ * rank is used (see 4.9.3 in the MPI-1 standard)
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* 2 int */
+ {
+ struct twoint { int val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_2INT, MPI_MAXLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "2int MAXLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf( stderr, "2int MAXLOC(0) test failed, value = %d, should be zero\n", coutbuf[1].val );
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "2int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
+ }
+ if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
+ errs++;
+ fprintf( stderr, "2int MAXLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* float int */
+ {
+ struct floatint { float val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = (float)rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_FLOAT_INT, MPI_MAXLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "float-int MAXLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf( stderr, "float-int MAXLOC(0) test failed, value = %f, should be zero\n", coutbuf[1].val );
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "float-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
+ }
+ if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
+ errs++;
+ fprintf( stderr, "float-int MAXLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* long int */
+ {
+ struct longint { long val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_INT, MPI_MAXLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "long-int MAXLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf( stderr, "long-int MAXLOC(0) test failed, value = %ld, should be zero\n", coutbuf[1].val );
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "long-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
+ }
+ if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
+ errs++;
+ fprintf( stderr, "long-int MAXLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* short int */
+ {
+ struct shortint { short val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_SHORT_INT, MPI_MAXLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "short-int MAXLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf( stderr, "short-int MAXLOC(0) test failed, value = %d, should be zero\n", coutbuf[1].val );
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "short-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
+ }
+ if (coutbuf[2].val != size-1) {
+ errs++;
+ fprintf( stderr, "short-int MAXLOC(>) test failed, value = %d, should be %d\n", coutbuf[2].val, size-1 );
+ }
+ if (coutbuf[2].loc != size -1) {
+ errs++;
+ fprintf( stderr, "short-int MAXLOC(>) test failed, location of max = %d, should be %d\n", coutbuf[2].loc, size-1 );
+ }
+ }
+ }
+
+ /* double int */
+ {
+ struct doubleint { double val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_DOUBLE_INT, MPI_MAXLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "double-int MAXLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf( stderr, "double-int MAXLOC(0) test failed, value = %lf, should be zero\n", coutbuf[1].val );
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "double-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
+ }
+ if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
+ errs++;
+ fprintf( stderr, "double-int MAXLOC(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ /* long double int */
+ {
+ struct longdoubleint { long double val; int loc; } cinbuf[3], coutbuf[3];
+
+ /* avoid valgrind warnings about padding bytes in the long double */
+ memset(&cinbuf[0], 0, sizeof(cinbuf));
+ memset(&coutbuf[0], 0, sizeof(coutbuf));
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_DOUBLE_INT, MPI_MAXLOC,
+ 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "long double-int MAXLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf( stderr, "long double-int MAXLOC(0) test failed, value = %lf, should be zero\n", (double)coutbuf[1].val );
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "long double-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
+ }
+ if (coutbuf[2].val != size-1) {
+ errs++;
+ fprintf( stderr, "long double-int MAXLOC(>) test failed, value = %lf, should be %d\n", (double)coutbuf[2].val, size-1 );
+ }
+ if (coutbuf[2].loc != size-1) {
+ errs++;
+ fprintf( stderr, "long double-int MAXLOC(>) test failed, location of max = %d, should be %d\n", coutbuf[2].loc, size-1 );
+ }
+ }
+ }
+ }
+#endif
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_MIN operations on optional datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of char and types that are not required
+ * integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank & 0x7f);
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_MIN, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "char MIN(1) test failed\n" );
+ }
+ if (coutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "char MIN(0) test failed\n" );
+ }
+ if (coutbuf[2] != 0) {
+ errs++;
+ fprintf( stderr, "char MIN(>) test failed\n" );
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank & 0x7f);
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_MIN, 0, comm );
+ if (rank == 0) {
+ if (scoutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "signed char MIN(1) test failed\n" );
+ }
+ if (scoutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "signed char MIN(0) test failed\n" );
+ }
+ if (scoutbuf[2] != 0) {
+ errs++;
+ fprintf( stderr, "signed char MIN(>) test failed\n" );
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank & 0x7f);
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_MIN, 0, comm );
+ if (rank == 0) {
+ if (ucoutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "unsigned char MIN(1) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char MIN(0) test failed\n" );
+ }
+ if (ucoutbuf[2] != 0) {
+ errs++;
+ fprintf( stderr, "unsigned char MIN(>) test failed\n" );
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = rank;
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
+ MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_MIN, 0, comm );
+ if (rank == 0) {
+ if (ldoutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "long double MIN(1) test failed\n" );
+ }
+ if (ldoutbuf[1] != 0.0) {
+ errs++;
+ fprintf( stderr, "long double MIN(0) test failed\n" );
+ }
+ if (ldoutbuf[2] != 0.0) {
+ errs++;
+ fprintf( stderr, "long double MIN(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = rank;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_MIN, 0, comm );
+ if (rank == 0) {
+ if (lloutbuf[0] != 1) {
+ errs++;
+ fprintf( stderr, "long long MIN(1) test failed\n" );
+ }
+ if (lloutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "long long MIN(0) test failed\n" );
+ }
+ if (lloutbuf[2] != 0) {
+ errs++;
+ fprintf( stderr, "long long MIN(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_LONG */
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_MINLOC operations on datatypes dupported by MPICH";
+*/
+
+/*
+ * This test looks at the handling of char and types that are not required
+ * integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ *
+ * The rule on min loc is that if there is a tie in the value, the minimum
+ * rank is used (see 4.9.3 in the MPI-1 standard)
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* 2 int */
+ {
+ struct twoint { int val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = (rank & 0x7f);
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_2INT, MPI_MINLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 && coutbuf[0].loc != -1) {
+ errs++;
+ fprintf( stderr, "2int MINLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0 && coutbuf[1].loc != -1) {
+ errs++;
+ fprintf( stderr, "2int MINLOC(0) test failed\n" );
+ }
+ if (coutbuf[2].val != 0 && coutbuf[2].loc != 0) {
+ errs++;
+ fprintf( stderr, "2int MINLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* float int */
+ {
+ struct floatint { float val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = (float)rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_FLOAT_INT, MPI_MINLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 && coutbuf[0].loc != -1) {
+ errs++;
+ fprintf( stderr, "float-int MINLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0 && coutbuf[1].loc != -1) {
+ errs++;
+ fprintf( stderr, "float-int MINLOC(0) test failed\n" );
+ }
+ if (coutbuf[2].val != 0 && coutbuf[2].loc != 0) {
+ errs++;
+ fprintf( stderr, "float-int MINLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* long int */
+ {
+ struct longint { long val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_INT, MPI_MINLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "long-int MINLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "long-int MINLOC(0) test failed\n" );
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf( stderr, "long-int MINLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* short int */
+ {
+ struct shortint { short val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_SHORT_INT, MPI_MINLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "short-int MINLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "short-int MINLOC(0) test failed\n" );
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf( stderr, "short-int MINLOC(>) test failed\n" );
+ }
+ }
+ }
+
+ /* double int */
+ {
+ struct doubleint { double val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_DOUBLE_INT, MPI_MINLOC, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "double-int MINLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "double-int MINLOC(0) test failed\n" );
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf( stderr, "double-int MINLOC(>) test failed\n" );
+ }
+ }
+ }
+
+#ifdef HAVE_LONG_DOUBLE
+ /* long double int */
+ {
+ struct longdoubleint { long double val; int loc; } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_DOUBLE_INT, MPI_MINLOC,
+ 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf( stderr, "long double-int MINLOC(1) test failed\n" );
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf( stderr, "long double-int MINLOC(0) test failed\n" );
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf( stderr, "long double-int MINLOC(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_PROD operations on optional datatypes dupported by MPICH";
+*/
+
+typedef struct { double r, i; } d_complex;
+#ifdef HAVE_LONG_DOUBLE
+typedef struct { long double r, i; } ld_complex;
+#endif
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, maxsize, result[6] = { 1, 1, 2, 6, 24, 120 };
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ d_complex dinbuf[3], doutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ if (size > 5) maxsize = 5;
+ else maxsize = size;
+
+ /* General forumula: If we multiple the values from 1 to n, the
+ product is n!. This grows very fast, so we'll only use the first
+ five (1! = 1, 2! = 2, 3! = 6, 4! = 24, 5! = 120), with n!
+ stored in the array result[n] */
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 1);
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ if (coutbuf[0] != (char)result[maxsize-1]) {
+ errs++;
+ fprintf( stderr, "char PROD(rank) test failed (%d!=%d)\n",
+ (int)coutbuf[0], (int)result[maxsize]);
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf( stderr, "char PROD(0) test failed\n" );
+ }
+ if (size > 1 && coutbuf[2]) {
+ errs++;
+ fprintf( stderr, "char PROD(>) test failed\n" );
+ }
+ }
+#endif /* USE_STRICT_MPI */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 1);
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ if (scoutbuf[0] != (signed char)result[maxsize-1]) {
+ errs++;
+ fprintf( stderr, "signed char PROD(rank) test failed (%d!=%d)\n",
+ (int)scoutbuf[0], (int)result[maxsize]);
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "signed char PROD(0) test failed\n" );
+ }
+ if (size > 1 && scoutbuf[2]) {
+ errs++;
+ fprintf( stderr, "signed char PROD(>) test failed\n" );
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0);
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ if (ucoutbuf[0] != (unsigned char)result[maxsize-1]) {
+ errs++;
+ fprintf( stderr, "unsigned char PROD(rank) test failed\n" );
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char PROD(0) test failed\n" );
+ }
+ if (size > 1 && ucoutbuf[2]) {
+ errs++;
+ fprintf( stderr, "unsigned char PROD(>) test failed\n" );
+ }
+ }
+
+#ifndef USE_STRICT_MPI
+ /* For some reason, complex is not allowed for sum and prod */
+ if (MPI_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) {
+ int dc;
+#ifdef HAVE_LONG_DOUBLE
+ ld_complex ldinbuf[3], ldoutbuf[3];
+#endif
+ /* Must determine which C type matches this Fortran type */
+ MPI_Type_size( MPI_DOUBLE_COMPLEX, &dc );
+ if (dc == sizeof(d_complex)) {
+ /* double complex; may be null if we do not have Fortran support */
+ dinbuf[0].r = (rank < maxsize && rank > 0) ? rank : 1;
+ dinbuf[1].r = 0;
+ dinbuf[2].r = (rank > 0);
+ dinbuf[0].i = 0;
+ dinbuf[1].i = 1;
+ dinbuf[2].i = -(rank > 0);
+
+ doutbuf[0].r = 0;
+ doutbuf[1].r = 1;
+ doutbuf[2].r = 1;
+ doutbuf[0].i = 0;
+ doutbuf[1].i = 1;
+ doutbuf[2].i = 1;
+ MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ double imag, real;
+ if (doutbuf[0].r != (double)result[maxsize-1] || doutbuf[0].i != 0) {
+ errs++;
+ fprintf( stderr, "double complex PROD(rank) test failed\n" );
+ }
+ /* Multiplying the imaginary part depends on size mod 4 */
+ imag = 1.0; real = 0.0; /* Make compiler happy */
+ switch (size % 4) {
+ case 1: imag = 1.0; real = 0.0; break;
+ case 2: imag = 0.0; real = -1.0; break;
+ case 3: imag =-1.0; real = 0.0; break;
+ case 0: imag = 0.0; real = 1.0; break;
+ }
+ if (doutbuf[1].r != real || doutbuf[1].i != imag) {
+ errs++;
+ fprintf( stderr, "double complex PROD(i) test failed (%f,%f)!=(%f,%f)\n",
+ doutbuf[1].r,doutbuf[1].i,real,imag);
+ }
+ if (doutbuf[2].r != 0 || doutbuf[2].i != 0) {
+ errs++;
+ fprintf( stderr, "double complex PROD(>) test failed\n" );
+ }
+ }
+ }
+#ifdef HAVE_LONG_DOUBLE
+ else if (dc == sizeof(ld_complex)) {
+ /* double complex; may be null if we do not have Fortran support */
+ ldinbuf[0].r = (rank < maxsize && rank > 0) ? rank : 1;
+ ldinbuf[1].r = 0;
+ ldinbuf[2].r = (rank > 0);
+ ldinbuf[0].i = 0;
+ ldinbuf[1].i = 1;
+ ldinbuf[2].i = -(rank > 0);
+
+ ldoutbuf[0].r = 0;
+ ldoutbuf[1].r = 1;
+ ldoutbuf[2].r = 1;
+ ldoutbuf[0].i = 0;
+ ldoutbuf[1].i = 1;
+ ldoutbuf[2].i = 1;
+ MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ long double imag, real;
+ if (ldoutbuf[0].r != (double)result[maxsize-1] || ldoutbuf[0].i != 0) {
+ errs++;
+ fprintf( stderr, "double complex PROD(rank) test failed\n" );
+ }
+ /* Multiplying the imaginary part depends on size mod 4 */
+ imag = 1.0; real = 0.0; /* Make compiler happy */
+ switch (size % 4) {
+ case 1: imag = 1.0; real = 0.0; break;
+ case 2: imag = 0.0; real = -1.0; break;
+ case 3: imag =-1.0; real = 0.0; break;
+ case 0: imag = 0.0; real = 1.0; break;
+ }
+ if (ldoutbuf[1].r != real || ldoutbuf[1].i != imag) {
+ errs++;
+ fprintf( stderr, "double complex PROD(i) test failed (%Lf,%Lf)!=(%Lf,%Lf)\n",
+ ldoutbuf[1].r,ldoutbuf[1].i,real,imag);
+ }
+ if (ldoutbuf[2].r != 0 || ldoutbuf[2].i != 0) {
+ errs++;
+ fprintf( stderr, "double complex PROD(>) test failed\n" );
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+ }
+#endif /* USE_STRICT_MPI */
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ if (ldoutbuf[0] != (long double)result[maxsize-1]) {
+ errs++;
+ fprintf( stderr, "long double PROD(rank) test failed\n" );
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long double PROD(0) test failed\n" );
+ }
+ if (size > 1 && ldoutbuf[2] != 0) {
+ errs++;
+ fprintf( stderr, "long double PROD(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_DOUBLE */
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_PROD, 0, comm );
+ if (rank == 0) {
+ if (lloutbuf[0] != (long long)result[maxsize-1]) {
+ errs++;
+ fprintf( stderr, "long long PROD(rank) test failed\n" );
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf( stderr, "long long PROD(0) test failed\n" );
+ }
+ if (size > 1 && lloutbuf[2]) {
+ errs++;
+ fprintf( stderr, "long long PROD(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif /* HAVE_LONG_LONG */
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_SUM operations on optional datatypes dupported by MPICH";
+*/
+
+typedef struct { double r, i; } d_complex;
+#ifdef HAVE_LONG_DOUBLE
+typedef struct { long double r, i; } ld_complex;
+#endif
+
+/*
+ * This test looks at the handling of logical and for types that are not
+ * integers or are not required integers (e.g., long long). MPICH allows
+ * these as well. A strict MPI test should not include this test.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ MPI_Comm comm;
+ char cinbuf[3], coutbuf[3];
+ signed char scinbuf[3], scoutbuf[3];
+ unsigned char ucinbuf[3], ucoutbuf[3];
+ d_complex dinbuf[3], doutbuf[3];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+#ifndef USE_STRICT_MPI
+ /* char */
+ MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ cinbuf[0] = 1;
+ cinbuf[1] = 0;
+ cinbuf[2] = (rank > 0);
+
+ coutbuf[0] = 0;
+ coutbuf[1] = 1;
+ coutbuf[2] = 1;
+ MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (size < 128 && coutbuf[0] != size) {
+ errs++;
+ fprintf( stderr, "char SUM(1) test failed\n" );
+ }
+ if (size < 128 && coutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "char SUM(0) test failed\n" );
+ }
+ if (size < 128 && coutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "char SUM(>) test failed\n" );
+ }
+ }
+#endif /* USE_MPI_STRICT */
+
+ /* signed char */
+ MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ scinbuf[0] = 1;
+ scinbuf[1] = 0;
+ scinbuf[2] = (rank > 0);
+
+ scoutbuf[0] = 0;
+ scoutbuf[1] = 1;
+ scoutbuf[2] = 1;
+ MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (size < 128 && scoutbuf[0] != size) {
+ errs++;
+ fprintf( stderr, "signed char SUM(1) test failed\n" );
+ }
+ if (size < 128 && scoutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "signed char SUM(0) test failed\n" );
+ }
+ if (size < 128 && scoutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "signed char SUM(>) test failed\n" );
+ }
+ }
+
+ /* unsigned char */
+ MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ ucinbuf[0] = 1;
+ ucinbuf[1] = 0;
+ ucinbuf[2] = (rank > 0);
+
+ ucoutbuf[0] = 0;
+ ucoutbuf[1] = 1;
+ ucoutbuf[2] = 1;
+ MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (size < 128 && ucoutbuf[0] != size) {
+ errs++;
+ fprintf( stderr, "unsigned char SUM(1) test failed\n" );
+ }
+ if (size < 128 && ucoutbuf[1]) {
+ errs++;
+ fprintf( stderr, "unsigned char SUM(0) test failed\n" );
+ }
+ if (size < 128 && ucoutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "unsigned char SUM(>) test failed\n" );
+ }
+ }
+
+#ifndef USE_STRICT_MPI
+ /* For some reason, complex is not allowed for sum and prod */
+ if (MPI_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) {
+ int dc;
+#ifdef HAVE_LONG_DOUBLE
+ ld_complex ldinbuf[3], ldoutbuf[3];
+#endif
+ /* Must determine which C type matches this Fortran type */
+ MPI_Type_size( MPI_DOUBLE_COMPLEX, &dc );
+ if (dc == sizeof(d_complex)) {
+ MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE_COMPLEX\n" );
+ /* double complex; may be null if we do not have Fortran support */
+ dinbuf[0].r = 1;
+ dinbuf[1].r = 0;
+ dinbuf[2].r = (rank > 0);
+ dinbuf[0].i = -1;
+ dinbuf[1].i = 0;
+ dinbuf[2].i = -(rank > 0);
+
+ doutbuf[0].r = 0;
+ doutbuf[1].r = 1;
+ doutbuf[2].r = 1;
+ doutbuf[0].i = 0;
+ doutbuf[1].i = 1;
+ doutbuf[2].i = 1;
+ MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (doutbuf[0].r != size || doutbuf[0].i != -size) {
+ errs++;
+ fprintf( stderr, "double complex SUM(1) test failed\n" );
+ }
+ if (doutbuf[1].r != 0 || doutbuf[1].i != 0) {
+ errs++;
+ fprintf( stderr, "double complex SUM(0) test failed\n" );
+ }
+ if (doutbuf[2].r != size - 1 || doutbuf[2].i != 1 - size) {
+ errs++;
+ fprintf( stderr, "double complex SUM(>) test failed\n" );
+ }
+ }
+ }
+#ifdef HAVE_LONG_DOUBLE
+ else if (dc == sizeof(ld_complex)) {
+ MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE_COMPLEX\n" );
+ /* double complex; may be null if we do not have Fortran support */
+ ldinbuf[0].r = 1;
+ ldinbuf[1].r = 0;
+ ldinbuf[2].r = (rank > 0);
+ ldinbuf[0].i = -1;
+ ldinbuf[1].i = 0;
+ ldinbuf[2].i = -(rank > 0);
+
+ ldoutbuf[0].r = 0;
+ ldoutbuf[1].r = 1;
+ ldoutbuf[2].r = 1;
+ ldoutbuf[0].i = 0;
+ ldoutbuf[1].i = 1;
+ ldoutbuf[2].i = 1;
+ MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_DOUBLE_COMPLEX,
+ MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (ldoutbuf[0].r != size || ldoutbuf[0].i != -size) {
+ errs++;
+ fprintf( stderr, "double complex SUM(1) test failed\n" );
+ }
+ if (ldoutbuf[1].r != 0 || ldoutbuf[1].i != 0) {
+ errs++;
+ fprintf( stderr, "double complex SUM(0) test failed\n" );
+ }
+ if (ldoutbuf[2].r != size - 1 || ldoutbuf[2].i != 1 - size) {
+ errs++;
+ fprintf( stderr, "double complex SUM(>) test failed\n" );
+ }
+ }
+ }
+#endif
+ /* Implicitly ignore if there is no matching C type */
+ }
+#endif /* USE_STRICT_MPI */
+
+#ifdef HAVE_LONG_DOUBLE
+ { long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
+ MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (ldoutbuf[0] != size) {
+ errs++;
+ fprintf( stderr, "long double SUM(1) test failed\n" );
+ }
+ if (ldoutbuf[1] != 0.0) {
+ errs++;
+ fprintf( stderr, "long double SUM(0) test failed\n" );
+ }
+ if (ldoutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "long double SUM(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif
+
+#ifdef HAVE_LONG_LONG
+ {
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
+ MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_SUM, 0, comm );
+ if (rank == 0) {
+ if (lloutbuf[0] != size) {
+ errs++;
+ fprintf( stderr, "long long SUM(1) test failed\n" );
+ }
+ if (lloutbuf[1] != 0) {
+ errs++;
+ fprintf( stderr, "long long SUM(0) test failed\n" );
+ }
+ if (lloutbuf[2] != size - 1) {
+ errs++;
+ fprintf( stderr, "long long SUM(>) test failed\n" );
+ }
+ }
+ }
+ }
+#endif
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Reduce with non-commutative user-define operations";
+*/
+/*
+ * This tests that the reduce operation respects the noncommutative flag.
+ * See red4.c for a version that can distinguish between P_{root} P_{root+1}
+ * ... P_{root-1} and P_0 ... P_{size-1} . The MPI standard clearly
+ * specifies that the result is P_0 ... P_{size-1}, independent of the root
+ * (see 4.9.4 in MPI-1)
+ */
+
+/* This implements a simple matrix-matrix multiply. This is an associative
+ but not commutative operation. The matrix size is set in matSize;
+ the number of matrices is the count argument. The matrix is stored
+ in C order, so that
+ c(i,j) is cin[j+i*matSize]
+ */
+#define MAXCOL 256
+static int matSize = 0; /* Must be < MAXCOL */
+void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+ const int *cin = (const int *)cinPtr;
+ int *cout = (int *)coutPtr;
+ int i, j, k, nmat;
+ int tempCol[MAXCOL];
+
+ for (nmat = 0; nmat < *count; nmat++) {
+ for (j=0; j<matSize; j++) {
+ for (i=0; i<matSize; i++) {
+ tempCol[i] = 0;
+ for (k=0; k<matSize; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ tempCol[i] += cin[k+i*matSize] * cout[j+k*matSize];
+ }
+ }
+ for (i=0; i<matSize; i++) {
+ cout[j+i*matSize] = tempCol[i];
+ }
+ }
+ }
+}
+
+/* Initialize the integer matrix as a permutation of rank with rank+1.
+ If we call this matrix P_r, we know that product of P_0 P_1 ... P_{size-2}
+ is a left shift by 1.
+*/
+
+static void initMat( MPI_Comm comm, int mat[] )
+{
+ int i, size, rank;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (i=0; i<size*size; i++) mat[i] = 0;
+
+ /* For each row */
+ for (i=0; i<size; i++) {
+ if (rank != size - 1) {
+ if (i == rank) mat[((i+1)%size) + i * size] = 1;
+ else if (i == ((rank + 1)%size)) mat[((i+size-1)%size) + i * size] = 1;
+ else mat[i+i*size] = 1;
+ }
+ else {
+ mat[i+i*size] = 1;
+ }
+ }
+}
+
+#ifdef FOO
+/* Compare a matrix with the identity matrix */
+static int isIdentity( MPI_Comm comm, int mat[] )
+{
+ int i, j, size, rank, errs = 0;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<size; j++) {
+ if (i == j) {
+ if (mat[j+i*size] != 1) {
+ errs++;
+ }
+ }
+ else {
+ if (mat[j+i*size] != 0) {
+ errs++;
+ }
+ }
+ }
+ }
+ return errs;
+}
+#endif
+
+/* Compare a matrix with the identity matrix */
+static int isShiftLeft( MPI_Comm comm, int mat[] )
+{
+ int i, j, size, rank, errs = 0;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<size; j++) {
+ if (i == ((j + 1) % size)) {
+ if (mat[j+i*size] != 1) {
+ errs++;
+ }
+ }
+ else {
+ if (mat[j+i*size] != 0) {
+ errs++;
+ }
+ }
+ }
+ }
+ return errs;
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *buf, *bufout;
+ MPI_Op op;
+ MPI_Datatype mattype;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Op_create( uop, 0, &op );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ matSize = size; /* used by the user-defined operation */
+ /* Only one matrix for now */
+ count = 1;
+
+ /* A single matrix, the size of the communicator */
+ MPI_Type_contiguous( size*size, MPI_INT, &mattype );
+ MPI_Type_commit( &mattype );
+
+ buf = (int *)malloc( count * size * size * sizeof(int) );
+ if (!buf) MPI_Abort( MPI_COMM_WORLD, 1 );
+ bufout = (int *)malloc( count * size * size * sizeof(int) );
+ if (!bufout) MPI_Abort( MPI_COMM_WORLD, 1 );
+
+ for (root = 0; root < size; root ++) {
+ initMat( comm, buf );
+ MPI_Reduce( buf, bufout, count, mattype, op, root, comm );
+ if (rank == root) {
+ errs += isShiftLeft( comm, bufout );
+ }
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ initMat( comm, bufout );
+ if (rank == root) {
+ MPI_Reduce( MPI_IN_PLACE, bufout, count, mattype, op, root, comm );
+ }
+ else {
+ MPI_Reduce( bufout, NULL, count, mattype, op, root, comm );
+ }
+ if (rank == root) {
+ errs += isShiftLeft( comm, bufout );
+ }
+ }
+
+ free( buf );
+ free( bufout );
+
+ MPI_Type_free( &mattype );
+
+ MTestFreeComm( &comm );
+ }
+
+ MPI_Op_free( &op );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Reduce with non-commutative user-define operations and arbitrary root";
+*/
+
+/*
+ * This tests that the reduce operation respects the noncommutative flag.
+ * and that can distinguish between P_{root} P_{root+1}
+ * ... P_{root-1} and P_0 ... P_{size-1} . The MPI standard clearly
+ * specifies that the result is P_0 ... P_{size-1}, independent of the root
+ * (see 4.9.4 in MPI-1)
+ */
+
+/* This implements a simple matrix-matrix multiply. This is an associative
+ but not commutative operation. The matrix size is set in matSize;
+ the number of matrices is the count argument. The matrix is stored
+ in C order, so that
+ c(i,j) is cin[j+i*matSize]
+ */
+#define MAXCOL 256
+static int matSize = 0; /* Must be < MAXCOL */
+
+void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+ const int *cin;
+ int *cout;
+ int i, j, k, nmat;
+ int tempCol[MAXCOL];
+
+ if (*count != 1) printf( "Panic!\n" );
+ for (nmat = 0; nmat < *count; nmat++) {
+ cin = (const int *)cinPtr;
+ cout = (int *)coutPtr;
+ for (j=0; j<matSize; j++) {
+ for (i=0; i<matSize; i++) {
+ tempCol[i] = 0;
+ for (k=0; k<matSize; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ tempCol[i] += cin[k+i*matSize] * cout[j+k*matSize];
+ }
+ }
+ for (i=0; i<matSize; i++) {
+ cout[j+i*matSize] = tempCol[i];
+ }
+ }
+ cinPtr = (int *)cinPtr + matSize*matSize;
+ coutPtr = (int *)coutPtr + matSize*matSize;
+ }
+}
+
+/* Initialize the integer matrix as a permutation of rank with rank+1.
+ If we call this matrix P_r, we know that product of P_0 P_1 ... P_{size-1}
+ is the matrix with rows ordered as
+ 1,size,2,3,4,...,size-1
+ (The matrix is basically a circular shift right,
+ shifting right n-1 steps for an n x n dimensional matrix, with the last
+ step swapping rows 1 and size)
+*/
+
+static void initMat( MPI_Comm comm, int mat[] )
+{
+ int i, size, rank;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* Remember the matrix size */
+ matSize = size;
+
+ for (i=0; i<matSize*matSize; i++) mat[i] = 0;
+
+ for (i=0; i<matSize; i++) {
+ if (i == rank)
+ mat[((i+1)%matSize) + i * matSize] = 1;
+ else if (i == ((rank + 1)%matSize))
+ mat[((i+matSize-1)%matSize) + i * matSize] = 1;
+ else
+ mat[i+i*matSize] = 1;
+ }
+}
+
+/* Compare a matrix with the identity matrix */
+/*
+static int isIdentity( MPI_Comm comm, int mat[] )
+{
+ int i, j, size, rank, errs = 0;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<size; j++) {
+ if (j == i) {
+ if (mat[j+i*size] != 1) {
+ printf( "mat(%d,%d) = %d, should = 1\n",
+ i, j, mat[j+i*size] );
+ errs++;
+ }
+ }
+ else {
+ if (mat[j+i*size] != 0) {
+ printf( "mat(%d,%d) = %d, should = 0\n",
+ i, j, mat[j+i*size] );
+ errs++;
+ }
+ }
+ }
+ }
+ return errs;
+}
+*/
+
+/* Compare a matrix with the identity matrix with rows permuted to as rows
+ 1,size,2,3,4,5,...,size-1 */
+static int isPermutedIdentity( MPI_Comm comm, int mat[] )
+{
+ int i, j, size, rank, errs = 0;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* Check the first two last rows */
+ i = 0;
+ for (j=0; j<size; j++) {
+ if (j==0) {
+ if (mat[j] != 1) {
+ printf( "mat(%d,%d) = %d, should = 1\n",
+ i, j, mat[j] );
+ errs++;
+ }
+ }
+ else {
+ if (mat[j] != 0) {
+ printf( "mat(%d,%d) = %d, should = 0\n",
+ i, j, mat[j] );
+ errs++;
+ }
+ }
+ }
+ i = 1;
+ for (j=0; j<size; j++) {
+ if (j==size-1) {
+ if (mat[j+i*size] != 1) {
+ printf( "mat(%d,%d) = %d, should = 1\n",
+ i, j, mat[j+i*size] );
+ errs++;
+ }
+ }
+ else {
+ if (mat[j+i*size] != 0) {
+ printf( "mat(%d,%d) = %d, should = 0\n",
+ i, j, mat[j+i*size] );
+ errs++;
+ }
+ }
+ }
+ /* The remaint rows are shifted down by one */
+ for (i=2; i<size; i++) {
+ for (j=0; j<size; j++) {
+ if (j == i-1) {
+ if (mat[j+i*size] != 1) {
+ printf( "mat(%d,%d) = %d, should = 1\n",
+ i, j, mat[j+i*size] );
+ errs++;
+ }
+ }
+ else {
+ if (mat[j+i*size] != 0) {
+ printf( "mat(%d,%d) = %d, should = 0\n",
+ i, j, mat[j+i*size] );
+ errs++;
+ }
+ }
+ }
+ }
+ return errs;
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *buf, *bufout;
+ MPI_Op op;
+ MPI_Datatype mattype;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Op_create( uop, 0, &op );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ if (size > MAXCOL) {
+ /* Skip because there are too many processes */
+ MTestFreeComm( &comm );
+ continue;
+ }
+
+ /* Only one matrix for now */
+ count = 1;
+
+ /* A single matrix, the size of the communicator */
+ MPI_Type_contiguous( size*size, MPI_INT, &mattype );
+ MPI_Type_commit( &mattype );
+
+ buf = (int *)malloc( count * size * size * sizeof(int) );
+ if (!buf) MPI_Abort( MPI_COMM_WORLD, 1 );
+ bufout = (int *)malloc( count * size * size * sizeof(int) );
+ if (!bufout) MPI_Abort( MPI_COMM_WORLD, 1 );
+
+ for (root = 0; root < size; root ++) {
+ initMat( comm, buf );
+ MPI_Reduce( buf, bufout, count, mattype, op, root, comm );
+ if (rank == root) {
+ errs += isPermutedIdentity( comm, bufout );
+ }
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ initMat( comm, bufout );
+ if (rank == root) {
+ MPI_Reduce( MPI_IN_PLACE, bufout, count, mattype, op, root, comm );
+ }
+ else {
+ MPI_Reduce( bufout, NULL, count, mattype, op, root, comm );
+ }
+ if (rank == root) {
+ errs += isPermutedIdentity( comm, bufout );
+ }
+ }
+ MPI_Type_free( &mattype );
+
+ free( buf );
+ free( bufout );
+
+ MTestFreeComm( &comm );
+ }
+
+ MPI_Op_free( &op );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter block.
+ *
+ * Each process contributes its rank + the index to the reduction,
+ * then receives the ith sum
+ *
+ * Can be called with any number of processes.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv)
+{
+ int err = 0;
+ int toterr, size, rank, i, sumval;
+ int *sendbuf;
+ int *recvbuf;
+ MPI_Comm comm;
+
+ MPI_Init(&argc, &argv);
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* MPI_Reduce_scatter block was added in MPI-2.2 */
+ sendbuf = (int *) malloc(size * sizeof(int));
+ recvbuf = (int *) malloc(size * sizeof(int));
+ if (!sendbuf || !recvbuf) {
+ err++;
+ fprintf(stderr, "unable to allocate send/recv buffers, aborting");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i=0; i<size; i++)
+ sendbuf[i] = rank + i;
+
+ MPI_Reduce_scatter_block(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm);
+
+ sumval = size * rank + ((size - 1) * size)/2;
+ if (recvbuf[0] != sumval) {
+ err++;
+ fprintf(stdout, "Did not get expected value for reduce scatter block\n");
+ fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval);
+ }
+
+ free(sendbuf);
+
+ /* let's try it again with MPI_IN_PLACE this time */
+ for (i=0; i<size; i++)
+ recvbuf[i] = rank + i;
+
+ MPI_Reduce_scatter_block(MPI_IN_PLACE, recvbuf, 1, MPI_INT, MPI_SUM, comm);
+
+ sumval = size * rank + ((size - 1) * size)/2;
+ if (recvbuf[0] != sumval) {
+ err++;
+ fprintf(stdout, "Did not get expected value for reduce scatter block\n");
+ fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval);
+ }
+ free(recvbuf);
+#endif
+
+ MPI_Allreduce(&err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+ if (rank == 0 && toterr == 0) {
+ printf(" No Errors\n");
+ }
+ MPI_Finalize();
+
+ return toterr;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce_scatter_block.
+ *
+ * Checks that non-commutative operations are not commuted and that
+ * all of the operations are performed.
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+int err = 0;
+
+/* left(x,y) ==> x */
+void left(void *a, void *b, int *count, MPI_Datatype *type);
+void left(void *a, void *b, int *count, MPI_Datatype *type)
+{
+ int *in = a;
+ int *inout = b;
+ int i;
+
+ for (i = 0; i < *count; ++i)
+ {
+ if (in[i] > inout[i])
+ ++err;
+ inout[i] = in[i];
+ }
+}
+
+/* right(x,y) ==> y */
+void right(void *a, void *b, int *count, MPI_Datatype *type);
+void right(void *a, void *b, int *count, MPI_Datatype *type)
+{
+ int *in = a;
+ int *inout = b;
+ int i;
+
+ for (i = 0; i < *count; ++i)
+ {
+ if (in[i] > inout[i])
+ ++err;
+ inout[i] = inout[i];
+ }
+}
+
+/* Just performs a simple sum but can be marked as non-commutative to
+ potentially tigger different logic in the implementation. */
+void nc_sum(void *a, void *b, int *count, MPI_Datatype *type);
+void nc_sum(void *a, void *b, int *count, MPI_Datatype *type)
+{
+ int *in = a;
+ int *inout = b;
+ int i;
+
+ for (i = 0; i < *count; ++i)
+ {
+ inout[i] = in[i] + inout[i];
+ }
+}
+
+#define MAX_BLOCK_SIZE 256
+
+int main( int argc, char **argv )
+{
+ int *sendbuf;
+ int block_size;
+ int *recvbuf;
+ int size, rank, i;
+ MPI_Comm comm;
+ MPI_Op left_op, right_op, nc_sum_op;
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* MPI_Reduce_scatter block was added in MPI-2.2 */
+
+ MPI_Op_create(&left, 0/*non-commutative*/, &left_op);
+ MPI_Op_create(&right, 0/*non-commutative*/, &right_op);
+ MPI_Op_create(&nc_sum, 0/*non-commutative*/, &nc_sum_op);
+
+ for (block_size = 1; block_size < MAX_BLOCK_SIZE; block_size *= 2) {
+ sendbuf = (int *) malloc( block_size * size * sizeof(int) );
+ recvbuf = malloc( block_size * sizeof(int) );
+
+ for (i=0; i<(size*block_size); i++)
+ sendbuf[i] = rank + i;
+ for (i=0; i<block_size; i++)
+ recvbuf[i] = 0xdeadbeef;
+
+ MPI_Reduce_scatter_block( sendbuf, recvbuf, block_size, MPI_INT, left_op, comm );
+ for (i = 0; i < block_size; ++i)
+ if (recvbuf[i] != (rank * block_size + i)) ++err;
+
+ MPI_Reduce_scatter_block( sendbuf, recvbuf, block_size, MPI_INT, right_op, comm );
+ for (i = 0; i < block_size; ++i)
+ if (recvbuf[i] != ((size - 1) + (rank * block_size) + i)) ++err;
+
+ MPI_Reduce_scatter_block( sendbuf, recvbuf, block_size, MPI_INT, nc_sum_op, comm );
+ for (i = 0; i < block_size; ++i) {
+ int x = rank * block_size + i;
+ if (recvbuf[i] != (size*x + (size-1)*size/2)) ++err;
+ }
+
+ free(recvbuf);
+ free(sendbuf);
+ }
+
+ MPI_Op_free(&left_op);
+ MPI_Op_free(&right_op);
+ MPI_Op_free(&nc_sum_op);
+#endif
+
+ MTest_Finalize( err );
+ MPI_Finalize( );
+
+ return err;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter.
+ *
+ * Each processor contributes its rank + the index to the reduction,
+ * then receives the ith sum
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main( int argc, char **argv )
+{
+ int err = 0, toterr;
+ int *sendbuf, recvbuf, *recvcounts;
+ int size, rank, i, sumval;
+ MPI_Comm comm;
+
+
+ MPI_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ sendbuf = (int *) malloc( size * sizeof(int) );
+ for (i=0; i<size; i++)
+ sendbuf[i] = rank + i;
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ for (i=0; i<size; i++)
+ recvcounts[i] = 1;
+
+ MPI_Reduce_scatter( sendbuf, &recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
+
+ sumval = size * rank + ((size - 1) * size)/2;
+/* recvbuf should be size * (rank + i) */
+ if (recvbuf != sumval) {
+ err++;
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf, sumval );
+ }
+
+ MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (rank == 0 && toterr == 0) {
+ printf( " No Errors\n" );
+ }
+ MPI_Finalize( );
+
+ return toterr;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter.
+ *
+ * Checks that non-commutative operations are not commuted and that
+ * all of the operations are performed.
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+int err = 0;
+
+/* left(x,y) ==> x */
+void left(void *a, void *b, int *count, MPI_Datatype *type);
+void left(void *a, void *b, int *count, MPI_Datatype *type)
+{
+ int *in = a;
+ int *inout = b;
+ int i;
+
+ for (i = 0; i < *count; ++i)
+ {
+ if (in[i] > inout[i])
+ ++err;
+ inout[i] = in[i];
+ }
+}
+
+/* right(x,y) ==> y */
+void right(void *a, void *b, int *count, MPI_Datatype *type);
+void right(void *a, void *b, int *count, MPI_Datatype *type)
+{
+ int *in = a;
+ int *inout = b;
+ int i;
+
+ for (i = 0; i < *count; ++i)
+ {
+ if (in[i] > inout[i])
+ ++err;
+ inout[i] = inout[i];
+ }
+}
+
+/* Just performs a simple sum but can be marked as non-commutative to
+ potentially tigger different logic in the implementation. */
+void nc_sum(void *a, void *b, int *count, MPI_Datatype *type);
+void nc_sum(void *a, void *b, int *count, MPI_Datatype *type)
+{
+ int *in = a;
+ int *inout = b;
+ int i;
+
+ for (i = 0; i < *count; ++i)
+ {
+ inout[i] = in[i] + inout[i];
+ }
+}
+
+#define MAX_BLOCK_SIZE 256
+
+int main( int argc, char **argv )
+{
+ int *sendbuf, *recvcounts;
+ int block_size;
+ int *recvbuf;
+ int size, rank, i;
+ MPI_Comm comm;
+ MPI_Op left_op, right_op, nc_sum_op;
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ MPI_Op_create(&left, 0/*non-commutative*/, &left_op);
+ MPI_Op_create(&right, 0/*non-commutative*/, &right_op);
+ MPI_Op_create(&nc_sum, 0/*non-commutative*/, &nc_sum_op);
+
+ for (block_size = 1; block_size < MAX_BLOCK_SIZE; block_size *= 2) {
+ sendbuf = (int *) malloc( block_size * size * sizeof(int) );
+ recvbuf = malloc( block_size * sizeof(int) );
+
+ for (i=0; i<(size*block_size); i++)
+ sendbuf[i] = rank + i;
+ for (i=0; i<block_size; i++)
+ recvbuf[i] = 0xdeadbeef;
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ for (i=0; i<size; i++)
+ recvcounts[i] = block_size;
+
+ MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, left_op, comm );
+ for (i = 0; i < block_size; ++i)
+ if (recvbuf[i] != (rank * block_size + i)) ++err;
+
+ MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, right_op, comm );
+ for (i = 0; i < block_size; ++i)
+ if (recvbuf[i] != ((size - 1) + (rank * block_size) + i)) ++err;
+
+ MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, nc_sum_op, comm );
+ for (i = 0; i < block_size; ++i) {
+ int x = rank * block_size + i;
+ if (recvbuf[i] != (size*x + (size-1)*size/2)) ++err;
+ }
+
+ free(recvbuf);
+ free(sendbuf);
+ }
+
+ MPI_Op_free(&left_op);
+ MPI_Op_free(&right_op);
+ MPI_Op_free(&nc_sum_op);
+
+ MTest_Finalize( err );
+ MPI_Finalize( );
+
+ return err;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter with large data (needed in MPICH to trigger the
+ * long-data algorithm)
+ *
+ * Each processor contributes its rank + the index to the reduction,
+ * then receives the ith sum
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/* Limit the number of error reports */
+#define MAX_ERRORS 10
+
+int main( int argc, char **argv )
+{
+ int err = 0;
+ int *sendbuf, *recvbuf, *recvcounts;
+ int size, rank, i, j, idx, mycount, sumval;
+ MPI_Comm comm;
+
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ if (!recvcounts) {
+ fprintf( stderr, "Could not allocate %d ints for recvcounts\n",
+ size );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ mycount = (1024 * 1024) / size;
+ for (i=0; i<size; i++)
+ recvcounts[i] = mycount;
+ sendbuf = (int *) malloc( mycount * size * sizeof(int) );
+ if (!sendbuf) {
+ fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
+ mycount * size );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ idx = 0;
+ for (i=0; i<size; i++) {
+ for (j=0; j<mycount; j++) {
+ sendbuf[idx++] = rank + i;
+ }
+ }
+ recvbuf = (int *)malloc( mycount * sizeof(int) );
+ if (!recvbuf) {
+ fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
+ mycount );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<mycount; i++) {
+ recvbuf[i] = -1;
+ }
+
+ MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
+
+ sumval = size * rank + ((size - 1) * size)/2;
+ /* recvbuf should be size * (rank + i) */
+ for (i=0; i<mycount; i++) {
+ if (recvbuf[i] != sumval) {
+ err++;
+ if (err < MAX_ERRORS) {
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] Got recvbuf[%d] = %d expected %d\n",
+ rank, i, recvbuf[i], sumval );
+ }
+ }
+ }
+
+ MPI_Reduce_scatter( MPI_IN_PLACE, sendbuf, recvcounts, MPI_INT, MPI_SUM,
+ comm );
+
+ sumval = size * rank + ((size - 1) * size)/2;
+ /* recv'ed values for my process should be size * (rank + i) */
+ for (i=0; i<mycount; i++) {
+ if (sendbuf[i] != sumval) {
+ err++;
+ if (err < MAX_ERRORS) {
+ fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
+ fprintf( stdout, "[%d] Got buf[%d] = %d expected %d\n",
+ rank, i, sendbuf[rank*mycount+i], sumval );
+ }
+ }
+ }
+
+ free(sendbuf);
+ free(recvbuf);
+ free(recvcounts);
+
+ MTest_Finalize( err );
+
+ MPI_Finalize( );
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter block with large data on an intercommunicator
+ * (needed in MPICH to trigger the long-data algorithm)
+ *
+ * Each processor contributes its rank + the index to the reduction,
+ * then receives the ith sum
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+int main( int argc, char **argv )
+{
+ int err = 0;
+ int size, rsize, rank, i;
+ int recvcount, /* Each process receives this much data */
+ sendcount, /* Each process contributes this much data */
+ basecount; /* Unit of elements - basecount *rsize is recvcount,
+ etc. */
+ int isLeftGroup;
+ long long *sendbuf, *recvbuf;
+ long long sumval;
+ MPI_Comm comm;
+
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ basecount = 1024;
+
+ while (MTestGetIntercomm( &comm, &isLeftGroup, 2 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ if (0) {
+ printf( "[%d] %s (%d,%d) remote %d\n", rank,
+ isLeftGroup ? "L" : "R",
+ rank, size, rsize );
+ }
+
+ recvcount = basecount * rsize;
+ sendcount = basecount * rsize * size;
+
+ sendbuf = (long long *) malloc( sendcount * sizeof(long long) );
+ if (!sendbuf) {
+ fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
+ sendcount );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ for (i=0; i<sendcount; i++) {
+ sendbuf[i] = (long long)(rank*sendcount + i);
+ }
+ recvbuf = (long long *)malloc( recvcount * sizeof(long long) );
+ if (!recvbuf) {
+ fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
+ recvcount );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<recvcount; i++) {
+ recvbuf[i] = (long long)(-i);
+ }
+
+ MPI_Reduce_scatter_block( sendbuf, recvbuf, recvcount, MPI_LONG_LONG,
+ MPI_SUM, comm );
+
+ /* Check received data */
+ for (i=0; i<recvcount; i++) {
+ sumval = (long long)(sendcount) * (long long)((rsize * (rsize-1))/2) +
+ (long long)(i + rank * rsize * basecount) * (long long)rsize;
+ if (recvbuf[i] != sumval) {
+ err++;
+ if (err < 4) {
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
+ rank,
+ isLeftGroup ? "L" : "R",
+ i, recvbuf[i], sumval );
+ }
+ }
+ }
+
+ free(sendbuf);
+ free(recvbuf);
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+
+ MPI_Finalize( );
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter with large data (needed in MPICH to trigger the
+ * long-data algorithm)
+ *
+ * Each processor contributes its rank + the index to the reduction,
+ * then receives the ith sum
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+int main( int argc, char **argv )
+{
+ int err = 0;
+ int *sendbuf, *recvbuf;
+ int size, rank, i, j, idx, mycount, sumval;
+ MPI_Comm comm;
+
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ mycount = (1024 * 1024) / size;
+
+ sendbuf = (int *) malloc( mycount * size * sizeof(int) );
+ if (!sendbuf) {
+ fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
+ mycount * size );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ idx = 0;
+ for (i=0; i<size; i++) {
+ for (j=0; j<mycount; j++) {
+ sendbuf[idx++] = rank + i;
+ }
+ }
+ recvbuf = (int *)malloc( mycount * sizeof(int) );
+ if (!recvbuf) {
+ fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
+ mycount );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ MPI_Reduce_scatter_block( sendbuf, recvbuf, mycount, MPI_INT, MPI_SUM,
+ comm );
+
+ sumval = size * rank + ((size - 1) * size)/2;
+ /* recvbuf should be size * (rank + i) */
+ for (i=0; i<mycount; i++) {
+ if (recvbuf[i] != sumval) {
+ err++;
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf[i], sumval );
+ }
+ }
+
+ MPI_Reduce_scatter_block( MPI_IN_PLACE, sendbuf, mycount, MPI_INT, MPI_SUM,
+ comm );
+
+ sumval = size * rank + ((size - 1) * size)/2;
+ /* recv'ed values for my process should be size * (rank + i) */
+ for (i=0; i<mycount; i++) {
+ if (sendbuf[rank*mycount+i] != sumval) {
+ err++;
+ fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
+ fprintf( stdout, "[%d] Got %d expected %d\n", rank, sendbuf[rank*mycount+i], sumval );
+ }
+ }
+
+ free(sendbuf);
+ free(recvbuf);
+
+ MTest_Finalize( err );
+
+ MPI_Finalize( );
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ * Test of reduce scatter with large data on an intercommunicator
+ * (needed in MPICH to trigger the long-data algorithm)
+ *
+ * Each processor contributes its rank + the index to the reduction,
+ * then receives the ith sum
+ *
+ * Can be called with any number of processors.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+int main( int argc, char **argv )
+{
+ int err = 0;
+ int *recvcounts;
+ int size, rsize, rank, i;
+ int recvcount, /* Each process receives this much data */
+ sendcount, /* Each process contributes this much data */
+ basecount; /* Unit of elements - basecount *rsize is recvcount,
+ etc. */
+ int isLeftGroup;
+ long long *sendbuf, *recvbuf;
+ long long sumval;
+ MPI_Comm comm;
+
+
+ MTest_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ basecount = 1024;
+
+ while (MTestGetIntercomm( &comm, &isLeftGroup, 2 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ if (0) {
+ printf( "[%d] %s (%d,%d) remote %d\n", rank,
+ isLeftGroup ? "L" : "R",
+ rank, size, rsize );
+ }
+
+ recvcount = basecount * rsize;
+ sendcount = basecount * rsize * size;
+
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ if (!recvcounts) {
+ fprintf( stderr, "Could not allocate %d int for recvcounts\n",
+ size );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<size; i++)
+ recvcounts[i] = recvcount;
+
+ sendbuf = (long long *) malloc( sendcount * sizeof(long long) );
+ if (!sendbuf) {
+ fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
+ sendcount );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ for (i=0; i<sendcount; i++) {
+ sendbuf[i] = (long long)(rank*sendcount + i);
+ }
+ recvbuf = (long long *)malloc( recvcount * sizeof(long long) );
+ if (!recvbuf) {
+ fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
+ recvcount );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<recvcount; i++) {
+ recvbuf[i] = (long long)(-i);
+ }
+
+ MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_LONG_LONG, MPI_SUM,
+ comm );
+
+ /* Check received data */
+ for (i=0; i<recvcount; i++) {
+ sumval = (long long)(sendcount) * (long long)((rsize * (rsize-1))/2) +
+ (long long)(i + rank * rsize * basecount) * (long long)rsize;
+ if (recvbuf[i] != sumval) {
+ err++;
+ if (err < 4) {
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
+ rank,
+ isLeftGroup ? "L" : "R",
+ i, recvbuf[i], sumval );
+ }
+ }
+ }
+
+ free(sendbuf);
+ free(recvbuf);
+ free(recvcounts);
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+
+ MPI_Finalize( );
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "A simple test of Reduce with all choices of root process";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, root;
+ int *sendbuf, *recvbuf, i;
+ int minsize = 2, count;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (count = 1; count < 130000; count = count * 2) {
+ sendbuf = (int *)malloc( count * sizeof(int) );
+ recvbuf = (int *)malloc( count * sizeof(int) );
+ for (root = 0; root < size; root ++) {
+ for (i=0; i<count; i++) sendbuf[i] = i;
+ for (i=0; i<count; i++) recvbuf[i] = -1;
+ MPI_Reduce( sendbuf, recvbuf, count, MPI_INT, MPI_SUM,
+ root, comm );
+ if (rank == root) {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != i * size) {
+ errs++;
+ }
+ }
+ }
+ }
+ free( sendbuf );
+ free( recvbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "A simple test of MPI_Reduce_local";
+*/
+
+#define MAX_BUF_ELEMENTS (65000)
+
+static int uop_errs = 0;
+
+/* prototype to keep the compiler happy */
+static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype);
+
+static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+{
+ int i;
+ int *invec_int = (int *)invec;
+ int *inoutvec_int = (int *)inoutvec;
+
+ if (*datatype != MPI_INT) {
+ ++uop_errs;
+ printf("invalid datatype passed to user_op");
+ return;
+ }
+
+ for (i = 0; i < *len; ++i) {
+ inoutvec_int[i] = invec_int[i] * 2 + inoutvec_int[i];
+ }
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int i;
+ int *inbuf = NULL;
+ int *inoutbuf = NULL;
+ int count = -1;
+ MPI_Op uop = MPI_OP_NULL;
+
+ MTest_Init(&argc, &argv);
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* this function was added in MPI-2.2 */
+
+ inbuf = malloc(sizeof(int) * MAX_BUF_ELEMENTS);
+ inoutbuf = malloc(sizeof(int) * MAX_BUF_ELEMENTS);
+
+ for (count = 0; count < MAX_BUF_ELEMENTS; count > 0 ? count*=2 : count++) {
+ for (i = 0; i < count; ++i) {
+ inbuf[i] = i;
+ inoutbuf[i] = i;
+ }
+ MPI_Reduce_local(inbuf, inoutbuf, count, MPI_INT, MPI_SUM);
+ for (i = 0; i < count; ++i)
+ if (inbuf[i] != i) {
+ ++errs;
+ if (inoutbuf[i] != (2*i))
+ ++errs;
+ }
+ }
+
+ /* make sure that user-define ops work too */
+ MPI_Op_create(&user_op, 0/*!commute*/, &uop);
+ for (count = 0; count < MAX_BUF_ELEMENTS; count > 0 ? count*=2 : count++) {
+ for (i = 0; i < count; ++i) {
+ inbuf[i] = i;
+ inoutbuf[i] = i;
+ }
+ MPI_Reduce_local(inbuf, inoutbuf, count, MPI_INT, uop);
+ errs += uop_errs;
+ for (i = 0; i < count; ++i)
+ if (inbuf[i] != i) {
+ ++errs;
+ if (inoutbuf[i] != (3*i))
+ ++errs;
+ }
+ }
+ MPI_Op_free(&uop);
+
+ free(inbuf);
+ free(inoutbuf);
+#endif
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+
+void addem ( int *, int *, int *, MPI_Datatype * );
+void assoc ( int *, int *, int *, MPI_Datatype * );
+
+void addem( int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ )
+ inoutvec[i] += invec[i];
+}
+
+#define BAD_ANSWER 100000
+
+/*
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ (see 4.9.4). The order is important.
+
+ Note that the computation is in process rank (in the communicator)
+ order, independant of the root.
+ */
+void assoc( int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ ) {
+ if (inoutvec[i] <= invec[i] ) {
+ int rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
+ rank, inoutvec[0], invec[0] );
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op_assoc, op_addem;
+ MPI_Comm comm=MPI_COMM_WORLD;
+
+ MPI_Init( &argc, &argv );
+ MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc );
+ MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem );
+
+ /* Run this for a variety of communicator sizes */
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ data = rank;
+
+ correct_result = 0;
+ for (i=0;i<=rank;i++)
+ correct_result += i;
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error suming ints with scan\n", rank );
+ errors++;
+ }
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank );
+ errors++;
+ }
+
+ data = rank;
+ result = -100;
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (userop)\n",
+ rank );
+ errors++;
+ }
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n",
+ rank );
+ errors++;
+ }
+ result = -100;
+ data = rank;
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, comm );
+ if (result == BAD_ANSWER) {
+ fprintf( stderr, "[%d] Error scanning with non-commutative op\n",
+ rank );
+ errors++;
+ }
+
+ MPI_Op_free( &op_assoc );
+ MPI_Op_free( &op_addem );
+
+ MPI_Finalize();
+ if (errors)
+ printf( "[%d] done with ERRORS(%d)!\n", rank, errors );
+ else {
+ if (rank == 0)
+ printf(" No Errors\n");
+ }
+
+ return errors;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* This example sends a vector and receives individual elements, but the
+ root process does not receive any data */
+
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout, ivalue;
+ int root, i, n, stride, err = 0;
+ int rank, size;
+ MPI_Aint vextent;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+ MPI_Type_extent( vec, &vextent );
+ if (vextent != ((n-1)*(MPI_Aint)stride + 1) * sizeof(double) ) {
+ err++;
+ printf( "Vector extent is %ld, should be %ld\n",
+ (long) vextent, (long)(((n-1)*stride+1)*sizeof(double)) );
+ }
+ /* Note that the exted of type vector is from the first to the
+ last element, not n*stride.
+ E.g., with n=1, the extent is a single double */
+
+ for (i=0; i<n*stride*size; i++) vecin[i] = (double)i;
+ for (root=0; root<size; root++) {
+ for (i=0; i<n; i++) vecout[i] = -1.0;
+ if (rank == root) {
+ MPI_Scatter( vecin, 1, vec, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ root, MPI_COMM_WORLD );
+ }
+ else {
+ MPI_Scatter( NULL, -1, MPI_DATATYPE_NULL, vecout, n, MPI_DOUBLE,
+ root, MPI_COMM_WORLD );
+ ivalue = rank * ((n-1) * stride + 1);
+ for (i=0; i<n; i++) {
+ if (vecout[i] != ivalue) {
+ printf( "[%d] Expected %f but found %f for vecout[%d]\n",
+ rank, ivalue, vecout[i], i );
+ err++;
+ }
+ ivalue += stride;
+ }
+ }
+ }
+
+ MTest_Finalize( err );
+ MPI_Type_free( &vec );
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* This example sends contiguous data and receives a vector on some nodes
+ and contiguous data on others. There is some evidence that some
+ MPI implementations do not check recvcount on the root process; this
+ test checks for that case
+*/
+
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout, ivalue;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+ MPI_Aint vextent;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ n = 12;
+ stride = 10;
+ /* Note that vecout really needs to be only (n-1)*stride+1 doubles, but
+ this is easier and allows a little extra room if there is a bug */
+ vecout = (double *)malloc( n * stride * sizeof(double) );
+ vecin = (double *)malloc( n * size * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+ MPI_Type_extent( vec, &vextent );
+ if (vextent != ((n-1)*(MPI_Aint)stride + 1) * sizeof(double) ) {
+ errs++;
+ printf( "Vector extent is %ld, should be %ld\n",
+ (long) vextent, (long)(((n-1)*stride+1)*sizeof(double)) );
+ }
+ /* Note that the exted of type vector is from the first to the
+ last element, not n*stride.
+ E.g., with n=1, the extent is a single double */
+
+ for (i=0; i<n*size; i++) vecin[i] = (double)i;
+ for (root=0; root<size; root++) {
+ for (i=0; i<n*stride; i++) vecout[i] = -1.0;
+ if (rank == root) {
+ /* Receive into a vector */
+ MPI_Scatter( vecin, n, MPI_DOUBLE, vecout, 1, vec,
+ root, MPI_COMM_WORLD );
+ for (i=0; i<n; i++) {
+ ivalue = n*root + i;
+ if (vecout[i*stride] != ivalue) {
+ errs++;
+ printf( "[%d] Expected %f but found %f for vecout[%d] on root\n",
+ rank, ivalue, vecout[i*stride], i *stride );
+ }
+ }
+ }
+ else {
+ /* Receive into contiguous data */
+ MPI_Scatter( NULL, -1, MPI_DATATYPE_NULL, vecout, n, MPI_DOUBLE,
+ root, MPI_COMM_WORLD );
+ for (i=0; i<n; i++) {
+ ivalue = rank * n + i;
+ if (vecout[i] != ivalue) {
+ printf( "[%d] Expected %f but found %f for vecout[%d]\n",
+ rank, ivalue, vecout[i], i );
+ errs++;
+ }
+ }
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Type_free( &vec );
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* This example sends a vector and receives individual elements */
+
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout, ivalue;
+ int root, i, n, stride, err = 0;
+ int rank, size;
+
+ MPI_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ for (i=0; i<n*stride*size; i++) vecin[i] = (double)i;
+ for (root=0; root<size; root++) {
+ for (i=0; i<n; i++) vecout[i] = -1.0;
+ MPI_Scatter( vecin, 1, vec, vecout, n, MPI_DOUBLE, root,
+ MPI_COMM_WORLD );
+ ivalue = rank * ((n-1) * stride + 1);
+ for (i=0; i<n; i++) {
+ if (vecout[i] != ivalue) {
+ printf( "Expected %f but found %f\n",
+ ivalue, vecout[i] );
+ err++;
+ }
+ ivalue += stride;
+ }
+ }
+ i = err;
+ MPI_Allreduce( &i, &err, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (rank == 0) {
+ if (err > 0) printf( "Found %d errors!\n", err );
+ else printf( " No Errors\n" );
+ }
+ MPI_Type_free( &vec );
+ MPI_Finalize();
+ return 0;
+
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Prototypes for picky compilers */
+void SetData ( double *, double *, int, int, int, int, int, int );
+int CheckData ( double *, int, int, int, int, int, int );
+/*
+ This is an example of using scatterv to send a matrix from one
+ process to all others, with the matrix stored in Fortran order.
+ Note the use of an explicit UB to enable the sources to overlap.
+
+ This tests scatterv to make sure that it uses the datatype size
+ and extent correctly. It requires number of processors that
+ can be split with MPI_Dims_create.
+
+ */
+
+void SetData( double *sendbuf, double *recvbuf, int nx, int ny,
+ int myrow, int mycol, int nrow, int ncol )
+{
+ int coldim, i, j, m, k;
+ double *p;
+
+ if (myrow == 0 && mycol == 0) {
+ coldim = nx * nrow;
+ for (j=0; j<ncol; j++) {
+ for (i=0; i<nrow; i++) {
+ p = sendbuf + i * nx + j * (ny * coldim);
+ for (m=0; m<ny; m++) {
+ for (k=0; k<nx; k++) {
+ p[k] = 1000 * j + 100 * i + m * nx + k;
+ }
+ p += coldim;
+ }
+ }
+ }
+ }
+ for (i=0; i<nx*ny; i++)
+ recvbuf[i] = -1.0;
+}
+
+int CheckData( double *recvbuf,
+ int nx, int ny, int myrow, int mycol, int nrow,
+ int expect_no_value )
+{
+ int coldim, m, k;
+ double *p, val;
+ int errs = 0;
+
+ coldim = nx;
+ p = recvbuf;
+ for (m=0; m<ny; m++) {
+ for (k=0; k<nx; k++) {
+ /* If expect_no_value is true then we assume that the pre-scatterv
+ * value should remain in the recvbuf for our portion of the array.
+ * This is the case for the root process when using MPI_IN_PLACE. */
+ if (expect_no_value)
+ val = -1.0;
+ else
+ val = 1000 * mycol + 100 * myrow + m * nx + k;
+
+ if (p[k] != val) {
+ errs++;
+ if (errs < 10) {
+ printf("Error in (%d,%d) [%d,%d] location, got %f expected %f\n",
+ m, k, myrow, mycol, p[k], val );
+ }
+ else if (errs == 10) {
+ printf( "Too many errors; suppressing printing\n" );
+ }
+ }
+ }
+ p += coldim;
+ }
+ return errs;
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size, myrow, mycol, nx, ny, stride, cnt, i, j, errs, errs_in_place, tot_errs;
+ double *sendbuf, *recvbuf;
+ MPI_Datatype vec, block, types[2];
+ MPI_Aint displs[2];
+ int *scdispls;
+ int blens[2];
+ MPI_Comm comm2d;
+ int dims[2], periods[2], coords[2], lcoords[2];
+ int *sendcounts;
+
+
+ MPI_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* Get a 2-d decomposition of the processes */
+ dims[0] = 0; dims[1] = 0;
+ MPI_Dims_create( size, 2, dims );
+ periods[0] = 0; periods[1] = 0;
+ MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &comm2d );
+ MPI_Cart_get( comm2d, 2, dims, periods, coords );
+ myrow = coords[0];
+ mycol = coords[1];
+/*
+ if (rank == 0)
+ printf( "Decomposition is [%d x %d]\n", dims[0], dims[1] );
+*/
+
+ /* Get the size of the matrix */
+ nx = 10;
+ ny = 8;
+ stride = nx * dims[0];
+
+ recvbuf = (double *)malloc( nx * ny * sizeof(double) );
+ if (!recvbuf) {
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ sendbuf = 0;
+ if (myrow == 0 && mycol == 0) {
+ sendbuf = (double *)malloc( nx * ny * size * sizeof(double) );
+ if (!sendbuf) {
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ }
+ sendcounts = (int *) malloc( size * sizeof(int) );
+ scdispls = (int *)malloc( size * sizeof(int) );
+
+ MPI_Type_vector( ny, nx, stride, MPI_DOUBLE, &vec );
+ blens[0] = 1; blens[1] = 1;
+ types[0] = vec; types[1] = MPI_UB;
+ displs[0] = 0; displs[1] = nx * sizeof(double);
+
+ MPI_Type_struct( 2, blens, displs, types, &block );
+ MPI_Type_free( &vec );
+ MPI_Type_commit( &block );
+
+ /* Set up the transfer */
+ cnt = 0;
+ for (i=0; i<dims[1]; i++) {
+ for (j=0; j<dims[0]; j++) {
+ sendcounts[cnt] = 1;
+ /* Using Cart_coords makes sure that ranks (used by
+ sendrecv) matches the cartesian coordinates (used to
+ set data in the matrix) */
+ MPI_Cart_coords( comm2d, cnt, 2, lcoords );
+ scdispls[cnt++] = lcoords[0] + lcoords[1] * (dims[0] * ny);
+ }
+ }
+
+ SetData( sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1] );
+ MPI_Scatterv( sendbuf, sendcounts, scdispls, block,
+ recvbuf, nx * ny, MPI_DOUBLE, 0, comm2d );
+ if((errs = CheckData( recvbuf, nx, ny, myrow, mycol, dims[0], 0 ))) {
+ fprintf( stdout, "Failed to transfer data\n" );
+ }
+
+ /* once more, but this time passing MPI_IN_PLACE for the root */
+ SetData( sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1] );
+ MPI_Scatterv( sendbuf, sendcounts, scdispls, block,
+ (rank == 0 ? MPI_IN_PLACE : recvbuf), nx * ny, MPI_DOUBLE, 0, comm2d );
+ errs_in_place = CheckData( recvbuf, nx, ny, myrow, mycol, dims[0], (rank == 0) );
+ if(errs_in_place) {
+ fprintf( stdout, "Failed to transfer data (MPI_IN_PLACE)\n" );
+ }
+
+ errs += errs_in_place;
+ MPI_Allreduce( &errs, &tot_errs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (rank == 0) {
+ if (tot_errs == 0)
+ printf( " No Errors\n" );
+ else
+ printf( "%d errors in use of MPI_SCATTERV\n", tot_errs );
+ }
+
+ if (sendbuf) free( sendbuf );
+ free( recvbuf );
+ free( sendcounts );
+ free( scdispls );
+ MPI_Type_free( &block );
+ MPI_Comm_free( &comm2d );
+ MPI_Finalize();
+ return errs;
+}
+
+
--- /dev/null
+#needs MPI_Errhandler_set MPI_Type_get_name
+#allred 4
+#allred 7
+#allred 4 arg=100
+allredmany 4
+allred2 4
+allred3 10
+allred4 4
+allred5 5
+allred5 10
+allred6 4
+allred6 7
+reduce_mpich 5
+reduce_mpich 10
+reduce_local 2 mpiversion=2.2
+op_commutative 2
+red3 10
+red4 10
+alltoall1 8
+alltoallv 10
+alltoallv0 10
+#alltoallw1 10
+#alltoallw2 10
+#alltoallw_zeros 1
+#alltoallw_zeros 2
+#alltoallw_zeros 5
+#alltoallw_zeros 8
+allgather2 10
+allgather3 10
+allgatherv2 10
+allgatherv3 10
+#needs thread factory
+allgatherv4 4 timeLimit=600
+bcasttest 4
+bcasttest 8
+bcasttest 10
+#uses MPI_Comm_dup
+#bcast2 4
+# More that 8 processes are required to get bcast to switch to the long
+# msg algorithm (see coll definitions in mpiimpl.h)
+#bcast2 10 timeLimit=420
+#bcast3 10 timeLimit=420
+bcastzerotype 1
+bcastzerotype 4
+bcastzerotype 5
+bcastzerotype 10
+coll2 5
+coll3 5
+coll4 4
+coll5 4
+coll6 5
+coll7 1
+coll7 2
+coll7 5
+coll8 4
+coll9 4
+coll10 4
+coll11 4
+coll12 4
+coll13 4
+longuser 4
+redscat 4
+redscat 6
+redscat2 4
+redscat2 5
+redscat2 10
+redscat3 8
+#intercomms
+#redscatinter 8
+red_scat_block 4 mpiversion=2.2
+red_scat_block 5 mpiversion=2.2
+red_scat_block 8 mpiversion=2.2
+red_scat_block2 4 mpiversion=2.2
+red_scat_block2 5 mpiversion=2.2
+red_scat_block2 10 mpiversion=2.2
+redscatblk3 8 mpiversion=2.2
+redscatblk3 10 mpiversion=2.2
+redscatbkinter 8 mpiversion=2.2
+redscatbkinter 10 mpiversion=2.2
+scantst 4
+exscan 10
+exscan2 5
+gather 4
+gather2 4
+scattern 4
+scatter2 4
+scatter3 4
+#uses dims, cart
+#scatterv 4
+#icbcast 4
+#icbcast 10
+#icallreduce 5
+#icallreduce 7
+#icreduce 5
+#icreduce 7
+#icscatter 5
+#icscatter 7
+#icgather 5
+#icgather 7
+#icallgather 5
+#icallgather 7
+#icbarrier 5
+#icbarrier 7
+#icallgatherv 5
+#icallgatherv 7
+#icgatherv 5
+#icgatherv 7
+#icscatterv 5
+#icscatterv 7
+#icalltoall 5
+#icalltoall 7
+#icalltoallv 5
+#icalltoallv 7
+#icalltoallw 5
+#icalltoallw 7
+# the opxxx tests look at optional types, and are included for MPICH testing.
+# MPI implementations may instead signal errors for these types
+#opland 4
+#oplor 4
+#oplxor 4
+#oplxor 5
+#opband 4
+#opbor 4
+#opbxor 4
+#opbxor 5
+#opprod 5
+#opprod 6
+#opsum 4
+#opmin 4
+#opminloc 4
+#opmax 5
+#opmaxloc 5
+#uoplong 4
+#uoplong 11
+#uoplong 16
+nonblocking 4 mpiversion=3.0
+nonblocking 5 mpiversion=3.0
+nonblocking 10 mpiversion=3.0
+nonblocking2 1 mpiversion=3.0
+nonblocking2 4 mpiversion=3.0
+nonblocking2 5 mpiversion=3.0
+nonblocking2 10 timeLimit=420 mpiversion=3.0
+nonblocking3 1 mpiversion=3.0
+nonblocking3 4 mpiversion=3.0
+nonblocking3 5 mpiversion=3.0
+nonblocking3 10 timeLimit=600 mpiversion=3.0
+iallred 2 mpiversion=3.0
+# ibarrier will hang forever if it fails, but will complete quickly if it
+# succeeds
+ibarrier 2 mpiversion=3.0 timeLimit=30
+
+# run some of the tests, relinked with the nbc_pmpi_adaptor.o file
+nballtoall1 8 mpiversion=3.0
+nbcoll2 5 mpiversion=3.0
+nbredscat 4 mpiversion=3.0
+nbredscat 8 mpiversion=3.0
+nbredscat3 8 mpiversion=3.0
+nbredscatinter 8 mpiversion=3.0
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+/*
+ * Test user-defined operations with a large number of elements.
+ * Added because a talk at EuroMPI'12 claimed that these failed with
+ * more than 64k elements
+ */
+
+#define MAX_ERRS 10
+#define MAX_COUNT 1200000
+
+void myop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+
+/*
+ * myop takes a datatype that is a triple of doubles, and computes
+ * the sum, max, min of the respective elements of the triple.
+ */
+void myop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+ int i, n = *count;
+ double const *cin = (double *)cinPtr;
+ double *cout = (double *)coutPtr;
+
+ for (i=0; i<n; i++) {
+ cout[0] += cin[0];
+ cout[1] = (cout[1] > cin[1]) ? cout[1] : cin[1];
+ cout[2] = (cout[2] < cin[2]) ? cout[2] : cin[2];
+ cin += 3;
+ cout += 3;
+ }
+}
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int wsize, wrank, i, count;
+ MPI_Datatype tripleType;
+ double *inVal, *outVal;
+ double maxval, sumval;
+ MPI_Op op;
+
+ MTest_Init( &argc, &argv );
+ MPI_Op_create( myop, 0, &op );
+ MPI_Type_contiguous( 3, MPI_DOUBLE, &tripleType );
+ MPI_Type_commit( &tripleType );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &wsize );
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ for (count=1; count<MAX_COUNT; count += count) {
+ if (wrank == 0)
+ MTestPrintfMsg( 1, "Count = %d\n", count );
+ inVal = (double *)malloc( 3 * count * sizeof(double) );
+ outVal = (double *)malloc( 3 * count * sizeof(double) );
+ if (!inVal || !outVal) {
+ fprintf( stderr, "Unable to allocated %d words for data\n",
+ 3 * count );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ for (i=0; i<count*3; i++) {
+ outVal[i] = -1;
+ inVal[i] = 1 + (i & 0x3);
+ }
+ MPI_Reduce( inVal, outVal, count, tripleType, op, 0, MPI_COMM_WORLD );
+ /* Check Result values */
+ if (wrank == 0) {
+ for (i=0; i<3*count; i+=3) {
+ sumval = wsize * (1 + (i & 0x3));
+ maxval = 1 + ((i+1) & 0x3);
+ if (outVal[i] != sumval) {
+ if (errs < MAX_ERRS)
+ fprintf( stderr, "%d: outval[%d] = %f, expected %f (sum)\n",
+ count, i, outVal[i], sumval );
+ errs++;
+ }
+ if (outVal[i+1] != maxval) {
+ if (errs < MAX_ERRS)
+ fprintf( stderr, "%d: outval[%d] = %f, expected %f (max)\n",
+ count, i+1, outVal[i+1], maxval );
+ errs++;
+ }
+ if (outVal[i+2] != 1 + ((i+2)&0x3)) {
+ if (errs < MAX_ERRS)
+ fprintf( stderr, "%d: outval[%d] = %f, expected %f (min)\n",
+ count, i+2, outVal[i+2], (double)(1 + ((i+2)^0x3)) );
+ errs++;
+ }
+ }
+ }
+
+ free( inVal );
+ free( outVal );
+ }
+
+ MPI_Op_free( &op );
+ MPI_Type_free( &tripleType );
+ MTest_Finalize( errs );
+ MPI_Finalize( );
+ return 0;
+}
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1")
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+
+ add_executable(cmake_install cmake_install.cmake ../util/mtest.c)
+ add_executable(cmfree cmfree.c ../util/mtest.c)
+ add_executable(cmsplit2 cmsplit2.c ../util/mtest.c)
+ add_executable(cmsplit cmsplit.c ../util/mtest.c)
+ add_executable(cmsplit_type cmsplit_type.c ../util/mtest.c)
+ add_executable(commcreate1 commcreate1.c ../util/mtest.c)
+ add_executable(comm_create_group comm_create_group.c ../util/mtest.c)
+ add_executable(comm_group_half comm_group_half.c ../util/mtest.c)
+ add_executable(comm_group_rand comm_group_rand.c ../util/mtest.c)
+ # add_executable(comm_idup comm_idup.c ../util/mtest.c)
+ add_executable(comm_info comm_info.c ../util/mtest.c)
+ add_executable(commname commname.c ../util/mtest.c)
+ add_executable(ctxalloc ctxalloc.c ../util/mtest.c)
+ add_executable(ctxsplit ctxsplit.c ../util/mtest.c)
+ add_executable(dup dup.c ../util/mtest.c)
+ add_executable(dupic dupic.c ../util/mtest.c)
+ add_executable(dup_with_info dup_with_info.c ../util/mtest.c)
+ add_executable(ic1 ic1.c ../util/mtest.c)
+ add_executable(ic2 ic2.c ../util/mtest.c)
+ add_executable(iccreate iccreate.c ../util/mtest.c)
+ add_executable(icgroup icgroup.c ../util/mtest.c)
+ add_executable(icm icm.c ../util/mtest.c)
+ add_executable(icsplit icsplit.c ../util/mtest.c)
+ add_executable(probe-intercomm probe-intercomm.c ../util/mtest.c)
+
+
+
+ target_link_libraries(cmake_install simgrid)
+ target_link_libraries(cmfree simgrid)
+ target_link_libraries(cmsplit2 simgrid)
+ target_link_libraries(cmsplit simgrid)
+ target_link_libraries(cmsplit_type simgrid)
+ target_link_libraries(commcreate1 simgrid)
+ target_link_libraries(comm_create_group simgrid)
+ target_link_libraries(comm_group_half simgrid)
+ target_link_libraries(comm_group_rand simgrid)
+ # target_link_libraries(comm_idup simgrid)
+ target_link_libraries(comm_info simgrid)
+ target_link_libraries(commname simgrid)
+ target_link_libraries(ctxalloc simgrid)
+ target_link_libraries(ctxsplit simgrid)
+ target_link_libraries(dup simgrid)
+ target_link_libraries(dupic simgrid)
+ target_link_libraries(dup_with_info simgrid)
+ target_link_libraries(ic1 simgrid)
+ target_link_libraries(ic2 simgrid)
+ target_link_libraries(iccreate simgrid)
+ target_link_libraries(icgroup simgrid)
+ target_link_libraries(icm simgrid)
+ target_link_libraries(icsplit simgrid)
+ target_link_libraries(probe-intercomm simgrid)
+
+
+
+ set_target_properties(cmake_install PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(cmfree PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(cmsplit2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(cmsplit PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(cmsplit_type PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(commcreate1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(comm_create_group PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(comm_group_half PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(comm_group_rand PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+# set_target_properties(comm_idup PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(comm_info PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(commname PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(ctxalloc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(ctxsplit PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(dup PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(dupic PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(dup_with_info PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(ic1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(ic2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(iccreate PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icgroup PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icsplit PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(probe-intercomm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmake_install.cmake
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmfree.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmsplit2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmsplit.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmsplit_type.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/commcreate1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_create_group.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_group_half.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_group_rand.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_idup.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm_info.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/commname.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ctxalloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ctxsplit.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dup.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dupic.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dup_with_info.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ic1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ic2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/iccreate.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icgroup.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icm.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icsplit.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/probe-intercomm.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test that communicators have reference count semantics";
+*/
+
+#define NELM 128
+#define NCOMM 1020
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest, i;
+ MPI_Comm comm;
+ MPI_Comm tmpComm[NCOMM];
+ MPI_Status status;
+ MPI_Request req;
+ int *buf=0;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+
+ /* This is similar to the datatype test, except that we post
+ an irecv on a simple data buffer but use a rank-reordered communicator.
+ In this case, an error in handling the reference count will most
+ likely cause the program to hang, so this should be run only
+ if (a) you are confident that the code is correct or (b)
+ a timeout is set for mpiexec
+ */
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ if (size < 2) {
+ fprintf( stderr, "This test requires at least two processes." );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ source = 0;
+ dest = size - 1;
+
+ if (rank == dest) {
+ buf = (int *)malloc( NELM * sizeof(int) );
+ for (i=0; i<NELM; i++) buf[i] = -i;
+ MPI_Irecv( buf, NELM, MPI_INT, source, 0, comm, &req );
+ MPI_Comm_free( &comm );
+
+ if (comm != MPI_COMM_NULL) {
+ errs++;
+ printf( "Freed comm was not set to COMM_NULL\n" );
+ }
+
+ for (i=0; i<NCOMM; i++) {
+ MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
+ }
+
+ MPI_Sendrecv( 0, 0, MPI_INT, source, 1,
+ 0, 0, MPI_INT, source, 1, MPI_COMM_WORLD, &status );
+
+ MPI_Wait( &req, &status );
+ for (i=0; i<NELM; i++) {
+ if (buf[i] != i) {
+ errs++;
+ if (errs < 10) {
+ printf( "buf[%d] = %d, expected %d\n", i, buf[i], i );
+ }
+ }
+ }
+ for (i=0; i<NCOMM; i++) {
+ MPI_Comm_free( &tmpComm[i] );
+ }
+ free( buf );
+ }
+ else if (rank == source) {
+ buf = (int *)malloc( NELM * sizeof(int) );
+ for (i=0; i<NELM; i++) buf[i] = i;
+
+ for (i=0; i<NCOMM; i++) {
+ MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
+ }
+ /* Synchronize with the receiver */
+ MPI_Sendrecv( 0, 0, MPI_INT, dest, 1,
+ 0, 0, MPI_INT, dest, 1, MPI_COMM_WORLD, &status );
+ MPI_Send( buf, NELM, MPI_INT, dest, 0, comm );
+ free( buf );
+ }
+ else {
+ for (i=0; i<NCOMM; i++) {
+ MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
+ }
+ }
+
+ MPI_Barrier( MPI_COMM_WORLD );
+
+ if (rank != dest) {
+ /* Clean up the communicators */
+ for (i=0; i<NCOMM; i++) {
+ MPI_Comm_free( &tmpComm[i] );
+ }
+ }
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_free( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test comm split";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, color, srank;
+ MPI_Comm comm, scomm;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ if (size < 4) {
+ fprintf( stderr, "This test requires at least four processes." );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ color = MPI_UNDEFINED;
+ if (rank < 2) color = 1;
+ MPI_Comm_split( comm, color, size - rank, &scomm );
+
+ if (rank < 2) {
+ /* Check that the ranks are ordered correctly */
+ MPI_Comm_rank( scomm, &srank );
+ if (srank != 1 - rank) {
+ errs++;
+ }
+ MPI_Comm_free( &scomm );
+ }
+ else {
+ if (scomm != MPI_COMM_NULL) {
+ errs++;
+ }
+ }
+ MPI_Comm_free( &comm );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This test ensures that MPI_Comm_split breaks ties in key values by using the
+ * original rank in the input communicator. This typically corresponds to
+ * the difference between using a stable sort or using an unstable sort.
+ *
+ * It checks all sizes from 1..comm_size(world)-1, so this test does not need to
+ * be run multiple times at process counts from a higher-level test driver. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpi.h"
+
+#define ERRLIMIT (10)
+
+#define my_assert(cond_) \
+ do { \
+ if (!(cond_)) { \
+ if (errs < ERRLIMIT) \
+ printf("assertion \"%s\" failed\n", #cond_); \
+ ++errs; \
+ } \
+ } while (0)
+
+int main(int argc, char **argv)
+{
+ int i, j, pos, modulus, cs, rank, size;
+ int wrank, wsize;
+ int newrank, newsize;
+ int errs = 0;
+ int key;
+ int *oldranks = NULL;
+ int *identity = NULL;
+ int verbose = 0;
+ MPI_Comm comm, splitcomm;
+ MPI_Group wgroup, newgroup;
+
+ MPI_Init(&argc, &argv);
+
+ if (getenv("MPITEST_VERBOSE"))
+ verbose = 1;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+
+ oldranks = malloc(wsize * sizeof(int));
+ identity = malloc(wsize * sizeof(int));
+ for (i = 0; i < wsize; ++i) {
+ identity[i] = i;
+ }
+
+ for (cs = 1; cs <= wsize; ++cs) {
+ /* yes, we are using comm_split to test comm_split, but this test is
+ * mainly about ensuring that the stable sort behavior is correct, not
+ * about whether the partitioning by color behavior is correct */
+ MPI_Comm_split(MPI_COMM_WORLD, (wrank < cs ? 0 : MPI_UNDEFINED), wrank, &comm);
+ if (comm != MPI_COMM_NULL) {
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (modulus = 1; modulus <= size; ++modulus) {
+ /* Divide all ranks into one of "modulus" equivalence classes. Ranks in
+ * output comm will be ordered first by class, then within the class by
+ * rank in comm world. */
+ key = rank % modulus;
+
+ /* all pass same color, variable keys */
+ MPI_Comm_split(comm, 5, key, &splitcomm);
+ MPI_Comm_rank(splitcomm, &newrank);
+ MPI_Comm_size(splitcomm, &newsize);
+ my_assert(newsize == size);
+
+ MPI_Comm_group(MPI_COMM_WORLD, &wgroup);
+ MPI_Comm_group(splitcomm, &newgroup);
+ int gsize;
+ MPI_Group_size(newgroup, &gsize);
+ MPI_Group_translate_ranks(newgroup, size, identity, wgroup, oldranks);
+ MPI_Group_free(&wgroup);
+ MPI_Group_free(&newgroup);
+
+ if (splitcomm != MPI_COMM_NULL)
+ MPI_Comm_free(&splitcomm);
+
+ /* now check that comm_split broke any ties correctly */
+ if (rank == 0) {
+ if (verbose) {
+ /* debugging code that is useful when the test fails */
+ printf("modulus=%d oldranks={", modulus);
+ for (i = 0; i < size - 1; ++i) {
+ printf("%d,", oldranks[i]);
+ }
+ printf("%d} keys={", oldranks[i]);
+ for (i = 0; i < size - 1; ++i) {
+ printf("%d,", i % modulus);
+ }
+ printf("%d}\n", i % modulus);
+ }
+
+ pos = 0;
+ for (i = 0; i < modulus; ++i) {
+ /* there's probably a better way to write these loop bounds and
+ * indices, but this is the first (correct) way that occurred to me */
+ for (j = 0; j < (size / modulus + (i < size % modulus ? 1 : 0)); ++j) {
+ if (errs < ERRLIMIT && oldranks[pos] != i+modulus*j) {
+ printf("size=%d i=%d j=%d modulus=%d pos=%d i+modulus*j=%d oldranks[pos]=%d\n",
+ size, i, j, modulus, pos, i+modulus*j, oldranks[pos]);
+ }
+ my_assert(oldranks[pos] == i+modulus*j);
+ ++pos;
+ }
+ }
+ }
+ }
+ MPI_Comm_free(&comm);
+ }
+ }
+
+ if (oldranks != NULL)
+ free(oldranks);
+ if (identity != NULL)
+ free(identity);
+
+ if (rank == 0) {
+ if (errs)
+ printf("found %d errors\n", errs);
+ else
+ printf(" No errors\n");
+ }
+
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+/* FIXME: This test only checks that the MPI_Comm_split_type routine
+ doesn't fail. It does not check for correct behavior */
+
+int main(int argc, char *argv[])
+{
+ int rank, size, verbose=0;
+ int wrank;
+ MPI_Comm comm;
+
+ MPI_Init(&argc, &argv);
+
+ if (getenv("MPITEST_VERBOSE"))
+ verbose = 1;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ /* Check to see if MPI_COMM_TYPE_SHARED works correctly */
+ MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm);
+ if (comm == MPI_COMM_NULL)
+ printf("Expected a non-null communicator, but got MPI_COMM_NULL\n");
+ else {
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+ if (rank == 0 && verbose)
+ printf("Created subcommunicator of size %d\n", size);
+ MPI_Comm_free(&comm);
+ }
+
+ /* Check to see if MPI_UNDEFINED is respected */
+ MPI_Comm_split_type(MPI_COMM_WORLD, (wrank % 2 == 0) ? MPI_COMM_TYPE_SHARED : MPI_UNDEFINED,
+ 0, MPI_INFO_NULL, &comm);
+ if ((wrank % 2) && (comm != MPI_COMM_NULL))
+ printf("Expected MPI_COMM_NULL, but did not get one\n");
+ if (wrank % 2 == 0) {
+ if (comm == MPI_COMM_NULL)
+ printf("Expected a non-null communicator, but got MPI_COMM_NULL\n");
+ else {
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+ if (rank == 0 && verbose)
+ printf("Created subcommunicator of size %d\n", size);
+ MPI_Comm_free(&comm);
+ }
+ }
+
+ /* Use wrank because Comm_split_type may return more than one communicator
+ across the job, and if so, each will have a rank 0 entry. Test
+ output rules are for a single process to write the successful
+ test (No Errors) output. */
+ if (wrank == 0)
+ printf(" No errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2011 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int main(int argc, char *argv[])
+{
+ int size, rank, i, *excl;
+ MPI_Group world_group, even_group;
+ MPI_Comm __attribute__((unused)) even_comm;
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if (size % 2) {
+ fprintf(stderr, "this program requires a multiple of 2 number of processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ excl = malloc((size / 2) * sizeof(int));
+ assert(excl);
+
+ /* exclude the odd ranks */
+ for (i = 0; i < size / 2; i++)
+ excl[i] = (2 * i) + 1;
+
+ /* Create some groups */
+ MPI_Comm_group(MPI_COMM_WORLD, &world_group);
+ MPI_Group_excl(world_group, size / 2, excl, &even_group);
+ MPI_Group_free(&world_group);
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+ if (rank % 2 == 0) {
+ /* Even processes create a group for themselves */
+ MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &even_comm);
+ MPI_Barrier(even_comm);
+ MPI_Comm_free(&even_comm);
+ }
+#endif /* USE_STRICT_MPI */
+
+ MPI_Group_free(&even_group);
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (rank == 0)
+ printf(" No errors\n");
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <mpi.h>
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+
+int main(int argc, char **argv)
+{
+ int rank, size;
+ MPI_Group full_group, half_group;
+ int range[1][3];
+ MPI_Comm __attribute__((unused)) comm;
+
+ MPI_Init(NULL, NULL);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ MPI_Comm_group(MPI_COMM_WORLD, &full_group);
+ range[0][0] = 0;
+ range[0][1] = size / 2;
+ range[0][2] = 1;
+ MPI_Group_range_incl(full_group, 1, range, &half_group);
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+ if (rank <= size / 2) {
+ MPI_Comm_create_group(MPI_COMM_WORLD, half_group, 0, &comm);
+ MPI_Barrier(comm);
+ MPI_Comm_free(&comm);
+ }
+#endif /* USE_STRICT_MPI */
+
+ MPI_Group_free(&half_group);
+ MPI_Group_free(&full_group);
+
+ if (rank == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+/* USE_STRICT_MPI may be defined in mpitestconf.h */
+#include "mpitestconf.h"
+
+#define LOOPS 100
+
+int main(int argc, char **argv)
+{
+ int rank, size, i, j, count;
+ MPI_Group full_group, sub_group;
+ int *included, *ranks;
+ MPI_Comm __attribute__((unused)) comm;
+
+ MPI_Init(NULL, NULL);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ ranks = malloc(size * sizeof(int));
+ included = malloc(size * sizeof(int));
+ MPI_Comm_group(MPI_COMM_WORLD, &full_group);
+
+ for (j = 0; j < LOOPS; j++) {
+ srand(j); /* Deterministic seed */
+
+ count = 0;
+ for (i = 0; i < size; i++) {
+ if (rand() % 2) { /* randomly include a rank */
+ included[i] = 1;
+ ranks[count++] = i;
+ }
+ else
+ included[i] = 0;
+ }
+
+ MPI_Group_incl(full_group, count, ranks, &sub_group);
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+ if (included[rank]) {
+ MPI_Comm_create_group(MPI_COMM_WORLD, sub_group, 0, &comm);
+ MPI_Barrier(comm);
+ MPI_Comm_free(&comm);
+ }
+#endif /* USE_STRICT_MPI */
+
+ MPI_Group_free(&sub_group);
+ }
+
+ MPI_Group_free(&full_group);
+
+ if (rank == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* This is a temporary #ifdef to control whether we test this functionality. A
+ * configure-test or similar would be better. Eventually the MPI-3 standard
+ * will be released and this can be gated on a MPI_VERSION check */
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_IDUP 1
+#endif
+
+/* assert-like macro that bumps the err count and emits a message */
+#define check(x_) \
+ do { \
+ if (!(x_)) { \
+ ++errs; \
+ if (errs < 10) { \
+ fprintf(stderr, "check failed: (%s), line %d\n", #x_, __LINE__); \
+ } \
+ } \
+ } while (0)
+
+int main(int argc, char **argv)
+{
+ int errs = 0;
+ int i;
+ int rank, size, lrank, lsize, rsize;
+ int buf[2];
+ MPI_Comm newcomm, ic, localcomm, stagger_comm;
+ MPI_Request rreq;
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ if (size < 2) {
+ printf("this test requires at least 2 processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+#ifdef TEST_IDUP
+
+ /* test plan: make rank 0 wait in a blocking recv until all other processes
+ * have posted their MPI_Comm_idup ops, then post last. Should ensure that
+ * idup doesn't block on the non-zero ranks, otherwise we'll get a deadlock.
+ */
+
+ if (rank == 0) {
+ for (i = 1; i < size; ++i) {
+ buf[0] = 0x01234567;
+ buf[1] = 0x89abcdef;
+ MPI_Recv(buf, 2, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ }
+ MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &rreq);
+ MPI_Wait(&rreq, MPI_STATUS_IGNORE);
+ }
+ else {
+ MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &rreq);
+ buf[0] = rank;
+ buf[1] = size + rank;
+ MPI_Ssend(buf, 2, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ MPI_Wait(&rreq, MPI_STATUS_IGNORE);
+ }
+
+ /* do some communication to make sure that newcomm works */
+ buf[0] = rank;
+ buf[1] = 0xfeedface;
+ MPI_Allreduce(&buf[0], &buf[1], 1, MPI_INT, MPI_SUM, newcomm);
+ check(buf[1] == (size * (size-1) / 2));
+
+ MPI_Comm_free(&newcomm);
+
+ /* now construct an intercomm and make sure we can dup that too */
+ MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &localcomm);
+ MPI_Intercomm_create(localcomm, 0, MPI_COMM_WORLD, (rank == 0 ? 1 : 0), 1234, &ic);
+
+ /* Create a communicator on just the "right hand group" of the intercomm in
+ * order to make it more likely to catch bugs related to incorrectly
+ * swapping the context_id and recvcontext_id in the idup code. */
+ stagger_comm = MPI_COMM_NULL;
+ if (rank % 2) {
+ MPI_Comm_dup(localcomm, &stagger_comm);
+ }
+
+ MPI_Comm_rank(ic, &lrank);
+ MPI_Comm_size(ic, &lsize);
+ MPI_Comm_remote_size(ic, &rsize);
+
+ /* Similar to above pattern, but all non-local-rank-0 processes send to
+ * remote rank 0. Both sides participate in this way. */
+ if (lrank == 0) {
+ for (i = 1; i < rsize; ++i) {
+ buf[0] = 0x01234567;
+ buf[1] = 0x89abcdef;
+ MPI_Recv(buf, 2, MPI_INT, i, 0, ic, MPI_STATUS_IGNORE);
+ }
+ MPI_Comm_idup(ic, &newcomm, &rreq);
+ MPI_Wait(&rreq, MPI_STATUS_IGNORE);
+ }
+ else {
+ MPI_Comm_idup(ic, &newcomm, &rreq);
+ buf[0] = lrank;
+ buf[1] = lsize + lrank;
+ MPI_Ssend(buf, 2, MPI_INT, 0, 0, ic);
+ MPI_Wait(&rreq, MPI_STATUS_IGNORE);
+ }
+
+ /* do some communication to make sure that newcomm works */
+ buf[0] = lrank;
+ buf[1] = 0xfeedface;
+ MPI_Allreduce(&buf[0], &buf[1], 1, MPI_INT, MPI_SUM, newcomm);
+ check(buf[1] == (rsize * (rsize-1) / 2));
+
+ /* free this down here, not before idup, otherwise it will undo our
+ * stagger_comm work */
+ MPI_Comm_free(&localcomm);
+
+ if (stagger_comm != MPI_COMM_NULL) {
+ MPI_Comm_free(&stagger_comm);
+ }
+ MPI_Comm_free(&newcomm);
+ MPI_Comm_free(&ic);
+
+#endif /* TEST_IDUP */
+
+ MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &errs), &errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ if (rank == 0) {
+ if (errs) {
+ printf("found %d errors\n", errs);
+ }
+ else {
+ printf(" No errors\n");
+ }
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <mpi.h>
+#include "mpitest.h"
+
+#define VERBOSE 0
+
+int main(int argc, char **argv)
+{
+ int rank;
+ MPI_Info info_in, info_out;
+ int errors = 0, all_errors = 0;
+ MPI_Comm comm;
+ char __attribute__((unused)) invalid_key[] = "invalid_test_key";
+ char buf[MPI_MAX_INFO_VAL];
+ int flag;
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ MPI_Info_create(&info_in);
+ MPI_Info_set(info_in, invalid_key, (char *) "true");
+
+ MPI_Comm_dup(MPI_COMM_WORLD, &comm);
+
+ MPI_Comm_set_info(comm, info_in);
+ MPI_Comm_get_info(comm, &info_out);
+
+ MPI_Info_get(info_out, invalid_key, MPI_MAX_INFO_VAL, buf, &flag);
+#ifndef USE_STRICT_MPI
+ /* Check if our invalid key was ignored. Note, this check's MPICH's
+ * behavior, but this behavior may not be required for a standard
+ * conforming MPI implementation. */
+ if (flag) {
+ printf("%d: %s was not ignored\n", rank, invalid_key);
+ errors++;
+ }
+#endif
+
+ MPI_Info_free(&info_in);
+ MPI_Info_free(&info_out);
+ MPI_Comm_free(&comm);
+
+ MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+
+ if (rank == 0 && all_errors == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2007 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <string.h>
+#include "mpitest.h"
+
+/* Check that Communicators can be created from various subsets of the
+ processes in the communicator.
+*/
+
+void abortMsg( const char *, int );
+int BuildComm( MPI_Comm, MPI_Group, const char [] );
+
+void abortMsg( const char *str, int code )
+{
+ char msg[MPI_MAX_ERROR_STRING];
+ int class, resultLen;
+
+ MPI_Error_class( code, &class );
+ MPI_Error_string( code, msg, &resultLen );
+ fprintf( stderr, "%s: errcode = %d, class = %d, msg = %s\n",
+ str, code, class, msg );
+ MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+int main( int argc, char *argv[] )
+{
+ MPI_Comm dupWorld;
+ int wrank, wsize, gsize, err, errs = 0;
+ int ranges[1][3];
+ MPI_Group wGroup, godd, ghigh, geven;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &wsize );
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ /* Create some groups */
+ MPI_Comm_group( MPI_COMM_WORLD, &wGroup );
+
+ MTestPrintfMsg( 2, "Creating groups\n" );
+ ranges[0][0] = 2*(wsize/2)-1;
+ ranges[0][1] = 1;
+ ranges[0][2] = -2;
+ err = MPI_Group_range_incl( wGroup, 1, ranges, &godd );
+ if (err) abortMsg( "Failed to create odd group: ", err );
+ err = MPI_Group_size( godd, &gsize );
+ if (err) abortMsg( "Failed to get size of odd group: ", err );
+ if (gsize != wsize/2) {
+ fprintf( stderr, "Group godd size is %d should be %d\n", gsize,
+ wsize/2 );
+ errs++;
+ }
+
+ ranges[0][0] = wsize/2+1;
+ ranges[0][1] = wsize-1;
+ ranges[0][2] = 1;
+ err = MPI_Group_range_incl( wGroup, 1, ranges, &ghigh );
+ if (err) abortMsg( "Failed to create high group\n", err );
+ ranges[0][0] = 0;
+ ranges[0][1] = wsize-1;
+ ranges[0][2] = 2;
+ err = MPI_Group_range_incl( wGroup, 1, ranges, &geven );
+ if (err) abortMsg( "Failed to create even group:", err );
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &dupWorld );
+ MPI_Comm_set_name( dupWorld, (char*)"Dup of world" );
+ /* First, use the groups to create communicators from world and a dup
+ of world */
+ errs += BuildComm( MPI_COMM_WORLD, ghigh, "ghigh" );
+ errs += BuildComm( MPI_COMM_WORLD, godd, "godd" );
+ errs += BuildComm( MPI_COMM_WORLD, geven, "geven" );
+ errs += BuildComm( dupWorld, ghigh, "ghigh" );
+ errs += BuildComm( dupWorld, godd, "godd" );
+ errs += BuildComm( dupWorld, geven, "geven" );
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* check that we can create multiple communicators from a single collective
+ * call to MPI_Comm_create as long as the groups are all disjoint */
+ errs += BuildComm( MPI_COMM_WORLD, (wrank % 2 ? godd : geven), "godd+geven" );
+ errs += BuildComm( dupWorld, (wrank % 2 ? godd : geven), "godd+geven" );
+ errs += BuildComm( MPI_COMM_WORLD, MPI_GROUP_EMPTY, "MPI_GROUP_EMPTY" );
+ errs += BuildComm( dupWorld, MPI_GROUP_EMPTY, "MPI_GROUP_EMPTY" );
+#endif
+
+ MPI_Comm_free( &dupWorld );
+ MPI_Group_free( &ghigh );
+ MPI_Group_free( &godd );
+ MPI_Group_free( &geven );
+ MPI_Group_free( &wGroup );
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+ return 0;
+}
+
+int BuildComm( MPI_Comm oldcomm, MPI_Group group, const char gname[] )
+{
+ MPI_Comm newcomm;
+ int grank, gsize, rank, size, errs = 0;
+ char cname[MPI_MAX_OBJECT_NAME+1];
+ int cnamelen;
+
+ MPI_Group_rank( group, &grank );
+ MPI_Group_size( group, &gsize );
+ MPI_Comm_get_name( oldcomm, cname, &cnamelen );
+ MTestPrintfMsg( 2, "Testing comm %s from %s\n", cname, gname );
+ MPI_Comm_create( oldcomm, group, &newcomm );
+ if (newcomm == MPI_COMM_NULL && grank != MPI_UNDEFINED) {
+ errs ++;
+ fprintf( stderr, "newcomm is null but process is in group\n" );
+ }
+ if (newcomm != MPI_COMM_NULL && grank == MPI_UNDEFINED) {
+ errs ++;
+ fprintf( stderr, "newcomm is not null but process is not in group\n" );
+ }
+ if (newcomm != MPI_COMM_NULL && grank != MPI_UNDEFINED) {
+ MPI_Comm_rank( newcomm, &rank );
+ if (rank != grank) {
+ errs ++;
+ fprintf( stderr, "Rank is %d should be %d in comm from %s\n",
+ rank, grank, gname );
+ }
+ MPI_Comm_size( newcomm, &size );
+ if (size != gsize) {
+ errs++;
+ fprintf( stderr, "Size is %d should be %d in comm from %s\n",
+ size, gsize, gname );
+ }
+ MPI_Comm_free( &newcomm );
+ MTestPrintfMsg( 2, "Done testing comm %s from %s\n", cname, gname );
+ }
+ return errs;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include "mpitestconf.h"
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ MPI_Comm comm;
+ int cnt, rlen;
+ char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME];
+ MTest_Init( &argc, &argv );
+
+ /* Check world and self firt */
+ nameout[0] = 0;
+ MPI_Comm_get_name( MPI_COMM_WORLD, nameout, &rlen );
+ if (strcmp(nameout,"MPI_COMM_WORLD")) {
+ errs++;
+ printf( "Name of comm world is %s, should be MPI_COMM_WORLD\n",
+ nameout );
+ }
+
+ nameout[0] = 0;
+ MPI_Comm_get_name( MPI_COMM_SELF, nameout, &rlen );
+ if (strcmp(nameout,"MPI_COMM_SELF")) {
+ errs++;
+ printf( "Name of comm self is %s, should be MPI_COMM_SELF\n",
+ nameout );
+ }
+
+ /* Now, handle other communicators, including world/self */
+ cnt = 0;
+ while (MTestGetComm( &comm, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ sprintf( name, "comm-%d", cnt );
+ cnt++;
+ MPI_Comm_set_name( comm, name );
+ nameout[0] = 0;
+ MPI_Comm_get_name( comm, nameout, &rlen );
+ if (strcmp( name, nameout )) {
+ errs++;
+ printf( "Unexpected name, was %s but should be %s\n",
+ nameout, name );
+ }
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+ * This program tests the allocation (and deallocation) of contexts.
+ *
+ */
+int main( int argc, char **argv )
+{
+ int errs = 0;
+ int i, j, err;
+ MPI_Comm newcomm1, newcomm2[200];
+
+ MTest_Init( &argc, &argv );
+
+ /* Get a separate communicator to duplicate */
+ MPI_Comm_dup( MPI_COMM_WORLD, &newcomm1 );
+
+ MPI_Errhandler_set( newcomm1, MPI_ERRORS_RETURN );
+ /* Allocate many communicators in batches, then free them */
+ for (i=0; i<1000; i++) {
+ for (j=0; j<200; j++) {
+ err = MPI_Comm_dup( newcomm1, &newcomm2[j] );
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Failed to duplicate communicator for (%d,%d)\n", i, j );
+ MTestPrintError( err );
+ }
+ }
+ }
+ for (j=0; j<200; j++) {
+ err = MPI_Comm_free( &newcomm2[j] );
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Failed to free %d,%d\n", i, j );
+ MTestPrintError( err );
+ }
+ }
+ }
+ }
+ err = MPI_Comm_free( &newcomm1 );
+ if (err) {
+ errs++;
+ fprintf( stderr, "Failed to free newcomm1\n" );
+ MTestPrintError( err );
+ }
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/*
+ * This check is intended to fail if there is a leak of context ids.
+ * Because this is trying to exhaust the number of context ids, it needs
+ * to run for a longer time than many tests. The for loop uses 100,000
+ * iterations, which is adequate for MPICH (with only about 1k context ids
+ * available).
+ */
+
+int main(int argc, char** argv) {
+
+ int i=0;
+ int randval;
+ int rank;
+ int errs = 0;
+ MPI_Comm newcomm;
+ double startTime;
+ int nLoop = 100000;
+
+ MTest_Init(&argc,&argv);
+
+ for (i=1; i<argc; i++) {
+ if (strcmp( argv[i], "--loopcount" ) == 0) {
+ i++;
+ nLoop = atoi( argv[i] );
+ }
+ else {
+ fprintf( stderr, "Unrecognized argument %s\n", argv[i] );
+ }
+ }
+
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+
+ startTime = MPI_Wtime();
+ for (i=0; i<nLoop; i++) {
+
+ if ( rank == 0 && (i%100 == 0) ) {
+ double rate = MPI_Wtime() - startTime;
+ if (rate > 0) {
+ rate = i / rate;
+ MTestPrintfMsg( 10, "After %d (%f)\n", i, rate );
+ }
+ else {
+ MTestPrintfMsg( 10, "After %d\n", i );
+ }
+ }
+
+ /* FIXME: Explain the rationale behind rand in this test */
+ randval=rand();
+
+ if (randval%(rank+2) == 0) {
+ MPI_Comm_split(MPI_COMM_WORLD,1,rank,&newcomm);
+ MPI_Comm_free( &newcomm );
+ }
+ else {
+ MPI_Comm_split(MPI_COMM_WORLD,MPI_UNDEFINED,rank,&newcomm);
+ if (newcomm != MPI_COMM_NULL) {
+ errs++;
+ printf( "Created a non-null communicator with MPI_UNDEFINED\n" );
+ }
+ }
+
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char **argv )
+{
+ int errs = 0;
+ int rank, size, wrank, wsize, dest, a, b;
+ MPI_Comm newcomm;
+ MPI_Status status;
+
+ MTest_Init( &argc, &argv );
+
+ /* Can we run comm dup at all? */
+ MPI_Comm_dup( MPI_COMM_WORLD, &newcomm );
+
+ /* Check basic properties */
+ MPI_Comm_size( MPI_COMM_WORLD, &wsize );
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_size( newcomm, &size );
+ MPI_Comm_rank( newcomm, &rank );
+
+ if (size != wsize || rank != wrank) {
+ errs++;
+ fprintf( stderr, "Size (%d) or rank (%d) wrong\n", size, rank );
+ fflush( stderr );
+ }
+
+ /* Can we communicate with this new communicator? */
+ dest = MPI_PROC_NULL;
+ if (rank == 0) {
+ dest = size - 1;
+ a = rank;
+ b = -1;
+ MPI_Sendrecv( &a, 1, MPI_INT, dest, 0,
+ &b, 1, MPI_INT, dest, 0, newcomm, &status );
+ if (b != dest) {
+ errs++;
+ fprintf( stderr, "Received %d expected %d on %d\n", b, dest, rank );
+ fflush( stderr );
+ }
+ if (status.MPI_SOURCE != dest) {
+ errs++;
+ fprintf( stderr, "Source not set correctly in status on %d\n",
+ rank );
+ fflush( stderr );
+ }
+ }
+ else if (rank == size-1) {
+ dest = 0;
+ a = rank;
+ b = -1;
+ MPI_Sendrecv( &a, 1, MPI_INT, dest, 0,
+ &b, 1, MPI_INT, dest, 0, newcomm, &status );
+ if (b != dest) {
+ errs++;
+ fprintf( stderr, "Received %d expected %d on %d\n", b, dest, rank );
+ fflush( stderr );
+ }
+ if (status.MPI_SOURCE != dest) {
+ errs++;
+ fprintf( stderr, "Source not set correctly in status on %d\n",
+ rank );
+ fflush( stderr );
+ }
+ }
+
+ MPI_Comm_free( &newcomm );
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+int run_tests(MPI_Comm comm);
+int run_tests(MPI_Comm comm)
+{
+ int rank, size, wrank, wsize, dest, a, b, errs = 0;
+ MPI_Status status;
+
+ /* Check basic properties */
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+ if (size != wsize || rank != wrank) {
+ errs++;
+ fprintf(stderr, "Size (%d) or rank (%d) wrong\n", size, rank);
+ fflush(stderr);
+ }
+
+ MPI_Barrier(comm);
+
+ /* Can we communicate with this new communicator? */
+ dest = MPI_PROC_NULL;
+ if (rank == 0) {
+ dest = size - 1;
+ a = rank;
+ b = -1;
+ MPI_Sendrecv(&a, 1, MPI_INT, dest, 0, &b, 1, MPI_INT, dest, 0, comm, &status);
+ if (b != dest) {
+ errs++;
+ fprintf(stderr, "Received %d expected %d on %d\n", b, dest, rank);
+ fflush(stderr);
+ }
+ if (status.MPI_SOURCE != dest) {
+ errs++;
+ fprintf(stderr, "Source not set correctly in status on %d\n", rank);
+ fflush(stderr);
+ }
+ }
+ else if (rank == size - 1) {
+ dest = 0;
+ a = rank;
+ b = -1;
+ MPI_Sendrecv(&a, 1, MPI_INT, dest, 0, &b, 1, MPI_INT, dest, 0, comm, &status);
+ if (b != dest) {
+ errs++;
+ fprintf(stderr, "Received %d expected %d on %d\n", b, dest, rank);
+ fflush(stderr);
+ }
+ if (status.MPI_SOURCE != dest) {
+ errs++;
+ fprintf(stderr, "Source not set correctly in status on %d\n", rank);
+ fflush(stderr);
+ }
+ }
+
+ MPI_Barrier(comm);
+
+ return errs;
+}
+
+int main(int argc, char **argv)
+{
+ int total_errs = 0;
+ MPI_Comm newcomm;
+ MPI_Info info;
+
+ MTest_Init(&argc, &argv);
+
+ /* Dup with no info */
+ MPI_Comm_dup_with_info(MPI_COMM_WORLD, MPI_INFO_NULL, &newcomm);
+ total_errs += run_tests(newcomm);
+ MPI_Comm_free(&newcomm);
+
+ /* Dup with info keys */
+ MPI_Info_create(&info);
+ MPI_Info_set(info, (char *) "host", (char *) "myhost.myorg.org");
+ MPI_Info_set(info, (char *) "file", (char *) "runfile.txt");
+ MPI_Info_set(info, (char *) "soft", (char *) "2:1000:4,3:1000:7");
+ MPI_Comm_dup_with_info(MPI_COMM_WORLD, info, &newcomm);
+ total_errs += run_tests(newcomm);
+ MPI_Info_free(&info);
+ MPI_Comm_free(&newcomm);
+
+ /* Dup with deleted info keys */
+ MPI_Info_create(&info);
+ MPI_Info_set(info, (char *) "host", (char *) "myhost.myorg.org");
+ MPI_Info_set(info, (char *) "file", (char *) "runfile.txt");
+ MPI_Info_set(info, (char *) "soft", (char *) "2:1000:4,3:1000:7");
+ MPI_Comm_dup_with_info(MPI_COMM_WORLD, info, &newcomm);
+ MPI_Info_free(&info);
+ total_errs += run_tests(newcomm);
+ MPI_Comm_free(&newcomm);
+
+ MTest_Finalize(total_errs);
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ MPI_Comm comm, dupcomm, dupcomm2;
+ MPI_Request rreq[2];
+ int count;
+ int indicies[2];
+ int r1buf, r2buf, s1buf, s2buf;
+ int rank, isLeft;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntercomm( &comm, &isLeft, 2 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_dup( comm, &dupcomm );
+
+ /* Check that there are separate contexts. We do this by setting
+ up nonblocking received on both communicators, and then
+ sending to them. If the contexts are different, tests on the
+ unsatisfied communicator should indicate no available message */
+ MPI_Comm_rank( comm, &rank );
+ if (rank == 0) {
+ s1buf = 456;
+ s2buf = 17;
+ r1buf = r2buf = -1;
+ /* These are send/receives to the process with rank zero
+ in the other group (these are intercommunicators) */
+ MPI_Irecv( &r1buf, 1, MPI_INT, 0, 0, dupcomm, &rreq[0] );
+ MPI_Irecv( &r2buf, 1, MPI_INT, 0, 0, comm, &rreq[1] );
+ MPI_Send( &s2buf, 1, MPI_INT, 0, 0, comm );
+ MPI_Waitsome(2, rreq, &count, indicies, MPI_STATUSES_IGNORE);
+ if (count != 1 || indicies[0] != 1) {
+ /* The only valid return is that exactly one message
+ has been received */
+ errs++;
+ if (count == 1 && indicies[0] != 1) {
+ printf( "Error in context values for intercomm\n" );
+ }
+ else if (count == 2) {
+ printf( "Error: two messages received!\n" );
+ }
+ else {
+ int i;
+ printf( "Error: count = %d", count );
+ for (i=0; i<count; i++) {
+ printf( " indicies[%d] = %d", i, indicies[i] );
+ }
+ printf( "\n" );
+ }
+ }
+
+ /* Make sure that we do not send the next message until
+ the other process (rank zero in the other group)
+ has also completed the first step */
+ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 0, 37,
+ MPI_BOTTOM, 0, MPI_BYTE, 0, 37, comm,
+ MPI_STATUS_IGNORE );
+
+ /* Complete the receive on dupcomm */
+ MPI_Send( &s1buf, 1, MPI_INT, 0, 0, dupcomm );
+ MPI_Wait( &rreq[0], MPI_STATUS_IGNORE );
+ if (r1buf != s1buf) {
+ errs++;
+ printf( "Wrong value in communication on dupcomm %d != %d\n",
+ r1buf, s1buf );
+ }
+ if (r2buf != s2buf) {
+ errs++;
+ printf( "Wrong value in communication on comm %d != %d\n",
+ r2buf, s2buf );
+ }
+ }
+ /* Try to duplicate a duplicated intercomm. (This caused problems
+ with some MPIs) */
+ MPI_Comm_dup( dupcomm, &dupcomm2 );
+ MPI_Comm_free( &dupcomm2 );
+ MPI_Comm_free( &dupcomm );
+ MTestFreeComm( &comm );
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/*
+ * A simple test of the intercomm create routine, with a communication test
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ MPI_Comm intercomm;
+ int remote_rank, rank, size, errs = 0;
+
+ MTest_Init( &argc, &argv );
+
+
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (size < 2) {
+ printf( "Size must be at least 2\n" );
+ MPI_Abort( MPI_COMM_WORLD, 0 );
+ }
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ /* Make an intercomm of the first two elements of comm_world */
+ if (rank < 2) {
+ int lrank = rank, rrank = -1;
+ MPI_Status status;
+
+ remote_rank = 1 - rank;
+ MPI_Intercomm_create( MPI_COMM_SELF, 0,
+ MPI_COMM_WORLD, remote_rank, 27,
+ &intercomm );
+
+ /* Now, communicate between them */
+ MPI_Sendrecv( &lrank, 1, MPI_INT, 0, 13,
+ &rrank, 1, MPI_INT, 0, 13, intercomm, &status );
+
+ if (rrank != remote_rank) {
+ errs++;
+ printf( "%d Expected %d but received %d\n",
+ rank, remote_rank, rrank );
+ }
+
+ MPI_Comm_free( &intercomm );
+ }
+
+ /* The next test should create an intercomm with groups of different
+ sizes FIXME */
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* regression test for ticket #1574
+ *
+ * Based on test code from N. Radclif @ Cray. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+int main(int argc, char **argv)
+{
+ MPI_Comm c0, c1, ic;
+ MPI_Group g0, g1, gworld;
+ int a, b, c, d;
+ int rank, size, remote_leader, tag;
+ int ranks[2];
+ int errs = 0;
+
+ tag = 5;
+ c0 = c1 = ic = MPI_COMM_NULL;
+ g0 = g1 = gworld = MPI_GROUP_NULL;
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ if (size < 33) {
+ printf("ERROR: this test requires at least 33 processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ return 1;
+ }
+
+ /* group of c0
+ * NOTE: a>=32 is essential for exercising the loop bounds bug from tt#1574 */
+ a = 32;
+ b = 24;
+
+ /* group of c1 */
+ c = 25;
+ d = 26;
+
+ MPI_Comm_group(MPI_COMM_WORLD, &gworld);
+
+ ranks[0] = a;
+ ranks[1] = b;
+ MPI_Group_incl(gworld, 2, ranks, &g0);
+ MPI_Comm_create(MPI_COMM_WORLD, g0, &c0);
+
+ ranks[0] = c;
+ ranks[1] = d;
+ MPI_Group_incl(gworld, 2, ranks, &g1);
+ MPI_Comm_create(MPI_COMM_WORLD, g1, &c1);
+
+ if (rank == a || rank == b) {
+ remote_leader = c;
+ MPI_Intercomm_create(c0, 0, MPI_COMM_WORLD, remote_leader, tag, &ic);
+ }
+ else if (rank == c || rank == d) {
+ remote_leader = a;
+ MPI_Intercomm_create(c1, 0, MPI_COMM_WORLD, remote_leader, tag, &ic);
+ }
+
+ MPI_Group_free(&g0);
+ MPI_Group_free(&g1);
+ MPI_Group_free(&gworld);
+
+ if (c0 != MPI_COMM_NULL)
+ MPI_Comm_free(&c0);
+ if (c1 != MPI_COMM_NULL)
+ MPI_Comm_free(&c1);
+ if (ic != MPI_COMM_NULL)
+ MPI_Comm_free(&ic);
+
+
+ MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &errs), &errs,
+ 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ if (rank == 0) {
+ if (errs) {
+ printf("found %d errors\n", errs);
+ }
+ else {
+ printf(" No errors\n");
+ }
+ }
+ MPI_Finalize();
+
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2007 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+ * This program tests that MPI_Comm_create applies to intercommunicators;
+ * this is an extension added in MPI-2
+ */
+
+int TestIntercomm( MPI_Comm );
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, isLeft, wrank;
+ MPI_Comm intercomm, newcomm;
+ MPI_Group oldgroup, newgroup;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (size < 4) {
+ printf( "This test requires at least 4 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
+ int ranks[10], nranks, result;
+
+ if (intercomm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_group( intercomm, &oldgroup );
+ ranks[0] = 0;
+ nranks = 1;
+ MTestPrintfMsg( 1, "Creating a new intercomm 0-0\n" );
+ MPI_Group_incl( oldgroup, nranks, ranks, &newgroup );
+ MPI_Comm_create( intercomm, newgroup, &newcomm );
+
+ /* Make sure that the new communicator has the appropriate pieces */
+ if (newcomm != MPI_COMM_NULL) {
+ int new_rsize, new_size, flag, commok = 1;
+
+ MPI_Comm_set_name( newcomm, (char*)"Single rank in each group" );
+ MPI_Comm_test_inter( intercomm, &flag );
+ if (!flag) {
+ errs++;
+ printf( "[%d] Output communicator is not an intercomm\n",
+ wrank );
+ commok = 0;
+ }
+
+ MPI_Comm_remote_size( newcomm, &new_rsize );
+ MPI_Comm_size( newcomm, &new_size );
+ /* The new communicator has 1 process in each group */
+ if (new_rsize != 1) {
+ errs++;
+ printf( "[%d] Remote size is %d, should be one\n",
+ wrank, new_rsize );
+ commok = 0;
+ }
+ if (new_size != 1) {
+ errs++;
+ printf( "[%d] Local size is %d, should be one\n",
+ wrank, new_size );
+ commok = 0;
+ }
+ /* ... more to do */
+ if (commok) {
+ errs += TestIntercomm( newcomm );
+ }
+ }
+ MPI_Group_free( &newgroup );
+ if (newcomm != MPI_COMM_NULL) {
+ MPI_Comm_free( &newcomm );
+ }
+
+ /* Now, do a sort of dup, using the original group */
+ MTestPrintfMsg( 1, "Creating a new intercomm (manual dup)\n" );
+ MPI_Comm_create( intercomm, oldgroup, &newcomm );
+ MPI_Comm_set_name( newcomm, (char*)"Dup of original" );
+ MTestPrintfMsg( 1, "Creating a new intercomm (manual dup (done))\n" );
+
+ MPI_Comm_compare( intercomm, newcomm, &result );
+ MTestPrintfMsg( 1, "Result of comm/intercomm compare is %d\n", result );
+ if (result != MPI_CONGRUENT) {
+ const char *rname=0;
+ errs++;
+ switch (result) {
+ case MPI_IDENT: rname = "IDENT"; break;
+ case MPI_CONGRUENT: rname = "CONGRUENT"; break;
+ case MPI_SIMILAR: rname = "SIMILAR"; break;
+ case MPI_UNEQUAL: rname = "UNEQUAL"; break;
+ printf( "[%d] Expected MPI_CONGRUENT but saw %d (%s)",
+ wrank, result, rname ); fflush(stdout);
+ }
+ }
+ else {
+ /* Try to communication between each member of intercomm */
+ errs += TestIntercomm( newcomm );
+ }
+
+ if (newcomm != MPI_COMM_NULL) {
+ MPI_Comm_free(&newcomm);
+ }
+ /* test that an empty group in either side of the intercomm results in
+ * MPI_COMM_NULL for all members of the comm */
+ if (isLeft) {
+ /* left side reuses oldgroup, our local group in intercomm */
+ MPI_Comm_create(intercomm, oldgroup, &newcomm);
+ }
+ else {
+ /* right side passes MPI_GROUP_EMPTY */
+ MPI_Comm_create(intercomm, MPI_GROUP_EMPTY, &newcomm);
+ }
+ if (newcomm != MPI_COMM_NULL) {
+ printf("[%d] expected MPI_COMM_NULL, but got a different communicator\n", wrank); fflush(stdout);
+ errs++;
+ }
+
+ if (newcomm != MPI_COMM_NULL) {
+ MPI_Comm_free(&newcomm);
+ }
+ MPI_Group_free( &oldgroup );
+ MPI_Comm_free( &intercomm );
+ }
+
+ MTest_Finalize(errs);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+int TestIntercomm( MPI_Comm comm )
+{
+ int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
+ int errs = 0, wrank, nsize;
+ char commname[MPI_MAX_OBJECT_NAME+1];
+ MPI_Request *reqs;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_size( comm, &local_size );
+ MPI_Comm_remote_size( comm, &remote_size );
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_get_name( comm, commname, &nsize );
+
+ MTestPrintfMsg( 1, "Testing communication on intercomm '%s', remote_size=%d\n",
+ commname, remote_size );
+
+ reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
+ if (!reqs) {
+ printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
+ wrank, remote_size, commname );
+ errs++;
+ return errs;
+ }
+ bufs = (int **) malloc( remote_size * sizeof(int *) );
+ if (!bufs) {
+ printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
+ wrank, remote_size, commname );
+ errs++;
+ return errs;
+ }
+ bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
+ if (!bufmem) {
+ printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
+ wrank, 2*remote_size, commname );
+ errs++;
+ return errs;
+ }
+
+ /* Each process sends a message containing its own rank and the
+ rank of the destination with a nonblocking send. Because we're using
+ nonblocking sends, we need to use different buffers for each isend */
+ /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although
+ it doesn't really hurt to keep separate buffers for our purposes */
+ for (j=0; j<remote_size; j++) {
+ bufs[j] = &bufmem[2*j];
+ bufs[j][0] = rank;
+ bufs[j][1] = j;
+ MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
+ }
+ MTestPrintfMsg( 2, "isends posted, about to recv\n" );
+
+ for (j=0; j<remote_size; j++) {
+ MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
+ if (rbuf[0] != j) {
+ printf( "[%d] Expected rank %d but saw %d in %s\n",
+ wrank, j, rbuf[0], commname );
+ errs++;
+ }
+ if (rbuf[1] != rank) {
+ printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
+ wrank, rank, rbuf[1], j, commname );
+ errs++;
+ }
+ }
+ if (errs)
+ fflush(stdout);
+ MTestPrintfMsg( 2, "my recvs completed, about to waitall\n" );
+ MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );
+
+ free( reqs );
+ free( bufs );
+ free( bufmem );
+
+ return errs;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Get the group of an intercommunicator";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, grank, gsize;
+ int minsize = 2, isleft;
+ MPI_Comm comm;
+ MPI_Group group;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntercomm( &comm, &isleft, minsize )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_group( comm, &group );
+ MPI_Group_rank( group, &grank );
+ MPI_Group_size( group, &gsize );
+ if (rank != grank) {
+ errs++;
+ fprintf( stderr, "Ranks of groups do not match %d != %d\n",
+ rank, grank );
+ }
+ if (size != gsize) {
+ errs++;
+ fprintf( stderr, "Sizes of groups do not match %d != %d\n",
+ size, gsize );
+ }
+ MPI_Group_free( &group );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2004 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test intercomm merge, including the choice of the high value";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, rsize;
+ int nsize, nrank;
+ int minsize = 2;
+ int isLeft;
+ MPI_Comm comm, comm1, comm2, comm3, comm4;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntercomm( &comm, &isLeft, minsize )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_size( comm, &size );
+
+ /* Try building intercomms */
+ MPI_Intercomm_merge( comm, isLeft, &comm1 );
+ /* Check the size and ranks */
+ MPI_Comm_size( comm1, &nsize );
+ MPI_Comm_rank( comm1, &nrank );
+ if (nsize != size + rsize) {
+ errs++;
+ printf( "(1) Comm size is %d but should be %d\n", nsize,
+ size + rsize );
+ if (isLeft) {
+ /* The left processes should be high */
+ if (nrank != rsize + rank) {
+ errs++;
+ printf( "(1) rank for high process is %d should be %d\n",
+ nrank, rsize + rank );
+ }
+ }
+ else {
+ /* The right processes should be low */
+ if (nrank != rank) {
+ errs++;
+ printf( "(1) rank for low process is %d should be %d\n",
+ nrank, rank );
+ }
+ }
+ }
+
+ MPI_Intercomm_merge( comm, !isLeft, &comm2 );
+ /* Check the size and ranks */
+ MPI_Comm_size( comm1, &nsize );
+ MPI_Comm_rank( comm1, &nrank );
+ if (nsize != size + rsize) {
+ errs++;
+ printf( "(2) Comm size is %d but should be %d\n", nsize,
+ size + rsize );
+ if (!isLeft) {
+ /* The right processes should be high */
+ if (nrank != rsize + rank) {
+ errs++;
+ printf( "(2) rank for high process is %d should be %d\n",
+ nrank, rsize + rank );
+ }
+ }
+ else {
+ /* The left processes should be low */
+ if (nrank != rank) {
+ errs++;
+ printf( "(2) rank for low process is %d should be %d\n",
+ nrank, rank );
+ }
+ }
+ }
+
+
+ MPI_Intercomm_merge( comm, 0, &comm3 );
+
+ MPI_Intercomm_merge( comm, 1, &comm4 );
+
+ MPI_Comm_free( &comm1 );
+ MPI_Comm_free( &comm2 );
+ MPI_Comm_free( &comm3 );
+ MPI_Comm_free( &comm4 );
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2007 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+ * This program tests that MPI_Comm_split applies to intercommunicators;
+ * this is an extension added in MPI-2
+ */
+
+int TestIntercomm( MPI_Comm );
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, isLeft;
+ MPI_Comm intercomm, newcomm;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (size < 4) {
+ printf( "This test requires at least 4 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
+ int key, color;
+
+ if (intercomm == MPI_COMM_NULL) continue;
+
+ /* Split this intercomm. The new intercomms contain the
+ processes that had odd (resp even) rank in their local group
+ in the original intercomm */
+ MTestPrintfMsg( 1, "Created intercomm %s\n", MTestGetIntercommName() );
+ MPI_Comm_rank( intercomm, &key );
+ color = (key % 2);
+ MPI_Comm_split( intercomm, color, key, &newcomm );
+ /* Make sure that the new communicator has the appropriate pieces */
+ if (newcomm != MPI_COMM_NULL) {
+ int orig_rsize, orig_size, new_rsize, new_size;
+ int predicted_size, flag, commok=1;
+
+ MPI_Comm_test_inter( intercomm, &flag );
+ if (!flag) {
+ errs++;
+ printf( "Output communicator is not an intercomm\n" );
+ commok = 0;
+ }
+
+ MPI_Comm_remote_size( intercomm, &orig_rsize );
+ MPI_Comm_remote_size( newcomm, &new_rsize );
+ MPI_Comm_size( intercomm, &orig_size );
+ MPI_Comm_size( newcomm, &new_size );
+ /* The local size is 1/2 the original size, +1 if the
+ size was odd and the color was even. More precisely,
+ let n be the orig_size. Then
+ color 0 color 1
+ orig size even n/2 n/2
+ orig size odd (n+1)/2 n/2
+
+ However, since these are integer valued, if n is even,
+ then (n+1)/2 = n/2, so this table is much simpler:
+ color 0 color 1
+ orig size even (n+1)/2 n/2
+ orig size odd (n+1)/2 n/2
+
+ */
+ predicted_size = (orig_size + !color) / 2;
+ if (predicted_size != new_size) {
+ errs++;
+ printf( "Predicted size = %d but found %d for %s (%d,%d)\n",
+ predicted_size, new_size, MTestGetIntercommName(),
+ orig_size, orig_rsize );
+ commok = 0;
+ }
+ predicted_size = (orig_rsize + !color) / 2;
+ if (predicted_size != new_rsize) {
+ errs++;
+ printf( "Predicted remote size = %d but found %d for %s (%d,%d)\n",
+ predicted_size, new_rsize, MTestGetIntercommName(),
+ orig_size, orig_rsize );
+ commok = 0;
+ }
+ /* ... more to do */
+ if (commok) {
+ errs += TestIntercomm( newcomm );
+ }
+ }
+ else {
+ int orig_rsize;
+ /* If the newcomm is null, then this means that remote group
+ for this color is of size zero (since all processes in this
+ test have been given colors other than MPI_UNDEFINED).
+ Confirm that here */
+ /* FIXME: ToDo */
+ MPI_Comm_remote_size( intercomm, &orig_rsize );
+ if (orig_rsize == 1) {
+ if (color == 0) {
+ errs++;
+ printf( "Returned null intercomm when non-null expected\n" );
+ }
+ }
+ }
+ if (newcomm != MPI_COMM_NULL)
+ MPI_Comm_free( &newcomm );
+ MPI_Comm_free( &intercomm );
+ }
+ MTest_Finalize(errs);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+/* FIXME: This is copied from iccreate. It should be in one place */
+int TestIntercomm( MPI_Comm comm )
+{
+ int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
+ int errs = 0, wrank, nsize;
+ char commname[MPI_MAX_OBJECT_NAME+1];
+ MPI_Request *reqs;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_size( comm, &local_size );
+ MPI_Comm_remote_size( comm, &remote_size );
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_get_name( comm, commname, &nsize );
+
+ MTestPrintfMsg( 1, "Testing communication on intercomm %s\n", commname );
+
+ reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
+ if (!reqs) {
+ printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
+ wrank, remote_size, commname );
+ errs++;
+ return errs;
+ }
+ bufs = (int **) malloc( remote_size * sizeof(int *) );
+ if (!bufs) {
+ printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
+ wrank, remote_size, commname );
+ errs++;
+ return errs;
+ }
+ bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
+ if (!bufmem) {
+ printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
+ wrank, 2*remote_size, commname );
+ errs++;
+ return errs;
+ }
+
+ /* Each process sends a message containing its own rank and the
+ rank of the destination with a nonblocking send. Because we're using
+ nonblocking sends, we need to use different buffers for each isend */
+ for (j=0; j<remote_size; j++) {
+ bufs[j] = &bufmem[2*j];
+ bufs[j][0] = rank;
+ bufs[j][1] = j;
+ MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
+ }
+
+ for (j=0; j<remote_size; j++) {
+ MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
+ if (rbuf[0] != j) {
+ printf( "[%d] Expected rank %d but saw %d in %s\n",
+ wrank, j, rbuf[0], commname );
+ errs++;
+ }
+ if (rbuf[1] != rank) {
+ printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
+ wrank, rank, rbuf[1], j, commname );
+ errs++;
+ }
+ }
+ if (errs)
+ fflush(stdout);
+ MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );
+
+ free( reqs );
+ free( bufs );
+ free( bufmem );
+
+ return errs;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Probe() for an intercomm";
+*/
+#define MAX_DATA_LEN 100
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, recvlen, isLeft;
+ MPI_Status status;
+ int rank, size;
+ MPI_Comm intercomm;
+ char buf[MAX_DATA_LEN];
+ const char *test_str = "test";
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ if (size < 2) {
+ fprintf( stderr, "This test requires at least two processes." );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
+ if (intercomm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_rank(intercomm, &rank);
+
+ /* 0 ranks on each side communicate, everyone else does nothing */
+ if(rank == 0) {
+ if (isLeft) {
+ recvlen = -1;
+ MPI_Probe(0, 0, intercomm, &status);
+ MPI_Get_count(&status, MPI_CHAR, &recvlen);
+ if (recvlen != (strlen(test_str) + 1)) {
+ printf(" Error: recvlen (%d) != strlen(\"%s\")+1 (%d)\n", recvlen, test_str, (int)strlen(test_str) + 1);
+ ++errs;
+ }
+ buf[0] = '\0';
+ MPI_Recv(buf, recvlen, MPI_CHAR, 0, 0, intercomm, &status);
+ if (strcmp(test_str,buf)) {
+ printf(" Error: strcmp(test_str,buf)!=0\n");
+ ++errs;
+ }
+ }
+ else {
+ strncpy(buf, test_str, 5);
+ MPI_Send(buf, strlen(buf)+1, MPI_CHAR, 0, 0, intercomm);
+ }
+ }
+ MTestFreeComm(&intercomm);
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+dup 2
+#needs MPI_Intercomm_create
+#dupic 4
+#works, but needs MPI_Comm_set_name
+commcreate1 8
+#needs MPI_Comm_set_name and MPI_Intercomm_create
+#commname 4
+#ic1 4
+# ic2 needs an unusually large number of processes (>= 33)
+#ic2 33
+#icgroup 8
+#icm 8
+#icsplit 8
+#iccreate 8
+ctxalloc 2 timeLimit=300
+ctxsplit 4 timeLimit=300
+cmfree 4
+cmsplit 4
+cmsplit2 12
+#probe-intercomm 2
+cmsplit_type 4 mpiversion=3.0
+comm_create_group 4 mpiversion=3.0
+comm_create_group 8 mpiversion=3.0
+comm_group_half 2 mpiversion=3.0
+comm_group_half 4 mpiversion=3.0
+comm_group_half 8 mpiversion=3.0
+comm_group_rand 2 mpiversion=3.0
+comm_group_rand 4 mpiversion=3.0
+comm_group_rand 8 mpiversion=3.0
+comm_idup 2 mpiversion=3.0
+comm_idup 4 mpiversion=3.0
+comm_idup 9 mpiversion=3.0
+dup_with_info 2 mpiversion=3.0
+dup_with_info 4 mpiversion=3.0
+dup_with_info 9 mpiversion=3.0
+comm_info 6 mpiversion=3.0
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1 -Wno-error=unused-variable")
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+
+ add_executable(groupcreate groupcreate.c ../util/mtest.c)
+ add_executable(groupnullincl groupnullincl.c ../util/mtest.c)
+ add_executable(grouptest2 grouptest2.c ../util/mtest.c)
+ add_executable(grouptest grouptest.c ../util/mtest.c)
+ add_executable(gtranks gtranks.c ../util/mtest.c)
+ add_executable(gtranksperf gtranksperf.c ../util/mtest.c)
+
+
+
+ target_link_libraries(groupcreate simgrid)
+ target_link_libraries(groupnullincl simgrid)
+ target_link_libraries(grouptest2 simgrid)
+ target_link_libraries(grouptest simgrid)
+ target_link_libraries(gtranks simgrid)
+ target_link_libraries(gtranksperf simgrid)
+
+
+
+ set_target_properties(groupcreate PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(groupnullincl PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(grouptest2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(grouptest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gtranks PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gtranksperf PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/groupcreate.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/groupnullincl.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/grouptest2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/grouptest.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gtranks.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gtranksperf.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+/* stdlib.h Needed for malloc declaration */
+#include <stdlib.h>
+
+int main( int argc, char **argv )
+{
+ int i, n, n_goal = 2048, n_all, rc, n_ranks, *ranks, rank, size, len;
+ int group_size;
+ MPI_Group *group_array, world_group;
+ char msg[MPI_MAX_ERROR_STRING];
+
+ MPI_Init( &argc, &argv );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ n = n_goal;
+
+ group_array = (MPI_Group *)malloc( n * sizeof(MPI_Group) );
+
+ MPI_Comm_group( MPI_COMM_WORLD, &world_group );
+
+ n_ranks = size;
+ ranks = (int *)malloc( size * sizeof(int) );
+ for (i=0; i<size; i++) ranks[i] = i;
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+ for (i=0; i<n; i++) {
+ rc = MPI_Group_incl( world_group, n_ranks, ranks, group_array + i );
+ if (rc) {
+ fprintf( stderr, "Error when creating group number %d\n", i );
+ MPI_Error_string( rc, msg, &len );
+ fprintf( stderr, "%s\n", msg );
+ n = i + 1;
+ break;
+ }
+ else {
+ /* Check that the group was created (and that any errors were
+ caught) */
+ rc = MPI_Group_size( group_array[i], &group_size );
+ if (group_size != size) {
+ fprintf( stderr, "Group number %d not correct (size = %d)\n",
+ i, size );
+ n = i + 1;
+ break;
+ }
+ }
+
+ }
+
+ for (i=0; i<n; i++) {
+ rc = MPI_Group_free( group_array + i );
+ if (rc) {
+ fprintf( stderr, "Error when freeing group number %d\n", i );
+ MPI_Error_string( rc, msg, &len );
+ fprintf( stderr, "%s\n", msg );
+ break;
+ }
+ }
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+ MPI_Group_free( &world_group );
+
+ MPI_Reduce( &n, &n_all, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD );
+ if (rank == 0) {
+ /* printf( "Completed test of %d type creations\n", n_all ); */
+ if (n_all != n_goal) {
+ printf (
+"This MPI implementation limits the number of groups that can be created\n\
+This is allowed by the standard and is not a bug, but is a limit on the\n\
+implementation\n" );
+ }
+ else {
+ printf( " No Errors\n" );
+ }
+ }
+
+ free( group_array );
+
+ MPI_Finalize( );
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rc, result;
+ int ranks[1];
+ MPI_Group group, outgroup;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ while (MTestGetComm( &comm, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_group( comm, &group );
+ rc = MPI_Group_incl( group, 0, 0, &outgroup );
+ if (rc) {
+ errs++;
+ MTestPrintError( rc );
+ printf( "Error in creating an empty group with (0,0)\n" );
+
+ /* Some MPI implementations may reject a null "ranks" pointer */
+ rc = MPI_Group_incl( group, 0, ranks, &outgroup );
+ if (rc) {
+ errs++;
+ MTestPrintError( rc );
+ printf( "Error in creating an empty group with (0,ranks)\n" );
+ }
+ }
+
+ if (outgroup != MPI_GROUP_EMPTY) {
+ /* Is the group equivalent to group empty? */
+ rc = MPI_Group_compare( outgroup, MPI_GROUP_EMPTY, &result );
+ if (result != MPI_IDENT) {
+ errs++;
+ MTestPrintError( rc );
+ printf( "Did not create a group equivalent to an empty group\n" );
+ }
+ }
+ rc = MPI_Group_free( &group );
+ if (rc) {
+ errs++;
+ MTestPrintError( rc );
+ }
+ if (outgroup != MPI_GROUP_NULL) {
+ rc = MPI_Group_free( &outgroup );
+ if (rc) {
+ errs++;
+ MTestPrintError( rc );
+ }
+ }
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main( int argc, char *argv[] )
+{
+ MPI_Group g1, g2, g4, g5, g45, selfgroup, g6;
+ int ranks[16], size, rank, myrank, range[1][3];
+ int errs = 0;
+ int i, rin[16], rout[16], result;
+
+ MPI_Init(&argc,&argv);
+
+ MPI_Comm_group( MPI_COMM_WORLD, &g1 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (size < 8) {
+ fprintf( stderr,
+ "Test requires 8 processes (16 prefered) only %d provided\n",
+ size );
+ errs++;
+ }
+
+ /* 16 members, this process is rank 0, return in group 1 */
+ ranks[0] = myrank; ranks[1] = 2; ranks[2] = 7;
+ if (myrank == 2) ranks[1] = 3;
+ if (myrank == 7) ranks[2] = 6;
+ MPI_Group_incl( g1, 3, ranks, &g2 );
+
+ /* Check the resulting group */
+ MPI_Group_size( g2, &size );
+ MPI_Group_rank( g2, &rank );
+
+ if (size != 3) {
+ fprintf( stderr, "Size should be %d, is %d\n", 3, size );
+ errs++;
+ }
+ if (rank != 0) {
+ fprintf( stderr, "Rank should be %d, is %d\n", 0, rank );
+ errs++;
+ }
+
+ rin[0] = 0; rin[1] = 1; rin[2] = 2;
+ MPI_Group_translate_ranks( g2, 3, rin, g1, rout );
+ for (i=0; i<3; i++) {
+ if (rout[i] != ranks[i]) {
+ fprintf( stderr, "translated rank[%d] %d should be %d\n",
+ i, rout[i], ranks[i] );
+ errs++;
+ }
+ }
+
+ /* Translate the process of the self group against another group */
+ MPI_Comm_group( MPI_COMM_SELF, &selfgroup );
+ rin[0] = 0;
+ MPI_Group_translate_ranks( selfgroup, 1, rin, g1, rout );
+ if (rout[0] != myrank) {
+ fprintf( stderr, "translated of self is %d should be %d\n",
+ rout[0], myrank );
+ errs++;
+ }
+
+ for (i=0; i<size; i++)
+ rin[i] = i;
+ MPI_Group_translate_ranks( g1, size, rin, selfgroup, rout );
+ for (i=0; i<size; i++) {
+ if (i == myrank && rout[i] != 0) {
+ fprintf( stderr, "translated world to self of %d is %d\n",
+ i, rout[i] );
+ errs++;
+ }
+ else if (i != myrank && rout[i] != MPI_UNDEFINED) {
+ fprintf( stderr, "translated world to self of %d should be undefined, is %d\n",
+ i, rout[i] );
+ errs++;
+ }
+ }
+ MPI_Group_free( &selfgroup );
+
+ /* Exclude everyone in our group */
+ {
+ int ii, *lranks, g1size;
+
+ MPI_Group_size( g1, &g1size );
+
+ lranks = (int *)malloc( g1size * sizeof(int) );
+ for (ii=0; ii<g1size; ii++) lranks[ii] = ii;
+ MPI_Group_excl( g1, g1size, lranks, &g6 );
+ if (g6 != MPI_GROUP_EMPTY) {
+ fprintf( stderr, "Group formed by excluding all ranks not empty\n" );
+ errs++;
+ MPI_Group_free( &g6 );
+ }
+ free( lranks );
+ }
+
+ /* Add tests for additional group operations */
+ /*
+ g2 = incl 1,3,7
+ g3 = excl 1,3,7
+ intersect ( w, g2 ) => g2
+ intersect ( w, g3 ) => g3
+ intersect ( g2, g3 ) => empty
+
+ g4 = rincl 1:n-1:2
+ g5 = rexcl 1:n-1:2
+ union( g4, g5 ) => world
+ g6 = rincl n-1:1:-1
+ g7 = rexcl n-1:1:-1
+ union( g6, g7 ) => concat of entries, similar to world
+ diff( w, g2 ) => g3
+ */
+ MPI_Group_free( &g2 );
+
+ range[0][0] = 1;
+ range[0][1] = size-1;
+ range[0][2] = 2;
+ MPI_Group_range_excl( g1, 1, range, &g5 );
+
+ range[0][0] = 1;
+ range[0][1] = size-1;
+ range[0][2] = 2;
+ MPI_Group_range_incl( g1, 1, range, &g4 );
+ MPI_Group_union( g4, g5, &g45 );
+ MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
+ if (result != MPI_UNEQUAL) {
+ errs++;
+ fprintf( stderr, "Comparison with empty group gave %d, not 3\n",
+ result );
+ }
+ MPI_Group_free( &g4 );
+ MPI_Group_free( &g5 );
+ MPI_Group_free( &g45 );
+
+ /* Now, duplicate the test, but using negative strides */
+ range[0][0] = size-1;
+ range[0][1] = 1;
+ range[0][2] = -2;
+ MPI_Group_range_excl( g1, 1, range, &g5 );
+
+ range[0][0] = size-1;
+ range[0][1] = 1;
+ range[0][2] = -2;
+ MPI_Group_range_incl( g1, 1, range, &g4 );
+
+ MPI_Group_union( g4, g5, &g45 );
+
+ MPI_Group_compare( MPI_GROUP_EMPTY, g4, &result );
+ if (result != MPI_UNEQUAL) {
+ errs++;
+ fprintf( stderr, "Comparison with empty group (formed with negative strides) gave %d, not 3\n",
+ result );
+ }
+ MPI_Group_free( &g4 );
+ MPI_Group_free( &g5 );
+ MPI_Group_free( &g45 );
+ MPI_Group_free( &g1 );
+
+ if (myrank == 0)
+ {
+ if (errs == 0) {
+ printf( " No Errors\n" );
+ }
+ else {
+ printf( "Found %d errors\n", errs );
+ }
+ }
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/*
+ Test the group routines
+ (some tested elsewere)
+
+MPI_Group_compare
+MPI_Group_excl
+MPI_Group_intersection
+MPI_Group_range_excl
+MPI_Group_rank
+MPI_Group_size
+MPI_Group_translate_ranks
+MPI_Group_union
+
+ */
+#include "mpi.h"
+#include <stdio.h>
+/* stdlib.h Needed for malloc declaration */
+#include <stdlib.h>
+
+int main( int argc, char **argv )
+{
+ int errs=0, toterr;
+ MPI_Group basegroup;
+ MPI_Group g1, g2, g3, g4, g5, g6, g7, g8, g9, g10;
+ MPI_Group g3a, g3b;
+ MPI_Comm comm, newcomm, splitcomm, dupcomm;
+ int i, grp_rank, rank, grp_size, size, result;
+ int nranks, *ranks, *ranks_out;
+ int range[1][3];
+ int worldrank;
+
+ MPI_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &worldrank );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_group( comm, &basegroup );
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+/* Get the basic information on this group */
+ MPI_Group_rank( basegroup, &grp_rank );
+ if (grp_rank != rank) {
+ errs++;
+ fprintf( stdout, "group rank %d != comm rank %d\n", grp_rank, rank );
+ }
+
+ MPI_Group_size( basegroup, &grp_size );
+ if (grp_size != size) {
+ errs++;
+ fprintf( stdout, "group size %d != comm size %d\n", grp_size, size );
+ }
+
+
+/* Form a new communicator with inverted ranking */
+ MPI_Comm_split( comm, 0, size - rank, &newcomm );
+ MPI_Comm_group( newcomm, &g1 );
+ ranks = (int *)malloc( size * sizeof(int) );
+ ranks_out = (int *)malloc( size * sizeof(int) );
+ for (i=0; i<size; i++) ranks[i] = i;
+ nranks = size;
+ MPI_Group_translate_ranks( g1, nranks, ranks, basegroup, ranks_out );
+ for (i=0; i<size; i++) {
+ if (ranks_out[i] != (size - 1) - i) {
+ errs++;
+ fprintf( stdout, "Translate ranks got %d expected %d\n",
+ ranks_out[i], (size - 1) - i );
+ }
+ }
+
+/* Check Compare */
+ MPI_Group_compare( basegroup, g1, &result );
+ if (result != MPI_SIMILAR) {
+ errs++;
+ fprintf( stdout, "Group compare should have been similar, was %d\n",
+ result );
+ }
+ MPI_Comm_dup( comm, &dupcomm );
+ MPI_Comm_group( dupcomm, &g2 );
+ MPI_Group_compare( basegroup, g2, &result );
+ if (result != MPI_IDENT) {
+ errs++;
+ fprintf( stdout, "Group compare should have been ident, was %d\n",
+ result );
+ }
+ MPI_Comm_split( comm, rank < size/2, rank, &splitcomm );
+ MPI_Comm_group( splitcomm, &g3 );
+ MPI_Group_compare( basegroup, g3, &result );
+ if (result != MPI_UNEQUAL) {
+ errs++;
+ fprintf( stdout, "Group compare should have been unequal, was %d\n",
+ result );
+ }
+
+ /* Build two groups that have this process and one other, but do not
+ have the same processes */
+ ranks[0] = rank;
+ ranks[1] = (rank + 1) % size;
+ MPI_Group_incl( basegroup, 2, ranks, &g3a );
+ ranks[1] = (rank + size - 1) % size;
+ MPI_Group_incl( basegroup, 2, ranks, &g3b );
+ MPI_Group_compare( g3a, g3b, &result );
+ if (result != MPI_UNEQUAL) {
+ errs++;
+ fprintf( stdout, "Group compare of equal sized but different groups should have been unequal, was %d\n", result );
+ }
+
+
+/* Build two new groups by excluding members; use Union to put them
+ together again */
+
+/* Exclude 0 */
+ for (i=0; i<size; i++) ranks[i] = i;
+ MPI_Group_excl( basegroup, 1, ranks, &g4 );
+/* Exclude 1-(size-1) */
+ MPI_Group_excl( basegroup, size-1, ranks+1, &g5 );
+ MPI_Group_union( g5, g4, &g6 );
+ MPI_Group_compare( basegroup, g6, &result );
+ if (result != MPI_IDENT) {
+ int usize;
+ errs++;
+ /* See ordering requirements on union */
+ fprintf( stdout, "Group excl and union did not give ident groups\n" );
+ fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
+ MPI_Group_size( g6, &usize );
+ fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
+ }
+ MPI_Group_union( basegroup, g4, &g7 );
+ MPI_Group_compare( basegroup, g7, &result );
+ if (result != MPI_IDENT) {
+ int usize;
+ errs++;
+ fprintf( stdout, "Group union of overlapping groups failed\n" );
+ fprintf( stdout, "[%d] result of compare was %d\n", rank, result );
+ MPI_Group_size( g7, &usize );
+ fprintf( stdout, "Size of union is %d, should be %d\n", usize, size );
+ }
+
+/* Use range_excl instead of ranks */
+ /* printf ("range excl\n" ); fflush( stdout ); */
+ range[0][0] = 1;
+ range[0][1] = size-1;
+ range[0][2] = 1;
+ MPI_Group_range_excl( basegroup, 1, range, &g8 );
+ /* printf( "out of range excl\n" ); fflush( stdout ); */
+ MPI_Group_compare( g5, g8, &result );
+ /* printf( "out of compare\n" ); fflush( stdout ); */
+ if (result != MPI_IDENT) {
+ errs++;
+ fprintf( stdout, "Group range excl did not give ident groups\n" );
+ }
+
+ /* printf( "intersection\n" ); fflush( stdout ); */
+ MPI_Group_intersection( basegroup, g4, &g9 );
+ MPI_Group_compare( g9, g4, &result );
+ if (result != MPI_IDENT) {
+ errs++;
+ fprintf( stdout, "Group intersection did not give ident groups\n" );
+ }
+
+/* Exclude EVERYTHING and check against MPI_GROUP_EMPTY */
+ /* printf( "range excl all\n" ); fflush( stdout ); */
+ range[0][0] = 0;
+ range[0][1] = size-1;
+ range[0][2] = 1;
+ MPI_Group_range_excl( basegroup, 1, range, &g10 );
+
+ /* printf( "done range excl all\n" ); fflush(stdout); */
+ MPI_Group_compare( g10, MPI_GROUP_EMPTY, &result );
+ /* printf( "done compare to MPI_GROUP_EMPTY\n" ); fflush(stdout); */
+
+ if (result != MPI_IDENT) {
+ errs++;
+ fprintf( stdout,
+ "MPI_GROUP_EMPTY didn't compare against empty group\n");
+ }
+
+ /* printf( "freeing groups\n" ); fflush( stdout ); */
+ MPI_Group_free( &basegroup );
+ MPI_Group_free( &g1 );
+ MPI_Group_free( &g2 );
+ MPI_Group_free( &g3 );
+ MPI_Group_free( &g3a );
+ MPI_Group_free( &g3b );
+ MPI_Group_free( &g4 );
+ MPI_Group_free( &g5 );
+ MPI_Group_free( &g6 );
+ MPI_Group_free( &g7 );
+ MPI_Group_free( &g8 );
+ MPI_Group_free( &g9 );
+ MPI_Group_free( &g10 );
+ MPI_Comm_free( &dupcomm );
+ MPI_Comm_free( &splitcomm );
+ MPI_Comm_free( &newcomm );
+
+ MPI_Allreduce( &errs, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (worldrank == 0) {
+ if (toterr == 0)
+ printf( " No Errors\n" );
+ else
+ printf( "Found %d errors in MPI Group routines\n", toterr );
+ }
+
+ MPI_Finalize();
+ return toterr;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_WORLD_SIZE 1024
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int ranks[MAX_WORLD_SIZE], ranksout[MAX_WORLD_SIZE],
+ ranksin[MAX_WORLD_SIZE];
+ int range[1][3];
+ MPI_Group gworld, gself, ngroup, galt;
+ MPI_Comm comm;
+ int rank, size, i, nelms;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_group( MPI_COMM_SELF, &gself );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ if (size > MAX_WORLD_SIZE) {
+ fprintf( stderr,
+ "This test requires a comm world with no more than %d processes\n",
+ MAX_WORLD_SIZE );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ if (size < 4) {
+ fprintf( stderr, "This test requiers at least 4 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ MPI_Comm_group( comm, &gworld );
+ for (i=0; i<size; i++) {
+ ranks[i] = i;
+ ranksout[i] = -1;
+ }
+ /* Try translating ranks from comm world compared against
+ comm self, so most will be UNDEFINED */
+ MPI_Group_translate_ranks( gworld, size, ranks, gself, ranksout );
+
+ for (i=0; i<size; i++) {
+ if (i == rank) {
+ if (ranksout[i] != 0) {
+ printf( "[%d] Rank %d is %d but should be 0\n", rank,
+ i, ranksout[i] );
+ errs++;
+ }
+ }
+ else {
+ if (ranksout[i] != MPI_UNDEFINED) {
+ printf( "[%d] Rank %d is %d but should be undefined\n", rank,
+ i, ranksout[i] );
+ errs++;
+ }
+ }
+ }
+
+ /* MPI-2 Errata requires that MPI_PROC_NULL is mapped to MPI_PROC_NULL */
+ ranks[0] = MPI_PROC_NULL;
+ ranks[1] = 1;
+ ranks[2] = rank;
+ ranks[3] = MPI_PROC_NULL;
+ for (i=0; i<4; i++) ranksout[i] = -1;
+
+ MPI_Group_translate_ranks( gworld, 4, ranks, gself, ranksout );
+ if (ranksout[0] != MPI_PROC_NULL) {
+ printf( "[%d] Rank[0] should be MPI_PROC_NULL but is %d\n",
+ rank, ranksout[0] );
+ errs++;
+ }
+ if (rank != 1 && ranksout[1] != MPI_UNDEFINED) {
+ printf( "[%d] Rank[1] should be MPI_UNDEFINED but is %d\n",
+ rank, ranksout[1] );
+ errs++;
+ }
+ if (rank == 1 && ranksout[1] != 0) {
+ printf( "[%d] Rank[1] should be 0 but is %d\n",
+ rank, ranksout[1] );
+ errs++;
+ }
+ if (ranksout[2] != 0) {
+ printf( "[%d] Rank[2] should be 0 but is %d\n",
+ rank, ranksout[2] );
+ errs++;
+ }
+ if (ranksout[3] != MPI_PROC_NULL) {
+ printf( "[%d] Rank[3] should be MPI_PROC_NULL but is %d\n",
+ rank, ranksout[3] );
+ errs++;
+ }
+
+ MPI_Group_free(&gself);
+
+ /* Now, try comparing small groups against larger groups, and use groups
+ with irregular members (to bypass optimizations in group_translate_ranks
+ for simple groups)
+ */
+ nelms = 0;
+ ranks[nelms++] = size - 2;
+ ranks[nelms++] = 0;
+ if (rank != 0 && rank != size - 2) {
+ ranks[nelms++] = rank;
+ }
+
+ MPI_Group_incl( gworld, nelms, ranks, &ngroup );
+
+ for (i=0; i<nelms; i++) ranksout[i] = -1;
+ ranksin[0] = 1;
+ ranksin[1] = 0;
+ ranksin[2] = MPI_PROC_NULL;
+ ranksin[3] = 2;
+ MPI_Group_translate_ranks( ngroup, nelms+1, ranksin, gworld, ranksout );
+ for (i=0; i<nelms+1; i++) {
+ if (ranksin[i] == MPI_PROC_NULL) {
+ if (ranksout[i] != MPI_PROC_NULL) {
+ fprintf( stderr, "Input rank for proc_null but output was %d\n",
+ ranksout[i] );
+ errs++;
+ }
+ }
+ else if (ranksout[i] != ranks[ranksin[i]]) {
+ fprintf( stderr, "Expected ranksout[%d] = %d but found %d\n",
+ i, ranks[ranksin[i]], ranksout[i] );
+ errs++;
+ }
+ }
+
+ range[0][0] = size -1 ;
+ range[0][1] = 0;
+ range[0][2] = -1;
+ MPI_Group_range_incl( gworld, 1, range, &galt);
+ for (i=0; i<nelms+1; i++) ranksout[i] = -1;
+ MPI_Group_translate_ranks( ngroup, nelms+1, ranksin, galt, ranksout );
+ for (i=0; i<nelms+1; i++) {
+ if (ranksin[i] == MPI_PROC_NULL) {
+ if (ranksout[i] != MPI_PROC_NULL) {
+ fprintf( stderr, "Input rank for proc_null but output was %d\n",
+ ranksout[i] );
+ errs++;
+ }
+ }
+ else if (ranksout[i] != (size-1)-ranks[ranksin[i]]) {
+ fprintf( stderr, "Expected ranksout[%d] = %d but found %d\n",
+ i, (size-1)-ranks[ranksin[i]], ranksout[i] );
+ errs++;
+ }
+ }
+
+
+ MPI_Group_free(&gworld);
+ MPI_Group_free(&galt);
+ MPI_Group_free(&ngroup);
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+#include <math.h> /* for fabs(3) */
+
+/* Measure and compare the relative performance of MPI_Group_translate_ranks
+ * with small and large group2 sizes but a constant number of ranks. This
+ * serves as a performance sanity check for the Scalasca use case where we
+ * translate to MPI_COMM_WORLD ranks. The performance should only depend on the
+ * number of ranks passed, not the size of either group (especially group2).
+ *
+ * This test is probably only meaningful for large-ish process counts, so we may
+ * not be able to run this test by default in the nightlies. */
+
+/* number of iterations used for timing */
+#define NUM_LOOPS (1000000)
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int *ranks;
+ int *ranksout;
+ MPI_Group gworld, grev, gself;
+ MPI_Comm comm;
+ MPI_Comm commrev;
+ int rank, size, i;
+ double start, end, time1, time2;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ ranks = malloc(size*sizeof(int));
+ ranksout = malloc(size*sizeof(int));
+ if (!ranks || !ranksout) {
+ fprintf(stderr, "out of memory\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ /* generate a comm with the rank order reversed */
+ MPI_Comm_split(comm, 0, (size-rank-1), &commrev);
+ MPI_Comm_group(commrev, &grev);
+ MPI_Comm_group(MPI_COMM_SELF, &gself);
+ MPI_Comm_group(comm, &gworld);
+
+ /* sanity check correctness first */
+ for (i=0; i < size; i++) {
+ ranks[i] = i;
+ ranksout[i] = -1;
+ }
+ MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout);
+ for (i=0; i < size; i++) {
+ if (ranksout[i] != (size-i-1)) {
+ if (rank == 0)
+ printf("%d: (gworld) expected ranksout[%d]=%d, got %d\n", rank, i, (size-rank-1), ranksout[i]);
+ ++errs;
+ }
+ }
+ MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout);
+ for (i=0; i < size; i++) {
+ int expected = (i == (size-rank-1) ? 0 : MPI_UNDEFINED);
+ if (ranksout[i] != expected) {
+ if (rank == 0)
+ printf("%d: (gself) expected ranksout[%d]=%d, got %d\n", rank, i, expected, ranksout[i]);
+ ++errs;
+ }
+ }
+
+ /* now compare relative performance */
+
+ /* we needs lots of procs to get a group large enough to have meaningful
+ * numbers. On most testing machines this means that we're oversubscribing
+ * cores in a big way, which might perturb the timing results. So we make
+ * sure everyone started up and then everyone but rank 0 goes to sleep to
+ * let rank 0 do all the timings. */
+ MPI_Barrier(comm);
+
+ if (rank != 0) {
+ MTestSleep(10);
+ }
+ else /* rank==0 */ {
+ MTestSleep(1); /* try to avoid timing while everyone else is making syscalls */
+
+ MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout); /*throwaway iter*/
+ start = MPI_Wtime();
+ for (i = 0; i < NUM_LOOPS; ++i) {
+ MPI_Group_translate_ranks(grev, size, ranks, gworld, ranksout);
+ }
+ end = MPI_Wtime();
+ time1 = end - start;
+
+ MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout); /*throwaway iter*/
+ start = MPI_Wtime();
+ for (i = 0; i < NUM_LOOPS; ++i) {
+ MPI_Group_translate_ranks(grev, size, ranks, gself, ranksout);
+ }
+ end = MPI_Wtime();
+ time2 = end - start;
+
+ /* complain if the "gworld" time exceeds 2x the "gself" time */
+ if (fabs(time1 - time2) > (2.00 * time2)) {
+ printf("too much difference in MPI_Group_translate_ranks performance:\n");
+ printf("time1=%f time2=%f\n", time1, time2);
+ printf("(fabs(time1-time2)/time2)=%f\n", (fabs(time1-time2)/time2));
+ if (time1 < time2) {
+ printf("also, (time1<time2) is surprising...\n");
+ }
+ ++errs;
+ }
+ }
+
+ free(ranks);
+ free(ranksout);
+
+ MPI_Group_free(&grev);
+ MPI_Group_free(&gself);
+ MPI_Group_free(&gworld);
+
+ MPI_Comm_free(&commrev);
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+groupcreate 4
+grouptest 8
+grouptest2 4
+#needs MPI_Intercomm_create
+#groupnullincl 4
+gtranks 8
+# this may be too many processes for some systems, but the test needs a
+# large-ish number of processes to yield an effective performance check
+#gtranksperf 20
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1 -Wno-error=unused-variable")
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+
+ add_executable(attrself attrself.c ../util/mtest.c)
+ add_executable(exitst1 exitst1.c ../util/mtest.c)
+ add_executable(exitst2 exitst2.c ../util/mtest.c)
+ add_executable(exitst3 exitst3.c ../util/mtest.c)
+ add_executable(finalized finalized.c ../util/mtest.c)
+ add_executable(initstat initstat.c ../util/mtest.c)
+ add_executable(library_version library_version.c ../util/mtest.c)
+ add_executable(timeout timeout.c ../util/mtest.c)
+ add_executable(version version.c ../util/mtest.c)
+
+
+
+ target_link_libraries(attrself simgrid)
+ target_link_libraries(exitst1 simgrid)
+ target_link_libraries(exitst2 simgrid)
+ target_link_libraries(exitst3 simgrid)
+ target_link_libraries(finalized simgrid)
+ target_link_libraries(initstat simgrid)
+ target_link_libraries(library_version simgrid)
+ target_link_libraries(timeout simgrid)
+ target_link_libraries(version simgrid)
+
+
+
+ set_target_properties(attrself PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(exitst1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(exitst2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(exitst3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(finalized PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(initstat PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(library_version PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(timeout PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(version PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/attrself.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exitst1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exitst2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exitst3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/finalized.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/initstat.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/library_version.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/timeout.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/version.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Comm, int, int [], int [] );
+int delete_fn( MPI_Comm, int, void *, void *);
+
+#define NKEYS 5
+static int key[NKEYS]; /* Keys in creation order */
+static int keyorder[NKEYS]; /* Index (into key) of keys in order added to comm
+ (key[keyorder[0]] is first set) */
+static int nkeys = 0;
+static int ncall = 0;
+static int errs = 0;
+/*
+ * Test that attributes on comm self are deleted in LIFO order
+ */
+
+int main( int argc, char *argv[] )
+{
+ int attrval[10];
+ int wrank, i;
+ MPI_Comm comm;
+
+ MPI_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+ comm = MPI_COMM_SELF;
+
+ /* Create key values */
+ for (nkeys=0; nkeys<NKEYS; nkeys++) {
+ MPI_Comm_create_keyval( MPI_NULL_COPY_FN, delete_fn,
+ &key[nkeys], (void *)0 );
+ attrval[nkeys] = 1024 * nkeys;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Comm_set_attr( comm, key[3], &attrval[3] ); keyorder[0] = 3;
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] ); keyorder[1] = 2;
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] ); keyorder[2] = 0;
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] ); keyorder[3] = 1;
+ MPI_Comm_set_attr( comm, key[4], &attrval[4] ); keyorder[4] = 4;
+
+ errs += checkAttrs( comm, NKEYS, key, attrval );
+
+ for (i=0; i<NKEYS; i++) {
+ /* Save the key value so that we can compare it in the
+ delete function */
+ int keyval = key[i];
+ MPI_Comm_free_keyval( &keyval );
+ }
+
+ MPI_Finalize();
+
+ if (wrank == 0) {
+ if (ncall != nkeys) {
+ printf( "Deleted %d keys but should have deleted %d\n",
+ ncall, nkeys );
+ errs++;
+ }
+ if (errs == 0) printf( " No Errors\n" );
+ else printf( " Found %d errors\n", errs );
+ }
+ return 0;
+
+}
+
+int checkAttrs( MPI_Comm comm, int n, int lkey[], int attrval[] )
+{
+ int lerrs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Comm_get_attr( comm, lkey[i], &val_p, &flag );
+ if (!flag) {
+ lerrs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ lerrs++;
+ fprintf( stderr, "Atribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return lerrs;
+}
+
+/* We *should* be deleting key[keyorder[nkeys-ncall]] */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ if (ncall >= nkeys) {
+ printf( "delete function called too many times!\n" );
+ errs++;
+ }
+
+ /* As of MPI 2.2, the order of deletion of attributes on
+ MPI_COMM_SELF is defined */
+ if (MPI_VERSION > 2 || (MPI_VERSION == 2 && MPI_SUBVERSION >= 2)) {
+ if (keyval != key[keyorder[nkeys-1-ncall]]) {
+ printf( "Expected key # %d but found key with value %d\n",
+ keyorder[nkeys-1-ncall], keyval );
+ errs++;
+ }
+ }
+ ncall++;
+ return MPI_SUCCESS;
+}
+
+/*
+int checkNoAttrs( MPI_Comm comm, int n, int lkey[] )
+{
+ int lerrs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Comm_get_attr( comm, lkey[i], &val_p, &flag );
+ if (flag) {
+ lerrs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return lerrs;
+}
+*/
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+
+/*
+ * This is a special test to check that mpiexec handles zero/non-zero
+ * return status from an application
+ */
+int main( int argc, char *argv[] )
+{
+ MPI_Init( 0, 0 );
+ MPI_Finalize( );
+ return 1;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+
+/*
+ * This is a special test to check that mpiexec handles zero/non-zero
+ * return status from an application. In this case, each process
+ * returns a different return status
+ */
+int main( int argc, char *argv[] )
+{
+ int rank;
+ MPI_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Finalize( );
+ return rank;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+
+/*
+ * This is a special test to check that mpiexec handles the death of
+ * some processes without an Abort or clean exit
+ */
+int main( int argc, char *argv[] )
+{
+ int rank, size;
+ MPI_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Barrier( MPI_COMM_WORLD );
+ if (rank == size-1) {
+ /* Cause some processes to exit */
+ int *p =0 ;
+ *p = rank;
+ }
+ MPI_Finalize( );
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+
+/* FIXME: This test program assumes that MPI_Error_string will work even
+ if MPI is not initialized. That is not guaranteed. */
+
+/* Normally, when checking for error returns from MPI calls, you must ensure
+ that the error handler on the relevant object (communicator, file, or
+ window) has been set to MPI_ERRORS_RETURN. The tests in this
+ program are a special case, as either a failure or an abort will
+ indicate a problem */
+
+int main( int argc, char *argv[] )
+{
+ int error;
+ int flag;
+ char err_string[1024];
+ int length = 1024;
+ int rank;
+
+ flag = 0;
+ error = MPI_Finalized(&flag);
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Finalized failed: %s\n", err_string);
+ fflush(stdout);
+ return error;
+ }
+ if (flag)
+ {
+ printf("MPI_Finalized returned true before MPI_Init.\n");
+ return -1;
+ }
+
+ error = MPI_Init(&argc, &argv);
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Init failed: %s\n", err_string);
+ fflush(stdout);
+ return error;
+ }
+
+ error = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Comm_rank failed: %s\n", err_string);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, error);
+ return error;
+ }
+
+ flag = 0;
+ error = MPI_Finalized(&flag);
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Finalized failed: %s\n", err_string);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, error);
+ return error;
+ }
+ if (flag)
+ {
+ printf("MPI_Finalized returned true before MPI_Finalize.\n");
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, error);
+ return -1;
+ }
+
+ error = MPI_Barrier(MPI_COMM_WORLD);
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Barrier failed: %s\n", err_string);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, error);
+ return error;
+ }
+
+ error = MPI_Finalize();
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Finalize failed: %s\n", err_string);
+ fflush(stdout);
+ return error;
+ }
+
+ flag = 0;
+ error = MPI_Finalized(&flag);
+ if (error != MPI_SUCCESS)
+ {
+ MPI_Error_string(error, err_string, &length);
+ printf("MPI_Finalized failed: %s\n", err_string);
+ fflush(stdout);
+ return error;
+ }
+ if (!flag)
+ {
+ printf("MPI_Finalized returned false after MPI_Finalize.\n");
+ return -1;
+ }
+ if (rank == 0)
+ {
+ printf(" No Errors\n");
+ }
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int provided, flag, claimed;
+
+ /* MTest_Init( &argc, &argv ); */
+
+ MPI_Init_thread( &argc, &argv, MPI_THREAD_MULTIPLE, &provided );
+
+ MPI_Is_thread_main( &flag );
+ if (!flag) {
+ errs++;
+ printf( "This thread called init_thread but Is_thread_main gave false\n" );
+ }
+ MPI_Query_thread( &claimed );
+ if (claimed != provided) {
+ errs++;
+ printf( "Query thread gave thread level %d but Init_thread gave %d\n",
+ claimed, provided );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+static int verbose = 0;
+
+int main(int argc, char *argv[])
+{
+ int errs = 0, resultlen = -1;
+ char version[MPI_MAX_LIBRARY_VERSION_STRING];
+
+ MTest_Init(&argc, &argv);
+
+ MPI_Get_library_version(version, &resultlen);
+ if (resultlen < 0) {
+ errs++;
+ printf("Resultlen is %d\n", resultlen);
+ }
+ else {
+ if (verbose)
+ printf("%s\n", version);
+ }
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+exitst1 2 resultTest=TestStatus
+exitst2 4 resultTest=TestStatus
+initstat 1
+#timeout 2 resultTest=TestTimeout timeLimit=10
+version 1
+finalized 1
+#needs PMPI_Comm_free_keyval
+#attrself 1
+library_version 1 mpiversion=3.0
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+
+/* This is a program that tests the ability of mpiexec to timeout a process
+ after no more than 3 minutes. By default, it will run for 5 minutes */
+int main( int argc, char *argv[] )
+{
+ double t1;
+ double deltaTime = 300;
+
+ MPI_Init( &argc, &argv );
+ t1 = MPI_Wtime();
+ while (MPI_Wtime() - t1 < deltaTime) ;
+ MPI_Finalize( );
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int majversion, subversion;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Get_version( &majversion, &subversion );
+ if (majversion != MPI_VERSION) {
+ errs++;
+ printf( "Major version is %d but is %d in the mpi.h file\n",
+ majversion, MPI_VERSION );
+ }
+ if (subversion != MPI_SUBVERSION) {
+ errs++;
+ printf( "Minor version is %d but is %d in the mpi.h file\n",
+ subversion, MPI_SUBVERSION );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1 -Wno-error=unused-variable")
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+
+ add_executable(anyall anyall.c ../util/mtest.c)
+ add_executable(bottom bottom.c ../util/mtest.c)
+ add_executable(bsend1 bsend1.c ../util/mtest.c)
+ add_executable(bsend2 bsend2.c ../util/mtest.c)
+ add_executable(bsend3 bsend3.c ../util/mtest.c)
+ add_executable(bsend4 bsend4.c ../util/mtest.c)
+ add_executable(bsend5 bsend5.c ../util/mtest.c)
+ add_executable(bsendalign bsendalign.c ../util/mtest.c)
+ add_executable(bsendfrag bsendfrag.c ../util/mtest.c)
+ add_executable(bsendpending bsendpending.c ../util/mtest.c)
+ add_executable(cancelrecv cancelrecv.c ../util/mtest.c)
+ add_executable(eagerdt eagerdt.c ../util/mtest.c)
+ add_executable(greq1 greq1.c ../util/mtest.c)
+ add_executable(icsend icsend.c ../util/mtest.c)
+ add_executable(inactivereq inactivereq.c ../util/mtest.c)
+ add_executable(isendself isendself.c ../util/mtest.c)
+ add_executable(isendselfprobe isendselfprobe.c ../util/mtest.c)
+ add_executable(large_message large_message.c ../util/mtest.c)
+ add_executable(mprobe mprobe.c ../util/mtest.c)
+ add_executable(pingping pingping.c ../util/mtest.c)
+ add_executable(probenull probenull.c ../util/mtest.c)
+ add_executable(probe-unexp probe-unexp.c ../util/mtest.c)
+ add_executable(pscancel pscancel.c ../util/mtest.c)
+ add_executable(rcancel rcancel.c ../util/mtest.c)
+ add_executable(rqfreeb rqfreeb.c ../util/mtest.c)
+ add_executable(rqstatus rqstatus.c ../util/mtest.c)
+ add_executable(scancel2 scancel2.c ../util/mtest.c)
+ add_executable(scancel scancel.c ../util/mtest.c)
+ add_executable(sendall sendall.c ../util/mtest.c)
+ add_executable(sendflood sendflood.c ../util/mtest.c)
+ add_executable(sendrecv1 sendrecv1.c ../util/mtest.c)
+ add_executable(sendrecv2 sendrecv2.c ../util/mtest.c)
+ add_executable(sendrecv3 sendrecv3.c ../util/mtest.c)
+ add_executable(sendself sendself.c ../util/mtest.c)
+ add_executable(waitany-null waitany-null.c ../util/mtest.c)
+ add_executable(waittestnull waittestnull.c ../util/mtest.c)
+
+
+
+ target_link_libraries(anyall simgrid)
+ target_link_libraries(bottom simgrid)
+ target_link_libraries(bsend1 simgrid)
+ target_link_libraries(bsend2 simgrid)
+ target_link_libraries(bsend3 simgrid)
+ target_link_libraries(bsend4 simgrid)
+ target_link_libraries(bsend5 simgrid)
+ target_link_libraries(bsendalign simgrid)
+ target_link_libraries(bsendfrag simgrid)
+ target_link_libraries(bsendpending simgrid)
+ target_link_libraries(cancelrecv simgrid)
+ target_link_libraries(eagerdt simgrid)
+ target_link_libraries(greq1 simgrid)
+ target_link_libraries(icsend simgrid)
+ target_link_libraries(inactivereq simgrid)
+ target_link_libraries(isendself simgrid)
+ target_link_libraries(isendselfprobe simgrid)
+ target_link_libraries(large_message simgrid)
+ target_link_libraries(mprobe simgrid)
+ target_link_libraries(pingping simgrid)
+ target_link_libraries(probenull simgrid)
+ target_link_libraries(probe-unexp simgrid)
+ target_link_libraries(pscancel simgrid)
+ target_link_libraries(rcancel simgrid)
+ target_link_libraries(rqfreeb simgrid)
+ target_link_libraries(rqstatus simgrid)
+ target_link_libraries(scancel2 simgrid)
+ target_link_libraries(scancel simgrid)
+ target_link_libraries(sendall simgrid)
+ target_link_libraries(sendflood simgrid)
+ target_link_libraries(sendrecv1 simgrid)
+ target_link_libraries(sendrecv2 simgrid)
+ target_link_libraries(sendrecv3 simgrid)
+ target_link_libraries(sendself simgrid)
+ target_link_libraries(waitany-null simgrid)
+ target_link_libraries(waittestnull simgrid)
+
+
+
+ set_target_properties(anyall PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bottom PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsend1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsend2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsend3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsend4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsend5 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsendalign PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsendfrag PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bsendpending PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(cancelrecv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(eagerdt PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(greq1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icsend PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(inactivereq PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(isendself PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(isendselfprobe PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(large_message PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(mprobe PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(pingping PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(probenull PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(probe-unexp PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(pscancel PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(rcancel PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(rqfreeb PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(rqstatus PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scancel2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scancel PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(sendall PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(sendflood PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(sendrecv1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(sendrecv2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(sendrecv3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(sendself PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(waitany-null PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(waittestnull PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/anyall.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bottom.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsend1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsend2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsend3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsend4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsend5.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsendalign.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsendfrag.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bsendpending.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cancelrecv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/eagerdt.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/greq1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icsend.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/inactivereq.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/isendself.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/isendselfprobe.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/large_message.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mprobe.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pingping.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/probenull.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/probe-unexp.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pscancel.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/rcancel.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/rqfreeb.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/rqstatus.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scancel2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scancel.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendall.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendflood.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/sendself.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/waitany-null.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/waittestnull.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+#define MAX_MSGS 30
+
+/*
+static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI recieve uses both ANY_SOURCE and ANY_TAG";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int wrank, wsize, master, worker, i, j, idx, count;
+ int errs = 0;
+ MPI_Request r[MAX_MSGS];
+ int buf[MAX_MSGS][MAX_MSGS];
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_size( MPI_COMM_WORLD, &wsize );
+
+ comm = MPI_COMM_WORLD;
+ master = 0;
+ worker = 1;
+
+ /* The test takes advantage of the ordering rules for messages*/
+
+ if (wrank == master) {
+ /* Initialize the send buffer */
+ for (i=0; i<MAX_MSGS; i++) {
+ for (j=0; j<MAX_MSGS; j++) {
+ buf[i][j] = i*MAX_MSGS + j;
+ }
+ }
+ MPI_Barrier( MPI_COMM_WORLD );
+ for (i=0; i<MAX_MSGS; i++) {
+ MPI_Send( buf[i], MAX_MSGS-i, MPI_INT, worker, 3, comm );
+ }
+ }
+ else if (wrank == worker) {
+ /* Initialize the recv buffer */
+ for (i=0; i<MAX_MSGS; i++) {
+ for (j=0; j<MAX_MSGS; j++) {
+ buf[i][j] = -1;
+ }
+ }
+ for (i=0; i<MAX_MSGS; i++) {
+ MPI_Irecv( buf[i], MAX_MSGS-i, MPI_INT, MPI_ANY_SOURCE,
+ MPI_ANY_TAG, comm, &r[i] );
+ }
+ MPI_Barrier( MPI_COMM_WORLD );
+ for (i=0; i<MAX_MSGS; i++) {
+ MPI_Waitany( MAX_MSGS, r, &idx, &status );
+ /* Message idx should have length MAX_MSGS-idx */
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != MAX_MSGS-idx) {
+ errs++;
+ }
+ else {
+ /* Check for the correct answers */
+ for (j=0; j < MAX_MSGS-idx; j++) {
+ if (buf[idx][j] != idx * MAX_MSGS + j) {
+ errs ++;
+ printf( "Message %d [%d] is %d, should be %d\n",
+ idx, j, buf[idx][j], idx * MAX_MSGS + j );
+ }
+ }
+ }
+ }
+ }
+ else {
+ MPI_Barrier( MPI_COMM_WORLD );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Use of MPI_BOTTOM in communication";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, source, dest, len, ii;
+ MPI_Comm comm;
+ MPI_Status status;
+ MPI_Datatype newtype, oldtype;
+ MPI_Aint disp;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Get_address( &ii, &disp );
+
+ len = 1;
+ oldtype = MPI_INT;
+ MPI_Type_create_struct( 1, &len, &disp, &oldtype, &newtype );
+ MPI_Type_commit( &newtype );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ if (size < 2) {
+ errs++;
+ fprintf( stderr, "This test requires at least two processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ source = 0;
+ dest = 1;
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ if (rank == source) {
+ ii = 2;
+ err = MPI_Send( MPI_BOTTOM, 1, newtype, dest, 0, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ printf( "MPI_Send did not return MPI_SUCCESS\n" );
+ }
+ }
+ else if (rank == dest) {
+ ii = -1;
+ err = MPI_Recv( MPI_BOTTOM, 1, newtype, source, 0, comm, &status );
+ if (err) {
+ MTestPrintError( err );
+ errs++;
+ printf( "MPI_Recv did not return MPI_SUCCESS\n" );
+ }
+ if (ii != 2) {
+ errs++;
+ printf( "Received %d but expected %d\n", ii, 2 );
+ }
+ }
+
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_ARE_FATAL );
+
+ MPI_Type_free( &newtype );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include "mpitestconf.h"
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+/*
+ * This is a simple program that tests bsend. It may be run as a single
+ * process to simplify debugging; in addition, bsend allows send-to-self
+ * programs.
+ */
+int main( int argc, char *argv[] )
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ int dest = 0, src = 0, tag = 1;
+ int s1, s2, s3;
+ char *buf, *bbuf;
+ char msg1[7], msg3[17];
+ double msg2[2];
+ char rmsg1[64], rmsg3[64];
+ double rmsg2[64];
+ int errs = 0, rank;
+ int bufsize, bsize;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ /* According to the standard, we must use the PACK_SIZE length of each
+ message in the computation of the message buffer size */
+ MPI_Pack_size( 7, MPI_CHAR, comm, &s1 );
+ MPI_Pack_size( 2, MPI_DOUBLE, comm, &s2 );
+ MPI_Pack_size( 17, MPI_CHAR, comm, &s3 );
+ bufsize = 3 * MPI_BSEND_OVERHEAD + s1 + s2 + s3;
+ buf = (char *)malloc( bufsize );
+ MPI_Buffer_attach( buf, bufsize );
+
+ strncpy( msg1, "012345", 7 );
+ strncpy( msg3, "0123401234012341", 17 );
+ msg2[0] = 1.23; msg2[1] = 3.21;
+
+ if (rank == src) {
+ /* These message sizes are chosen to expose any alignment problems */
+ MPI_Bsend( msg1, 7, MPI_CHAR, dest, tag, comm );
+ MPI_Bsend( msg2, 2, MPI_DOUBLE, dest, tag, comm );
+ MPI_Bsend( msg3, 17, MPI_CHAR, dest, tag, comm );
+ }
+
+ if (rank == dest) {
+ MPI_Recv( rmsg1, 7, MPI_CHAR, src, tag, comm, MPI_STATUS_IGNORE );
+ MPI_Recv( rmsg2, 10, MPI_DOUBLE, src, tag, comm, MPI_STATUS_IGNORE );
+ MPI_Recv( rmsg3, 17, MPI_CHAR, src, tag, comm, MPI_STATUS_IGNORE );
+
+ if (strcmp( rmsg1, msg1 ) != 0) {
+ errs++;
+ fprintf( stderr, "message 1 (%s) should be %s\n", rmsg1, msg1 );
+ }
+ if (rmsg2[0] != msg2[0] || rmsg2[1] != msg2[1]) {
+ errs++;
+ fprintf( stderr,
+ "message 2 incorrect, values are (%f,%f) but should be (%f,%f)\n",
+ rmsg2[0], rmsg2[1], msg2[0], msg2[1] );
+ }
+ if (strcmp( rmsg3, msg3 ) != 0) {
+ errs++;
+ fprintf( stderr, "message 3 (%s) should be %s\n", rmsg3, msg3 );
+ }
+ }
+
+ /* We can't guarantee that messages arrive until the detach */
+ MPI_Buffer_detach( &bbuf, &bsize );
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define BUFSIZE 2000
+int main( int argc, char *argv[] )
+{
+ MPI_Status status;
+ int a[10], b[10];
+ int buf[BUFSIZE], *bptr, bl, i, j, rank, size;
+ int errs = 0;
+
+ MTest_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Buffer_attach( buf, BUFSIZE );
+
+ for (j=0; j<10; j++) {
+ for (i=0; i<10; i++) {
+ a[i] = (rank + 10 * j) * size + i;
+ }
+ MPI_Bsend( a, 10, MPI_INT, 0, 27+j, MPI_COMM_WORLD );
+ }
+ if (rank == 0) {
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<10; j++) {
+ int k;
+ status.MPI_TAG = -10;
+ status.MPI_SOURCE = -20;
+ MPI_Recv( b, 10, MPI_INT, i, 27+j, MPI_COMM_WORLD, &status );
+
+ if (status.MPI_TAG != 27+j) {
+ errs ++;
+ printf( "Wrong tag = %d\n", status.MPI_TAG );
+ }
+ if (status.MPI_SOURCE != i) {
+ errs++;
+ printf( "Wrong source = %d\n", status.MPI_SOURCE );
+ }
+ for (k=0; k<10; k++) {
+ if (b[k] != (i + 10 * j) * size + k) {
+ errs++;
+ printf( "received b[%d] = %d from %d tag %d\n",
+ k, b[k], i, 27+j );
+ }
+ }
+ }
+ }
+ }
+ MPI_Buffer_detach( &bptr, &bl );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define BUFSIZE 2000
+int main( int argc, char *argv[] )
+{
+ MPI_Status status;
+ MPI_Request request;
+ int a[10], b[10];
+ int buf[BUFSIZE], *bptr, bl, i, j, rank, size;
+ int errs = 0;
+
+ MTest_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Buffer_attach( buf, BUFSIZE );
+
+ for (j=0; j<10; j++) {
+ MPI_Bsend_init( a, 10, MPI_INT, 0, 27+j, MPI_COMM_WORLD, &request );
+ for (i=0; i<10; i++) {
+ a[i] = (rank + 10 * j) * size + i;
+ }
+ MPI_Start( &request );
+ MPI_Wait( &request, &status );
+ MPI_Request_free( &request );
+ }
+ if (rank == 0) {
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<10; j++) {
+ int k;
+ status.MPI_TAG = -10;
+ status.MPI_SOURCE = -20;
+ MPI_Recv( b, 10, MPI_INT, i, 27+j, MPI_COMM_WORLD, &status );
+
+ if (status.MPI_TAG != 27+j) {
+ errs++;
+ printf( "Wrong tag = %d\n", status.MPI_TAG );
+ }
+ if (status.MPI_SOURCE != i) {
+ errs++;
+ printf( "Wrong source = %d\n", status.MPI_SOURCE );
+ }
+ for (k=0; k<10; k++) {
+ if (b[k] != (i + 10 * j) * size + k) {
+ errs++;
+ printf( "received b[%d] = %d from %d tag %d\n",
+ k, b[k], i, 27+j );
+ }
+ }
+ }
+ }
+ }
+ MPI_Buffer_detach( &bptr, &bl );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define BUFSIZE 2000
+int main( int argc, char *argv[] )
+{
+ MPI_Status status;
+ MPI_Request request;
+ int a[10], b[10];
+ int buf[BUFSIZE], *bptr, bl, i, j, rank, size, errs=0;
+
+ MTest_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Buffer_attach( buf, BUFSIZE );
+
+ for (j=0; j<10; j++) {
+ for (i=0; i<10; i++) {
+ a[i] = (rank + 10 * j) * size + i;
+ }
+ MPI_Ibsend( a, 10, MPI_INT, 0, 27+j, MPI_COMM_WORLD, &request );
+ MPI_Wait( &request, &status );
+ }
+ if (rank == 0) {
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<10; j++) {
+ int k;
+ status.MPI_TAG = -10;
+ status.MPI_SOURCE = -20;
+ MPI_Recv( b, 10, MPI_INT, i, 27+j, MPI_COMM_WORLD, &status );
+
+ if (status.MPI_TAG != 27+j) {
+ errs++;
+ printf( "Wrong tag = %d\n", status.MPI_TAG );
+ }
+ if (status.MPI_SOURCE != i) {
+ errs++;
+ printf( "Wrong source = %d\n", status.MPI_SOURCE );
+ }
+ for (k=0; k<10; k++) {
+ if (b[k] != (i + 10 * j) * size + k) {
+ errs ++;
+ printf( "received b[%d] = %d from %d tag %d\n",
+ k, b[k], i, 27+j );
+ }
+ }
+ }
+ }
+ }
+ MPI_Buffer_detach( &bptr, &bl );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define BUFSIZE 2000
+int main( int argc, char *argv[] )
+{
+ MPI_Status status;
+ MPI_Comm comm,scomm;
+ int a[10], b[10];
+ int buf[BUFSIZE], *bptr, bl, i, j, rank, size, color, errs=0;
+
+ MTest_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ color = rank % 2;
+ MPI_Comm_split( MPI_COMM_WORLD, color, rank, &scomm );
+ MPI_Intercomm_create( scomm, 0, MPI_COMM_WORLD, 1-color, 52, &comm);
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &size );
+ MPI_Buffer_attach( buf, BUFSIZE );
+
+ for (j=0; j<10; j++) {
+ for (i=0; i<10; i++) {
+ a[i] = (rank + 10 * j) * size + i;
+ }
+ MPI_Bsend( a, 10, MPI_INT, 0, 27+j, comm );
+ }
+ if (rank == 0) {
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<10; j++) {
+ int k;
+ status.MPI_TAG = -10;
+ status.MPI_SOURCE = -20;
+ MPI_Recv( b, 10, MPI_INT, i, 27+j, comm, &status );
+
+ if (status.MPI_TAG != 27+j) {
+ errs++;
+ printf( "Wrong tag = %d\n", status.MPI_TAG );
+ }
+ if (status.MPI_SOURCE != i) {
+ errs++;
+ printf( "Wrong source = %d\n", status.MPI_SOURCE );
+ }
+ for (k=0; k<10; k++) {
+ if (b[k] != (i + 10 * j) * size + k) {
+ errs++;
+ printf( "received b[%d] = %d from %d tag %d\n",
+ k, b[k], i, 27+j );
+ }
+ }
+ }
+ }
+ }
+ MPI_Buffer_detach( &bptr, &bl );
+
+ MPI_Comm_free(&scomm);
+ MPI_Comm_free(&comm);
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* Test bsend with a buffer with arbitray alignment */
+#define BUFSIZE 2000*4
+int main( int argc, char *argv[] )
+{
+ MPI_Status status;
+ int a[10], b[10];
+ int align;
+ char buf[BUFSIZE+8], *bptr;
+ int bl, i, j, rank, size;
+ int errs = 0;
+
+ MTest_Init( 0, 0 );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ for (align = 0; align < 7; align++) {
+ MPI_Buffer_attach( buf+align, BUFSIZE);
+
+ for (j=0; j<10; j++) {
+ for (i=0; i<10; i++) {
+ a[i] = (rank + 10 * j) * size + i;
+ }
+ MPI_Bsend( a, 10, MPI_INT, 0, 27+j, MPI_COMM_WORLD );
+ }
+ if (rank == 0) {
+
+ for (i=0; i<size; i++) {
+ for (j=0; j<10; j++) {
+ int k;
+ status.MPI_TAG = -10;
+ status.MPI_SOURCE = -20;
+ MPI_Recv( b, 10, MPI_INT, i, 27+j, MPI_COMM_WORLD, &status );
+
+ if (status.MPI_TAG != 27+j) {
+ errs ++;
+ printf( "Wrong tag = %d\n", status.MPI_TAG );
+ }
+ if (status.MPI_SOURCE != i) {
+ errs++;
+ printf( "Wrong source = %d\n", status.MPI_SOURCE );
+ }
+ for (k=0; k<10; k++) {
+ if (b[k] != (i + 10 * j) * size + k) {
+ errs++;
+ printf( "(Align=%d) received b[%d] = %d (expected %d) from %d tag %d\n",
+ align, k, b[k], (i+10*j), i, 27+j );
+ }
+ }
+ }
+ }
+ }
+ MPI_Buffer_detach( &bptr, &bl );
+ if (bptr != buf+align) {
+ errs++;
+ printf( "Did not recieve the same buffer on detach that was provided on init (%p vs %p)\n", bptr, buf );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test bsend message handling where \
+different messages are received in different orders";
+*/
+
+/*
+ * Notes on the test.
+ *
+ * To ensure that messages remain in the bsend buffer until received,
+ * messages are sent with size MSG_SIZE (ints).
+ */
+
+#define MSG_SIZE 17000
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int b1[MSG_SIZE], b2[MSG_SIZE], b3[MSG_SIZE], b4[MSG_SIZE];
+ int src, dest, size, rank, i;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ if (size < 2) {
+ errs++;
+ fprintf( stderr, "At least 2 processes required\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ src = 0;
+ dest = 1;
+
+ if (rank == src) {
+ int *buf, bufsize, bsize;
+
+ bufsize = 4 * (MSG_SIZE * sizeof(int) + MPI_BSEND_OVERHEAD);
+ buf = (int *)malloc( bufsize );
+ if (!buf) {
+ fprintf( stderr, "Could not allocate buffer of %d bytes\n",
+ bufsize );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Buffer_attach( buf, bufsize );
+
+ /* Initialize data */
+ for (i=0; i<MSG_SIZE; i++) {
+ b1[i] = i;
+ b2[i] = MSG_SIZE + i;
+ b3[i] = 2 * MSG_SIZE + i;
+ b4[i] = 3 * MSG_SIZE + i;
+ }
+ /* Send and reset buffers after bsend returns */
+ MPI_Bsend( b1, MSG_SIZE, MPI_INT, dest, 0, comm );
+ for (i=0; i<MSG_SIZE; i++) b1[i] = -b1[i];
+ MPI_Bsend( b2, MSG_SIZE, MPI_INT, dest, 1, comm );
+ for (i=0; i<MSG_SIZE; i++) b2[i] = -b2[i];
+ MPI_Bsend( b3, MSG_SIZE, MPI_INT, dest, 2, comm );
+ for (i=0; i<MSG_SIZE; i++) b3[i] = -b3[i];
+ MPI_Bsend( b4, MSG_SIZE, MPI_INT, dest, 3, comm );
+ for (i=0; i<MSG_SIZE; i++) b4[i] = -b4[i];
+
+ MPI_Barrier( comm );
+ /* Detach waits until all messages received */
+ MPI_Buffer_detach( &buf, &bsize );
+ }
+ else if (rank == dest) {
+
+ MPI_Barrier( comm );
+ MPI_Recv( b2, MSG_SIZE, MPI_INT, src, 1, comm, &status );
+ MPI_Recv( b1, MSG_SIZE, MPI_INT, src, 0, comm, &status );
+ MPI_Recv( b4, MSG_SIZE, MPI_INT, src, 3, comm, &status );
+ MPI_Recv( b3, MSG_SIZE, MPI_INT, src, 2, comm, &status );
+
+ /* Check received data */
+ for (i=0; i<MSG_SIZE; i++) {
+ if (b1[i] != i) {
+ errs++;
+ if (errs < 16) printf( "b1[%d] is %d\n", i, b1[i] );
+ }
+ if (b2[i] != MSG_SIZE + i) {
+ errs++;
+ if (errs < 16) printf( "b2[%d] is %d\n", i, b2[i] );
+ }
+ if (b3[i] != 2 * MSG_SIZE + i) {
+ errs++;
+ if (errs < 16) printf( "b3[%d] is %d\n", i, b3[i] );
+ }
+ if (b4[i] != 3 * MSG_SIZE + i) {
+ errs++;
+ if (errs < 16) printf( "b4[%d] is %d\n", i, b4[i] );
+ }
+ }
+ }
+ else {
+ MPI_Barrier( comm );
+ }
+
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test the handling of BSend operations when a detach occurs before the bsend data has been sent.";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest;
+ unsigned char *buf, *bufp;
+ int minsize = 2;
+ int i, msgsize, bufsize, outsize;
+ unsigned char *msg1, *msg2, *msg3;
+ MPI_Comm comm;
+ MPI_Status status1, status2, status3;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ msgsize = 128 * 1024;
+ msg1 = (unsigned char *)malloc( 3 * msgsize );
+ msg2 = msg1 + msgsize;
+ msg3 = msg2 + msgsize;
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ source = 0;
+ dest = size - 1;
+
+ /* Here is the test: The sender */
+ if (rank == source) {
+ /* Get a bsend buffer. Make it large enough that the Bsend
+ internals will (probably) not use a eager send for the data.
+ Have three such messages */
+ bufsize = 3 * (MPI_BSEND_OVERHEAD + msgsize);
+ buf = (unsigned char *)malloc( bufsize );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate a buffer of %d bytes\n",
+ bufsize );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ MPI_Buffer_attach( buf, bufsize );
+
+ /* Initialize the buffers */
+ for (i=0; i<msgsize; i++) {
+ msg1[i] = 0xff ^ (i & 0xff);
+ msg2[i] = 0xff ^ (3*i & 0xff);
+ msg3[i] = 0xff ^ (5*i & 0xff);
+ }
+
+ /* Initiate the bsends */
+ MPI_Bsend( msg1, msgsize, MPI_CHAR, dest, 0, comm );
+ MPI_Bsend( msg2, msgsize, MPI_CHAR, dest, 0, comm );
+ MPI_Bsend( msg3, msgsize, MPI_CHAR, dest, 0, comm );
+
+ /* Synchronize with our partner */
+ MPI_Sendrecv( 0, 0, MPI_CHAR, dest, 10,
+ 0, 0, MPI_CHAR, dest, 10, comm, MPI_STATUS_IGNORE );
+
+ /* Detach the buffers. There should be pending operations */
+ MPI_Buffer_detach ( &bufp, &outsize );
+ if (bufp != buf) {
+ fprintf( stderr, "Wrong buffer returned\n" );
+ errs++;
+ }
+ if (outsize != bufsize) {
+ fprintf( stderr, "Wrong buffer size returned\n" );
+ errs++;
+ }
+ }
+ else if (rank == dest) {
+ double tstart;
+
+ /* Clear the message buffers */
+ for (i=0; i<msgsize; i++) {
+ msg1[i] = 0;
+ msg2[i] = 0;
+ msg3[i] = 0;
+ }
+
+ /* Wait for the synchronize */
+ MPI_Sendrecv( 0, 0, MPI_CHAR, source, 10,
+ 0, 0, MPI_CHAR, source, 10, comm, MPI_STATUS_IGNORE );
+
+ /* Wait 2 seconds */
+ tstart = MPI_Wtime();
+ while (MPI_Wtime() - tstart < 2.0) ;
+
+ /* Now receive the messages */
+ MPI_Recv( msg1, msgsize, MPI_CHAR, source, 0, comm, &status1 );
+ MPI_Recv( msg2, msgsize, MPI_CHAR, source, 0, comm, &status2 );
+ MPI_Recv( msg3, msgsize, MPI_CHAR, source, 0, comm, &status3 );
+
+ /* Check that we have the correct data */
+ for (i=0; i<msgsize; i++) {
+ if (msg1[i] != (0xff ^ (i & 0xff))) {
+ if (errs < 10) {
+ fprintf( stderr, "msg1[%d] = %d\n", i, msg1[i] );
+ }
+ errs++;
+ }
+ if (msg2[i] != (0xff ^ (3*i & 0xff))) {
+ if (errs < 10) {
+ fprintf( stderr, "msg2[%d] = %d\n", i, msg2[i] );
+ }
+ errs++;
+ }
+ if (msg3[i] != (0xff ^ (5*i & 0xff))) {
+ if (errs < 10) {
+ fprintf( stderr, "msg2[%d] = %d\n", i, msg2[i] );
+ }
+ errs++;
+ }
+ }
+
+ }
+
+
+ MTestFreeComm( &comm );
+ }
+ free( msg1 );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <string.h> /* For memset */
+
+int main( int argc, char *argv[] )
+{
+ MPI_Request r[3];
+ MPI_Status s[3];
+ int *buf0, *buf1, *buf2;
+ int rank, size, src, dest, flag, errs = 0;
+ int n0, n1, n2;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (size < 2) {
+ fprintf( stderr, "Must run with at least 2 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ dest = 0;
+ src = 1;
+ comm = MPI_COMM_WORLD;
+
+ n0 = n1 = n2 = 65536;
+ buf0 = (int *)malloc( n0 * sizeof(int) );
+ buf1 = (int *)malloc( n1 * sizeof(int) );
+ buf2 = (int *)malloc( n2 * sizeof(int) );
+ if (!buf0 || !buf1 || !buf2) {
+ fprintf( stderr, "Unable to allocate buffers of size %d\n",
+ n0 * (int)sizeof(int) );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ memset( buf0, -1, n0 * sizeof(int) );
+ memset( buf1, -1, n0 * sizeof(int) );
+ memset( buf2, -1, n0 * sizeof(int) );
+
+ if (rank == dest) {
+ MPI_Irecv( buf0, n0, MPI_INT, src, 0, comm, &r[0] );
+ MPI_Irecv( buf1, n1, MPI_INT, src, 1, comm, &r[1] );
+ MPI_Irecv( buf2, n2, MPI_INT, src, 2, comm, &r[2] );
+
+ MPI_Barrier( comm );
+
+ MPI_Cancel( &r[1] );
+ MPI_Barrier( comm );
+ memset( s, -1, sizeof(s) );
+ MPI_Waitall( 3, r, s );
+ MPI_Test_cancelled( &s[0], &flag );
+ if (flag) {
+ errs++;
+ printf( "request 0 was cancelled!\n" );
+ }
+ MPI_Test_cancelled( &s[1], &flag );
+ if (!flag) {
+ errs++;
+ printf( "request 1 was not cancelled!\n" );
+ }
+ MPI_Test_cancelled( &s[2], &flag );
+ if (flag) {
+ errs++;
+ printf( "request 2 was cancelled!\n" );
+ }
+ MPI_Barrier( comm );
+ }
+ if (rank == src) {
+ int tflag;
+ MPI_Barrier( comm );
+ MPI_Barrier( comm );
+ MPI_Send( buf0, n0, MPI_INT, dest, 0, comm );
+ MPI_Isend( buf2, n2, MPI_INT, dest, 2, comm, &r[1] );
+ MPI_Isend( buf1, n1, MPI_INT, dest, 4, comm, &r[0] );
+ MPI_Cancel( &r[0] );
+ memset( s, -3, sizeof(s) );
+ s[0].MPI_ERROR = -3;
+ s[1].MPI_ERROR = -3;
+ MPI_Testall( 2, r, &tflag, s );
+ if (tflag) {
+ MPI_Test_cancelled( &s[0], &flag );
+ if (!flag) {
+ errs++;
+ printf( "send request 0 was not cancelled!\n" );
+ }
+ MPI_Test_cancelled( &s[1], &flag );
+ if (flag) {
+ errs++;
+ printf( "send request 1 was cancelled!\n" );
+ }
+ }
+ else {
+ /* If all requests are not complete, then neither r nor s
+ may be changed */
+ if ( (s[0].MPI_ERROR) != -3) {
+ errs++;
+ printf( "Send request status 0 modified. s[0].MPI_ERROR = %x\n",
+ s[0].MPI_ERROR );
+ }
+ if ( (s[1].MPI_ERROR) != -3) {
+ errs++;
+ printf( "Send request status 1 modified. s[1].MPI_ERROR = %x\n",
+ s[1].MPI_ERROR );
+ }
+ }
+ MPI_Barrier( comm );
+ while (!tflag) {
+ MPI_Testall( 2, r, &tflag, s );
+ }
+ MPI_Test_cancelled( &s[0], &flag );
+ if (!flag) {
+ errs++;
+ printf( "send request 0 was not cancelled!\n" );
+ }
+ MPI_Test_cancelled( &s[1], &flag );
+ if (flag) {
+ errs++;
+ printf( "send request 1 was cancelled!\n" );
+ }
+ }
+ if (rank != src && rank != dest) {
+ MPI_Barrier( comm );
+ MPI_Barrier( comm );
+ MPI_Barrier( comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of a large number of derived-datatype messages eagerly, with no preposted receive so that an MPI implementation may have to queue up messages on the sending side";
+*/
+
+#define MAX_MSGS 30
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, dest, source;
+ int i, indices[40];
+ MPI_Aint extent;
+ int *buf, *bufs[MAX_MSGS];
+ MPI_Comm comm;
+ MPI_Datatype dtype;
+ MPI_Request req[MAX_MSGS];
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ source = 0;
+ dest = size - 1;
+
+ /* Setup by creating a blocked datatype that is likely to be processed
+ in a piecemeal fashion */
+ for (i=0; i<30; i++) {
+ indices[i] = i*40;
+ }
+
+ /* 30 blocks of size 10 */
+ MPI_Type_create_indexed_block( 30, 10, indices, MPI_INT, &dtype );
+ MPI_Type_commit( &dtype );
+
+ /* Create the corresponding message buffers */
+ MPI_Type_extent( dtype, &extent );
+ for (i=0; i<MAX_MSGS; i++) {
+ bufs[i] = (int *)malloc( extent );
+ if (!bufs[i]) {
+ fprintf( stderr, "Unable to allocate buffer %d of size %ld\n",
+ i, (long)extent );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ }
+ buf = (int *)malloc( 10 * 30 * sizeof(int) );
+
+ MPI_Barrier( MPI_COMM_WORLD );
+ if (rank == dest) {
+ MTestSleep( 2 );
+ for (i=0; i<MAX_MSGS; i++) {
+ MPI_Recv( buf, 10*30, MPI_INT, source, i, comm,
+ MPI_STATUS_IGNORE );
+ }
+ }
+ else if (rank == source ) {
+ for (i=0; i<MAX_MSGS; i++) {
+ MPI_Isend( bufs[i], 1, dtype, dest, i, comm, &req[i] );
+ }
+ MPI_Waitall( MAX_MSGS, req, MPI_STATUSES_IGNORE );
+ }
+
+ MPI_Type_free( &dtype );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple test of generalized requests";
+*/
+
+
+int query_fn( void *extra_state, MPI_Status *status );
+int query_fn( void *extra_state, MPI_Status *status )
+{
+ /* Set a default status */
+ status->MPI_SOURCE = MPI_UNDEFINED;
+ status->MPI_TAG = MPI_UNDEFINED;
+ MPI_Status_set_cancelled( status, 0 );
+ MPI_Status_set_elements( status, MPI_BYTE, 0 );
+ return 0;
+}
+int free_fn( void *extra_state );
+int free_fn( void *extra_state )
+{
+ int *b = (int *)extra_state;
+ if (b) *b = *b - 1;
+ /* The value returned by the free function is the error code
+ returned by the wait/test function */
+ return 0;
+}
+int cancel_fn( void *extra_state, int complete );
+int cancel_fn( void *extra_state, int complete )
+{
+ return 0;
+}
+
+/*
+ * This is a very simple test of generalized requests. Normally, the
+ * MPI_Grequest_complete function would be called from another routine,
+ * often running in a separate thread. This simple code allows us to
+ * check that requests can be created, tested, and waited on in the
+ * case where the request is complete before the wait is called.
+ *
+ * Note that MPI did *not* define a routine that can be called within
+ * test or wait to advance the state of a generalized request.
+ * Most uses of generalized requests will need to use a separate thread.
+ */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int counter, flag;
+ MPI_Status status;
+ MPI_Request request;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Grequest_start( query_fn, free_fn, cancel_fn, NULL, &request );
+
+ MPI_Test( &request, &flag, &status );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Generalized request marked as complete\n" );
+ }
+
+ MPI_Grequest_complete( request );
+
+ MPI_Wait( &request, &status );
+
+ counter = 1;
+ MPI_Grequest_start( query_fn, free_fn, cancel_fn, &counter, &request );
+ MPI_Grequest_complete( request );
+ MPI_Wait( &request, MPI_STATUS_IGNORE );
+
+ if (counter) {
+ errs++;
+ fprintf( stderr, "Free routine not called, or not called with extra_data" );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple test of intercommunicator send and receive";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int leftGroup, buf, rank, remote_size, i;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ if (leftGroup) {
+ MPI_Comm_rank( comm, &rank );
+ buf = rank;
+ MPI_Send( &buf, 1, MPI_INT, 0, 0, comm );
+ }
+ else {
+ MPI_Comm_remote_size( comm, &remote_size );
+ MPI_Comm_rank( comm, &rank );
+ if (rank == 0) {
+ for (i=0; i<remote_size; i++) {
+ buf = -1;
+ MPI_Recv( &buf, 1, MPI_INT, i, 0, comm, &status );
+ if (buf != i) {
+ errs++;
+ fprintf( stderr, "buf = %d, should be %d\n", buf, i );
+ }
+ }
+ }
+ }
+ /* Now, reverse it and send back */
+ if (!leftGroup) {
+ MPI_Comm_rank( comm, &rank );
+ buf = rank;
+ MPI_Send( &buf, 1, MPI_INT, 0, 0, comm );
+ }
+ else {
+ MPI_Comm_remote_size( comm, &remote_size );
+ MPI_Comm_rank( comm, &rank );
+ if (rank == 0) {
+ for (i=0; i<remote_size; i++) {
+ buf = -1;
+ MPI_Recv( &buf, 1, MPI_INT, i, 0, comm, &status );
+ if (buf != i) {
+ errs++;
+ fprintf( stderr, "buf = %d, should be %d\n", buf, i );
+ }
+ }
+ }
+ }
+ MTestFreeComm(&comm);
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2005 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/* This test program checks that the point-to-point completion routines
+ can be applied to an inactive persistent request, as required by the
+ MPI-1 standard. See section 3.7.3, for example,
+
+ One is allowed to call MPI TEST with a null or inactive request argument.
+ In such a case the operation returns with flag = true and empty status.
+
+*/
+
+int StatusEmpty( MPI_Status *s );
+int StatusEmpty( MPI_Status *s )
+{
+ int errs = 0;
+ int count = 10;
+
+ if (s->MPI_TAG != MPI_ANY_TAG) {
+ errs++;
+ printf( "MPI_TAG not MPI_ANY_TAG in status\n" );
+ }
+ if (s->MPI_SOURCE != MPI_ANY_SOURCE) {
+ errs++;
+ printf( "MPI_SOURCE not MPI_ANY_SOURCE in status\n" );
+ }
+ MPI_Get_count( s, MPI_INT, &count );
+ if (count != 0) {
+ errs++;
+ printf( "count in status is not 0\n" );
+ }
+ /* Return true only if status passed all tests */
+ return errs ? 0 : 1;
+}
+
+int main(int argc, char *argv[])
+{
+ MPI_Request r;
+ MPI_Status s;
+ int errs = 0;
+ int flag;
+ int buf[10];
+ int rbuf[10];
+ int tag = 27;
+ int dest = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ /* Create a persistent send request */
+ MPI_Send_init( buf, 10, MPI_INT, dest, tag, MPI_COMM_WORLD, &r );
+
+ flag = 0;
+ s.MPI_TAG = 10;
+ s.MPI_SOURCE = 10;
+ MPI_Test( &r, &flag, &s );
+ if (!flag) {
+ errs++;
+ printf( "Flag not true after MPI_Test (send)\n" );
+ printf( "Aborting further tests to avoid hanging in MPI_Wait\n" );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+ }
+ if (!StatusEmpty( &s )) {
+ errs++;
+ printf( "Status not empty after MPI_Test (send)\n" );
+ }
+
+ s.MPI_TAG = 10;
+ s.MPI_SOURCE = 10;
+ MPI_Wait( &r, &s );
+ if (!StatusEmpty( &s )) {
+ errs++;
+ printf( "Status not empty after MPI_Wait (send)\n" );
+ }
+
+ /* Now try to use that request, then check again */
+ if (rank == 0) {
+ int i;
+ MPI_Request *rr = (MPI_Request *)malloc(size * sizeof(MPI_Request));
+ for (i=0; i<size; i++) {
+ MPI_Irecv( rbuf, 10, MPI_INT, i, tag, MPI_COMM_WORLD, &rr[i] );
+ }
+ MPI_Start( &r );
+ MPI_Wait( &r, &s );
+ MPI_Waitall( size, rr, MPI_STATUSES_IGNORE );
+ }
+ else {
+ MPI_Start( &r );
+ MPI_Wait( &r, &s );
+ }
+
+ flag = 0;
+ s.MPI_TAG = 10;
+ s.MPI_SOURCE = 10;
+ MPI_Test( &r, &flag, &s );
+ if (!flag) {
+ errs++;
+ printf( "Flag not true after MPI_Test (send)\n" );
+ printf( "Aborting further tests to avoid hanging in MPI_Wait\n" );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+ }
+ if (!StatusEmpty( &s )) {
+ errs++;
+ printf( "Status not empty after MPI_Test (send)\n" );
+ }
+
+ s.MPI_TAG = 10;
+ s.MPI_SOURCE = 10;
+ MPI_Wait( &r, &s );
+ if (!StatusEmpty( &s )) {
+ errs++;
+ printf( "Status not empty after MPI_Wait (send)\n" );
+ }
+
+
+
+ MPI_Request_free( &r );
+
+ /* Create a persistent receive request */
+ MPI_Recv_init( buf, 10, MPI_INT, dest, tag, MPI_COMM_WORLD, &r );
+
+ flag = 0;
+ s.MPI_TAG = 10;
+ s.MPI_SOURCE = 10;
+ MPI_Test( &r, &flag, &s );
+ if (!flag) {
+ errs++;
+ printf( "Flag not true after MPI_Test (recv)\n" );
+ printf( "Aborting further tests to avoid hanging in MPI_Wait\n" );
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+ }
+ if (!StatusEmpty( &s )) {
+ errs++;
+ printf( "Status not empty after MPI_Test (recv)\n" );
+ }
+
+ s.MPI_TAG = 10;
+ s.MPI_SOURCE = 10;
+ MPI_Wait( &r, &s );
+ if (!StatusEmpty( &s )) {
+ errs++;
+ printf( "Status not empty after MPI_Wait (recv)\n" );
+ }
+
+ MPI_Request_free( &r );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int a[10], b[10], i;
+ MPI_Status status;
+ MPI_Request request;
+ int rank, count;
+ int errs = 0;
+
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ for (i=0; i<10; i++) a[i] = i+1;
+
+ status.MPI_ERROR = 0;
+ MPI_Isend( a, 0, MPI_INT, rank, 0, MPI_COMM_WORLD, &request );
+ MPI_Recv( b, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD,
+ &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (status.MPI_SOURCE != rank ||
+ status.MPI_TAG != 0 ||
+ status.MPI_ERROR != 0 ||
+ count != 0) {
+ errs++;
+ printf ("1 status = %d %d %d %d\n", status.MPI_SOURCE, status.MPI_TAG,
+ status.MPI_ERROR, count );
+ }
+ /* printf( "b[0] = %d\n", b[0] );*/
+ MPI_Wait( &request, &status );
+
+ MPI_Isend( 0, 0, MPI_INT, rank, 0, MPI_COMM_WORLD, &request );
+ MPI_Recv( 0, 0, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD,
+ &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (status.MPI_SOURCE != rank ||
+ status.MPI_TAG != 0 ||
+ status.MPI_ERROR != 0 ||
+ count != 0) {
+ errs++;
+ printf ("2 status = %d %d %d %d\n", status.MPI_SOURCE, status.MPI_TAG,
+ status.MPI_ERROR, count );
+ }
+ MPI_Wait( &request, &status );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int main( int argc, char * argv[] )
+{
+ int rank;
+ int sendMsg = 123;
+ int recvMsg = 0;
+ int flag = 0;
+ int count;
+ MPI_Status status;
+ MPI_Request request;
+ int errs = 0;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if(rank == 0)
+ {
+ MPI_Isend( &sendMsg, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &request );
+ while(!flag)
+ {
+ MPI_Iprobe( 0, 0, MPI_COMM_WORLD, &flag, &status );
+ }
+ MPI_Get_count( &status, MPI_INT, &count );
+ if(count != 1)
+ {
+ errs++;
+ }
+ MPI_Recv( &recvMsg, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status );
+ if(recvMsg != 123)
+ {
+ errs++;
+ }
+ MPI_Wait( &request, &status );
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/* tests send/recv of a message > 2GB. count=270M, type=long long
+ run with 3 processes to exercise both shared memory and TCP in Nemesis tests*/
+
+int main(int argc, char *argv[])
+{
+ int ierr,i,size,rank;
+ int cnt = 270000000;
+ MPI_Status status;
+ long long *cols;
+ int errs = 0;
+
+
+ MTest_Init(&argc,&argv);
+
+/* need large memory */
+ if (sizeof(void *) < 8) {
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+ }
+
+ ierr = MPI_Comm_size(MPI_COMM_WORLD,&size);
+ ierr = MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ if (size != 3) {
+ fprintf(stderr,"[%d] usage: mpiexec -n 3 %s\n",rank,argv[0]);
+ MPI_Abort(MPI_COMM_WORLD,1);
+ }
+
+ cols = malloc(cnt*sizeof(long long));
+ if (cols == NULL) {
+ printf("malloc of >2GB array failed\n");
+ errs++;
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+ }
+
+ if (rank == 0) {
+ for (i=0; i<cnt; i++) cols[i] = i;
+ /* printf("[%d] sending...\n",rank);*/
+ ierr = MPI_Send(cols,cnt,MPI_LONG_LONG_INT,1,0,MPI_COMM_WORLD);
+ ierr = MPI_Send(cols,cnt,MPI_LONG_LONG_INT,2,0,MPI_COMM_WORLD);
+ } else {
+ /* printf("[%d] receiving...\n",rank); */
+ for (i=0; i<cnt; i++) cols[i] = -1;
+ ierr = MPI_Recv(cols,cnt,MPI_LONG_LONG_INT,0,0,MPI_COMM_WORLD,&status);
+ /* ierr = MPI_Get_count(&status,MPI_LONG_LONG_INT,&cnt);
+ Get_count still fails because status.count is not 64 bit */
+ for (i=0; i<cnt; i++) {
+ if (cols[i] != i) {
+ /*printf("Rank %d, cols[i]=%lld, should be %d\n", rank, cols[i], i);*/
+ errs++;
+ }
+ }
+ }
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* This is a temporary #ifdef to control whether we test this functionality. A
+ * configure-test or similar would be better. Eventually the MPI-3 standard
+ * will be released and this can be gated on a MPI_VERSION check */
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_MPROBE_ROUTINES 1
+#endif
+
+/* assert-like macro that bumps the err count and emits a message */
+#define check(x_) \
+ do { \
+ if (!(x_)) { \
+ ++errs; \
+ if (errs < 10) { \
+ fprintf(stderr, "check failed: (%s), line %d\n", #x_, __LINE__); \
+ } \
+ } \
+ } while (0)
+
+int main(int argc, char **argv)
+{
+ int errs = 0;
+ int found, completed;
+ int rank, size;
+ int sendbuf[8], recvbuf[8];
+ int count;
+#ifdef TEST_MPROBE_ROUTINES
+ MPI_Message msg;
+#endif
+ MPI_Request rreq;
+ MPI_Status s1, s2;
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ if (size < 2) {
+ printf("this test requires at least 2 processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ /* all processes besides ranks 0 & 1 aren't used by this test */
+ if (rank >= 2) {
+ goto epilogue;
+ }
+
+#ifdef TEST_MPROBE_ROUTINES
+ /* test 0: simple send & mprobe+mrecv */
+ if (rank == 0) {
+ sendbuf[0] = 0xdeadbeef;
+ sendbuf[1] = 0xfeedface;
+ MPI_Send(sendbuf, 2, MPI_INT, 1, 5, MPI_COMM_WORLD);
+ }
+ else {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ MPI_Mprobe(0, 5, MPI_COMM_WORLD, &msg, &s1);
+ check(s1.MPI_SOURCE == 0);
+ check(s1.MPI_TAG == 5);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+ check(msg != MPI_MESSAGE_NULL);
+
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 2);
+
+ recvbuf[0] = 0x01234567;
+ recvbuf[1] = 0x89abcdef;
+ MPI_Mrecv(recvbuf, count, MPI_INT, &msg, &s2);
+ check(recvbuf[0] == 0xdeadbeef);
+ check(recvbuf[1] == 0xfeedface);
+ check(s2.MPI_SOURCE == 0);
+ check(s2.MPI_TAG == 5);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ }
+
+ /* test 1: simple send & mprobe+imrecv */
+ if (rank == 0) {
+ sendbuf[0] = 0xdeadbeef;
+ sendbuf[1] = 0xfeedface;
+ MPI_Send(sendbuf, 2, MPI_INT, 1, 5, MPI_COMM_WORLD);
+ }
+ else {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ MPI_Mprobe(0, 5, MPI_COMM_WORLD, &msg, &s1);
+ check(s1.MPI_SOURCE == 0);
+ check(s1.MPI_TAG == 5);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+ check(msg != MPI_MESSAGE_NULL);
+
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 2);
+
+ rreq = MPI_REQUEST_NULL;
+ recvbuf[0] = 0x01234567;
+ recvbuf[1] = 0x89abcdef;
+ MPI_Imrecv(recvbuf, count, MPI_INT, &msg, &rreq);
+ check(rreq != MPI_REQUEST_NULL);
+ MPI_Wait(&rreq, &s2);
+ check(recvbuf[0] == 0xdeadbeef);
+ check(recvbuf[1] == 0xfeedface);
+ check(s2.MPI_SOURCE == 0);
+ check(s2.MPI_TAG == 5);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ }
+
+ /* test 2: simple send & improbe+mrecv */
+ if (rank == 0) {
+ sendbuf[0] = 0xdeadbeef;
+ sendbuf[1] = 0xfeedface;
+ MPI_Send(sendbuf, 2, MPI_INT, 1, 5, MPI_COMM_WORLD);
+ }
+ else {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ do {
+ check(msg == MPI_MESSAGE_NULL);
+ MPI_Improbe(0, 5, MPI_COMM_WORLD, &found, &msg, &s1);
+ } while (!found);
+ check(msg != MPI_MESSAGE_NULL);
+ check(s1.MPI_SOURCE == 0);
+ check(s1.MPI_TAG == 5);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 2);
+
+ recvbuf[0] = 0x01234567;
+ recvbuf[1] = 0x89abcdef;
+ MPI_Mrecv(recvbuf, count, MPI_INT, &msg, &s2);
+ check(recvbuf[0] == 0xdeadbeef);
+ check(recvbuf[1] == 0xfeedface);
+ check(s2.MPI_SOURCE == 0);
+ check(s2.MPI_TAG == 5);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ }
+
+ /* test 3: simple send & improbe+imrecv */
+ if (rank == 0) {
+ sendbuf[0] = 0xdeadbeef;
+ sendbuf[1] = 0xfeedface;
+ MPI_Send(sendbuf, 2, MPI_INT, 1, 5, MPI_COMM_WORLD);
+ }
+ else {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ do {
+ check(msg == MPI_MESSAGE_NULL);
+ MPI_Improbe(0, 5, MPI_COMM_WORLD, &found, &msg, &s1);
+ } while (!found);
+ check(msg != MPI_MESSAGE_NULL);
+ check(s1.MPI_SOURCE == 0);
+ check(s1.MPI_TAG == 5);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 2);
+
+ rreq = MPI_REQUEST_NULL;
+ MPI_Imrecv(recvbuf, count, MPI_INT, &msg, &rreq);
+ check(rreq != MPI_REQUEST_NULL);
+ MPI_Wait(&rreq, &s2);
+ check(recvbuf[0] == 0xdeadbeef);
+ check(recvbuf[1] == 0xfeedface);
+ check(s2.MPI_SOURCE == 0);
+ check(s2.MPI_TAG == 5);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ }
+
+ /* test 4: mprobe+mrecv with MPI_PROC_NULL */
+ {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ MPI_Mprobe(MPI_PROC_NULL, 5, MPI_COMM_WORLD, &msg, &s1);
+ check(s1.MPI_SOURCE == MPI_PROC_NULL);
+ check(s1.MPI_TAG == MPI_ANY_TAG);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+ check(msg == MPI_MESSAGE_NO_PROC);
+
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 0);
+
+ recvbuf[0] = 0x01234567;
+ recvbuf[1] = 0x89abcdef;
+ MPI_Mrecv(recvbuf, count, MPI_INT, &msg, &s2);
+ /* recvbuf should remain unmodified */
+ check(recvbuf[0] == 0x01234567);
+ check(recvbuf[1] == 0x89abcdef);
+ /* should get back "proc null status" */
+ check(s2.MPI_SOURCE == MPI_PROC_NULL);
+ check(s2.MPI_TAG == MPI_ANY_TAG);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ count = -1;
+ MPI_Get_count(&s2, MPI_INT, &count);
+ check(count == 0);
+ }
+
+ /* test 5: mprobe+imrecv with MPI_PROC_NULL */
+ {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ MPI_Mprobe(MPI_PROC_NULL, 5, MPI_COMM_WORLD, &msg, &s1);
+ check(s1.MPI_SOURCE == MPI_PROC_NULL);
+ check(s1.MPI_TAG == MPI_ANY_TAG);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+ check(msg == MPI_MESSAGE_NO_PROC);
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 0);
+
+ rreq = MPI_REQUEST_NULL;
+ recvbuf[0] = 0x01234567;
+ recvbuf[1] = 0x89abcdef;
+ MPI_Imrecv(recvbuf, count, MPI_INT, &msg, &rreq);
+ check(rreq != MPI_REQUEST_NULL);
+ completed = 0;
+ MPI_Test(&rreq, &completed, &s2); /* single test should always succeed */
+ check(completed);
+ /* recvbuf should remain unmodified */
+ check(recvbuf[0] == 0x01234567);
+ check(recvbuf[1] == 0x89abcdef);
+ /* should get back "proc null status" */
+ check(s2.MPI_SOURCE == MPI_PROC_NULL);
+ check(s2.MPI_TAG == MPI_ANY_TAG);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ count = -1;
+ MPI_Get_count(&s2, MPI_INT, &count);
+ check(count == 0);
+ }
+
+ /* test 6: improbe+mrecv with MPI_PROC_NULL */
+ {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ found = 0;
+ MPI_Improbe(MPI_PROC_NULL, 5, MPI_COMM_WORLD, &found, &msg, &s1);
+ check(found);
+ check(msg == MPI_MESSAGE_NO_PROC);
+ check(s1.MPI_SOURCE == MPI_PROC_NULL);
+ check(s1.MPI_TAG == MPI_ANY_TAG);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 0);
+
+ recvbuf[0] = 0x01234567;
+ recvbuf[1] = 0x89abcdef;
+ MPI_Mrecv(recvbuf, count, MPI_INT, &msg, &s2);
+ /* recvbuf should remain unmodified */
+ check(recvbuf[0] == 0x01234567);
+ check(recvbuf[1] == 0x89abcdef);
+ /* should get back "proc null status" */
+ check(s2.MPI_SOURCE == MPI_PROC_NULL);
+ check(s2.MPI_TAG == MPI_ANY_TAG);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ count = -1;
+ MPI_Get_count(&s2, MPI_INT, &count);
+ check(count == 0);
+ }
+
+ /* test 7: improbe+imrecv */
+ {
+ memset(&s1, 0xab, sizeof(MPI_Status));
+ memset(&s2, 0xab, sizeof(MPI_Status));
+ /* the error field should remain unmodified */
+ s1.MPI_ERROR = MPI_ERR_DIMS;
+ s2.MPI_ERROR = MPI_ERR_TOPOLOGY;
+
+ msg = MPI_MESSAGE_NULL;
+ MPI_Improbe(MPI_PROC_NULL, 5, MPI_COMM_WORLD, &found, &msg, &s1);
+ check(found);
+ check(msg == MPI_MESSAGE_NO_PROC);
+ check(s1.MPI_SOURCE == MPI_PROC_NULL);
+ check(s1.MPI_TAG == MPI_ANY_TAG);
+ check(s1.MPI_ERROR == MPI_ERR_DIMS);
+ count = -1;
+ MPI_Get_count(&s1, MPI_INT, &count);
+ check(count == 0);
+
+ rreq = MPI_REQUEST_NULL;
+ MPI_Imrecv(recvbuf, count, MPI_INT, &msg, &rreq);
+ check(rreq != MPI_REQUEST_NULL);
+ completed = 0;
+ MPI_Test(&rreq, &completed, &s2); /* single test should always succeed */
+ check(completed);
+ /* recvbuf should remain unmodified */
+ check(recvbuf[0] == 0x01234567);
+ check(recvbuf[1] == 0x89abcdef);
+ /* should get back "proc null status" */
+ check(s2.MPI_SOURCE == MPI_PROC_NULL);
+ check(s2.MPI_TAG == MPI_ANY_TAG);
+ check(s2.MPI_ERROR == MPI_ERR_TOPOLOGY);
+ check(msg == MPI_MESSAGE_NULL);
+ count = -1;
+ MPI_Get_count(&s2, MPI_INT, &count);
+ check(count == 0);
+ }
+
+ /* TODO MPI_ANY_SOURCE and MPI_ANY_TAG should be tested as well */
+ /* TODO a full range of message sizes should be tested too */
+ /* TODO threaded tests are also needed, but they should go in a separate
+ * program */
+
+ /* simple test to ensure that c2f/f2c routines are present (initially missed
+ * in MPICH impl) */
+ {
+ MPI_Fint f_handle = 0xdeadbeef;
+ f_handle = MPI_Message_c2f(MPI_MESSAGE_NULL);
+ msg = MPI_Message_f2c(f_handle);
+ check(f_handle != 0xdeadbeef);
+ check(msg == MPI_MESSAGE_NULL);
+
+ /* PMPI_ versions should also exists */
+ f_handle = 0xdeadbeef;
+ f_handle = PMPI_Message_c2f(MPI_MESSAGE_NULL);
+ msg = PMPI_Message_f2c(f_handle);
+ check(f_handle != 0xdeadbeef);
+ check(msg == MPI_MESSAGE_NULL);
+ }
+
+#endif /* TEST_MPROBE_ROUTINES */
+
+epilogue:
+ MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &errs), &errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ if (rank == 0) {
+ if (errs) {
+ printf("found %d errors\n", errs);
+ }
+ else {
+ printf(" No errors\n");
+ }
+ }
+
+ MPI_Finalize();
+
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Send flood test";
+*/
+
+#define MAX_MSG_SIZE 40000000
+#define MAX_COUNT 4000
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, source, dest;
+ int minsize = 2, count, nmsg, maxmsg;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ source = 0;
+ dest = size - 1;
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < MAX_COUNT; count = count * 2) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
+ int nbytes;
+ MPI_Type_size( sendtype.datatype, &nbytes );
+
+ /* We may want to limit the total message size sent */
+ if (nbytes > MAX_MSG_SIZE) {
+ /* We do not need to free, as we haven't
+ initialized any of the buffers (?) */
+ continue;
+ }
+ maxmsg = MAX_COUNT - count;
+ MTestPrintfMsg( 1, "Sending count = %d of sendtype %s of total size %d bytes\n",
+ count, MTestGetDatatypeName( &sendtype ),
+ nbytes*count );
+ /* Make sure that everyone has a recv buffer */
+ recvtype.InitBuf( &recvtype );
+
+ if (rank == source) {
+ sendtype.InitBuf( &sendtype );
+
+ for (nmsg=1; nmsg<maxmsg; nmsg++) {
+ err = MPI_Send( sendtype.buf, sendtype.count,
+ sendtype.datatype, dest, 0, comm);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+ }
+ }
+ else if (rank == dest) {
+ for (nmsg=1; nmsg<maxmsg; nmsg++) {
+ err = MPI_Recv( recvtype.buf, recvtype.count,
+ recvtype.datatype, source, 0,
+ comm, MPI_STATUS_IGNORE);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ if (errs < 10) {
+ printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d, message iteration %d of %d\n",
+ MTestGetDatatypeName( &recvtype ),
+ MTestGetDatatypeName( &sendtype ),
+ count, nmsg, maxmsg );
+ recvtype.printErrors = 1;
+ (void)MTestCheckRecv( 0, &recvtype );
+ }
+ errs += err;
+ }
+ }
+ }
+ MTestFreeDatatype( &recvtype );
+ MTestFreeDatatype( &sendtype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define MAX_BUF_SIZE_LG 22
+#define NUM_MSGS_PER_BUF_SIZE 5
+char buf[1 << MAX_BUF_SIZE_LG];
+
+/*
+ * This program verifies that MPI_Probe() is operating properly in the face of
+ * unexpected messages arriving after MPI_Probe() has
+ * been called. This program may hang if MPI_Probe() does not return when the
+ * message finally arrives (see req #375).
+ */
+int main(int argc, char **argv)
+{
+ int p_size;
+ int p_rank;
+ int msg_size_lg;
+ int errs = 0;
+ int mpi_errno;
+
+ MTest_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &p_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &p_rank);
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+
+ for (msg_size_lg = 0; msg_size_lg <= MAX_BUF_SIZE_LG; msg_size_lg++)
+ {
+ const int msg_size = 1 << msg_size_lg;
+ int msg_cnt;
+
+ MTestPrintfMsg( 2, "testing messages of size %d\n", msg_size );
+ for (msg_cnt = 0; msg_cnt < NUM_MSGS_PER_BUF_SIZE; msg_cnt++)
+ {
+ MPI_Status status;
+ const int tag = msg_size_lg * NUM_MSGS_PER_BUF_SIZE + msg_cnt;
+
+ MTestPrintfMsg( 2, "Message count %d\n", msg_cnt );
+ if (p_rank == 0)
+ {
+ int p;
+
+ for (p = 1; p < p_size; p ++)
+ {
+ /* Wait for synchronization message */
+ mpi_errno = MPI_Recv(NULL, 0, MPI_BYTE, MPI_ANY_SOURCE,
+ tag, MPI_COMM_WORLD, &status);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+
+ if (status.MPI_TAG != tag && errs++ < 10)
+ {
+ printf("ERROR: unexpected message tag from MPI_Recv(): lp=0, rp=%d, expected=%d, actual=%d, count=%d\n",
+ status.MPI_SOURCE, status.MPI_TAG, tag, msg_cnt);
+ }
+
+# if defined(VERBOSE)
+ {
+ printf("sending message: p=%d s=%d c=%d\n",
+ status.MPI_SOURCE, msg_size, msg_cnt);
+ }
+# endif
+
+ /* Send unexpected message which hopefully MPI_Probe() is
+ already waiting for at the remote process */
+ mpi_errno = MPI_Send (buf, msg_size, MPI_BYTE,
+ status.MPI_SOURCE, status.MPI_TAG, MPI_COMM_WORLD);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+ }
+ }
+ else
+ {
+ int incoming_msg_size;
+
+ /* Send synchronization message */
+ mpi_errno = MPI_Send(NULL, 0, MPI_BYTE, 0, tag, MPI_COMM_WORLD);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+
+ /* Perform probe, hopefully before the master process can
+ send its reply */
+ mpi_errno = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG,
+ MPI_COMM_WORLD, &status);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+ mpi_errno = MPI_Get_count(&status, MPI_BYTE, &incoming_msg_size);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+ if (status.MPI_SOURCE != 0 && errs++ < 10)
+ {
+ printf("ERROR: unexpected message source from MPI_Probe(): p=%d, expected=0, actual=%d, count=%d\n",
+ p_rank, status.MPI_SOURCE, msg_cnt);
+ }
+ if (status.MPI_TAG != tag && errs++ < 10)
+ {
+ printf("ERROR: unexpected message tag from MPI_Probe(): p=%d, expected=%d, actual=%d, count=%d\n",
+ p_rank, tag, status.MPI_TAG, msg_cnt);
+ }
+ if (incoming_msg_size != msg_size && errs++ < 10)
+ {
+ printf("ERROR: unexpected message size from MPI_Probe(): p=%d, expected=%d, actual=%d, count=%d\n",
+ p_rank, msg_size, incoming_msg_size, msg_cnt);
+ }
+
+ /* Receive the probed message from the master process */
+ mpi_errno = MPI_Recv(buf, msg_size, MPI_BYTE, 0, tag,
+ MPI_COMM_WORLD, &status);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+ mpi_errno = MPI_Get_count(&status, MPI_BYTE, &incoming_msg_size);
+ if (mpi_errno != MPI_SUCCESS && errs++ < 10)
+ {
+ MTestPrintError(mpi_errno);
+ }
+ if (status.MPI_SOURCE != 0 && errs++ < 10)
+ {
+ printf("ERROR: unexpected message source from MPI_Recv(): p=%d, expected=0, actual=%d, count=%d\n",
+ p_rank, status.MPI_SOURCE, msg_cnt);
+ }
+ if (status.MPI_TAG != tag && errs++ < 10)
+ {
+ printf("ERROR: unexpected message tag from MPI_Recv(): p=%d, expected=%d, actual=%d, count=%d\n",
+ p_rank, tag, status.MPI_TAG, msg_cnt);
+ }
+ if (incoming_msg_size != msg_size && errs++ < 10)
+ {
+ printf("ERROR: unexpected message size from MPI_Recv(): p=%d, expected=%d, actual=%d, count=%d\n",
+ p_rank, msg_size, incoming_msg_size, msg_cnt);
+ }
+ }
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2005 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/*
+ * This program checks that MPI_Iprobe and MPI_Probe correctly handle
+ * a source of MPI_PROC_NULL
+ */
+
+int main(int argc, char **argv)
+{
+ int flag;
+ int errs = 0;
+ MPI_Status status;
+
+ MTest_Init(&argc, &argv);
+
+ MPI_Iprobe( MPI_PROC_NULL, 10, MPI_COMM_WORLD, &flag, &status );
+ if (!flag) {
+ errs++;
+ printf( "Iprobe of source=MPI_PROC_NULL returned flag=false\n" );
+ }
+ else {
+ if (status.MPI_SOURCE != MPI_PROC_NULL) {
+ printf( "Status.MPI_SOURCE was %d, should be MPI_PROC_NULL\n",
+ status.MPI_SOURCE );
+ errs++;
+ }
+ if (status.MPI_TAG != MPI_ANY_TAG) {
+ printf( "Status.MPI_TAG was %d, should be MPI_ANY_TAGL\n",
+ status.MPI_TAG );
+ errs++;
+ }
+ }
+ /* If Iprobe failed, probe is likely to as well. Avoid a possible hang
+ by testing Probe only if Iprobe test passed */
+ if (errs == 0) {
+ MPI_Probe( MPI_PROC_NULL, 10, MPI_COMM_WORLD, &status );
+ if (status.MPI_SOURCE != MPI_PROC_NULL) {
+ printf( "Status.MPI_SOURCE was %d, should be MPI_PROC_NULL\n",
+ status.MPI_SOURCE );
+ errs++;
+ }
+ if (status.MPI_TAG != MPI_ANY_TAG) {
+ printf( "Status.MPI_TAG was %d, should be MPI_ANY_TAGL\n",
+ status.MPI_TAG );
+ errs++;
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of various send cancel calls";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest;
+ MPI_Comm comm;
+ MPI_Status status;
+ MPI_Request req;
+ static int bufsizes[4] = { 1, 100, 10000, 1000000 };
+ char *buf;
+ int cs, flag, n;
+#ifdef TEST_IRSEND
+ int veryPicky = 0; /* Set to 1 to test "quality of implementation" in
+ a tricky part of cancel */
+#endif
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ source = 0;
+ dest = size - 1;
+
+ for (cs=0; cs<4; cs++) {
+ if (rank == 0) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Send_init( buf, n, MPI_CHAR, dest, cs+n+1, comm, &req );
+ MPI_Start( &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel a persistent send request\n" );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ MPI_Request_free( &req );
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+1;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ }
+ else if (rank == dest)
+ {
+ int nn, tag;
+ char *btemp;
+ MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (nn > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( nn );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", nn);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+
+ if (rank == 0) {
+ char *bsendbuf;
+ int bsendbufsize;
+ int bf, bs;
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ bsendbufsize = n + MPI_BSEND_OVERHEAD;
+ bsendbuf = (char *)malloc( bsendbufsize );
+ if (!bsendbuf) {
+ fprintf( stderr, "Unable to allocate %d bytes for bsend\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Buffer_attach( bsendbuf, bsendbufsize );
+ MPI_Bsend_init( buf, n, MPI_CHAR, dest, cs+n+2, comm, &req );
+ MPI_Start( &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel a persistent bsend request\n" );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ MPI_Request_free( &req );
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+2;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ MPI_Buffer_detach( &bf, &bs );
+ free( bsendbuf );
+ }
+ else if (rank == dest)
+ {
+ int nn, tag;
+ char *btemp;
+ MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (nn > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( nn );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", nn);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+
+ /* Because this test is erroneous, we do not perform it unless
+ TEST_IRSEND is defined. */
+#ifdef TEST_IRSEND
+ /* We avoid ready send to self because an implementation
+ is free to detect the error in delivering a message to
+ itself without a pending receive; we could also check
+ for an error return from the MPI_Irsend */
+ if (rank == 0 && dest != rank) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Rsend_init( buf, n, MPI_CHAR, dest, cs+n+3, comm, &req );
+ MPI_Start( &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ /* This can be pretty ugly. The standard is clear (Section 3.8)
+ that either a sent message is received or the
+ sent message is successfully cancelled. Since this message
+ can never be received, the cancel must complete
+ successfully.
+
+ However, since there is no matching receive, this
+ program is erroneous. In this case, we can't really
+ flag this as an error */
+ if (!flag && veryPicky) {
+ errs ++;
+ printf( "Failed to cancel a persistent rsend request\n" );
+ fflush(stdout);
+ }
+ if (flag)
+ {
+ n = 0;
+ }
+ MPI_Request_free( &req );
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+3;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ }
+ else if (rank == dest)
+ {
+ int n, tag;
+ char *btemp;
+ MPI_Recv( &n, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (n > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( n );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, n, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+#endif
+
+ if (rank == 0) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Ssend_init( buf, n, MPI_CHAR, dest, cs+n+4, comm, &req );
+ MPI_Start( &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel a persistent ssend request\n" );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ MPI_Request_free( &req );
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+4;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ }
+ else if (rank == dest)
+ {
+ int nn, tag;
+ char *btemp;
+ MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (nn > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( nn );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", nn);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of various receive cancel calls, with multiple requests to cancel";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest;
+ MPI_Comm comm;
+ MPI_Status status;
+ MPI_Request req[4];
+ static int bufsizes[4] = { 1, 100, 10000, 1000000 };
+ char *bufs[4];
+ int flag, i;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ source = 0;
+ dest = size - 1;
+
+ if (rank == source) {
+ MPI_Send( MPI_BOTTOM, 0, MPI_CHAR, dest, 1, MPI_COMM_WORLD );
+ }
+ else if (rank == dest) {
+ /* Create 3 requests to cancel, plus one to use.
+ Then receive one message and exit */
+ for (i=0; i<4; i++) {
+ bufs[i] = (char *) malloc( bufsizes[i] );
+ MPI_Irecv( bufs[i], bufsizes[i], MPI_CHAR, source,
+ i, MPI_COMM_WORLD, &req[i] );
+ }
+ /* Now, cancel them in a more interesting order, to ensure that the
+ queue operation work properly */
+ MPI_Cancel( &req[2] );
+ MPI_Wait( &req[2], &status );
+ MTestPrintfMsg( 1, "Completed wait on irecv[2]\n" );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel a Irecv[2] request\n" );
+ fflush(stdout);
+ }
+ MPI_Cancel( &req[3] );
+ MPI_Wait( &req[3], &status );
+ MTestPrintfMsg( 1, "Completed wait on irecv[3]\n" );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel a Irecv[3] request\n" );
+ fflush(stdout);
+ }
+ MPI_Cancel( &req[0] );
+ MPI_Wait( &req[0], &status );
+ MTestPrintfMsg( 1, "Completed wait on irecv[0]\n" );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel a Irecv[0] request\n" );
+ fflush(stdout);
+ }
+ MPI_Wait( &req[1], &status );
+ MPI_Test_cancelled( &status, &flag );
+ if (flag) {
+ errs ++;
+ printf( "Incorrectly cancelled Irecv[1]\n" ); fflush(stdout);
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/* Test Ibsend and Request_free */
+int main( int argc, char *argv[] )
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ int dest = 1, src = 0, tag = 1;
+ int s1;
+ char *buf, *bbuf;
+ int smsg[5], rmsg[5];
+ int errs = 0, rank, size;
+ int bufsize, bsize;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (src >= size || dest >= size) {
+ int r = src;
+ if (dest > r) r = dest;
+ fprintf( stderr, "This program requires %d processes\n", r-1 );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ if (rank == src) {
+ MPI_Request r;
+
+ MPI_Barrier( MPI_COMM_WORLD );
+
+ /* According to the standard, we must use the PACK_SIZE length of each
+ message in the computation of the message buffer size */
+ MPI_Pack_size( 5, MPI_INT, comm, &s1 );
+ bufsize = MPI_BSEND_OVERHEAD + s1 + 2000;
+ buf = (char *)malloc( bufsize );
+ MPI_Buffer_attach( buf, bufsize );
+
+ MTestPrintfMsg( 10, "About create and free Isend request\n" );
+ smsg[0] = 10;
+ MPI_Isend( &smsg[0], 1, MPI_INT, dest, tag, comm, &r );
+ MPI_Request_free( &r );
+ if (r != MPI_REQUEST_NULL) {
+ errs++;
+ fprintf( stderr, "Request not set to NULL after request free\n" );
+ }
+ MTestPrintfMsg( 10, "About create and free Ibsend request\n" );
+ smsg[1] = 11;
+ MPI_Ibsend( &smsg[1], 1, MPI_INT, dest, tag+1, comm, &r );
+ MPI_Request_free( &r );
+ if (r != MPI_REQUEST_NULL) {
+ errs++;
+ fprintf( stderr, "Request not set to NULL after request free\n" );
+ }
+ MTestPrintfMsg( 10, "About create and free Issend request\n" );
+ smsg[2] = 12;
+ MPI_Issend( &smsg[2], 1, MPI_INT, dest, tag+2, comm, &r );
+ MPI_Request_free( &r );
+ if (r != MPI_REQUEST_NULL) {
+ errs++;
+ fprintf( stderr, "Request not set to NULL after request free\n" );
+ }
+ MTestPrintfMsg( 10, "About create and free Irsend request\n" );
+ smsg[3] = 13;
+ MPI_Irsend( &smsg[3], 1, MPI_INT, dest, tag+3, comm, &r );
+ MPI_Request_free( &r );
+ if (r != MPI_REQUEST_NULL) {
+ errs++;
+ fprintf( stderr, "Request not set to NULL after request free\n" );
+ }
+ smsg[4] = 14;
+ MPI_Isend( &smsg[4], 1, MPI_INT, dest, tag+4, comm, &r );
+ MPI_Wait( &r, MPI_STATUS_IGNORE );
+
+ /* We can't guarantee that messages arrive until the detach */
+ MPI_Buffer_detach( &bbuf, &bsize );
+ }
+
+ if (rank == dest) {
+ MPI_Request r[5];
+ int i;
+
+ for (i=0; i<5; i++) {
+ MPI_Irecv( &rmsg[i], 1, MPI_INT, src, tag+i, comm, &r[i] );
+ }
+ if (rank != src) /* Just in case rank == src */
+ MPI_Barrier( MPI_COMM_WORLD );
+
+ for (i=0; i<4; i++) {
+ MPI_Wait( &r[i], MPI_STATUS_IGNORE );
+ if (rmsg[i] != 10+i) {
+ errs++;
+ fprintf( stderr, "message %d (%d) should be %d\n", i, rmsg[i], 10+i );
+ }
+ }
+ /* The MPI standard says that there is no way to use MPI_Request_free
+ safely with receive requests. A strict MPI implementation may
+ choose to consider these erroreous (an IBM MPI implementation
+ does so) */
+#ifdef USE_STRICT_MPI
+ MPI_Wait( &r[4], MPI_STATUS_IGNORE );
+#else
+ MTestPrintfMsg( 10, "About free Irecv request\n" );
+ MPI_Request_free( &r[4] );
+#endif
+ }
+
+ if (rank != dest && rank != src) {
+ MPI_Barrier( MPI_COMM_WORLD );
+ }
+
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test Request_get_status";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest;
+ int buf[2], flag, count;
+ MPI_Comm comm;
+ MPI_Status status, status2;
+ MPI_Request req;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ source = 0;
+ dest = size - 1;
+
+
+ /* Handling MPI_REQUEST_NULL in MPI_Request_get_status was only required
+ starting with MPI-2.2. */
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ MPI_Request_get_status( MPI_REQUEST_NULL, &flag, &status );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "flag not true for MPI_REQUEST_NULL, flag=%d\n", flag );
+ }
+ if ((status.MPI_SOURCE != MPI_ANY_SOURCE) ||
+ (status.MPI_TAG != MPI_ANY_TAG) ||
+ (status.MPI_ERROR != MPI_SUCCESS))
+ {
+ errs++;
+ fprintf( stderr, "non-empty MPI_Status returned for MPI_REQUEST_NULL\n" );
+ }
+
+ /* also pass MPI_STATUS_IGNORE to make sure the implementation doesn't
+ * blow up when it is passed as the status argument */
+ MPI_Request_get_status( MPI_REQUEST_NULL, &flag, MPI_STATUS_IGNORE );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "flag not true for MPI_REQUEST_NULL with MPI_STATUS_IGNORE, flag=%d\n", flag );
+ }
+#endif
+
+ if (rank == source) {
+ buf[0] = size;
+ buf[1] = 3;
+ MPI_Ssend( buf, 2, MPI_INT, dest, 10, comm );
+ }
+ if (rank == dest) {
+ MPI_Irecv( buf, 2, MPI_INT, source, 10, comm, &req );
+ }
+ MPI_Barrier( comm );
+ /* At this point, we know that the receive has at least started,
+ because of the Ssend. Check the status on the request */
+ if (rank == dest) {
+ status.MPI_SOURCE = -1;
+ status.MPI_TAG = -1;
+ MPI_Request_get_status( req, &flag, &status );
+ if (flag) {
+ if (status.MPI_TAG != 10) {
+ errs++;
+ fprintf( stderr, "Tag value %d should be 10\n", status.MPI_TAG );
+ }
+ if (status.MPI_SOURCE != source) {
+ errs++;
+ fprintf( stderr, "Source value %d should be %d\n", status.MPI_SOURCE, source );
+ }
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 2) {
+ errs++;
+ fprintf( stderr, "Count value %d should be 2\n", count );
+ }
+ }
+ else {
+ errs++;
+ fprintf( stderr, "Unexpected flag value from get_status\n" );
+ }
+ /* Now, complete the request */
+ MPI_Wait( &req, &status2 );
+ /* Check that the status is correct */
+ if (status2.MPI_TAG != 10) {
+ errs++;
+ fprintf( stderr, "(wait)Tag value %d should be 10\n", status2.MPI_TAG );
+ }
+ if (status2.MPI_SOURCE != source) {
+ errs++;
+ fprintf( stderr, "(wait)Source value %d should be %d\n", status2.MPI_SOURCE, source );
+ }
+ MPI_Get_count( &status2, MPI_INT, &count );
+ if (count != 2) {
+ errs++;
+ fprintf( stderr, "(wait)Count value %d should be 2\n", count );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of various send cancel calls";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest;
+ MPI_Comm comm;
+ MPI_Status status;
+ MPI_Request req;
+ static int bufsizes[4] = { 1, 100, 10000, 1000000 };
+ char *buf;
+#ifdef TEST_IRSEND
+ int veryPicky = 0; /* Set to 1 to test "quality of implementation" in
+ a tricky part of cancel */
+#endif
+ int cs, flag, n;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ source = 0;
+ dest = size - 1;
+
+ MTestPrintfMsg( 1, "Starting scancel test\n" );
+ for (cs=0; cs<4; cs++) {
+ if (rank == 0) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MTestPrintfMsg( 1, "(%d) About to create isend and cancel\n",cs );
+ MPI_Isend( buf, n, MPI_CHAR, dest, cs+n+1, comm, &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MTestPrintfMsg( 1, "Completed wait on isend\n" );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel an Isend request\n" );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+1;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ }
+ else if (rank == dest)
+ {
+ int nn, tag;
+ char *btemp;
+ MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (nn > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( nn );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", nn );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+
+ if (rank == 0) {
+ char *bsendbuf;
+ int bsendbufsize;
+ int bf, bs;
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ bsendbufsize = n + MPI_BSEND_OVERHEAD;
+ bsendbuf = (char *)malloc( bsendbufsize );
+ if (!bsendbuf) {
+ fprintf( stderr, "Unable to allocate %d bytes for bsend\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Buffer_attach( bsendbuf, bsendbufsize );
+ MTestPrintfMsg( 1, "About to create and cancel ibsend\n" );
+ MPI_Ibsend( buf, n, MPI_CHAR, dest, cs+n+2, comm, &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel an Ibsend request\n" );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+2;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ MPI_Buffer_detach( &bf, &bs );
+ free( bsendbuf );
+ }
+ else if (rank == dest)
+ {
+ int nn, tag;
+ char *btemp;
+ MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (nn > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( nn );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", nn);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+
+ /* Because this test is erroneous, we do not perform it unless
+ TEST_IRSEND is defined. */
+#ifdef TEST_IRSEND
+ /* We avoid ready send to self because an implementation
+ is free to detect the error in delivering a message to
+ itself without a pending receive; we could also check
+ for an error return from the MPI_Irsend */
+ if (rank == 0 && dest != rank) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MTestPrintfMsg( 1, "About to create and cancel irsend\n" );
+ MPI_Irsend( buf, n, MPI_CHAR, dest, cs+n+3, comm, &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ /* This can be pretty ugly. The standard is clear (Section 3.8)
+ that either a sent message is received or the
+ sent message is successfully cancelled. Since this message
+ can never be received, the cancel must complete
+ successfully.
+
+ However, since there is no matching receive, this
+ program is erroneous. In this case, we can't really
+ flag this as an error */
+ if (!flag && veryPicky) {
+ errs ++;
+ printf( "Failed to cancel an Irsend request\n" );
+ fflush(stdout);
+ }
+ if (flag)
+ {
+ n = 0;
+ }
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+3;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ }
+ else if (rank == dest)
+ {
+ int n, tag;
+ char *btemp;
+ MPI_Recv( &n, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (n > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( n );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, n, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+#endif
+
+ if (rank == 0) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MTestPrintfMsg( 1, "About to create and cancel issend\n" );
+ MPI_Issend( buf, n, MPI_CHAR, dest, cs+n+4, comm, &req );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MPI_Test_cancelled( &status, &flag );
+ if (!flag) {
+ errs ++;
+ printf( "Failed to cancel an Issend request\n" );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ /* Send the size, zero for successfully cancelled */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ /* Send the tag so the message can be received */
+ n = cs+n+4;
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ free( buf );
+ }
+ else if (rank == dest)
+ {
+ int nn, tag;
+ char *btemp;
+ MPI_Recv( &nn, 1, MPI_INT, 0, 123, comm, &status );
+ MPI_Recv( &tag, 1, MPI_INT, 0, 123, comm, &status );
+ if (nn > 0)
+ {
+ /* If the message was not cancelled, receive it here */
+ btemp = (char*)malloc( nn );
+ if (!btemp)
+ {
+ fprintf( stderr, "Unable to allocate %d bytes\n", nn);
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ MPI_Recv( btemp, nn, MPI_CHAR, 0, tag, comm, &status );
+ free(btemp);
+ }
+ }
+ MPI_Barrier( comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of send cancel (failure) calls";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest;
+ MPI_Comm comm;
+ MPI_Status status;
+ MPI_Request req;
+ static int bufsizes[4] = { 1, 100, 10000, 1000000 };
+ char *buf;
+ int cs, flag, n;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ source = 0;
+ dest = size - 1;
+
+ MTestPrintfMsg( 1, "Starting scancel test\n" );
+
+ for (cs=0; cs<4; cs++) {
+ n = bufsizes[cs];
+ buf = (char *)malloc( n );
+ if (!buf) {
+ fprintf( stderr, "Unable to allocate %d bytes\n", n );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ if (rank == source) {
+ MTestPrintfMsg( 1, "(%d) About to create isend and cancel\n",cs );
+ MPI_Isend( buf, n, MPI_CHAR, dest, cs+n+1, comm, &req );
+ MPI_Barrier( comm );
+ MPI_Cancel( &req );
+ MPI_Wait( &req, &status );
+ MTestPrintfMsg( 1, "Completed wait on isend\n" );
+ MPI_Test_cancelled( &status, &flag );
+ if (flag) {
+ errs ++;
+ printf( "Cancelled a matched Isend request (msg size = %d)!\n",
+ n );
+ fflush(stdout);
+ }
+ else
+ {
+ n = 0;
+ }
+ /* Send the size, zero for not cancelled (success) */
+ MPI_Send( &n, 1, MPI_INT, dest, 123, comm );
+ }
+ else if (rank == dest)
+ {
+ MPI_Recv( buf, n, MPI_CHAR, source, cs+n+1, comm, &status );
+ MPI_Barrier( comm );
+ MPI_Recv( &n, 1, MPI_INT, source, 123, comm, &status );
+ }
+ else {
+ MPI_Barrier( comm );
+ }
+
+ MPI_Barrier( comm );
+ free( buf );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2007 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/*
+ * This test makes sure that each process can send to each other process.
+ * If there are bugs in the handling of request completions or in
+ * queue operations, then this test may fail on them (it did with
+ * early EagerShort handling).
+ */
+
+#define MAXPES 32
+#define MYBUFSIZE 16*1024
+static int buffer[MAXPES][MYBUFSIZE];
+
+#define NUM_RUNS 10
+
+int main ( int argc, char *argv[] )
+{
+ int i;
+ int count, size;
+ int self, npes;
+ double secs;
+ MPI_Request request[MAXPES];
+ MPI_Status status;
+
+ MTest_Init (&argc, &argv);
+ MPI_Comm_rank (MPI_COMM_WORLD, &self);
+ MPI_Comm_size (MPI_COMM_WORLD, &npes);
+
+ if (npes > MAXPES) {
+ fprintf( stderr, "This program requires a comm_world no larger than %d",
+ MAXPES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ for (size = 1; size <= MYBUFSIZE ; size += size) {
+ secs = -MPI_Wtime ();
+ for (count = 0; count < NUM_RUNS; count++) {
+ MPI_Barrier (MPI_COMM_WORLD);
+
+ for (i = 0; i < npes; i++) {
+ if (i != self)
+ MPI_Irecv (buffer[i], size, MPI_INT, i,
+ MPI_ANY_TAG, MPI_COMM_WORLD, &request[i]);
+ }
+
+ for (i = 0; i < npes; i++) {
+ if (i != self)
+ MPI_Send (buffer[self], size, MPI_INT, i, 0, MPI_COMM_WORLD);
+ }
+
+ for (i = 0; i < npes; i++) {
+ if (i != self)
+ MPI_Wait (&request[i], &status);
+ }
+
+ }
+ MPI_Barrier (MPI_COMM_WORLD);
+ secs += MPI_Wtime ();
+
+ if (self == 0) {
+ secs = secs / (double) NUM_RUNS;
+ MTestPrintfMsg( 1, "length = %d ints\n", size );
+ }
+ }
+
+ /* Simple completion is all that we normally ask of this program */
+
+ MTest_Finalize( 0 );
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2008 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "mpi.h"
+
+/*
+ * Run this test with 8 processes. This test was submitted by xxx
+ * as a result of problems seen with the ch3:shm device on a Solaris
+ * system. The symptom is that the test hangs; this is due to losing
+ * a message, probably due to a race condition in a message-queue update.
+ * As a test for race conditions, it may need to be run multiple times
+ * to expose a problem if a problem does exist.
+ */
+
+#define LOOP_COUNT 10000
+#define DATA_SIZE 4
+#define MP_TAG 999
+
+#define PROGRESS_COUNT 0xfff
+static int verbose = 0;
+static int loopProgress = 0;
+
+int main( int argc, char *argv[] )
+{
+ int nProc, rank ;
+ int i, j, status ;
+ FILE *pf=0 ;
+
+ MPI_Init( &argc, &argv ) ;
+ MPI_Comm_size( MPI_COMM_WORLD, &nProc ) ;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank ) ;
+
+ for (i=1; i<argc; i++) {
+ if (strcmp(argv[i],"-v") == 0 ||
+ strcmp(argv[i],"--verbose") == 0) verbose = 1;
+ else if (strcmp(argv[i],"-p") == 0 ||
+ strcmp(argv[i],"--progress") == 0) loopProgress = 1;
+ else {
+ if (rank == 0) {
+ fprintf( stderr, "%s: [ -v | --verbose ] [ -p | --progress ]\n",
+ argv[0] );
+ fflush(stderr);
+ }
+ }
+ }
+
+ if (verbose) {
+ char buf[ 128 ] ;
+ sprintf( buf, "fast_mpi_%d.dmp", rank ) ;
+ pf = fopen( buf, "w" ) ;
+ }
+ else if (loopProgress) {
+ pf = stdout;
+ }
+
+ if( !rank ) {
+ int **psend ;
+ int **precv ;
+ psend = (int**)calloc( nProc, sizeof( int *) ) ;
+ precv = (int**)calloc( nProc, sizeof( int *) ) ;
+ for( i = 0 ; i < nProc ; i++ ) {
+ psend[ i ] = (int*)calloc( DATA_SIZE, sizeof( int ) ) ;
+ precv[ i ] = (int*)calloc( DATA_SIZE, sizeof( int ) ) ;
+ }
+ for( i = 0 ; i < LOOP_COUNT ; i++ ) {
+ if (verbose) {
+ fprintf( pf, "Master : loop %d\n", i ) ;
+ fflush( pf ) ;
+ }
+ else if (loopProgress && (i & PROGRESS_COUNT) == 0) {
+ fprintf( pf, "Master: loop %d\n", i ); fflush( pf );
+ }
+ for( j = 1 ; j < nProc ; j++ ) {
+ if (verbose) {
+ fprintf( pf, " read from child %d\n", j ) ;
+ fflush( pf ) ;
+ }
+ status = MPI_Recv( precv[ j ], DATA_SIZE, MPI_INT, j, MP_TAG,
+ MPI_COMM_WORLD, MPI_STATUS_IGNORE ) ;
+ if (verbose) {
+ fprintf( pf, " read from child %d done, status = %d\n", j,
+ status ) ;
+ fflush( pf ) ;
+ }
+ }
+ for( j = 1 ; j < nProc ; j++ ) {
+ if (verbose) {
+ fprintf( pf, " send to child %d\n", j ) ;
+ fflush( pf ) ;
+ }
+ status = MPI_Send( psend[ j ], DATA_SIZE - 1, MPI_INT, j,
+ MP_TAG, MPI_COMM_WORLD ) ;
+ if (verbose) {
+ fprintf( pf, " send to child %d done, status = %d\n", j,
+ status ) ;
+ fflush( pf ) ;
+ }
+ }
+ }
+ for( i = 0 ; i < nProc ; i++ ) {
+ free( psend[ i ] );
+ free( precv[ i ] );
+ }
+ free( psend );
+ free( precv );
+ } else {
+ int *psend ;
+ int *precv ;
+ psend = (int*)calloc( DATA_SIZE, sizeof( int ) ) ;
+ precv = (int*)calloc( DATA_SIZE, sizeof( int ) ) ;
+ for( i = 0 ; i < LOOP_COUNT ; i++ ) {
+ if (verbose) {
+ fprintf( pf, " send to master\n" ) ;
+ fflush( pf ) ;
+ }
+ /*
+ else if (loopProgress && (i & PROGRESS_COUNT) == 0) {
+ fprintf( pf, "Slave: loop %d\n", i ); fflush( pf );
+ }
+ */
+ status = MPI_Send( psend, DATA_SIZE - 1, MPI_INT, 0, MP_TAG,
+ MPI_COMM_WORLD ) ;
+ if (verbose) {
+ fprintf( pf, " send to master done, status = %d\n", status ) ;
+ fflush( pf ) ;
+ fprintf( pf, " read from master\n" ) ;
+ fflush( pf ) ;
+ }
+ status = MPI_Recv( precv, DATA_SIZE, MPI_INT, 0, MP_TAG,
+ MPI_COMM_WORLD, MPI_STATUS_IGNORE ) ;
+ if (verbose) {
+ fprintf( pf, " read from master done, status = %d\n", status ) ;
+ fflush( pf ) ;
+ }
+ }
+ free( psend );
+ free( precv );
+ }
+ if (verbose) {
+ fclose( pf ) ;
+ }
+ MPI_Finalize() ;
+
+ /* This test fails if it hangs */
+ if (rank == 0) {
+ printf( " No Errors\n" );
+ }
+
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Send-Recv";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, source, dest;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ source = 0;
+ dest = size - 1;
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = count * 2) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
+ /* Make sure that everyone has a recv buffer */
+ recvtype.InitBuf( &recvtype );
+
+ if (rank == source) {
+ sendtype.InitBuf( &sendtype );
+
+ err = MPI_Send( sendtype.buf, sendtype.count,
+ sendtype.datatype, dest, 0, comm);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+ }
+ else if (rank == dest) {
+ err = MPI_Recv( recvtype.buf, recvtype.count,
+ recvtype.datatype, source, 0, comm, MPI_STATUS_IGNORE);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ if (errs < 10) {
+ printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
+ MTestGetDatatypeName( &recvtype ),
+ MTestGetDatatypeName( &sendtype ),
+ count );
+ recvtype.printErrors = 1;
+ (void)MTestCheckRecv( 0, &recvtype );
+ }
+ errs += err;
+ }
+ }
+ MTestFreeDatatype( &sendtype );
+ MTestFreeDatatype( &recvtype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+static int verbose = 0;
+
+static int parse_args(int argc, char **argv);
+
+int main( int argc, char *argv[] )
+{
+ int i, j, errs = 0;
+ int rank, size;
+ MPI_Datatype newtype;
+ char *buf = NULL;
+
+ MPI_Init(&argc, &argv);
+ parse_args(argc, argv);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ if (size < 2) {
+ if (verbose) fprintf(stderr, "comm size must be > 1\n");
+ errs++;
+ goto fn_exit;
+ }
+
+ buf = malloc(64 * 129);
+ if (buf == NULL) {
+ if (verbose) fprintf(stderr, "error allocating buffer\n");
+ errs++;
+ goto fn_exit;
+ }
+
+ for (i = 8; i < 64; i += 4) {
+ MPI_Type_vector(i, 128, 129, MPI_CHAR, &newtype);
+
+ MPI_Type_commit(&newtype);
+ memset(buf, 0, 64*129);
+
+ if (rank == 0) {
+ /* init buffer */
+ for (j=0; j < i; j++) {
+ int k;
+ for (k=0; k < 129; k++) {
+ buf[129*j + k] = (char) j;
+ }
+ }
+
+ /* send */
+ MPI_Send(buf, 1, newtype, 1, i, MPI_COMM_WORLD);
+ }
+ else if (rank == 1) {
+ /* recv */
+ MPI_Recv(buf, 1, newtype, 0, i, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+
+ /* check buffer */
+ for (j=0; j < i; j++) {
+ int k;
+ for (k=0; k < 129; k++) {
+ if (k < 128 && buf[129*j + k] != (char) j) {
+ if (verbose) fprintf(stderr,
+ "(i=%d, pos=%d) should be %d but is %d\n",
+ i, 129*j + k, j, (int) buf[129*j + k]);
+ errs++;
+ }
+ else if (k == 128 && buf[129*j + k] != (char) 0) {
+ if (verbose) fprintf(stderr,
+ "(i=%d, pos=%d) should be %d but is %d\n",
+ i, 129*j + k, 0, (int) buf[129*j + k]);
+ errs++;
+ }
+ }
+ }
+ }
+
+ MPI_Type_free(&newtype);
+ }
+
+ if (rank == 0) {
+ int recv_errs = 0;
+
+ MPI_Recv(&recv_errs, 1, MPI_INT, 1, 0, MPI_COMM_WORLD,
+ MPI_STATUS_IGNORE);
+ if (recv_errs) {
+ if (verbose) fprintf(stderr, "%d errors reported from receiver\n",
+ recv_errs);
+ errs += recv_errs;
+ }
+ }
+ else if (rank == 1) {
+ MPI_Send(&errs, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
+ }
+
+ fn_exit:
+
+ free(buf);
+ /* print message and exit */
+ if (errs) {
+ if (rank == 0) fprintf(stderr, "Found %d errors\n", errs);
+ }
+ else {
+ if (rank == 0) printf(" No Errors\n");
+ }
+ MPI_Finalize();
+ return 0;
+}
+
+static int parse_args(int argc, char **argv)
+{
+ /*
+ int ret;
+
+ while ((ret = getopt(argc, argv, "v")) >= 0)
+ {
+ switch (ret) {
+ case 'v':
+ verbose = 1;
+ break;
+ }
+ }
+ */
+ if (argc > 1 && strcmp(argv[1], "-v") == 0)
+ verbose = 1;
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Head to head send-recv to test backoff in device when large messages are being transferred";
+*/
+
+#define MAX_NMSGS 100
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size, source, dest, partner;
+ int i, testnum;
+ double tsend;
+ static int msgsizes[] = { 100, 1000, 10000, 100000, -1 };
+ static int nmsgs[] = { 100, 10, 10, 4 };
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+ source = 0;
+ dest = 1;
+ if (size < 2) {
+ printf( "This test requires at least 2 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ for (testnum=0; msgsizes[testnum] > 0; testnum++) {
+ if (rank == source || rank == dest) {
+ int nmsg = nmsgs[testnum];
+ int msgSize = msgsizes[testnum];
+ MPI_Request r[MAX_NMSGS];
+ int *buf[MAX_NMSGS];
+
+ for (i=0; i<nmsg; i++) {
+ buf[i] = (int *)malloc( msgSize );
+ if (!buf[i]) {
+ fprintf( stderr, "Unable to allocate %d bytes\n",
+ msgSize );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ }
+ partner = (rank + 1) % size;
+
+ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 10,
+ MPI_BOTTOM, 0, MPI_INT, partner, 10, comm,
+ MPI_STATUS_IGNORE );
+ /* Try to fill up the outgoing message buffers */
+ for (i=0; i<nmsg; i++) {
+ MPI_Isend( buf[i], msgSize, MPI_CHAR, partner, testnum, comm,
+ &r[i] );
+ }
+ for (i=0; i<nmsg; i++) {
+ MPI_Recv( buf[i], msgSize, MPI_CHAR, partner, testnum, comm,
+ MPI_STATUS_IGNORE );
+ }
+ MPI_Waitall( nmsg, r, MPI_STATUSES_IGNORE );
+
+ /* Repeat the test, but make one of the processes sleep */
+ MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 10,
+ MPI_BOTTOM, 0, MPI_INT, partner, 10, comm,
+ MPI_STATUS_IGNORE );
+ if (rank == dest) MTestSleep( 1 );
+ /* Try to fill up the outgoing message buffers */
+ tsend = MPI_Wtime();
+ for (i=0; i<nmsg; i++) {
+ MPI_Isend( buf[i], msgSize, MPI_CHAR, partner, testnum, comm,
+ &r[i] );
+ }
+ tsend = MPI_Wtime() - tsend;
+ for (i=0; i<nmsg; i++) {
+ MPI_Recv( buf[i], msgSize, MPI_CHAR, partner, testnum, comm,
+ MPI_STATUS_IGNORE );
+ }
+ MPI_Waitall( nmsg, r, MPI_STATUSES_IGNORE );
+
+ if (tsend > 0.5) {
+ printf( "Isends for %d messages of size %d took too long (%f seconds)\n", nmsg, msgSize, tsend );
+ errs++;
+ }
+ MTestPrintfMsg( 1, "%d Isends for size = %d took %f seconds\n",
+ nmsg, msgSize, tsend );
+
+ for (i=0; i<nmsg; i++) {
+ free( buf[i] );
+ }
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of sending to self (with a preposted receive)";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size;
+ int count;
+ MPI_Comm comm;
+ MPI_Request req;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = count * 2) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
+
+ sendtype.InitBuf( &sendtype );
+ recvtype.InitBuf( &recvtype );
+
+ err = MPI_Irecv( recvtype.buf, recvtype.count,
+ recvtype.datatype, rank, 0, comm, &req );
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+
+ err = MPI_Send( sendtype.buf, sendtype.count,
+ sendtype.datatype, rank, 0, comm);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+ err = MPI_Wait( &req, MPI_STATUS_IGNORE );
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ if (errs < 10) {
+ printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
+ MTestGetDatatypeName( &recvtype ),
+ MTestGetDatatypeName( &sendtype ),
+ count );
+ recvtype.printErrors = 1;
+ (void)MTestCheckRecv( 0, &recvtype );
+ }
+ errs += err;
+ }
+
+ err = MPI_Irecv( recvtype.buf, recvtype.count,
+ recvtype.datatype, rank, 0, comm, &req );
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+
+ err = MPI_Ssend( sendtype.buf, sendtype.count,
+ sendtype.datatype, rank, 0, comm);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+ err = MPI_Wait( &req, MPI_STATUS_IGNORE );
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ if (errs < 10) {
+ printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
+ MTestGetDatatypeName( &recvtype ),
+ MTestGetDatatypeName( &sendtype ),
+ count );
+ recvtype.printErrors = 1;
+ (void)MTestCheckRecv( 0, &recvtype );
+ }
+ errs += err;
+ }
+
+ err = MPI_Irecv( recvtype.buf, recvtype.count,
+ recvtype.datatype, rank, 0, comm, &req );
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+
+ err = MPI_Rsend( sendtype.buf, sendtype.count,
+ sendtype.datatype, rank, 0, comm);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ MTestPrintError( err );
+ }
+ }
+ err = MPI_Wait( &req, MPI_STATUS_IGNORE );
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ if (errs < 10) {
+ printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d\n",
+ MTestGetDatatypeName( &recvtype ),
+ MTestGetDatatypeName( &sendtype ),
+ count );
+ recvtype.printErrors = 1;
+ (void)MTestCheckRecv( 0, &recvtype );
+ }
+ errs += err;
+ }
+
+ MTestFreeDatatype( &sendtype );
+ MTestFreeDatatype( &recvtype );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+#needs MPI_Type_dup, MPI_Type_set_name
+#sendrecv1 4
+sendrecv2 2
+sendrecv3 2
+sendflood 8 timeLimit=600
+#needs rsend
+#sendself 1
+sendall 4
+anyall 2
+eagerdt 2
+#needs MPI_Type_get_name, MPI_Type_dup
+#pingping 2
+bottom 2
+#needs MPI_Bsend
+#bsend1 1
+#bsend2 1
+#bsend3 1
+#bsend4 1
+#bsend5 4
+#bsendalign 2
+#bsendpending 2
+isendself 1
+#needs MPI_Buffer_attach, MPI_Bsend, MPI_Buffer_detach
+#bsendfrag 2
+#needs MPI_Intercomm_create
+#icsend 4
+#needs MPI_Request_get_status
+#rqstatus 2
+#needs MPI_Pack, MPI_Buffer_attach, MPI_Buffer_detach, MPI_Irsend, MPI_Ibsend
+#rqfreeb 4
+#needs MPI_Grequest_start MPI_Grequest_complete
+#greq1 1
+probe-unexp 4
+probenull 1
+# For testing, scancel will run with 1 process as well
+#needs MPI_Cancel, MPI_Test_cancelled, MPI_Ibsend
+#scancel 2 xfail=ticket287
+#needs MPI_Cancel, MPI_Test_cancelled
+#scancel2 2
+#pscancel 2 xfail=ticket287
+#needs MPI_Cancel
+#rcancel 2
+#cancelrecv 2 xfail=ticket287
+isendselfprobe 1
+inactivereq 1
+#needs MPI_Error_string, but fails with testany
+waittestnull 1
+waitany-null 1
+# this should be run only on machines with large amount of memory (>=8GB)
+# perhaps disable in the release tarball
+#large_message 3
+mprobe 2 mpiversion=3.0
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitestconf.h"
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+#include "mpi.h"
+
+static int verbose = 0;
+
+int main(int argc, char *argv[]);
+int parse_args(int argc, char **argv);
+
+int main(int argc, char *argv[])
+{
+ int i, err, errs = 0, rank, toterrs;
+
+ int index;
+ MPI_Request requests[10];
+ MPI_Status statuses[10];
+
+ MPI_Init(&argc, &argv);
+ parse_args(argc, argv);
+
+ for (i=0; i < 10; i++) {
+ requests[i] = MPI_REQUEST_NULL;
+ }
+
+ /* begin testing */
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ err = MPI_Waitany(10, requests, &index, statuses);
+
+ if (err != MPI_SUCCESS) {
+ errs++;
+ fprintf(stderr, "MPI_Waitany did not return MPI_SUCCESS\n");
+ }
+
+ if (index != MPI_UNDEFINED) {
+ errs++;
+ fprintf(stderr, "MPI_Waitany did not set index to MPI_UNDEFINED\n");
+ }
+
+ /* end testing */
+
+ MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+ MPI_Comm_rank( MPI_COMM_WORLD, & rank );
+ MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (rank == 0) {
+ if (toterrs) {
+ fprintf(stderr, " Found %d errors\n", toterrs);
+ }
+ else {
+ printf(" No Errors\n");
+ }
+ }
+ MPI_Finalize();
+ return 0;
+}
+
+int parse_args(int argc, char **argv)
+{
+ /*
+ int ret;
+
+ while ((ret = getopt(argc, argv, "v")) >= 0)
+ {
+ switch (ret) {
+ case 'v':
+ verbose = 1;
+ break;
+ }
+ }
+ */
+ if (argc > 1 && strcmp(argv[1], "-v") == 0)
+ verbose = 1;
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2005 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/*
+ * This program checks that the various MPI_Test and MPI_Wait routines
+ * allow both null requests and in the multiple completion cases, empty
+ * lists of requests.
+ */
+
+int main(int argc, char **argv)
+{
+ int errs = 0;
+ MPI_Status status, *status_array = 0;
+ int count = 0, flag, idx, rc, errlen, *indices=0, outcnt;
+ MPI_Request *reqs = 0;
+ char errmsg[MPI_MAX_ERROR_STRING];
+
+ MTest_Init(&argc, &argv);
+
+ MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ rc = MPI_Testall( count, reqs, &flag, status_array );
+ if (rc != MPI_SUCCESS) {
+ MPI_Error_string( rc, errmsg, &errlen );
+ printf( "MPI_Testall returned failure: %s\n", errmsg );
+ errs ++;
+ }
+ else if (!flag) {
+ printf( "MPI_Testall( 0, ... ) did not return a true flag\n") ;
+ errs++;
+ }
+
+ rc = MPI_Waitall( count, reqs, status_array );
+ if (rc != MPI_SUCCESS) {
+ MPI_Error_string( rc, errmsg, &errlen );
+ printf( "MPI_Waitall returned failure: %s\n", errmsg );
+ errs ++;
+ }
+
+ rc = MPI_Testany( count, reqs, &idx, &flag, &status );
+ if (rc != MPI_SUCCESS) {
+ MPI_Error_string( rc, errmsg, &errlen );
+ printf( "MPI_Testany returned failure: %s\n", errmsg );
+ errs ++;
+ }
+ else if (!flag) {
+ printf( "MPI_Testany( 0, ... ) did not return a true flag\n") ;
+ errs++;
+ }
+
+ rc = MPI_Waitany( count, reqs, &idx, &status );
+ if (rc != MPI_SUCCESS) {
+ MPI_Error_string( rc, errmsg, &errlen );
+ printf( "MPI_Waitany returned failure: %s\n", errmsg );
+ errs ++;
+ }
+
+ rc = MPI_Testsome( count, reqs, &outcnt, indices, status_array );
+ if (rc != MPI_SUCCESS) {
+ MPI_Error_string( rc, errmsg, &errlen );
+ printf( "MPI_Testsome returned failure: %s\n", errmsg );
+ errs ++;
+ }
+
+ rc = MPI_Waitsome( count, reqs, &outcnt, indices, status_array );
+ if (rc != MPI_SUCCESS) {
+ MPI_Error_string( rc, errmsg, &errlen );
+ printf( "MPI_Waitsome returned failure: %s\n", errmsg );
+ errs ++;
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+#! /usr/local/bin/perl
+# -*- Mode: perl; -*-
+#
+# This script is the beginnings of a script to run a sequence of test
+# programs. See the MPICH document for a description of the test
+# strategy and requirements.
+#
+# Description
+# Tests are controlled by a file listing test programs; if the file is
+# a directory, then all of the programs in the directory and subdirectories
+# are run
+#
+# To run a test, the following steps are executed
+# Build the executable:
+# make programname
+# Run the executable
+# mpiexec -n <np> ./programname >out 2>err
+# Check the return code (non zero is failure)
+# Check the stderr output (non empty is failure)
+# Check the stdout output (No Errors or Test passed are the only valid
+# output)
+# Remove executable, out, err files
+#
+# The format of a list file is
+# programname number-of-processes
+# If number-of-processes is missing, $np_default is used (this is 2 but can
+# be overridden with -np=new-value)
+#
+# Special feature:
+# Because these tests can take a long time to run, there is an
+# option to cause the tests to stop is a "stopfile" is found.
+# The stopfile can be created by a separate, watchdog process, to ensure that
+# tests end at a certain time.
+# The name of this file is (by default) .stoptest
+# in the top-level run directory. The environment variable
+# MPITEST_STOPTEST
+# can specify a different file name.
+#
+# Import the mkpath command
+use File::Path;
+
+# Global variables
+$MPIMajorVersion = "1";
+$MPIMinorVersion = "1";
+$mpiexec = "smpirun"; # Name of mpiexec program (including path, if necessary)
+$testIsStrict = "true";
+$MPIhasMPIX = "no";
+$np_arg = "-np"; # Name of argument to specify the number of processes
+$err_count = 0; # Number of programs that failed.
+$total_run = 0; # Number of programs tested
+$total_seen = 0; # Number of programs considered for testing
+$np_default = 2; # Default number of processes to use
+$np_max = -1; # Maximum number of processes to use (overrides any
+ # value in the test list files. -1 is Infinity
+$defaultTimeLimit = 180; # default timeout
+
+$srcdir = "."; # Used to set the source dir for testlist files
+
+$curdir = "."; # used to track the relative current directory
+
+# Output forms
+$xmloutput = 0; # Set to true to get xml output (also specify file)
+$closeXMLOutput = 1; # Set to false to leave XML output file open to
+ # accept additional data
+$verbose = 1; # Set to true to get more output
+$showProgress = 0; # Set to true to get a "." with each run program.
+$newline = "\r\n"; # Set to \r\n for Windows-friendly, \n for Unix only
+$batchRun = 0; # Set to true to batch the execution of the tests
+ # (i.e., run them together, then test output,
+ # rather than build/run/check for each test)
+$testCount = 0; # Used with batchRun to count tests.
+$batrundir = "."; # Set to the directory into which to run the examples
+
+$execarg="";
+# TAP (Test Anything Protocol) output
+my $tapoutput = 0;
+my $tapfile = '';
+my $tapfullfile = '';
+
+$debug = 1;
+
+$depth = 0; # This is used to manage multiple open list files
+
+# Build flags
+$remove_this_pgm = 0;
+$clean_pgms = 1;
+
+my $program_wrapper = '';
+
+#---------------------------------------------------------------------------
+# Get some arguments from the environment
+# Currently, only the following are understood:
+# VERBOSE
+# RUNTESTS_VERBOSE (an alias for VERBOSE in case you want to
+# reserve VERBOSE)
+# RUNTESTS_SHOWPROGRESS
+# MPITEST_STOPTEST
+# MPITEST_TIMEOUT
+# MPITEST_PROGRAM_WRAPPER (Value is added after -np but before test
+# executable. Tools like valgrind may be inserted
+# this way.)
+#---------------------------------------------------------------------------
+if ( defined($ENV{"VERBOSE"}) || defined($ENV{"V"}) || defined($ENV{"RUNTESTS_VERBOSE"}) ) {
+ $verbose = 1;
+}
+if ( defined($ENV{"RUNTESTS_SHOWPROGRESS"} ) ) {
+ $showProgress = 1;
+}
+if (defined($ENV{"MPITEST_STOPTEST"})) {
+ $stopfile = $ENV{"MPITEST_STOPTEST"};
+}
+else {
+ $stopfile = `pwd` . "/.stoptest";
+ $stopfile =~ s/\r*\n*//g; # Remove any newlines (from pwd)
+}
+
+if (defined($ENV{"MPITEST_TIMEOUT"})) {
+ $defaultTimeLimit = $ENV{"MPITEST_TIMEOUT"};
+}
+
+# Define this to leave the XML output file open to receive additional data
+if (defined($ENV{'NOXMLCLOSE'}) && $ENV{'NOXMLCLOSE'} eq 'YES') {
+ $closeXMLOutput = 0;
+}
+
+if (defined($ENV{'MPITEST_PROGRAM_WRAPPER'})) {
+ $program_wrapper = $ENV{'MPITEST_PROGRAM_WRAPPER'};
+}
+
+if (defined($ENV{'MPITEST_BATCH'})) {
+ if ($ENV{'MPITEST_BATCH'} eq 'YES' || $ENV{'MPITEST_BATCH'} eq 'yes') {
+ $batchRun = 1;
+ } elsif ($ENV{'MPITEST_BATCH'} eq 'NO' || $ENV{'MPITEST_BATCH'} eq 'no') {
+ $batchRun = 0;
+ }
+ else {
+ print STDERR "Unrecognized value for MPITEST_BATCH = $ENV{'MPITEST_BATCH'}\n";
+ }
+}
+if (defined($ENV{'MPITEST_BATCHDIR'})) {
+ $batrundir = $ENV{'MPITEST_BATCHDIR'};
+}
+
+#---------------------------------------------------------------------------
+# Process arguments and override any defaults
+#---------------------------------------------------------------------------
+foreach $_ (@ARGV) {
+ if (/--?mpiexec=(.*)/) {
+ # Use mpiexec as given - it may be in the path, and
+ # we don't want to bother to try and find it.
+ $mpiexec = $1;
+ }
+ elsif (/--?np=(.*)/) { $np_default = $1; }
+ elsif (/--?maxnp=(.*)/) { $np_max = $1; }
+ elsif (/--?tests=(.*)/) { $listfiles = $1; }
+ elsif (/--?srcdir=(.*)/) { $srcdir = $1;
+ $mpiexec="$mpiexec -platform ${srcdir}/../../../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir}/../hostfile --log=root.thr:critical" }
+ elsif (/--?verbose/) { $verbose = 1; }
+ elsif (/--?showprogress/) { $showProgress = 1; }
+ elsif (/--?debug/) { $debug = 1; }
+ elsif (/--?batch/) { $batchRun = 1; }
+ elsif (/--?batchdir=(.*)/) { $batrundir = $1; }
+ elsif (/--?timeoutarg=(.*)/) { $timeoutArgPattern = $1; }
+ elsif (/--?execarg=(.*)/) { $execarg = "$execarg $1"; }
+ elsif (/--?xmlfile=(.*)/) {
+ $xmlfile = $1;
+ if (! ($xmlfile =~ /^\//)) {
+ $thisdir = `pwd`;
+ chop $thisdir;
+ $xmlfullfile = $thisdir . "/" . $xmlfile ;
+ }
+ else {
+ $xmlfullfile = $xmlfile;
+ }
+ $xmloutput = 1;
+ open( XMLOUT, ">$xmlfile" ) || die "Cannot open $xmlfile\n";
+ my $date = `date "+%Y-%m-%d-%H-%M"`;
+ $date =~ s/\r?\n//;
+ # MPISOURCE can be used to describe the source of MPI for this
+ # test.
+ print XMLOUT "<?xml version='1.0' ?>$newline";
+ print XMLOUT "<?xml-stylesheet href=\"TestResults.xsl\" type=\"text/xsl\" ?>$newline";
+ print XMLOUT "<MPITESTRESULTS>$newline";
+ print XMLOUT "<DATE>$date</DATE>$newline";
+ print XMLOUT "<MPISOURCE></MPISOURCE>$newline";
+ }
+ elsif (/--?noxmlclose/) {
+ $closeXMLOutput = 0;
+ }
+ elsif (/--?tapfile=(.*)/) {
+ $tapfile = $1;
+ if ($tapfile !~ m|^/|) {
+ $thisdir = `pwd`;
+ chomp $thisdir;
+ $tapfullfile = $thisdir . "/" . $tapfile ;
+ }
+ else {
+ $tapfullfile = $tapfile;
+ }
+ $tapoutput = 1;
+ open( TAPOUT, ">$tapfile" ) || die "Cannot open $tapfile\n";
+ my $date = `date "+%Y-%m-%d-%H-%M"`;
+ $date =~ s/\r?\n//;
+ print TAPOUT "TAP version 13\n";
+ print TAPOUT "# MPICH test suite results (TAP format)\n";
+ print TAPOUT "# date ${date}\n";
+ # we do not know at this point how many tests will be run, so do
+ # not print a test plan line like "1..450" until the very end
+ }
+ else {
+ print STDERR "Unrecognized argument $_\n";
+ print STDERR "runtests [-tests=testfile] [-np=nprocesses] \
+ [-maxnp=max-nprocesses] [-srcdir=location-of-tests] \
+ [-xmlfile=filename ] [-noxmlclose] \
+ [-verbose] [-showprogress] [-debug] [-batch]\n";
+ exit(1);
+ }
+}
+
+# Perform any post argument processing
+if ($batchRun) {
+ if (! -d $batrundir) {
+ mkpath $batrundir || die "Could not create $batrundir\n";
+ }
+ open( BATOUT, ">$batrundir/runtests.batch" ) || die "Could not open $batrundir/runtests.batch\n";
+}
+else {
+ # We must have mpiexec
+ if ("$mpiexec" eq "") {
+ print STDERR "No mpiexec found!\n";
+ exit(1);
+ }
+}
+
+#
+# Process any files
+if ($listfiles eq "") {
+ if ($batchRun) {
+ print STDERR "An implicit list of tests is not permitted in batch mode\n";
+ exit(1);
+ }
+ else {
+ &ProcessImplicitList;
+ }
+}
+elsif (-d $listfiles) {
+ print STDERR "Testing by directories not yet supported\n";
+}
+else {
+ &RunList( $listfiles );
+}
+
+if ($xmloutput && $closeXMLOutput) {
+ print XMLOUT "</MPITESTRESULTS>$newline";
+ close XMLOUT;
+}
+
+if ($tapoutput) {
+ print TAPOUT "1..$total_seen\n";
+ close TAPOUT;
+}
+
+# Output a summary:
+if ($batchRun) {
+ print "Programs created along with a runtest.batch file in $batrundir\n";
+ print "Run that script and then use checktests to summarize the results\n";
+}
+else {
+ if ($err_count) {
+ print "$err_count tests failed out of $total_run\n";
+ if ($xmloutput) {
+ print "Details in $xmlfullfile\n";
+ }
+ }
+ else {
+ print " All $total_run tests passed!\n";
+ }
+ if ($tapoutput) {
+ print "TAP formatted results in $tapfullfile\n";
+ }
+}
+#\f
+# ---------------------------------------------------------------------------
+# Routines
+#
+# Enter a new directory and process a list file.
+# ProcessDir( directory-name, list-file-name )
+sub ProcessDir {
+ my $dir = $_[0]; $dir =~ s/\/$//;
+ my $listfile = $_[1];
+ my $savedir = `pwd`;
+ my $savecurdir = $curdir;
+ my $savesrcdir = $srcdir;
+
+ chop $savedir;
+ if (substr($srcdir,0,3) eq "../") {
+ $srcdir = "../$srcdir";
+ }
+
+ print "Processing directory $dir\n" if ($verbose || $debug);
+ chdir $dir;
+ if ($dir =~ /\//) {
+ print STDERR "only direct subdirectories allowed in list files";
+ }
+ $curdir .= "/$dir";
+
+ &RunList( $listfile );
+ print "\n" if $showProgress; # Terminate line from progress output
+ chdir $savedir;
+ $curdir = $savecurdir;
+ $srcdir = $savesrcdir;
+}
+# ---------------------------------------------------------------------------
+# Run the programs listed in the file given as the argument.
+# This file describes the tests in the format
+# programname number-of-processes [ key=value ... ]
+# If the second value is not given, the default value is used.
+#
+sub RunList {
+ my $LIST = "LIST$depth"; $depth++;
+ my $listfile = $_[0];
+ my $ResultTest = "";
+ my $InitForRun = "";
+ my $listfileSource = $listfile;
+
+ print "Looking in $curdir/$listfile\n" if $debug;
+ if (! -s "$listfile" && -s "$srcdir/$curdir/$listfile" ) {
+ $listfileSource = "$srcdir/$curdir/$listfile";
+ }
+ open( $LIST, "<$listfileSource" ) ||
+ die "Could not open $listfileSource\n";
+ while (<$LIST>) {
+ # Check for stop file
+ if (-s $stopfile) {
+ # Exit because we found a stopfile
+ print STDERR "Terminating test because stopfile $stopfile found\n";
+ last;
+ }
+ # Skip comments
+ s/#.*//g;
+ # Remove any trailing newlines/returns
+ s/\r?\n//;
+ # Remove any leading whitespace
+ s/^\s*//;
+ # Some tests require that support routines are built first
+ # This is specified with !<dir>:<target>
+ if (/^\s*\!([^:]*):(.*)/) {
+ # Hack: just execute in a subshell. This discards any
+ # output.
+ `cd $1 && make $2`;
+ next;
+ }
+ # List file entries have the form:
+ # program [ np [ name=value ... ] ]
+ # See files errhan/testlist, init/testlist, and spawn/testlist
+ # for examples of using the key=value form
+ my @args = split(/\s+/,$_);
+ my $programname = $args[0];
+ my $np = "";
+ my $ResultTest = "";
+ my $InitForRun = "";
+ my $timeLimit = "";
+ my $progArgs = "";
+ my $mpiexecArgs = "$execarg";
+ my $requiresStrict = "";
+ my $requiresMPIX = "";
+ my $progEnv = "";
+ my $mpiVersion = "";
+ my $xfail = "";
+ if ($#args >= 1) { $np = $args[1]; }
+ # Process the key=value arguments
+ for (my $i=2; $i <= $#args; $i++) {
+ if ($args[$i] =~ /([^=]+)=(.*)/) {
+ my $key = $1;
+ my $value = $2;
+ if ($key eq "resultTest") {
+ $ResultTest = $value;
+ }
+ elsif ($key eq "init") {
+ $InitForRun = $value;
+ }
+ elsif ($key eq "timeLimit") {
+ $timeLimit = $value;
+ }
+ elsif ($key eq "arg") {
+ $progArgs = "$progArgs $value";
+ }
+ elsif ($key eq "mpiexecarg") {
+ $mpiexecArgs = "$mpiexecArgs $value";
+ }
+ elsif ($key eq "env") {
+ $progEnv = "$progEnv $value";
+ }
+ elsif ($key eq "mpiversion") {
+ $mpiVersion = $value;
+ }
+ elsif ($key eq "strict") {
+ $requiresStrict = $value
+ }
+ elsif ($key eq "mpix") {
+ $requiresMPIX = $value
+ }
+ elsif ($key eq "xfail") {
+ if ($value eq "") {
+ print STDERR "\"xfail=\" requires an argument\n";
+ }
+ $xfail = $value;
+ }
+ else {
+ print STDERR "Unrecognized key $key in $listfileSource\n";
+ }
+ }
+ }
+
+ # skip empty lines
+ if ($programname eq "") { next; }
+
+ if ($np eq "") { $np = $np_default; }
+ if ($np_max > 0 && $np > $np_max) { $np = $np_max; }
+
+ # allows us to accurately output TAP test numbers without disturbing the
+ # original totals that have traditionally been reported
+ #
+ # These "unless" blocks are ugly, but permit us to honor skipping
+ # criteria for directories as well without counting directories as tests
+ # in our XML/TAP output.
+ unless (-d $programname) {
+ $total_seen++;
+ }
+
+ # If a minimum MPI version is specified, check against the
+ # available MPI. If the version is unknown, we ignore this
+ # test (thus, all tests will be run).
+ if ($mpiVersion ne "" && $MPIMajorVersion ne "unknown" &&
+ $MPIMinorVersion ne "unknown") {
+ my ($majorReq,$minorReq) = split(/\./,$mpiVersion);
+ if ($majorReq > $MPIMajorVersion or
+ ($majorReq == $MPIMajorVersion && $minorReq > $MPIMinorVersion))
+ {
+ unless (-d $programname) {
+ SkippedTest($programname, $np, $workdir, "requires MPI version $mpiVersion");
+ }
+ next;
+ }
+ }
+ # Check whether strict is required by MPI but not by the
+ # test (use strict=false for tests that use non-standard extensions)
+ if (lc($requiresStrict) eq "false" && lc($testIsStrict) eq "true") {
+ unless (-d $programname) {
+ SkippedTest($programname, $np, $workdir, "non-strict test, strict MPI mode requested");
+ }
+ next;
+ }
+
+ if (lc($testIsStrict) eq "true") {
+ # Strict MPI testing was requested, so assume that a non-MPICH MPI
+ # implementation is being tested and the "xfail" implementation
+ # assumptions do not hold.
+ $xfail = '';
+ }
+
+ if (lc($requiresMPIX) eq "true" && lc($MPIHasMPIX) eq "no") {
+ unless (-d $programname) {
+ SkippedTest($programname, $np, $workdir, "tests MPIX extensions, MPIX testing disabled");
+ }
+ next;
+ }
+
+ if (-d $programname) {
+ # If a directory, go into the that directory and
+ # look for a new list file
+ &ProcessDir( $programname, $listfile );
+ }
+ else {
+ $total_run++;
+ if (&BuildMPIProgram( $programname, $xfail ) == 0) {
+ if ($batchRun == 1) {
+ &AddMPIProgram( $programname, $np, $ResultTest,
+ $InitForRun, $timeLimit, $progArgs,
+ $progEnv, $mpiexecArgs, $xfail );
+ }
+ else {
+ &RunMPIProgram( $programname, $np, $ResultTest,
+ $InitForRun, $timeLimit, $progArgs,
+ $progEnv, $mpiexecArgs, $xfail );
+ }
+ }
+ elsif ($xfail ne '') {
+ # We expected to run this program, so failure to build
+ # is an error
+ $found_error = 1;
+ $err_count++;
+ }
+ if ($batchRun == 0) {
+ &CleanUpAfterRun( $programname );
+ }
+ }
+ }
+ close( $LIST );
+}
+#
+# This routine tries to run all of the files in the current
+# directory
+sub ProcessImplicitList {
+ # The default is to run every file in the current directory.
+ # If there are no built programs, build and run every file
+ # WARNING: This assumes that anything executable should be run as
+ # an MPI test.
+ $found_exec = 0;
+ $found_src = 0;
+ open (PGMS, "ls -1 |" ) || die "Cannot list directory\n";
+ while (<PGMS>) {
+ s/\r?\n//;
+ $programname = $_;
+ if (-d $programname) { next; } # Ignore directories
+ if ($programname eq "runtests") { next; } # Ignore self
+ if ($programname eq "checktests") { next; } # Ignore helper
+ if ($programname eq "configure") { next; } # Ignore configure script
+ if ($programname eq "config.status") { next; } # Ignore configure helper
+ if (-x $programname) { $found_exec++; }
+ if ($programname =~ /\.[cf]$/) { $found_src++; }
+ }
+ close PGMS;
+
+ if ($found_exec) {
+ print "Found executables\n" if $debug;
+ open (PGMS, "ls -1 |" ) || die "Cannot list programs\n";
+ while (<PGMS>) {
+ # Check for stop file
+ if (-s $stopfile) {
+ # Exit because we found a stopfile
+ print STDERR "Terminating test because stopfile $stopfile found\n";
+ last;
+ }
+ s/\r?\n//;
+ $programname = $_;
+ if (-d $programname) { next; } # Ignore directories
+ if ($programname eq "runtests") { next; } # Ignore self
+ if (-x $programname) {
+ $total_run++;
+ &RunMPIProgram( $programname, $np_default, "", "", "", "", "", "", "" );
+ }
+ }
+ close PGMS;
+ }
+ elsif ($found_src) {
+ print "Found source files\n" if $debug;
+ open (PGMS, "ls -1 *.c |" ) || die "Cannot list programs\n";
+ while (<PGMS>) {
+ if (-s $stopfile) {
+ # Exit because we found a stopfile
+ print STDERR "Terminating test because stopfile $stopfile found\n";
+ last;
+ }
+ s/\r?\n//;
+ $programname = $_;
+ # Skip messages from ls about no files
+ if (! -s $programname) { next; }
+ $programname =~ s/\.c//;
+ $total_run++;
+ if (&BuildMPIProgram( $programname, "") == 0) {
+ &RunMPIProgram( $programname, $np_default, "", "", "", "", "", "", "" );
+ }
+ else {
+ # We expected to run this program, so failure to build
+ # is an error
+ $found_error = 1;
+ $err_count++;
+ }
+ &CleanUpAfterRun( $programname );
+ }
+ close PGMS;
+ }
+}
+# Run the program.
+# ToDo: Add a way to limit the time that any particular program may run.
+# The arguments are
+# name of program, number of processes, name of routine to check results
+# init for testing, timelimit, and any additional program arguments
+# If the 3rd arg is not present, the a default that simply checks that the
+# return status is 0 and that the output is " No Errors" is used.
+sub RunMPIProgram {
+ my ($programname,$np,$ResultTest,$InitForTest,$timeLimit,$progArgs,$progEnv,$mpiexecArgs,$xfail) = @_;
+ my $found_error = 0;
+ my $found_noerror = 0;
+ my $inline = "";
+
+ &RunPreMsg( $programname, $np, $curdir );
+
+ unlink "err";
+
+ # Set a default timeout on tests (3 minutes for now)
+ my $timeout = $defaultTimeLimit;
+ if (defined($timeLimit) && $timeLimit =~ /^\d+$/) {
+ $timeout = $timeLimit;
+ }
+ $ENV{"MPIEXEC_TIMEOUT"} = $timeout;
+
+ # Run the optional setup routine. For example, the timeout tests could
+ # be set to a shorter timeout.
+ if ($InitForTest ne "") {
+ &$InitForTest();
+ }
+ print STDOUT "Env includes $progEnv\n" if $verbose;
+ print STDOUT "$mpiexec $mpiexecArgs $np_arg $np $program_wrapper ./$programname $progArgs\n" if $verbose;
+ print STDOUT "." if $showProgress;
+ # Save and restore the environment if necessary before running mpiexec.
+ if ($progEnv ne "") {
+ %saveEnv = %ENV;
+ foreach $val (split(/\s+/, $progEnv)) {
+ if ($val =~ /([^=]+)=(.*)/) {
+ $ENV{$1} = $2;
+ }
+ else {
+ print STDERR "Environment variable/value $val not in a=b form\n";
+ }
+ }
+ }
+ open ( MPIOUT, "$mpiexec $np_arg $np $mpiexecArgs $program_wrapper ./$programname $progArgs 2>&1 |" ) ||
+ die "Could not run ./$programname\n";
+ if ($progEnv ne "") {
+ %ENV = %saveEnv;
+ }
+ if ($ResultTest ne "") {
+ # Read and process the output
+ ($found_error, $inline) = &$ResultTest( MPIOUT, $programname );
+ }
+ else {
+ if ($verbose) {
+ $inline = "$mpiexec $np_arg $np $program_wrapper ./$programname\n";
+ }
+ else {
+ $inline = "";
+ }
+ while (<MPIOUT>) {
+ print STDOUT $_ if $verbose;
+ # Skip FORTRAN STOP
+ if (/FORTRAN STOP/) { next; }
+ $inline .= $_;
+ if (/^\s*No [Ee]rrors\s*$/ && $found_noerror == 0) {
+ $found_noerror = 1;
+ }
+ if (! /^\s*No [Ee]rrors\s*$/ && !/^\s*Test Passed\s*$/) {
+ print STDERR "Unexpected output in $programname: $_";
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ }
+ }
+ if ($found_noerror == 0) {
+ print STDERR "Program $programname exited without No Errors\n";
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ }
+ $rc = close ( MPIOUT );
+ if ($rc == 0) {
+ # Only generate a message if we think that the program
+ # passed the test.
+ if (!$found_error) {
+ $run_status = $?;
+ $signal_num = $run_status & 127;
+ if ($run_status > 255) { $run_status >>= 8; }
+ print STDERR "Program $programname exited with non-zero status $run_status\n";
+ if ($signal_num != 0) {
+ print STDERR "Program $programname exited with signal $signal_num\n";
+ }
+ $found_error = 1;
+ $err_count ++;
+ }
+ }
+ }
+ if ($found_error) {
+ &RunTestFailed( $programname, $np, $curdir, $inline, $xfail );
+ }
+ else {
+ &RunTestPassed( $programname, $np, $curdir, $xfail );
+ }
+ &RunPostMsg( $programname, $np, $curdir );
+}
+
+# This version simply writes the mpiexec command out, with the output going
+# into a file, and recording the output status of the run.
+sub AddMPIProgram {
+ my ($programname,$np,$ResultTest,$InitForTest,$timeLimit,$progArgs,$progEnv,$mpiexecArgs, $xfail) = @_;
+
+ if (! -x $programname) {
+ print STDERR "Could not find $programname!";
+ return;
+ }
+
+ if ($ResultTest ne "") {
+ # This test really needs to be run manually, with this test
+ # Eventually, we can update this to include handleing in checktests.
+ print STDERR "Run $curdir/$programname with $np processes and use $ResultTest to check the results\n";
+ return;
+ }
+
+ # Set a default timeout on tests (3 minutes for now)
+ my $timeout = $defaultTimeLimit;
+ if (defined($timeLimit) && $timeLimit =~ /^\d+$/) {
+ # On some systems, there is no effective time limit on
+ # individual mpi program runs. In that case, we may
+ # want to treat these also as "run manually".
+ $timeout = $timeLimit;
+ }
+ print BATOUT "export MPIEXEC_TIMEOUT=$timeout\n";
+
+ # Run the optional setup routine. For example, the timeout tests could
+ # be set to a shorter timeout.
+ if ($InitForTest ne "") {
+ &$InitForTest();
+ }
+
+ # For non-MPICH versions of mpiexec, a timeout may require a different
+ # environment variable or command line option (e.g., for Cray aprun,
+ # the option -t <sec> must be given, there is no environment variable
+ # to set the timeout.
+ $extraArgs = "";
+ if (defined($timeoutArgPattern) && $timeoutArgPattern ne "") {
+ my $timeArg = $timeoutArgPattern;
+ $timeoutArg =~ s/<SEC>/$timeout/;
+ $extraArgs .= $timeoutArg
+ }
+
+ print STDOUT "Env includes $progEnv\n" if $verbose;
+ print STDOUT "$mpiexec $np_arg $np $extraArgs $program_wrapper ./$programname $progArgs\n" if $verbose;
+ print STDOUT "." if $showProgress;
+ # Save and restore the environment if necessary before running mpiexec.
+ if ($progEnv ne "") {
+ # Need to fix:
+ # save_NAME_is_set=is old name set
+ # save_NAME=oldValue
+ # export NAME=newvalue
+ # (run)
+ # export NAME=oldValue (if set!)
+ print STDERR "Batch output does not permit changes to environment\n";
+ }
+ # The approach here is to move the test codes to a single directory from
+ # which they can be run; this avoids complex code to change directories
+ # and ensure that the output goes "into the right place".
+ $testCount++;
+ rename $programname, "$batrundir/$programname";
+ print BATOUT "echo \"# $mpiexec $np_arg $np $extraArgs $mpiexecArgs $program_wrapper $curdir/$programname $progArgs\" > runtests.$testCount.out\n";
+ # Some programs expect to run in the same directory as the executable
+ print BATOUT "$mpiexec $np_arg $np $extraArgs $mpiexecArgs $program_wrapper ./$programname $progArgs >> runtests.$testCount.out 2>&1\n";
+ print BATOUT "echo \$? > runtests.$testCount.status\n";
+}
+
+#
+# Return value is 0 on success, non zero on failure
+sub BuildMPIProgram {
+ my $programname = shift;
+ my $xfail = shift;
+ my $rc = 0;
+ if ($verbose) { print STDERR "making $programname\n"; }
+ if (! -x $programname) { $remove_this_pgm = 1; }
+ else { $remove_this_pgm = 0; }
+ my $output = `make $programname 2>&1`;
+ $rc = $?;
+ if ($rc > 255) { $rc >>= 8; }
+ if (! -x $programname) {
+ print STDERR "Failed to build $programname; $output\n";
+ if ($rc == 0) {
+ $rc = 1;
+ }
+ # Add a line to the summary file describing the failure
+ # This will ensure that failures to build will end up
+ # in the summary file (which is otherwise written by the
+ # RunMPIProgram step)
+ &RunPreMsg( $programname, $np, $curdir );
+ &RunTestFailed( $programname, $np, $curdir, "Failed to build $programname; $output", $xfail );
+ &RunPostMsg( $programname, $np, $curdir );
+ }
+ return $rc;
+}
+
+sub CleanUpAfterRun {
+ my $programname = $_[0];
+
+ # Check for that this program has exited. If it is still running,
+ # issue a warning and leave the application. Of course, this
+ # check is complicated by the lack of a standard access to the
+ # running processes for this user in Unix.
+ @stillRunning = &FindRunning( $programname );
+
+ if ($#stillRunning > -1) {
+ print STDERR "Some programs ($programname) may still be running:\npids = ";
+ for (my $i=0; $i <= $#stillRunning; $i++ ) {
+ print STDERR $stillRunning[$i] . " ";
+ }
+ print STDERR "\n";
+ # Remind the user that the executable remains; we leave it around
+ # to allow the programmer to debug the running program, for which
+ # the executable is needed.
+ print STDERR "The executable ($programname) will not be removed.\n";
+ }
+ else {
+ if ($remove_this_pgm && $clean_pgms) {
+ unlink $programname, "$programname.o";
+ }
+ $remove_this_pgm = 0;
+ }
+}
+# ----------------------------------------------------------------------------
+sub FindRunning {
+ my $programname = $_[0];
+ my @pids = ();
+
+ my $logname = $ENV{'USER'};
+ my $pidloc = 1;
+ my $rc = open PSFD, "ps auxw -U $logname 2>&1 |";
+
+ if ($rc == 0) {
+ $rc = open PSFD, "ps -fu $logname 2>&1 |";
+ }
+ if ($rc == 0) {
+ print STDERR "Could not execute ps command\n";
+ return @pids;
+ }
+
+ while (<PSFD>) {
+ if (/$programname/) {
+ @fields = split(/\s+/);
+ my $pid = $fields[$pidloc];
+ # Check that we've found a numeric pid
+ if ($pid =~ /^\d+$/) {
+ $pids[$#pids + 1] = $pid;
+ }
+ }
+ }
+ close PSFD;
+
+ return @pids;
+}
+# ----------------------------------------------------------------------------
+#
+# TestStatus is a special test that reports success *only* when the
+# status return is NONZERO
+sub TestStatus {
+ my $MPIOUT = $_[0];
+ my $programname = $_[1];
+ my $found_error = 0;
+
+ my $inline = "";
+ while (<$MPIOUT>) {
+ #print STDOUT $_ if $verbose;
+ # Skip FORTRAN STOP
+ if (/FORTRAN STOP/) { next; }
+ $inline .= $_;
+ # ANY output is an error. We have the following output
+ # exception for the Hydra process manager.
+ if (/=*/) { last; }
+ if (! /^\s*$/) {
+ print STDERR "Unexpected output in $programname: $_";
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ }
+ }
+ $rc = close ( MPIOUT );
+ if ($rc == 0) {
+ $run_status = $?;
+ $signal_num = $run_status & 127;
+ if ($run_status > 255) { $run_status >>= 8; }
+ }
+ else {
+ # This test *requires* non-zero return codes
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ $inline .= "$mpiexec returned a zero status but the program returned a nonzero status\n";
+ }
+ return ($found_error,$inline);
+}
+#
+# TestTimeout is a special test that reports success *only* when the
+# status return is NONZERO and there are no processes left over.
+# This test currently checks only for the return status.
+sub TestTimeout {
+ my $MPIOUT = $_[0];
+ my $programname = $_[1];
+ my $found_error = 0;
+
+ my $inline = "";
+ while (<$MPIOUT>) {
+ #print STDOUT $_ if $verbose;
+ # Skip FORTRAN STOP
+ if (/FORTRAN STOP/) { next; }
+ $inline .= $_;
+ if (/[Tt]imeout/) { next; }
+ # Allow 'signaled with Interrupt' (see gforker mpiexec)
+ if (/signaled with Interrupt/) { next; }
+ # Allow 'job ending due to env var MPIEXEC_TIMEOUT' (mpd)
+ if (/job ending due to env var MPIEXEC_TIMEOUT/) { next; }
+ # Allow 'APPLICATION TIMED OUT' (hydra)
+ if (/\[mpiexec@.*\] APPLICATION TIMED OUT/) { last; }
+ # ANY output is an error (other than timeout)
+ if (! /^\s*$/) {
+ print STDERR "Unexpected output in $programname: $_";
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ }
+ }
+ $rc = close ( MPIOUT );
+ if ($rc == 0) {
+ $run_status = $?;
+ $signal_num = $run_status & 127;
+ if ($run_status > 255) { $run_status >>= 8; }
+ }
+ else {
+ # This test *requires* non-zero return codes
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ $inline .= "$mpiexec returned a zero status but the program returned a nonzero status\n";
+ }
+ #
+ # Here should go a check of the processes
+ # open( PFD, "ps -fu $LOGNAME | grep -v grep | grep $programname |" );
+ # while (<PFD>) {
+ #
+ # }
+ # close PFD;
+ return ($found_error,$inline);
+}
+#
+# TestErrFatal is a special test that reports success *only* when the
+# status return is NONZERO; it ignores error messages
+sub TestErrFatal {
+ my $MPIOUT = $_[0];
+ my $programname = $_[1];
+ my $found_error = 0;
+
+ my $inline = "";
+ while (<$MPIOUT>) {
+ #print STDOUT $_ if $verbose;
+ # Skip FORTRAN STOP
+ if (/FORTRAN STOP/) { next; }
+ $inline .= $_;
+ # ALL output is allowed.
+ }
+ $rc = close ( MPIOUT );
+ if ($rc == 0) {
+ $run_status = $?;
+ $signal_num = $run_status & 127;
+ if ($run_status > 255) { $run_status >>= 8; }
+ }
+ else {
+ # This test *requires* non-zero return codes
+ if (!$found_error) {
+ $found_error = 1;
+ $err_count ++;
+ }
+ $inline .= "$mpiexec returned a zero status but the program returned a nonzero status\n";
+ }
+ return ($found_error,$inline);
+}
+
+# ----------------------------------------------------------------------------
+# Output routines:
+# RunPreMsg( programname, np, workdir ) - Call before running a program
+# RunTestFailed, RunTestPassed - Call after test
+# RunPostMsg - Call at end of each test
+#
+sub RunPreMsg {
+ my ($programname,$np,$workdir) = @_;
+ if ($xmloutput) {
+ print XMLOUT "<MPITEST>$newline<NAME>$programname</NAME>$newline";
+ print XMLOUT "<NP>$np</NP>$newline";
+ print XMLOUT "<WORKDIR>$workdir</WORKDIR>$newline";
+ }
+}
+sub RunPostMsg {
+ my ($programname, $np, $workdir) = @_;
+ if ($xmloutput) {
+ print XMLOUT "</MPITEST>$newline";
+ }
+}
+sub RunTestPassed {
+ my ($programname, $np, $workdir, $xfail) = @_;
+ if ($xmloutput) {
+ print XMLOUT "<STATUS>pass</STATUS>$newline";
+ }
+ if ($tapoutput) {
+ my $xfailstr = '';
+ if ($xfail ne '') {
+ $xfailstr = " # TODO $xfail";
+ }
+ print TAPOUT "ok ${total_run} - $workdir/$programname ${np}${xfailstr}\n";
+ }
+}
+sub RunTestFailed {
+ my $programname = shift;
+ my $np = shift;
+ my $workdir = shift;
+ my $output = shift;
+ my $xfail = shift;
+
+ if ($xmloutput) {
+ my $xout = $output;
+ # basic escapes that wreck the XML output
+ $xout =~ s/</\*AMP\*lt;/g;
+ $xout =~ s/>/\*AMP\*gt;/g;
+ $xout =~ s/&/\*AMP\*amp;/g;
+ $xout =~ s/\*AMP\*/&/g;
+ # TODO: Also capture any non-printing characters (XML doesn't like them
+ # either).
+ print XMLOUT "<STATUS>fail</STATUS>$newline";
+ print XMLOUT "<TESTDIFF>$newline$xout</TESTDIFF>$newline";
+ }
+
+ if ($tapoutput) {
+ my $xfailstr = '';
+ if ($xfail ne '') {
+ $xfailstr = " # TODO $xfail";
+ }
+ print TAPOUT "not ok ${total_run} - $workdir/$programname ${np}${xfailstr}\n";
+ print TAPOUT " ---\n";
+ print TAPOUT " Directory: $workdir\n";
+ print TAPOUT " File: $programname\n";
+ print TAPOUT " Num-procs: $np\n";
+ print TAPOUT " Date: \"" . localtime . "\"\n";
+
+ # The following would be nice, but it leads to unfortunate formatting in
+ # the Jenkins web output for now. Using comment lines instead, since
+ # they are easier to read/find in a browser.
+## print TAPOUT " Output: |\n";
+## # using block literal format, requires that all chars are printable
+## # UTF-8 (or UTF-16, but we won't encounter that)
+## foreach my $line (split m/\r?\n/, $output) {
+## chomp $line;
+## # 4 spaces, 2 for TAP indent, 2 more for YAML block indent
+## print TAPOUT " $line\n";
+## }
+
+ print TAPOUT " ...\n";
+
+ # Alternative to the "Output:" YAML block literal above. Do not put any
+ # spaces before the '#', this causes some TAP parsers (including Perl's
+ # TAP::Parser) to treat the line as "unknown" instead of a proper
+ # comment.
+ print TAPOUT "## Test output (expected 'No Errors'):\n";
+ foreach my $line (split m/\r?\n/, $output) {
+ chomp $line;
+ print TAPOUT "## $line\n";
+ }
+ }
+}
+
+sub SkippedTest {
+ my $programname = shift;
+ my $np = shift;
+ my $workdir = shift;
+ my $reason = shift;
+
+ # simply omit from the XML output
+
+ if ($tapoutput) {
+ print TAPOUT "ok ${total_seen} - $workdir/$programname $np # SKIP $reason\n";
+ }
+}
+
+# ----------------------------------------------------------------------------
+# Alternate init routines
+sub InitQuickTimeout {
+ $ENV{"MPIEXEC_TIMEOUT"} = 10;
+}
--- /dev/null
+# The next item ensures that the support routines are built first
+!util:all
+attr
+#basic
+coll
+comm
+datatype
+#errhan
+group
+#info
+init
+#mpi_t
+pt2pt
+#
+#spawn
+#topo
+#perf
+#io
+#f77
+#cxx
+#
+#
+