This one is more complete, but a lot of tests are for MPI functions not supported in SMPI
Lots of tests are disabled, and some folders not (yet) included.
if(enable_smpi)
if(HAVE_RAWCTX)
- ADD_TEST(smpi-mpich-env-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/env ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/pt2pt ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-context-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/context ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-profile-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/profile ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-coll-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C})
- ADD_TEST(smpi-mpich-coll-selector-mpich-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C} -selector=mpich)
- ADD_TEST(smpi-mpich-coll-selector-ompi-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR}/smpi_script/ -fort=${SMPI_F2C} -selector=ompi)
- set_tests_properties(smpi-mpich-env-raw smpi-mpich-context-raw smpi-mpich-pt2pt-raw smpi-mpich-coll-raw smpi-mpich-coll-selector-ompi-raw smpi-mpich-coll-selector-mpich-raw smpi-mpich-profile-raw PROPERTIES PASS_REGULAR_EXPRESSION "-- No differences found; test successful")
+ # mpich3 test suites: each directory is driven by the shared runtests
+ # script; -execarg forwards --cfg options (context factory, collective
+ # selector) to the launcher for every test in the listed testlist file.
+ ADD_TEST(smpi-mpich3-attr-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/attr ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/attr -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-coll-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread)
+ ADD_TEST(smpi-mpich3-coll-ompi-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0)
+ ADD_TEST(smpi-mpich3-coll-mpich-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:mpich)
+ ADD_TEST(smpi-mpich3-comm-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/comm ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/comm -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-init-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/init ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/init -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-datatype-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/datatype ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-group-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/group ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/group -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ ADD_TEST(smpi-mpich3-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/pt2pt ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt -tests=testlist -execarg=--cfg=contexts/factory:raw)
+ # runtests prints "tests passed!" only when every test in the list succeeds.
+ set_tests_properties(smpi-mpich3-attr-raw smpi-mpich3-coll-thread smpi-mpich3-coll-ompi-thread smpi-mpich3-coll-mpich-thread smpi-mpich3-comm-raw smpi-mpich3-init-raw smpi-mpich3-datatype-raw smpi-mpich3-pt2pt-raw smpi-mpich3-group-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
endif()
teshsuite/simdag/partask/CMakeLists.txt
teshsuite/simdag/platforms/CMakeLists.txt
teshsuite/smpi/CMakeLists.txt
- teshsuite/smpi/mpich-test/CMakeLists.txt
- teshsuite/smpi/mpich-test/coll/CMakeLists.txt
- teshsuite/smpi/mpich-test/context/CMakeLists.txt
- teshsuite/smpi/mpich-test/env/CMakeLists.txt
- teshsuite/smpi/mpich-test/profile/CMakeLists.txt
- teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/coll/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/context/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/env/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/profile/CMakeLists.txt
+ # teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/attr/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/comm/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/coll/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/datatype/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/group/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/init/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt
teshsuite/xbt/CMakeLists.txt
)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/partask)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile)
-add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt)
+# Legacy mpich(1) test directories are kept here but disabled; the mpich3
+# suite below supersedes them.
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile)
+#add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt)
+# mpich3 test suite: one subdirectory per tested MPI chapter.
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/attr)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/comm)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/group)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/init)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/xbt)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/testsuite/surf)
--- /dev/null
+# Propagate the (unchanged) accumulated file lists up to the parent scope.
+set(tesh_files
+  ${tesh_files}
+
+  PARENT_SCOPE
+  )
+set(xml_files
+  ${xml_files}
+  PARENT_SCOPE
+  )
+set(examples_src
+  ${examples_src}
+  PARENT_SCOPE
+  )
+set(bin_files
+  ${bin_files}
+  PARENT_SCOPE
+  )
+
+# Reference .std outputs to copy near the binaries; none enabled yet.
+#set(stdo_std_smpi
+#  ${CMAKE_CURRENT_SOURCE_DIR}/topol/cartmap.std
+#  ${CMAKE_CURRENT_SOURCE_DIR}/topol/graphtest.std
+#  ${CMAKE_CURRENT_SOURCE_DIR}/topol/cartf.std
+#)
+
+# Out-of-source build: mirror the reference files into the binary tree so
+# the test driver can find them next to the built test programs.
+if("${CMAKE_BINARY_DIR}" STREQUAL "${CMAKE_HOME_DIRECTORY}")
+else()
+  foreach(srcfile ${stdo_std_smpi})
+    set(dstfile ${srcfile})
+    string(REPLACE "${CMAKE_HOME_DIRECTORY}" "${CMAKE_BINARY_DIR}" dstfile "${dstfile}")
+    #message("copy ${srcfile} to ${dstfile}")
+    configure_file("${srcfile}" "${dstfile}" COPYONLY)
+  endforeach()
+endif()
+
+# Ship the suite driver scripts and metadata with the distribution.
+set(txt_files
+  ${txt_files}
+  ${CMAKE_CURRENT_SOURCE_DIR}/README
+  ${CMAKE_CURRENT_SOURCE_DIR}/runtest
+  ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+  ${CMAKE_CURRENT_SOURCE_DIR}/checktest
+  PARENT_SCOPE)
--- /dev/null
+MPICH Test Suite
+
+This test suite is a *supplement* to other test suites, including the
+original MPICH testsuite, the Intel testsuite, and the IBM MPI test suite
+(or test suites derived from that test, including the MPI C++ tests).
+
+Building the Test Suite
+=======================
+In many cases, configure will find the MPI implementation
+automatically. In some cases, it will need some help. For example:
+
+For IBM MPI, where the compilation commands are not mpicc and mpif77 etc.:
+
+./configure CC=xlc MPICC=mpcc F77=xlf MPIF77=mpxlf CXX=xlC \
+ MPICXX="mpCC -cpp" F90=xlf90 MPIF90=mpxlf90 \
+ --disable-spawn \
+ --enable-strictmpi
+
+(or the _r versions of the compilers)
+
+If mpicc and friends are not in your default path (and you do not want to
+add them), you can specify the path with --with-mpi=<path>. For example,
+if they are in /usr/local/mympi/bin, use
+
+./configure --with-mpi=/usr/local/mympi
+
+(configure will append the bin to the path that you give).
+
+You may need to add MPI_SIZEOF_OFFSET=8 .
+
+The option "-cpp" is needed for at least some versions of mpCC to define the
+C++ bindings of the MPI routines.
+
+For implementations that do not implement all of MPI-2, there are --disable
+options, including --disable-spawn and --disable-cxx. To restrict tests to
+just what is defined in the MPI specification, use --enable-strictmpi .
+
+The script that runs the tests assumes that the MPI implementation
+supports mpiexec; you should consider this the first test of the implementation.
+
+Setting Options
+===============
+The following environment variables will modify the behavior of the tests
+
+MPITEST_DEBUG - if set, output information for debugging the test suite
+MPITEST_VERBOSE - if set to an integer value, output messages whose
+ level is at least that value (0 is a good choice here)
+MPITEST_RETURN_WITH_CODE - Set the return code from the test programs based on
+ success or failure, with a zero for success and one
+ for failure (value must be yes, YES, true, or TRUE to
+ turn this on)
+MPITEST_THREADLEVEL_DEFAULT - Set the default thread level. Values are
+ multiple, serialized, funneled, and single.
+
+Batch Systems
+=============
+For systems that run applications through a batch system, the option "-batch"
+to the runtests script will create a script file that can be edited and
+submitted to the batch system. The script checktests can be run to
+summarize the results.
+
+Specifically, (assuming the bash shell, and that the directory "btest", a
+subdirectory of the test suite directory, is used for running the tests):
+
+export MPITEST_BATCHDIR=`pwd`/btest
+runtests -batch -tests=testlist
+... edit btest/runtests.batch to make it a valid batch submission script
+... run that script and wait for the batch job to complete
+cd btest && ../checktests
+
+If a program other than mpiexec is used in the batch form to run programs, then
+specify that to runtests:
+
+ runtests -batch -mpiexec=aprun -tests=testlist
+
+(Here, aprun is the command used on Cray XE6 systems.)
+
+Note that some programs that are used to run MPI programs add extra output,
+which can confuse any tool that depends on clean output in STDOUT. Since
+such unfortunate behavior is common, the option -ignorebogus can be given
+to checktests:
+
+cd btest && ../checktests --ignorebogus
+
+Controlling the Tests that are Run
+==================================
+The tests are actually built and run by the script "runtests". This script
+can be given a file that contains a list of the tests to run. This file has
+two primary types of entries:
+
+ directories: Enter directory and look for the file "testlist".
+ Recursively run the contents of that file
+ program names: Build and run that program
+
+Lines may also be commented out with "#".
+
+The simplest program line contains the name of the program and the number of
+MPI processes to use. For example, the following will build the
+program sendrecv1 and run it with 4 processes:
+
+sendrecv1 4
+
+In addition, the program line can contain key=value pairs that provide
+special information about running the test. For example,
+
+sendflood 8 timeLimit=600
+
+says to build and run the program sendflood with 8 MPI processes and
+permit the test to run for 600 seconds (by default, at least for
+MPICH, the default timelimit is 180 seconds). Other key=value pairs
+can be used to select whether a program should be run at all,
+depending on the abilities of the MPI implementation (this is
+particularly important for Fortran programs, since preprocessor
+support for Fortran is a non-standard extension to the Fortran
+language, and there are some compilers that would not accept Fortran
+programs that used the preprocessor).
+
+The most important key=value pairs are:
+
+
+timeLimit=n : Use a timelimit of n seconds
+
+arg=string : Run the program with string as an argument to the program
+
+mpiexecarg=string : Run the program with string as an argument to mpiexec
+
+env=name=value : Run the program with environment variable "name" given the
+ value "value"
+
+mpiversion=x.y : Build and run the program only if the MPI version is at
+ least x.y. For example,
+
+ distgraph1 4 mpiversion=2.2
+
+ will build and run distgraph1 with 4 MPI processes only
+ if the MPI version is at least 2.2.
+
+strict=bool : If bool is false, only build and run the program if
+ --enable-strictmpi was not used in configuring the test suite.
+ That is, a line such as
+
+ neighb_coll 4 strict=false
+
+ Says that this test is not valid for a strict MPI implementation;
+ it contains extensions to the standard, or in the case of some
+ MPICH development, MPIX routines
+
+resultTest=proc : This is used to change the way in which the success or
+ failure of a test is evaluated. proc is one of several
+ Perl subroutines defined within the runtest program. These
+ are primarily used within the testsuite for tests programs
+ exit with expected status values or that timeouts are
+ in fact handled.
+
+
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+  # Build the mpich3 "attr" tests with the SMPI compiler wrappers (on
+  # Windows, force-include the SMPI main header instead).
+  if(WIN32)
+    set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+  else()
+    set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+    set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+  endif()
+
+  set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+  # Feature-test macros expected by the mpich test sources.
+  set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1")
+
+  include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+  include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+  # One executable per test source.  Each links the shared mtest harness
+  # and simgrid, and compiles with the mpich feature flags.  A single list
+  # plus foreach replaces the previous three blocks of 17 duplicated
+  # add_executable / target_link_libraries / set_target_properties stanzas.
+  set(attr_tests
+    attr2type
+    attrend2
+    attrend
+    attrerr
+    attrerrcomm
+    attrerrtype
+    attric
+    attrorder
+    attrordercomm
+    attrordertype
+    attrt
+    baseattr2
+    baseattrcomm
+    fkeyval
+    fkeyvalcomm
+    fkeyvaltype
+    keyval_double_free
+    )
+  foreach(test ${attr_tests})
+    add_executable(${test} ${test}.c ../util/mtest.c)
+    target_link_libraries(${test} simgrid)
+    set_target_properties(${test} PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+  endforeach()
+
+endif()
+
+# Propagate accumulated file lists to the parent scope; the test sources
+# are registered so they are shipped with the distribution.
+set(tesh_files
+  ${tesh_files}
+  PARENT_SCOPE
+  )
+set(xml_files
+  ${xml_files}
+  PARENT_SCOPE
+  )
+set(examples_src
+  ${examples_src}
+  ${CMAKE_CURRENT_SOURCE_DIR}/attr2type.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrend2.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrend.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrerr.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrerrcomm.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrerrtype.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attric.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrorder.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrordercomm.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrordertype.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/attrt.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/baseattr2.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/baseattrcomm.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/fkeyval.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/fkeyvalcomm.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/fkeyvaltype.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/keyval_double_free.c
+  PARENT_SCOPE
+  )
+set(bin_files
+  ${bin_files}
+  PARENT_SCOPE
+  )
+set(txt_files
+  ${txt_files}
+  ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+  ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+  PARENT_SCOPE
+  )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2007 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Tests keyvals attached to datatypes: the copy callback must run exactly
+ * once (triggered by MPI_Type_dup) and the delete callback exactly twice
+ * (once for each MPI_Type_free, of type and of duptype). */
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static int foo_keyval = MPI_KEYVAL_INVALID;
+
+int foo_initialize(void);
+void foo_finalize(void);
+
+int foo_copy_attr_function(MPI_Datatype type, int type_keyval,
+                           void *extra_state, void *attribute_val_in,
+                           void *attribute_val_out, int *flag);
+int foo_delete_attr_function(MPI_Datatype type, int type_keyval,
+                             void *attribute_val, void *extra_state);
+/* my_func names the MPI call in progress, for verbose delete tracing. */
+static const char *my_func = 0;
+static int verbose = 0;
+/* Invocation counters checked by rank 0 after the frees. */
+static int delete_called = 0;
+static int copy_called = 0;
+
+int main(int argc, char *argv[])
+{
+    int mpi_errno;
+    MPI_Datatype type, duptype;
+    int rank;
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+    foo_initialize();
+
+    mpi_errno = MPI_Type_contiguous(2, MPI_INT, &type);
+
+    mpi_errno = MPI_Type_set_attr(type, foo_keyval, NULL);
+
+    /* Duplicating the type must invoke the copy callback once. */
+    mpi_errno = MPI_Type_dup(type, &duptype);
+
+    my_func = "Free of type";
+    mpi_errno = MPI_Type_free(&type);
+
+    my_func = "free of duptype";
+    mpi_errno = MPI_Type_free(&duptype);
+
+    foo_finalize();
+
+    if (rank == 0) {
+        int errs = 0;
+        if (copy_called != 1) {
+            printf( "Copy called %d times; expected once\n", copy_called );
+            errs++;
+        }
+        if (delete_called != 2) {
+            printf( "Delete called %d times; expected twice\n", delete_called );
+            errs++;
+        }
+        /* Only the mpi_errno of the last MPI call above is inspected here;
+         * earlier return codes are overwritten (SMPI adaptation). */
+        if (errs == 0) {
+            printf( " No Errors\n" );
+        }else if(mpi_errno!=MPI_SUCCESS){
+            printf( " Output fail - Found %d errors\n", errs );
+        }else {
+            printf( " Found %d errors\n", errs );
+        }
+        fflush(stdout);
+    }
+
+    MPI_Finalize();
+    return 0;
+}
+
+/* Copy callback: counts invocations, propagates a NULL attribute value
+ * and sets *flag so the attribute is copied to the duplicated type. */
+int foo_copy_attr_function(MPI_Datatype type,
+                           int type_keyval,
+                           void *extra_state,
+                           void *attribute_val_in,
+                           void *attribute_val_out,
+                           int *flag)
+{
+    if (verbose) printf("copy fn. called\n");
+    copy_called ++;
+    * (char **) attribute_val_out = NULL;
+    *flag = 1;
+
+    return MPI_SUCCESS;
+}
+
+/* Delete callback: counts invocations; my_func identifies which
+ * MPI_Type_free triggered it when verbose tracing is on. */
+int foo_delete_attr_function(MPI_Datatype type,
+                             int type_keyval,
+                             void *attribute_val,
+                             void *extra_state)
+{
+    if (verbose) printf("delete fn. called in %s\n", my_func );
+    delete_called ++;
+
+    return MPI_SUCCESS;
+}
+
+/* Create the keyval used by the test; stores it in foo_keyval. */
+int foo_initialize(void)
+{
+    int mpi_errno;
+
+    /* create keyval for use later */
+    mpi_errno = MPI_Type_create_keyval(foo_copy_attr_function,
+                                       foo_delete_attr_function,
+                                       &foo_keyval,
+                                       NULL);
+    if (mpi_errno==MPI_SUCCESS && verbose) printf("created keyval\n");
+
+    return 0;
+}
+
+/* Free the keyval created by foo_initialize. */
+void foo_finalize(void)
+{
+    int mpi_errno;
+
+    /* remove keyval */
+    mpi_errno = MPI_Type_free_keyval(&foo_keyval);
+
+    if (mpi_errno==MPI_SUCCESS && verbose) printf("freed keyval\n");
+
+    return;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2008 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+  The MPI-2 specification makes it clear that attribute delete callbacks are
+  called on MPI_COMM_WORLD and MPI_COMM_SELF at the very beginning of
+  MPI_Finalize. This is useful for tools that want to perform the MPI
+  equivalent of an "at_exit" action.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int exit_key = MPI_KEYVAL_INVALID;
+int wasCalled = 0;   /* incremented by delete_fn; inspected after MPI_Finalize */
+int foundError = 0;  /* set by delete_fn if MPI was already finalized */
+/* #define DEBUG */
+int delete_fn ( MPI_Comm, int, void *, void * );
+#ifdef DEBUG
+#define FFLUSH fflush(stdout);
+#else
+#define FFLUSH
+#endif
+
+int main( int argc, char **argv )
+{
+    int errs = 0, wrank;
+
+    MTest_Init( &argc, &argv );
+
+    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+    /* create the keyval for the exit handler */
+    MPI_Keyval_create( MPI_NULL_COPY_FN, delete_fn, &exit_key, (void *)0 );
+
+    /* Attach to comm_self */
+    MPI_Attr_put( MPI_COMM_SELF, exit_key, (void*)0 );
+    /* We can free the key now */
+    MPI_Keyval_free( &exit_key );
+
+    /* Now, exit MPI */
+    /* MTest_Finalize( errs ); */
+    /* Note: results are checked AFTER MPI_Finalize, since the delete
+     * callback is required to run during MPI_Finalize itself. */
+    MPI_Finalize();
+
+    /* Check that the exit handler was called, and without error */
+    if (wrank == 0) {
+        /* In case more than one process exits MPI_Finalize */
+        if (wasCalled != 1) {
+            errs++;
+            printf( "Attribute delete function on MPI_COMM_SELF was not called\n" );
+        }
+        if (foundError != 0) {
+            errs++;
+            printf( "Found %d errors while executing delete function in MPI_COMM_SELF\n", foundError );
+        }
+        if (errs == 0) {
+            printf( " No Errors\n" );
+        }
+        else {
+            printf( " Found %d errors\n", errs );
+        }
+        fflush(stdout );
+    }
+
+    return 0;
+}
+
+/* Delete callback: records that it ran and verifies MPI is not yet
+ * finalized at the time it is invoked. */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+               void *extra_state)
+{
+    int flag;
+    wasCalled++;
+    MPI_Finalized( &flag );
+    if (flag) {
+        foundError++;
+    }
+    return MPI_SUCCESS;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2008 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+ The MPI-2.2 specification makes it clear that attributes are called on
+ MPI_COMM_WORLD and MPI_COMM_SELF at the very beginning of MPI_Finalize in
+ LIFO order with respect to the order in which they are set. This is
+ useful for tools that want to perform the MPI equivalent of an "at_exit"
+ action.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* 20 ought to be enough attributes to ensure that hash-table based MPI
+ * implementations do not accidentally pass the test except by being extremely
+ * "lucky". There are (20!) possible permutations which means that there is
+ * about a 1 in 2.43e18 chance of getting LIFO ordering out of a hash table,
+ * assuming a decent hash function is used. */
+#define NUM_TEST_ATTRS (20)
+
+static __attribute__((unused)) int exit_keys[NUM_TEST_ATTRS]; /* init to MPI_KEYVAL_INVALID */
+static __attribute__((unused)) int was_called[NUM_TEST_ATTRS];
+int foundError = 0;
+int delete_fn (MPI_Comm, int, void *, void *);
+
+int main(int argc, char **argv)
+{
+    int wrank;
+
+    MTest_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+    /* BUGFIX: this line used to read "int errs = 0, wrank;", redeclaring
+     * 'wrank' in the same block scope as the declaration above — a compile
+     * error that also hid the rank queried before the #if.  Only 'errs'
+     * is declared here. */
+    int errs = 0;
+    int i;
+    /* Register NUM_TEST_ATTRS delete callbacks on MPI_COMM_SELF; MPI-2.2
+     * requires MPI_Finalize to invoke them in LIFO order. */
+    for (i = 0; i < NUM_TEST_ATTRS; ++i) {
+        exit_keys[i] = MPI_KEYVAL_INVALID;
+        was_called[i] = 0;
+
+        /* create the keyval for the exit handler */
+        MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, delete_fn, &exit_keys[i], NULL);
+        /* attach to comm_self */
+        MPI_Comm_set_attr(MPI_COMM_SELF, exit_keys[i], (void*)(long)i);
+    }
+
+    /* we can free the keys now */
+    for (i = 0; i < NUM_TEST_ATTRS; ++i) {
+        MPI_Comm_free_keyval(&exit_keys[i]);
+    }
+
+    /* now, exit MPI; the delete callbacks fire inside MPI_Finalize */
+    MPI_Finalize();
+
+    /* check that the exit handlers were called in LIFO order, and without error */
+    if (wrank == 0) {
+        /* In case more than one process exits MPI_Finalize */
+        for (i = 0; i < NUM_TEST_ATTRS; ++i) {
+            if (was_called[i] < 1) {
+                errs++;
+                printf("Attribute delete function on MPI_COMM_SELF was not called for idx=%d\n", i);
+            }
+            else if (was_called[i] > 1) {
+                errs++;
+                printf("Attribute delete function on MPI_COMM_SELF was called multiple times for idx=%d\n", i);
+            }
+        }
+        if (foundError != 0) {
+            errs++;
+            printf("Found %d errors while executing delete function in MPI_COMM_SELF\n", foundError);
+        }
+        if (errs == 0) {
+            printf(" No Errors\n");
+        }
+        else {
+            printf(" Found %d errors\n", errs);
+        }
+        fflush(stdout);
+    }
+#else /* this is a pre-MPI-2.2 implementation, ordering is not defined */
+    MPI_Finalize();
+    if (wrank == 0)
+        printf(" No Errors\n");
+#endif
+
+    return 0;
+}
+
+/* Delete callback for attrend2: verifies LIFO invocation order.  Because
+ * attributes were added in 0..(NUM_TEST_ATTRS-1) order, when index my_idx
+ * fires, all higher indices must already have fired exactly once and all
+ * lower indices not at all. */
+int delete_fn(MPI_Comm comm, int keyval, void *attribute_val, void *extra_state)
+{
+    int flag;
+    int i;
+    int my_idx = (int)(long)attribute_val;
+
+    /* BUGFIX: the upper-bound check was "my_idx > NUM_TEST_ATTRS", which
+     * let my_idx == NUM_TEST_ATTRS through and then indexed one past the
+     * end of was_called[].  The valid range is [0, NUM_TEST_ATTRS). */
+    if (my_idx < 0 || my_idx >= NUM_TEST_ATTRS) {
+        printf("internal error, my_idx=%d is invalid!\n", my_idx);
+        fflush(stdout);
+    }
+
+    was_called[my_idx]++;
+
+    MPI_Finalized(&flag);
+    if (flag) {
+        printf("my_idx=%d, MPI_Finalized returned %d, should have been 0", my_idx, flag);
+        foundError++;
+    }
+
+    /* since attributes were added in 0..(NUM_TEST_ATTRS-1) order, they will be
+     * called in (NUM_TEST_ATTRS-1)..0 order */
+    for (i = 0; i < my_idx; ++i) {
+        if (was_called[i] != 0) {
+            printf("my_idx=%d, was_called[%d]=%d but should be 0\n", my_idx, i, was_called[i]);
+            foundError++;
+        }
+    }
+    for (i = my_idx; i < NUM_TEST_ATTRS; ++i) {
+        if (was_called[i] != 1) {
+            printf("my_idx=%d, was_called[%d]=%d but should be 1\n", my_idx, i, was_called[i]);
+            foundError++;
+        }
+    }
+
+    return MPI_SUCCESS;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise attribute routines.
+ This version checks for correct behavior of the copy and delete functions
+ on an attribute, particularly the correct behavior when the routine returns
+ failure.
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int test_communicators ( void );
+void abort_msg ( const char *, int );
+int copybomb_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int deletebomb_fn ( MPI_Comm, int, void *, void * );
+
+/* Driver: all checking happens in test_communicators(), which returns the
+ * error count reported through MTest_Finalize. */
+int main( int argc, char **argv )
+{
+    int errs;
+    MTest_Init( &argc, &argv );
+    errs = test_communicators();
+    MTest_Finalize( errs );
+    MPI_Finalize();
+    return 0;
+}
+
+/*
+ * MPI 1.2 Clarification: Clarification of Error Behavior of
+ * Attribute Callback Functions
+ * Any return value other than MPI_SUCCESS is erroneous. The specific value
+ * returned to the user is undefined (other than it can't be MPI_SUCCESS).
+ * Proposals to specify particular values (e.g., user's value) failed.
+ */
+/* Return an error as the value */
+/* Copy callback that deliberately fails: sets *flag but returns an error,
+ * so MPI_Comm_dup on a communicator carrying this keyval must fail. */
+int copybomb_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+                 void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+    /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+       part of attribute_val_out may leave some dirty bits
+    */
+    *flag = 1;
+    return MPI_ERR_OTHER;
+}
+
+/* Set delete flag to 1 to allow the attribute to be deleted */
+static int delete_flag = 0;
+
+/* Delete callback that fails until delete_flag is set, so attribute
+ * replacement/removal must report an error while the flag is 0. */
+int deletebomb_fn( MPI_Comm comm, int keyval, void *attribute_val,
+                   void *extra_state)
+{
+    if (delete_flag) return MPI_SUCCESS;
+    return MPI_ERR_OTHER;
+}
+
+/* Print a diagnostic and abort the whole job with the given error code. */
+void abort_msg( const char *str, int code )
+{
+    fprintf( stderr, "%s, err = %d\n", str, code );
+    MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+/* Exercises error returns from attribute copy/delete callbacks on a dup of
+ * MPI_COMM_WORLD; returns the number of detected errors. */
+int test_communicators( void )
+{
+    MPI_Comm dup_comm_world, d2;
+    int world_rank, world_size, key_1;
+    int err, errs = 0;
+    MPI_Aint value;
+
+    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+    if (world_rank == 0) {
+	printf( "*** Attribute copy/delete return codes ***\n" );
+    }
+#endif
+
+    MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
+    MPI_Barrier( dup_comm_world );
+
+    /* Return error codes instead of aborting, so the failed attribute
+     * operations below can be checked. */
+    MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN );
+
+    value = - 11;
+    if ((err=MPI_Keyval_create( copybomb_fn, deletebomb_fn, &key_1, &value )))
+	abort_msg( "Keyval_create", err );
+
+    err = MPI_Attr_put( dup_comm_world, key_1, (void *) (MPI_Aint) world_rank );
+    if (err) {
+	errs++;
+	printf( "Error with first put\n" );
+    }
+
+    /* Replacing the attribute runs the (failing) delete callback first. */
+    err = MPI_Attr_put( dup_comm_world, key_1,
+			(void *) (MPI_Aint) (2*world_rank) );
+    if (err == MPI_SUCCESS) {
+	errs++;
+	printf( "delete function return code was MPI_SUCCESS in put\n" );
+    }
+
+    /* Because the attribute delete function should fail, the attribute
+       should *not be removed* */
+    err = MPI_Attr_delete( dup_comm_world, key_1 );
+    if (err == MPI_SUCCESS) {
+	errs++;
+	printf( "delete function return code was MPI_SUCCESS in delete\n" );
+    }
+
+    /* The failing copy callback must make the dup fail as well. */
+    err = MPI_Comm_dup( dup_comm_world, &d2 );
+    if (err == MPI_SUCCESS) {
+	errs++;
+	printf( "copy function return code was MPI_SUCCESS in dup\n" );
+    }
+#ifndef USE_STRICT_MPI
+    /* Another interpretation is to leave d2 unchanged on error */
+    if (err && d2 != MPI_COMM_NULL) {
+	errs++;
+	printf( "dup did not return MPI_COMM_NULL on error\n" );
+    }
+#endif
+
+    /* Allow the delete callback to succeed so cleanup can proceed. */
+    delete_flag = 1;
+    MPI_Comm_free( &dup_comm_world );
+    MPI_Keyval_free( &key_1 );
+
+    return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise attribute routines.
+ This version checks for correct behavior of the copy and delete functions
+ on an attribute, particularly the correct behavior when the routine returns
+ failure.
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int test_communicators ( void );
+void abort_msg ( const char *, int );
+int copybomb_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int deletebomb_fn ( MPI_Comm, int, void *, void * );
+
+int main( int argc, char **argv )
+{
+ int errs;
+ MTest_Init( &argc, &argv );
+ /* Run the attribute copy/delete error-return checks; the error
+ count is reported through the MTest harness. */
+ errs = test_communicators();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/*
+ * MPI 1.2 Clarification: Clarification of Error Behavior of
+ * Attribute Callback Functions
+ * Any return value other than MPI_SUCCESS is erroneous. The specific value
+ * returned to the user is undefined (other than it can't be MPI_SUCCESS).
+ * Proposals to specify particular values (e.g., user's value) failed.
+ */
+/* Return an error as the value */
+/* Attribute copy callback that deliberately fails: sets *flag but
+   returns MPI_ERR_OTHER so MPI_Comm_dup of a communicator carrying
+   this attribute must report an error. */
+int copybomb_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *flag = 1;
+ return MPI_ERR_OTHER;
+}
+
+/* Set delete flag to 1 to allow the attribute to be deleted */
+static int delete_flag = 0;
+
+/* Attribute delete callback that fails until delete_flag is set,
+   letting the test force deletion failures and then clean up. */
+int deletebomb_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ if (delete_flag) return MPI_SUCCESS;
+ return MPI_ERR_OTHER;
+}
+
+/* Report an unrecoverable setup failure on stderr and abort the job. */
+void abort_msg( const char *str, int code )
+{
+ fprintf( stderr, "%s, err = %d\n", str, code );
+ MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+/* Same checks as the MPI-1 version, but using the MPI-2 attribute
+   interface (MPI_Comm_create_keyval / MPI_Comm_set_attr / ...), and
+   additionally verifying that the error returned by MPI_Comm_dup is
+   of class MPI_ERR_OTHER.  Returns the number of failed checks. */
+int test_communicators( void )
+{
+ MPI_Comm dup_comm_world, d2;
+ int world_rank, world_size, key_1;
+ int err, errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Attribute copy/delete return codes ***\n" );
+ }
+#endif
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
+ MPI_Barrier( dup_comm_world );
+
+ /* Errors must be returned, not fatal, to observe the callbacks. */
+ MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN );
+
+ value = - 11;
+ if ((err=MPI_Comm_create_keyval( copybomb_fn, deletebomb_fn, &key_1, &value )))
+ abort_msg( "Keyval_create", err );
+
+ err = MPI_Comm_set_attr( dup_comm_world, key_1, (void *) (MPI_Aint) world_rank );
+ if (err) {
+ errs++;
+ printf( "Error with first put\n" );
+ }
+
+ /* Replacing the value runs the (failing) delete callback first. */
+ err = MPI_Comm_set_attr( dup_comm_world, key_1, (void *) (MPI_Aint) (2*world_rank) );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in put\n" );
+ }
+
+ /* Because the attribute delete function should fail, the attribute
+ should *not be removed* */
+ err = MPI_Comm_delete_attr( dup_comm_world, key_1 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in delete\n" );
+ }
+
+ /* copybomb_fn fails, so dup of the attribute-carrying comm fails. */
+ err = MPI_Comm_dup( dup_comm_world, &d2 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "copy function return code was MPI_SUCCESS in dup\n" );
+ }
+ if (err != MPI_ERR_OTHER) {
+ int lerrclass;
+ MPI_Error_class( err, &lerrclass );
+ if (lerrclass != MPI_ERR_OTHER) {
+ errs++;
+ printf( "dup did not return an error code of class ERR_OTHER; " );
+ printf( "err = %d, class = %d\n", err, lerrclass );
+ }
+ }
+#ifndef USE_STRICT_MPI
+ /* Another interpretation is to leave d2 unchanged on error */
+ if (err && d2 != MPI_COMM_NULL) {
+ errs++;
+ printf( "dup did not return MPI_COMM_NULL on error\n" );
+ }
+#endif
+
+ /* Allow deletion to succeed so cleanup does not fail. */
+ delete_flag = 1;
+ MPI_Comm_free( &dup_comm_world );
+
+ MPI_Comm_free_keyval( &key_1 );
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise attribute routines.
+ This version checks for correct behavior of the copy and delete functions
+ on an attribute, particularly the correct behavior when the routine returns
+ failure.
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+int test_attrs ( void );
+void abort_msg ( const char *, int );
+int copybomb_fn ( MPI_Datatype, int, void *, void *, void *, int * );
+int deletebomb_fn ( MPI_Datatype, int, void *, void * );
+
+int main( int argc, char **argv )
+{
+ int errs;
+ MTest_Init( &argc, &argv );
+ /* Run the datatype-attribute copy/delete error-return checks. */
+ errs = test_attrs();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/*
+ * MPI 1.2 Clarification: Clarification of Error Behavior of
+ * Attribute Callback Functions
+ * Any return value other than MPI_SUCCESS is erroneous. The specific value
+ * returned to the user is undefined (other than it can't be MPI_SUCCESS).
+ * Proposals to specify particular values (e.g., user's value) failed.
+ */
+/* Return an error as the value */
+/* Datatype-attribute copy callback that deliberately fails: sets
+   *flag but returns MPI_ERR_OTHER so MPI_Type_dup of a type carrying
+   this attribute must report an error. */
+int copybomb_fn( MPI_Datatype oldtype, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *flag = 1;
+ return MPI_ERR_OTHER;
+}
+
+/* Set delete flag to 1 to allow the attribute to be deleted */
+static int delete_flag = 0;
+/* Counts delete-callback invocations so the test can verify that
+   MPI_Type_free runs the delete routine. */
+static int deleteCalled = 0;
+
+/* Datatype-attribute delete callback that fails until delete_flag is
+   set; always records that it was invoked. */
+int deletebomb_fn( MPI_Datatype type, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ deleteCalled ++;
+ if (delete_flag) return MPI_SUCCESS;
+ return MPI_ERR_OTHER;
+}
+
+/* Report an unrecoverable setup failure on stderr and abort the job. */
+void abort_msg( const char *str, int code )
+{
+ fprintf( stderr, "%s, err = %d\n", str, code );
+ MPI_Abort( MPI_COMM_WORLD, code );
+}
+
+/* Exercise the error behavior of attribute copy/delete callbacks on a
+   duplicated datatype (MPI_Type_* attribute interface), and check that
+   freeing the type invokes the delete callback.  Returns the number of
+   failed checks. */
+int test_attrs( void )
+{
+ MPI_Datatype dup_type, d2;
+ int world_rank, world_size, key_1;
+ int err, errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Attribute copy/delete return codes ***\n" );
+ }
+#endif
+
+
+ MPI_Type_dup( MPI_DOUBLE, &dup_type );
+
+ /* Errors must be returned, not fatal, to observe the callbacks. */
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ value = - 11;
+ if ((err=MPI_Type_create_keyval( copybomb_fn, deletebomb_fn, &key_1, &value )))
+ abort_msg( "Keyval_create", err );
+
+ err = MPI_Type_set_attr( dup_type, key_1, (void *) (MPI_Aint) world_rank );
+ if (err) {
+ errs++;
+ printf( "Error with first put\n" );
+ }
+
+ /* Replacing the value runs the (failing) delete callback first. */
+ err = MPI_Type_set_attr( dup_type, key_1, (void *) (MPI_Aint) (2*world_rank) );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in put\n" );
+ }
+
+ /* Because the attribute delete function should fail, the attribute
+ should *not be removed* */
+ err = MPI_Type_delete_attr( dup_type, key_1 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "delete function return code was MPI_SUCCESS in delete\n" );
+ }
+
+ /* copybomb_fn fails, so dup of the attribute-carrying type fails. */
+ err = MPI_Type_dup( dup_type, &d2 );
+ if (err == MPI_SUCCESS) {
+ errs++;
+ printf( "copy function return code was MPI_SUCCESS in dup\n" );
+ }
+#ifndef USE_STRICT_MPI
+ /* Another interpretation is to leave d2 unchanged on error */
+ if (err && d2 != MPI_DATATYPE_NULL) {
+ errs++;
+ printf( "dup did not return MPI_DATATYPE_NULL on error\n" );
+ }
+#endif
+
+ /* Allow deletion to succeed, then confirm the delete callback is
+ actually invoked when the type is freed. */
+ delete_flag = 1;
+ deleteCalled = 0;
+ if (d2 != MPI_DATATYPE_NULL)
+ MPI_Type_free(&d2);
+ MPI_Type_free( &dup_type );
+ if (deleteCalled == 0) {
+ errs++;
+ printf( "Free of a datatype did not invoke the attribute delete routine\n" );
+ }
+ MPI_Type_free_keyval( &key_1 );
+
+ return errs;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise communicator routines for intercommunicators
+
+ This C version derived from attrt, which in turn was
+ derived from a Fortran test program from ...
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* #define DEBUG */
+int test_communicators ( void );
+int copy_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int delete_fn ( MPI_Comm, int, void *, void * );
+#ifdef DEBUG
+#define FFLUSH fflush(stdout);
+#else
+#define FFLUSH
+#endif
+
+int main( int argc, char **argv )
+{
+ int errs = 0;
+ MTest_Init( &argc, &argv );
+
+ /* Run the intercommunicator attribute checks. */
+ errs = test_communicators();
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/* Attribute copy callback: propagates the cached value (stored by
+   value, not by pointer) to the duplicated communicator and marks the
+   attribute as copied. */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *(MPI_Aint *)attribute_val_out = (MPI_Aint)attribute_val_in;
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Attribute delete callback: verifies that the cached value equals
+   this process's world rank and aborts the job on mismatch.
+   The attribute is cached BY VALUE (see the MPI_Attr_put of
+   (void *)(MPI_Aint)world_rank in test_communicators), so
+   attribute_val holds the rank itself, not a pointer to it;
+   dereferencing it would be undefined behavior. */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ int world_rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ if ((MPI_Aint)attribute_val != (MPI_Aint)world_rank) {
+ /* Print the value directly; the old *(int*)attribute_val would
+ dereference the rank-valued "pointer" and crash. */
+ printf( "incorrect attribute value %d\n", (int)(MPI_Aint)attribute_val );
+ MPI_Abort(MPI_COMM_WORLD, 1005 );
+ }
+ return MPI_SUCCESS;
+}
+
+/* For each intercommunicator produced by MTestGetIntercomm, check that
+   MPI_Comm_dup copies a value-cached attribute (key_1, via copy_fn)
+   and does NOT copy an attribute whose copy function is
+   MPI_NULL_COPY_FN (key_3).  Returns the number of failed checks. */
+int test_communicators( void )
+{
+ MPI_Comm dup_comm, comm;
+ void *vvalue;
+ int flag, world_rank, world_size, key_1, key_3;
+ int errs = 0;
+ MPI_Aint value;
+ int isLeft;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Communicators ***\n" ); fflush(stdout);
+ }
+#endif
+
+ while (MTestGetIntercomm( &comm, &isLeft, 2 )) {
+ MTestPrintfMsg(1, "start while loop, isLeft=%s\n", (isLeft ? "TRUE" : "FALSE"));
+
+ if (comm == MPI_COMM_NULL) {
+ MTestPrintfMsg(1, "got COMM_NULL, skipping\n");
+ continue;
+ }
+
+ /*
+ Check Comm_dup by adding attributes to comm & duplicating
+ */
+
+ value = 9;
+ MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value );
+ MTestPrintfMsg(1, "Keyval_create key=%#x value=%d\n", key_1, value);
+ value = 7;
+ /* key_3 uses the null copy function, so it must NOT survive a dup. */
+ MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key_3, &value );
+ MTestPrintfMsg(1, "Keyval_create key=%#x value=%d\n", key_3, value);
+
+ /* This may generate a compilation warning; it is, however, an
+ easy way to cache a value instead of a pointer */
+ /* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
+ MPI_Attr_put(comm, key_1, (void *) (MPI_Aint) world_rank );
+ MPI_Attr_put(comm, key_3, (void *)0 );
+
+ MTestPrintfMsg(1, "Comm_dup\n" );
+ MPI_Comm_dup(comm, &dup_comm );
+
+ /* Note that if sizeof(int) < sizeof(void *), we can't use
+ (void **)&value to get the value we passed into Attr_put. To avoid
+ problems (e.g., alignment errors), we recover the value into
+ a (void *) and cast to int. Note that this may generate warning
+ messages from the compiler. */
+ MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+
+ if (! flag) {
+ errs++;
+ printf( "dup_comm key_1 not found on %d\n", world_rank );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3004 );
+ }
+
+ if (value != world_rank) {
+ errs++;
+ printf( "dup_comm key_1 value incorrect: %ld\n", (long)value );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3005 );
+ }
+
+ /* key_3 used MPI_NULL_COPY_FN, so it must be absent on the dup. */
+ MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+ if (flag) {
+ errs++;
+ printf( "dup_comm key_3 found!\n" );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3008 );
+ }
+ MTestPrintfMsg(1, "Keyval_free key=%#x\n", key_1);
+ MPI_Keyval_free(&key_1 );
+ MTestPrintfMsg(1, "Keyval_free key=%#x\n", key_3);
+ MPI_Keyval_free(&key_3 );
+ /*
+ Free all communicators created
+ */
+ MTestPrintfMsg(1, "Comm_free comm\n");
+ MPI_Comm_free( &comm );
+ MTestPrintfMsg(1, "Comm_free dup_comm\n");
+ MPI_Comm_free( &dup_comm );
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] );
+int checkNoAttrs( MPI_Comm comm, int n, int key[] );
+
+/* Insert three attributes on MPI_COMM_WORLD in every relative order,
+   and delete them in every order, verifying presence after each set of
+   puts and absence after each set of deletes.  This exercises the
+   implementation's attribute list management (MPI-1 interface). */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int key[3], attrval[3];
+ int i;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ {
+ comm = MPI_COMM_WORLD;
+ /* Create key values */
+ for (i=0; i<3; i++) {
+ MPI_Keyval_create( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ attrval[i] = 1024 * i;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Attr_put( comm, key[2], &attrval[2] );
+ MPI_Attr_put( comm, key[1], &attrval[1] );
+ MPI_Attr_put( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Attr_delete( comm, key[0] );
+ MPI_Attr_delete( comm, key[1] );
+ MPI_Attr_delete( comm, key[2] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Attr_put( comm, key[1], &attrval[1] );
+ MPI_Attr_put( comm, key[2], &attrval[2] );
+ MPI_Attr_put( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Attr_delete( comm, key[2] );
+ MPI_Attr_delete( comm, key[1] );
+ MPI_Attr_delete( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Attr_put( comm, key[0], &attrval[0] );
+ MPI_Attr_put( comm, key[1], &attrval[1] );
+ MPI_Attr_put( comm, key[2], &attrval[2] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Attr_delete( comm, key[1] );
+ MPI_Attr_delete( comm, key[2] );
+ MPI_Attr_delete( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ for (i=0; i<3; i++) {
+ MPI_Keyval_free( &key[i] );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
+
+/* Verify that all n attributes are present on comm and that each
+   retrieved pointer is exactly &attrval[i] (attributes were cached by
+   pointer).  Returns the number of mismatches. */
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Attr_get( comm, key[i], &val_p, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ errs++;
+ /* fixed typo: "Atribute" -> "Attribute" */
+ fprintf( stderr, "Attribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return errs;
+}
+
+/* Verify that none of the n attributes are present on comm (they have
+   all been deleted).  Returns the number of attributes still found. */
+int checkNoAttrs( MPI_Comm comm, int n, int key[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Attr_get( comm, key[i], &val_p, &flag );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] );
+int checkNoAttrs( MPI_Comm comm, int n, int key[] );
+
+/* Same attribute-ordering test as the MPI-1 variant, but using the
+   MPI-2 interface (MPI_Comm_create_keyval / MPI_Comm_set_attr /
+   MPI_Comm_delete_attr / MPI_Comm_free_keyval). */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int key[3], attrval[3];
+ int i;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ {
+ comm = MPI_COMM_WORLD;
+ /* Create key values */
+ for (i=0; i<3; i++) {
+ MPI_Comm_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ attrval[i] = 1024 * i;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] );
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] );
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Comm_delete_attr( comm, key[0] );
+ MPI_Comm_delete_attr( comm, key[1] );
+ MPI_Comm_delete_attr( comm, key[2] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] );
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] );
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Comm_delete_attr( comm, key[2] );
+ MPI_Comm_delete_attr( comm, key[1] );
+ MPI_Comm_delete_attr( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ MPI_Comm_set_attr( comm, key[0], &attrval[0] );
+ MPI_Comm_set_attr( comm, key[1], &attrval[1] );
+ MPI_Comm_set_attr( comm, key[2], &attrval[2] );
+
+ errs += checkAttrs( comm, 3, key, attrval );
+
+ MPI_Comm_delete_attr( comm, key[1] );
+ MPI_Comm_delete_attr( comm, key[2] );
+ MPI_Comm_delete_attr( comm, key[0] );
+
+ errs += checkNoAttrs( comm, 3, key );
+
+ for (i=0; i<3; i++) {
+ MPI_Comm_free_keyval( &key[i] );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
+
+/* Verify that all n attributes are present on comm and that each
+   retrieved pointer is exactly &attrval[i].  Returns the number of
+   mismatches. */
+int checkAttrs( MPI_Comm comm, int n, int key[], int attrval[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Comm_get_attr( comm, key[i], &val_p, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ errs++;
+ /* fixed typo: "Atribute" -> "Attribute" */
+ fprintf( stderr, "Attribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return errs;
+}
+
+/* Verify that none of the n attributes remain on comm after deletion.
+   Returns the number of attributes still found. */
+int checkNoAttrs( MPI_Comm comm, int n, int key[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Comm_get_attr( comm, key[i], &val_p, &flag );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test creating and inserting attributes in \
+different orders to ensure that the list management code handles all cases.";
+*/
+
+int checkAttrs( MPI_Datatype type, int n, int key[], int attrval[] );
+int checkNoAttrs( MPI_Datatype type, int n, int key[] );
+
+/* Same attribute-ordering test, applied to a datatype (MPI_INT) using
+   the MPI_Type_* attribute interface. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int key[3], attrval[3];
+ int i;
+ MPI_Datatype type;
+
+ MTest_Init( &argc, &argv );
+
+ {
+ type = MPI_INT;
+ /* Create key values */
+ for (i=0; i<3; i++) {
+ MPI_Type_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ attrval[i] = 1024 * i;
+ }
+
+ /* Insert attribute in several orders. Test after put with get,
+ then delete, then confirm delete with get. */
+
+ MPI_Type_set_attr( type, key[2], &attrval[2] );
+ MPI_Type_set_attr( type, key[1], &attrval[1] );
+ MPI_Type_set_attr( type, key[0], &attrval[0] );
+
+ errs += checkAttrs( type, 3, key, attrval );
+
+ MPI_Type_delete_attr( type, key[0] );
+ MPI_Type_delete_attr( type, key[1] );
+ MPI_Type_delete_attr( type, key[2] );
+
+ errs += checkNoAttrs( type, 3, key );
+
+ MPI_Type_set_attr( type, key[1], &attrval[1] );
+ MPI_Type_set_attr( type, key[2], &attrval[2] );
+ MPI_Type_set_attr( type, key[0], &attrval[0] );
+
+ errs += checkAttrs( type, 3, key, attrval );
+
+ MPI_Type_delete_attr( type, key[2] );
+ MPI_Type_delete_attr( type, key[1] );
+ MPI_Type_delete_attr( type, key[0] );
+
+ errs += checkNoAttrs( type, 3, key );
+
+ MPI_Type_set_attr( type, key[0], &attrval[0] );
+ MPI_Type_set_attr( type, key[1], &attrval[1] );
+ MPI_Type_set_attr( type, key[2], &attrval[2] );
+
+ errs += checkAttrs( type, 3, key, attrval );
+
+ MPI_Type_delete_attr( type, key[1] );
+ MPI_Type_delete_attr( type, key[2] );
+ MPI_Type_delete_attr( type, key[0] );
+
+ errs += checkNoAttrs( type, 3, key );
+
+ for (i=0; i<3; i++) {
+ MPI_Type_free_keyval( &key[i] );
+ }
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
+
+/* Verify that all n attributes are present on the datatype and that
+   each retrieved pointer is exactly &attrval[i].  Returns the number
+   of mismatches. */
+int checkAttrs( MPI_Datatype type, int n, int key[], int attrval[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Type_get_attr( type, key[i], &val_p, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d not set\n", i );
+ }
+ else if (val_p != &attrval[i]) {
+ errs++;
+ /* fixed typo: "Atribute" -> "Attribute" */
+ fprintf( stderr, "Attribute value for key %d not correct\n",
+ i );
+ }
+ }
+
+ return errs;
+}
+
+/* Verify that none of the n attributes remain on the datatype after
+   deletion.  Returns the number of attributes still found. */
+int checkNoAttrs( MPI_Datatype type, int n, int key[] )
+{
+ int errs = 0;
+ int i, flag, *val_p;
+
+ for (i=0; i<n; i++) {
+ MPI_Type_get_attr( type, key[i], &val_p, &flag );
+ if (flag) {
+ errs++;
+ fprintf( stderr, "Attribute for key %d set but should be deleted\n", i );
+ }
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/*
+
+ Exercise communicator routines.
+
+ This C version derived from a Fortran test program from ....
+
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+//#define DEBUG
+int test_communicators ( void );
+int copy_fn ( MPI_Comm, int, void *, void *, void *, int * );
+int delete_fn ( MPI_Comm, int, void *, void * );
+#ifdef DEBUG
+#define FFLUSH fflush(stdout);
+#else
+#define FFLUSH
+#endif
+
+int main( int argc, char **argv )
+{
+ int errs = 0;
+ MTest_Init( &argc, &argv );
+
+ /* Run the communicator construction/comparison/attribute checks. */
+ errs = test_communicators();
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+/* Attribute copy callback: propagates the cached value (stored by
+   value, not by pointer) to the duplicated communicator and marks the
+   attribute as copied. */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out, int *flag)
+{
+ /* Note that if (sizeof(int) < sizeof(void *), just setting the int
+ part of attribute_val_out may leave some dirty bits
+ */
+ *(MPI_Aint *)attribute_val_out = (MPI_Aint)attribute_val_in;
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Attribute delete callback: verifies that the cached value equals
+   this process's world rank and aborts on mismatch.  The attribute is
+   cached BY VALUE (MPI_Attr_put of (void *)(MPI_Aint)world_rank in
+   test_communicators), so attribute_val holds the rank itself, not a
+   pointer; dereferencing it would be undefined behavior. */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ int world_rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ if ((MPI_Aint)attribute_val != (MPI_Aint)world_rank) {
+ /* Print the value directly; the old *(int*)attribute_val would
+ dereference the rank-valued "pointer" and crash. */
+ printf( "incorrect attribute value %d\n", (int)(MPI_Aint)attribute_val );
+ MPI_Abort(MPI_COMM_WORLD, 1005 );
+ }
+ return MPI_SUCCESS;
+}
+
+/* Exercise intracommunicator construction and comparison:
+   Comm_create from the full group and a half-world group, Comm_dup
+   with attribute propagation, Comm_split with reversed ranks, and all
+   four MPI_Comm_compare results (IDENT, CONGRUENT, SIMILAR, UNEQUAL).
+   Returns the number of failed checks. */
+int test_communicators( void )
+{
+ MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm,
+ split_comm, world_comm;
+ MPI_Group world_group, lo_group, rev_group;
+ void *vvalue;
+ int ranges[1][3];
+ int flag, world_rank, world_size, rank, size, n, key_1, key_3;
+ int color, key, result;
+ int errs = 0;
+ MPI_Aint value;
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &world_size );
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( "*** Communicators ***\n" ); fflush(stdout);
+ }
+#endif
+
+ MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world );
+
+ /*
+ Exercise Comm_create by creating an equivalent to dup_comm_world
+ (sans attributes) and a half-world communicator.
+ */
+
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_create\n" ); fflush(stdout);
+ }
+#endif
+
+ MPI_Comm_group( dup_comm_world, &world_group );
+ MPI_Comm_create( dup_comm_world, world_group, &world_comm );
+ MPI_Comm_rank( world_comm, &rank );
+ if (rank != world_rank) {
+ errs++;
+ printf( "incorrect rank in world comm: %d\n", rank );
+ MPI_Abort(MPI_COMM_WORLD, 3001 );
+ }
+
+ n = world_size / 2;
+
+ /* lo_group = the first (world_size - n) ranks. */
+ ranges[0][0] = 0;
+ ranges[0][1] = (world_size - n) - 1;
+ ranges[0][2] = 1;
+
+#ifdef DEBUG
+ printf( "world rank = %d before range incl\n", world_rank );FFLUSH;
+#endif
+ MPI_Group_range_incl(world_group, 1, ranges, &lo_group );
+#ifdef DEBUG
+ printf( "world rank = %d after range incl\n", world_rank );FFLUSH;
+#endif
+ MPI_Comm_create(world_comm, lo_group, &lo_comm );
+#ifdef DEBUG
+ printf( "world rank = %d before group free\n", world_rank );FFLUSH;
+#endif
+ MPI_Group_free( &lo_group );
+
+#ifdef DEBUG
+ printf( "world rank = %d after group free\n", world_rank );FFLUSH;
+#endif
+
+ /* Members of lo_group get a real communicator; everyone else must
+ get MPI_COMM_NULL back from Comm_create. */
+ if (world_rank < (world_size - n)) {
+ MPI_Comm_rank(lo_comm, &rank );
+ if (rank == MPI_UNDEFINED) {
+ errs++;
+ printf( "incorrect lo group rank: %d\n", rank ); fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, 3002 );
+ }
+ else {
+ /* printf( "lo in\n" );FFLUSH; */
+ MPI_Barrier(lo_comm );
+ /* printf( "lo out\n" );FFLUSH; */
+ }
+ }
+ else {
+ if (lo_comm != MPI_COMM_NULL) {
+ errs++;
+ printf( "rank : %d incorrect lo comm:\n", rank ); fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, 3003 );
+ }
+ }
+
+#ifdef DEBUG
+ printf( "worldrank = %d\n", world_rank );FFLUSH;
+#endif
+ MPI_Barrier(world_comm);
+
+#ifdef DEBUG
+ printf( "bar!\n" );FFLUSH;
+#endif
+ /*
+ Check Comm_dup by adding attributes to lo_comm & duplicating
+ */
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_dup\n" );
+ fflush(stdout);
+ }
+#endif
+
+ if (lo_comm != MPI_COMM_NULL) {
+ value = 9;
+ MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value );
+ /* NOTE(review): this assignment is dead -- immediately overwritten
+ by value = 7 below (a key_2 apparently removed from the test). */
+ value = 8;
+ value = 7;
+ MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key_3, &value );
+
+ /* This may generate a compilation warning; it is, however, an
+ easy way to cache a value instead of a pointer */
+ /* printf( "key1 = %x key3 = %x\n", key_1, key_3 ); */
+ MPI_Attr_put(lo_comm, key_1, (void *) (MPI_Aint) world_rank );
+ MPI_Attr_put(lo_comm, key_3, (void *)0 );
+
+ MPI_Comm_dup(lo_comm, &dup_comm );
+
+ /* Note that if sizeof(int) < sizeof(void *), we can't use
+ (void **)&value to get the value we passed into Attr_put. To avoid
+ problems (e.g., alignment errors), we recover the value into
+ a (void *) and cast to int. Note that this may generate warning
+ messages from the compiler. */
+ MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+
+ if (! flag) {
+ errs++;
+ printf( "dup_comm key_1 not found on %d\n", world_rank );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3004 );
+ }
+
+ if (value != world_rank) {
+ errs++;
+ printf( "dup_comm key_1 value incorrect: %ld, expected %d\n",
+ (long)value, world_rank );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3005 );
+ }
+
+ /* key_3 used MPI_NULL_COPY_FN, so it must be absent on the dup. */
+ MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag );
+ value = (MPI_Aint)vvalue;
+ if (flag) {
+ errs++;
+ printf( "dup_comm key_3 found!\n" );
+ fflush( stdout );
+ MPI_Abort(MPI_COMM_WORLD, 3008 );
+ }
+ MPI_Keyval_free(&key_1 );
+ MPI_Keyval_free(&key_3 );
+ }
+ /*
+ Split the world into even & odd communicators with reversed ranks.
+ */
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_split\n" );
+ fflush(stdout);
+ }
+#endif
+
+ color = world_rank % 2;
+ key = world_size - world_rank;
+
+ MPI_Comm_split(dup_comm_world, color, key, &split_comm );
+ MPI_Comm_size(split_comm, &size );
+ MPI_Comm_rank(split_comm, &rank );
+ if (rank != ((size - world_rank/2) - 1)) {
+ errs++;
+ printf( "incorrect split rank: %d\n", rank ); fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, 3009 );
+ }
+
+ MPI_Barrier(split_comm );
+ /*
+ Test each possible Comm_compare result
+ */
+#ifdef DEBUG
+ if (world_rank == 0) {
+ printf( " Comm_compare\n" );
+ fflush(stdout);
+ }
+#endif
+
+ MPI_Comm_compare(world_comm, world_comm, &result );
+ if (result != MPI_IDENT) {
+ errs++;
+ printf( "incorrect ident result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3010 );
+ }
+
+ if (lo_comm != MPI_COMM_NULL) {
+ MPI_Comm_compare(lo_comm, dup_comm, &result );
+ if (result != MPI_CONGRUENT) {
+ errs++;
+ printf( "incorrect congruent result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3011 );
+ }
+ }
+
+ /* rev_group: same members as world but in reverse rank order, so
+ the resulting communicator compares as MPI_SIMILAR. */
+ ranges[0][0] = world_size - 1;
+ ranges[0][1] = 0;
+ ranges[0][2] = -1;
+
+ MPI_Group_range_incl(world_group, 1, ranges, &rev_group );
+ MPI_Comm_create(world_comm, rev_group, &rev_comm );
+
+ MPI_Comm_compare(world_comm, rev_comm, &result );
+ if (result != MPI_SIMILAR && world_size != 1) {
+ errs++;
+ printf( "incorrect similar result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3012 );
+ }
+
+ if (lo_comm != MPI_COMM_NULL) {
+ MPI_Comm_compare(world_comm, lo_comm, &result );
+ if (result != MPI_UNEQUAL && world_size != 1) {
+ errs++;
+ printf( "incorrect unequal result: %d\n", result );
+ MPI_Abort(MPI_COMM_WORLD, 3013 );
+ }
+ }
+ /*
+ Free all communicators created
+ */
+#ifdef DEBUG
+ if (world_rank == 0)
+ printf( " Comm_free\n" );
+#endif
+
+ MPI_Comm_free( &world_comm );
+ MPI_Comm_free( &dup_comm_world );
+
+ MPI_Comm_free( &rev_comm );
+ MPI_Comm_free( &split_comm );
+
+ MPI_Group_free( &world_group );
+ MPI_Group_free( &rev_group );
+
+ if (lo_comm != MPI_COMM_NULL) {
+ MPI_Comm_free( &lo_comm );
+ MPI_Comm_free( &dup_comm );
+ }
+
+ return errs;
+}
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+void MissingKeyval( int rc, const char keyname[] );
+
+/* Query each predefined communicator attribute (TAG_UB, HOST, IO,
+   WTIME_IS_GLOBAL, APPNUM, UNIVERSE_SIZE, LASTUSEDCODE) with errors
+   set to return, reporting via MissingKeyval any keyval the
+   implementation rejects and validating the values of those it
+   provides. */
+int main( int argc, char **argv)
+{
+ int errs = 0;
+ int rc;
+ void *v;
+ int flag;
+ int vval;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the attribute values */
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_TAG_UB" );
+ errs++;
+ }
+ else {
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get TAG_UB\n" );
+ }
+ else {
+ /* The MPI standard requires TAG_UB to be at least 32767. */
+ vval = *(int*)v;
+ if (vval < 32767) {
+ errs++;
+ fprintf( stderr, "Got too-small value (%d) for TAG_UB\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_HOST, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_HOST" );
+ errs++;
+ }
+ else {
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get HOST\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for HOST\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_IO, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_IO" );
+ errs++;
+ }
+ else {
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get IO\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_ANY_SOURCE &&
+ vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for IO\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_WTIME_IS_GLOBAL" );
+ errs++;
+ }
+ else {
+ if (flag) {
+ /* Wtime need not be set */
+ vval = *(int*)v;
+ if (vval < 0 || vval > 1) {
+ errs++;
+ fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n",
+ vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_APPNUM, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_APPNUM" );
+ errs++;
+ }
+ else {
+ /* appnum need not be set */
+ if (flag) {
+ vval = *(int *)v;
+ if (vval < 0) {
+ errs++;
+ fprintf( stderr, "MPI_APPNUM is defined as %d but must be nonnegative\n", vval );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_UNIVERSE_SIZE" );
+ errs++;
+ }
+ else {
+ /* MPI_UNIVERSE_SIZE need not be set */
+ if (flag) {
+ vval = *(int *)v;
+ if (vval < size) {
+ errs++;
+ fprintf( stderr, "MPI_UNIVERSE_SIZE = %d, less than comm world (%d)\n", vval, size );
+ }
+ }
+ }
+
+ rc = MPI_Attr_get( MPI_COMM_WORLD, MPI_LASTUSEDCODE, &v, &flag );
+ if (rc) {
+ MissingKeyval( rc, "MPI_LASTUSEDCODE" );
+ errs++;
+ }
+ else {
+ /* Last used code must be defined and >= MPI_ERR_LASTCODE */
+ if (flag) {
+ vval = *(int*)v;
+ if (vval < MPI_ERR_LASTCODE) {
+ errs++;
+ fprintf( stderr, "MPI_LASTUSEDCODE points to an integer (%d) smaller than MPI_ERR_LASTCODE (%d)\n", vval, MPI_ERR_LASTCODE );
+ }
+ }
+ else {
+ errs++;
+ /* fixed typo in the keyval name: LASTUSECODE -> LASTUSEDCODE */
+ fprintf( stderr, "MPI_LASTUSEDCODE is not defined\n" );
+ }
+ }
+
+ /* Restore the default error handler before finalizing. */
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+
+ MTest_Finalize( errs );
+ MPI_Finalize( );
+
+ return 0;
+}
+
+/* Report (to stdout) the error class and message for a failed
+ MPI_Attr_get on a predefined keyval; used when an implementation
+ (such as SMPI) does not provide one of the standard attributes. */
+void MissingKeyval( int errcode, const char keyname[] )
+{
+ int errclass, slen;
+ char string[MPI_MAX_ERROR_STRING];
+
+ MPI_Error_class( errcode, &errclass );
+ MPI_Error_string( errcode, string, &slen );
+ printf( "For key %s: Error class %d (%s)\n", keyname, errclass, string );
+ fflush( stdout );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* Checks the predefined MPI_COMM_WORLD attributes via the MPI-2
+ MPI_Comm_get_attr interface: TAG_UB, HOST, IO, WTIME_IS_GLOBAL,
+ APPNUM, UNIVERSE_SIZE and LASTUSEDCODE. Counts violations in errs. */
+int main( int argc, char **argv)
+{
+ int errs = 0;
+ void *v; /* receives a pointer to the attribute value */
+ int flag; /* set by MPI_Comm_get_attr when the attribute exists */
+ int vval;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ /* TAG_UB must be present and at least 32767, the MPI-required minimum */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_TAG_UB, &v, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get TAG_UB\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if (vval < 32767) {
+ errs++;
+ fprintf( stderr, "Got too-small value (%d) for TAG_UB\n", vval );
+ }
+ }
+
+ /* HOST must be a valid rank or MPI_PROC_NULL */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_HOST, &v, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get HOST\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for HOST\n", vval );
+ }
+ }
+ /* IO must be a valid rank, MPI_ANY_SOURCE, or MPI_PROC_NULL */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_IO, &v, &flag );
+ if (!flag) {
+ errs++;
+ fprintf( stderr, "Could not get IO\n" );
+ }
+ else {
+ vval = *(int*)v;
+ if ((vval < 0 || vval >= size) && vval != MPI_ANY_SOURCE &&
+ vval != MPI_PROC_NULL) {
+ errs++;
+ fprintf( stderr, "Got invalid value %d for IO\n", vval );
+ }
+ }
+
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &v, &flag );
+ if (flag) {
+ /* Wtime need not be set; if present it must be a boolean (0 or 1) */
+ vval = *(int*)v;
+ if (vval < 0 || vval > 1) {
+ errs++;
+ fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n",
+ vval );
+ }
+ }
+
+ /* MPI 2.0, section 5.5.3 - MPI_APPNUM should be set if the program is
+ started with more than one executable name (e.g., in MPMD instead
+ of SPMD mode). This is independent of the dynamic process routines,
+ and should be supported even if MPI_COMM_SPAWN and friends are not. */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_APPNUM, &v, &flag );
+ /* appnum need not be set */
+ if (flag) {
+ vval = *(int *)v;
+ if (vval < 0) {
+ errs++;
+ fprintf( stderr, "MPI_APPNUM is defined as %d but must be nonnegative\n", vval );
+ }
+ }
+
+ /* MPI 2.0 section 5.5.1. MPI_UNIVERSE_SIZE need not be set, but
+ should be present. */
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag );
+ /* MPI_UNIVERSE_SIZE need not be set */
+ if (flag) {
+ /* But if it is set, it must be at least the size of comm_world */
+ vval = *(int *)v;
+ if (vval < size) {
+ errs++;
+ fprintf( stderr, "MPI_UNIVERSE_SIZE = %d, less than comm world (%d)\n", vval, size );
+ }
+ }
+
+ MPI_Comm_get_attr( MPI_COMM_WORLD, MPI_LASTUSEDCODE, &v, &flag );
+ /* Last used code must be defined and >= MPI_ERR_LASTCODE */
+ if (flag) {
+ vval = *(int*)v;
+ if (vval < MPI_ERR_LASTCODE) {
+ errs++;
+ fprintf( stderr, "MPI_LASTUSEDCODE points to an integer (%d) smaller than MPI_ERR_LASTCODE (%d)\n", vval, MPI_ERR_LASTCODE );
+ }
+ }
+ else {
+ errs++;
+ /* NOTE(review): message says "MPI_LASTUSECODE" — likely a typo for
+ MPI_LASTUSEDCODE; left as-is since this is imported test code */
+ fprintf( stderr, "MPI_LASTUSECODE is not defined\n" );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize( );
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test freeing keyvals while still attached to \
+a communicator, then make sure that the keyval delete and copy code are still \
+executed";
+*/
+
+/* Function prototypes to keep compilers happy */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag);
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state);
+
+/* Copy increments the attribute value */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag)
+{
+ /* Copy the address of the attribute */
+ *(void **)attribute_val_out = attribute_val_in;
+ /* Change the value */
+ *(int *)attribute_val_in = *(int *)attribute_val_in + 1;
+ /* set flag to 1 to tell comm dup to insert this attribute
+ into the new communicator */
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Delete decrements the attribute value */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ *(int *)attribute_val = *(int *)attribute_val - 1;
+ return MPI_SUCCESS;
+}
+
+/* Attach an attribute (via the MPI-1 keyval interface), free the keyval
+ while it is still attached, then verify through the shared counter
+ `attrval` that the copy callback fires on MPI_Comm_dup and the delete
+ callback fires on MPI_Comm_free. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int attrval; /* counter mutated by copy_fn (+1) and delete_fn (-1) */
+ int i, key[32], keyval, saveKeyval;
+ MPI_Comm comm, dupcomm;
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracomm( &comm, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Keyval_create( copy_fn, delete_fn, &keyval, (void *)0 );
+ saveKeyval = keyval; /* in case we need to free explicitly */
+ attrval = 1;
+ MPI_Attr_put( comm, keyval, (void*)&attrval );
+ /* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
+ is in use in an attribute */
+ MPI_Keyval_free( &keyval );
+
+ /* We create some dummy keyvals here in case the same keyval
+ is reused */
+ for (i=0; i<32; i++) {
+ MPI_Keyval_create( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ }
+
+ MPI_Comm_dup( comm, &dupcomm );
+ /* Check that the attribute was copied */
+ if (attrval != 2) {
+ errs++;
+ printf( "Attribute not incremented when comm dup'ed (%s)\n",
+ MTestGetIntracommName() );
+ }
+ MPI_Comm_free( &dupcomm );
+ if (attrval != 1) {
+ errs++;
+ printf( "Attribute not decremented when dupcomm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ /* Check that the attribute was freed in the dupcomm */
+
+ if (comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF) {
+ MPI_Comm_free( &comm );
+ /* Check that the original attribute was freed */
+ if (attrval != 0) {
+ errs++;
+ printf( "Attribute not decremented when comm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ }
+ else {
+ /* Explicitly delete the attributes from world and self */
+ MPI_Attr_delete( comm, saveKeyval );
+ }
+ /* Free those other keyvals */
+ for (i=0; i<32; i++) {
+ MPI_Keyval_free( &key[i] );
+ }
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ /* The attributes on comm self and world were deleted by finalize
+ (see separate test) */
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTestDescrip[] = "Test freeing keyvals while still attached to \
+a communicator, then make sure that the keyval delete and copy code are still \
+executed";
+*/
+
+/* Function prototypes to keep compilers happy */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag);
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state);
+
+/* Copy increments the attribute value */
+int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag)
+{
+ /* Copy the address of the attribute */
+ *(void **)attribute_val_out = attribute_val_in;
+ /* Change the value */
+ *(int *)attribute_val_in = *(int *)attribute_val_in + 1;
+ /* set flag to 1 to tell comm dup to insert this attribute
+ into the new communicator */
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Delete decrements the attribute value */
+int delete_fn( MPI_Comm comm, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ *(int *)attribute_val = *(int *)attribute_val - 1;
+ return MPI_SUCCESS;
+}
+
+/* Same test as fkeyval.c, but exercised through the MPI-2 communicator
+ attribute interface (MPI_Comm_create_keyval / MPI_Comm_set_attr /
+ MPI_Comm_free_keyval / MPI_Comm_delete_attr) instead of the
+ deprecated MPI-1 MPI_Keyval_* / MPI_Attr_* calls. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int attrval; /* counter mutated by copy_fn (+1) and delete_fn (-1) */
+ int i, key[32], keyval, saveKeyval;
+ MPI_Comm comm, dupcomm;
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracomm( &comm, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_create_keyval( copy_fn, delete_fn, &keyval, (void *)0 );
+ saveKeyval = keyval; /* in case we need to free explicitly */
+ attrval = 1;
+ MPI_Comm_set_attr( comm, keyval, (void*)&attrval );
+ /* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
+ is in use in an attribute */
+ MPI_Comm_free_keyval( &keyval );
+
+ /* We create some dummy keyvals here in case the same keyval
+ is reused */
+ for (i=0; i<32; i++) {
+ MPI_Comm_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ }
+
+ MPI_Comm_dup( comm, &dupcomm );
+ /* Check that the attribute was copied */
+ if (attrval != 2) {
+ errs++;
+ printf( "Attribute not incremented when comm dup'ed (%s)\n",
+ MTestGetIntracommName() );
+ }
+ MPI_Comm_free( &dupcomm );
+ if (attrval != 1) {
+ errs++;
+ printf( "Attribute not decremented when dupcomm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ /* Check that the attribute was freed in the dupcomm */
+
+ if (comm != MPI_COMM_WORLD && comm != MPI_COMM_SELF) {
+ MPI_Comm_free( &comm );
+ /* Check that the original attribute was freed */
+ if (attrval != 0) {
+ errs++;
+ printf( "Attribute not decremented when comm %s freed\n",
+ MTestGetIntracommName() );
+ }
+ }
+ else {
+ /* Explicitly delete the attributes from world and self */
+ MPI_Comm_delete_attr( comm, saveKeyval );
+ }
+ /* Free those other keyvals */
+ for (i=0; i<32; i++) {
+ MPI_Comm_free_keyval( &key[i] );
+ }
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ /* The attributes on comm self and world were deleted by finalize
+ (see separate test) */
+
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+#include "stdlib.h"
+
+/*
+static char MTestDescrip[] = "Test freeing keyvals while still attached to \
+a datatype, then make sure that the keyval delete and copy code are still \
+executed";
+*/
+
+/* Copy increments the attribute value */
+int copy_fn( MPI_Datatype oldtype, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag);
+int copy_fn( MPI_Datatype oldtype, int keyval, void *extra_state,
+ void *attribute_val_in, void *attribute_val_out,
+ int *flag)
+{
+ /* Copy the address of the attribute */
+ *(void **)attribute_val_out = attribute_val_in;
+ /* Change the value */
+ *(int *)attribute_val_in = *(int *)attribute_val_in + 1;
+ /* set flag to 1 to tell comm dup to insert this attribute
+ into the new communicator */
+ *flag = 1;
+ return MPI_SUCCESS;
+}
+
+/* Delete decrements the attribute value */
+int delete_fn( MPI_Datatype type, int keyval, void *attribute_val,
+ void *extra_state);
+int delete_fn( MPI_Datatype type, int keyval, void *attribute_val,
+ void *extra_state)
+{
+ *(int *)attribute_val = *(int *)attribute_val - 1;
+ return MPI_SUCCESS;
+}
+
+/* Datatype variant of the freed-keyval test: attach an attribute to each
+ datatype from MTestGetDatatypes, free the keyval while attached, then
+ verify via `attrval` that MPI_Type_dup runs the copy callback and
+ MPI_Type_free / MTestFreeDatatype runs the delete callback. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int attrval; /* counter mutated by copy_fn (+1) and delete_fn (-1) */
+ int i, key[32], keyval, saveKeyval;
+ MPI_Datatype type, duptype;
+ MTestDatatype mstype, mrtype;
+ char typename[MPI_MAX_OBJECT_NAME];
+ int tnlen;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetDatatypes( &mstype, &mrtype, 1 )) {
+ type = mstype.datatype;
+ MPI_Type_create_keyval( copy_fn, delete_fn, &keyval, (void *)0 );
+ saveKeyval = keyval; /* in case we need to free explicitly */
+ attrval = 1;
+ MPI_Type_set_attr( type, keyval, (void*)&attrval );
+ /* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
+ is in use in an attribute */
+ MPI_Type_free_keyval( &keyval );
+
+ /* We create some dummy keyvals here in case the same keyval
+ is reused */
+ for (i=0; i<32; i++) {
+ MPI_Type_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
+ &key[i], (void *)0 );
+ }
+
+ if (attrval != 1) {
+ errs++;
+ MPI_Type_get_name( type, typename, &tnlen );
+ printf( "attrval is %d, should be 1, before dup in type %s\n",
+ attrval, typename );
+ }
+ MPI_Type_dup( type, &duptype );
+ /* Check that the attribute was copied */
+ if (attrval != 2) {
+ errs++;
+ MPI_Type_get_name( type, typename, &tnlen );
+ printf( "Attribute not incremented when type dup'ed (%s)\n",
+ typename );
+ }
+ MPI_Type_free( &duptype );
+ if (attrval != 1) {
+ errs++;
+ MPI_Type_get_name( type, typename, &tnlen );
+ printf( "Attribute not decremented when duptype %s freed\n",
+ typename );
+ }
+ /* Check that the attribute was freed in the duptype */
+
+ if (!mstype.isBasic) {
+ /* fetch the name before freeing, for use in the message below */
+ MPI_Type_get_name( type, typename, &tnlen );
+ MTestFreeDatatype(&mstype);
+ /* Check that the original attribute was freed */
+ if (attrval != 0) {
+ errs++;
+ printf( "Attribute not decremented when type %s freed\n",
+ typename );
+ }
+ }
+ else {
+ /* Explicitly delete the attributes from world and self */
+ MPI_Type_delete_attr( type, saveKeyval );
+ if (mstype.buf) {
+ free(mstype.buf);
+ mstype.buf = 0;
+ }
+ }
+ /* Free those other keyvals */
+ for (i=0; i<32; i++) {
+ MPI_Type_free_keyval( &key[i] );
+ }
+ MTestFreeDatatype(&mrtype);
+ }
+ MTest_Finalize( errs );
+ MPI_Finalize();
+
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <mpi.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "mpitest.h"
+
+/* tests multiple invocations of Keyval_free on the same keyval */
+
+int delete_fn(MPI_Comm comm, int keyval, void *attr, void *extra);
+/* Delete callback that frees its own keyval — combined with the explicit
+ frees in main, this exercises repeated MPI_Keyval_free on one keyval. */
+int delete_fn(MPI_Comm comm, int keyval, void *attr, void *extra) {
+ MPI_Keyval_free(&keyval);
+ return MPI_SUCCESS;
+}
+
+/* Puts the same keyval on MPI_COMM_SELF and a dup of it, then triggers
+ four MPI_Keyval_free calls (two via delete_fn during Comm_free/Finalize,
+ two explicit) to check that multiple frees are handled. */
+int main (int argc, char **argv)
+{
+ MPI_Comm duped;
+ int keyval = MPI_KEYVAL_INVALID;
+ int keyval_copy = MPI_KEYVAL_INVALID;
+ int errs=0;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_dup(MPI_COMM_SELF, &duped);
+
+ MPI_Keyval_create(MPI_NULL_COPY_FN, delete_fn, &keyval, NULL);
+ keyval_copy = keyval;
+
+ MPI_Attr_put(MPI_COMM_SELF, keyval, NULL);
+ MPI_Attr_put(duped, keyval, NULL);
+
+ MPI_Comm_free(&duped); /* first MPI_Keyval_free */
+ MPI_Keyval_free(&keyval); /* second MPI_Keyval_free */
+ MPI_Keyval_free(&keyval_copy); /* third MPI_Keyval_free */
+ MTest_Finalize( errs );
+ MPI_Finalize(); /* fourth MPI_Keyval_free */
+ return 0;
+}
--- /dev/null
+#needs MPI_Keyval_create and MPI_Attr_get
+#attrt 2
+#needs MPI_Intercomm_create
+#attric 4
+#needs MPI_Errhandler_set, MPI_Keyval_create, MPI_Keyval_free, MPI_Attr_put
+#attrerr 1
+#needs MPI_Keyval_create, MPI_Keyval_free, MPI_Attr_put
+#attrend 1
+#attrend 4
+attrend2 1
+attrend2 5
+#needs MPI_Errhandler_set, MPI_Comm_create_keyval, MPI_Comm_free_keyval, MPI_Comm_set_attr, MPI_Comm_delete_attr
+#attrerrcomm 1
+#needs MPI_Errhandler_set, MPI_Type_create_keyval, MPI_Type_dup, MPI_Type_set_attr, MPI_Type_delete_attr
+#attrerrtype 1
+#needs MPI_Type_create_keyval, MPI_Type_dup, MPI_Type_set_attr
+#attr2type 1
+#needs MPI_Keyval_create, MPI_Keyval_free, MPI_Attr_put, MPI_Attr_get, MPI_Attr_delete
+#attrorder 1
+#needs MPI_Comm_create_keyval, MPI_Comm_free_keyval, MPI_Comm_get_attr, MPI_Comm_set_attr, MPI_Comm_delete_attr
+#attrordercomm 1
+#needs MPI_Type_create_keyval, MPI_Type_delete_keyval, MPI_Type_set_attr, MPI_Type_delete_attr
+#attrordertype 1
+#needs MPI_Errhandler_set, MPI_Attr_get
+#baseattr2 1
+#needs MPI_Comm_get_attr
+#baseattrcomm 1
+#MPI_Keyval_create, MPI_Keyval_free for type and comm also
+#fkeyval 1
+#fkeyvalcomm 1
+#fkeyvaltype 1
+#keyval_double_free 1
--- /dev/null
+#! /usr/local/bin/perl
+
+# Scans runtests.*.status / runtests.*.out file pairs produced by the
+# mpich-test runner and counts passes/failures.  A test passes when its
+# exit status is 0 AND its output contains only the "No Errors" line.
+$debug = 1;
+$verbose = 1;
+$ignoreBogusOutput = 0;
+$filePattern = "runtests.*.status";
+
+$testsPassed = 0;
+$testsFailed = 0;
+
+# Only supported option: -ignorebogus (tolerate extra stdout noise)
+foreach $_ (@ARGV) {
+ if (/^--?ignorebogus/) {
+ $ignoreBogusOutput = 1;
+ }
+ else {
+ print STDERR "checktests [ -ignorebogus ]\n";
+ exit(1);
+ }
+}
+
+open( RESULTS, "ls -1 $filePattern |" ) || die "Cannot list directory using ls -1 $filePattern\n";
+
+while (<RESULTS>) {
+ chop;
+ $statusFile = $_;
+ $resultsFile = $statusFile;
+ $resultsFile =~ s/\.status/.out/;
+
+ # Extract the test number from the .out filename
+ if ($resultsFile =~ /runtests\.([0-9]+)\.out/) {
+ $count = $1;
+ }
+ else {
+ $count = -1;
+ print STDERR "Unable to determine test number from $resultsFile!\n";
+ $testsFailed ++;
+ next;
+ }
+ # The last line of the .status file holds the exit status
+ open (SFD, "<$statusFile" );
+ while (<SFD>) {
+ chop;
+ $testStatus = $_;
+ }
+ close (SFD);
+
+ if (-s $resultsFile) {
+ open (RFD, "<$resultsFile");
+ $runLine = <RFD>;
+ $sawNoerrors = 0;
+ # Successful output should contain ONLY the line No Errors
+ while (<RFD>) {
+ chop;
+ $outLine = $_;
+ if ($outLine =~ /^\s+No [Ee]rrors\s*$/) {
+ $sawNoerrors = 1;
+ }
+ else {
+ # To filter out output that may be added to STDOUT
+ # by a badly behaved runtime system, you can either
+ # add a specific filter here (preferred) or set the
+ # -ignorebogus option (considered a workaround)
+ # The following is an example that accepts certain
+ # kinds of output once "No Errors" is seen.
+ if ($sawNoerrors) {
+ if ( /^Application [0-9]+ resources: utime .*/) {
+ last;
+ }
+ }
+ if (!$ignoreBogusOutput) {
+ # Any extraneous output is an error
+ $sawNoerrors = 0;
+ }
+ }
+ }
+ close (RFD);
+ if ($sawNoerrors == 1 && $testStatus == 0) {
+ $testsPassed ++;
+ }
+ else {
+ # Test wrote No Errors but then exited with a non-zero status
+ $testsFailed ++;
+ # Output the errors
+ if ($verbose) {
+ print STDOUT "Test $count failed:\n";
+ print STDOUT "Test status: $testStatus\n";
+ print STDOUT "Test output:\n";
+ system ("cat $resultsFile" );
+ }
+ }
+ }
+ else {
+ # Missing or empty .out file counts as a failure
+ print STDERR "No $resultsFile\n" if $debug;
+ $testsFailed ++;
+ }
+}
+
+# NOTE(review): "test failed" reads oddly — likely meant "tests failed";
+# left unchanged since other tooling may match this exact summary line.
+print "Tests passed: $testsPassed; test failed: $testsFailed\n";
--- /dev/null
+# Build setup for the imported mpich-test coll suite under SMPI.
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ # NOTE(review): overriding CMAKE_C_COMPILER here (after project setup)
+ # is fragile in modern CMake; kept as-is for this imported suite.
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ # Feature-test defines expected by the mpich test sources
+ set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1 -Wno-error=unused-variable")
+ # NOTE(review): directory-scoped include_directories is legacy style;
+ # target_include_directories would be preferred, but these apply to
+ # every test target below, so the directory form is kept.
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}/../include/")
+
+
+ # One executable per mpich coll test; each compiles the test source
+ # plus the shared mtest harness.
+ add_executable(allgather2 allgather2.c ../util/mtest.c)
+ add_executable(allgather3 allgather3.c ../util/mtest.c)
+ add_executable(allgatherv2 allgatherv2.c ../util/mtest.c)
+ add_executable(allgatherv3 allgatherv3.c ../util/mtest.c)
+ add_executable(allgatherv4 allgatherv4.c ../util/mtest.c)
+ add_executable(allred2 allred2.c ../util/mtest.c)
+ add_executable(allred3 allred3.c ../util/mtest.c)
+ add_executable(allred4 allred4.c ../util/mtest.c)
+ add_executable(allred5 allred5.c ../util/mtest.c)
+ add_executable(allred6 allred6.c ../util/mtest.c)
+ add_executable(allred allred.c ../util/mtest.c)
+ add_executable(allredmany allredmany.c ../util/mtest.c)
+ add_executable(alltoall1 alltoall1.c ../util/mtest.c)
+ add_executable(alltoallv0 alltoallv0.c ../util/mtest.c)
+ add_executable(alltoallv alltoallv.c ../util/mtest.c)
+ add_executable(alltoallw1 alltoallw1.c ../util/mtest.c)
+ add_executable(alltoallw2 alltoallw2.c ../util/mtest.c)
+ add_executable(alltoallw_zeros alltoallw_zeros.c ../util/mtest.c)
+ add_executable(bcast2 bcast2.c ../util/mtest.c)
+ add_executable(bcast3 bcast3.c ../util/mtest.c)
+ add_executable(bcasttest bcasttest.c ../util/mtest.c)
+ add_executable(bcastzerotype bcastzerotype.c ../util/mtest.c)
+ add_executable(coll10 coll10.c ../util/mtest.c)
+ add_executable(coll11 coll11.c ../util/mtest.c)
+ add_executable(coll12 coll12.c ../util/mtest.c)
+ add_executable(coll13 coll13.c ../util/mtest.c)
+ add_executable(coll2 coll2.c ../util/mtest.c)
+ add_executable(coll3 coll3.c ../util/mtest.c)
+ add_executable(coll4 coll4.c ../util/mtest.c)
+ add_executable(coll5 coll5.c ../util/mtest.c)
+ add_executable(coll6 coll6.c ../util/mtest.c)
+ add_executable(coll7 coll7.c ../util/mtest.c)
+ add_executable(coll8 coll8.c ../util/mtest.c)
+ add_executable(coll9 coll9.c ../util/mtest.c)
+ add_executable(exscan2 exscan2.c ../util/mtest.c)
+ add_executable(exscan exscan.c ../util/mtest.c)
+ add_executable(gather2 gather2.c ../util/mtest.c)
+ add_executable(gather2_save gather2_save.c ../util/mtest.c)
+ add_executable(gather gather.c ../util/mtest.c)
+ add_executable(iallred iallred.c ../util/mtest.c)
+ add_executable(ibarrier ibarrier.c ../util/mtest.c)
+ add_executable(icallgather icallgather.c ../util/mtest.c)
+ add_executable(icallgatherv icallgatherv.c ../util/mtest.c)
+ add_executable(icallreduce icallreduce.c ../util/mtest.c)
+ add_executable(icalltoall icalltoall.c ../util/mtest.c)
+ add_executable(icalltoallv icalltoallv.c ../util/mtest.c)
+ add_executable(icalltoallw icalltoallw.c ../util/mtest.c)
+ add_executable(icbarrier icbarrier.c ../util/mtest.c)
+ add_executable(icbcast icbcast.c ../util/mtest.c)
+ add_executable(icgather icgather.c ../util/mtest.c)
+ add_executable(icgatherv icgatherv.c ../util/mtest.c)
+ add_executable(icreduce icreduce.c ../util/mtest.c)
+ add_executable(icscatter icscatter.c ../util/mtest.c)
+ add_executable(icscatterv icscatterv.c ../util/mtest.c)
+ add_executable(longuser longuser.c ../util/mtest.c)
+ add_executable(nonblocking2 nonblocking2.c ../util/mtest.c)
+ add_executable(nonblocking3 nonblocking3.c ../util/mtest.c)
+ add_executable(nonblocking nonblocking.c ../util/mtest.c)
+ add_executable(opband opband.c ../util/mtest.c)
+ add_executable(opbor opbor.c ../util/mtest.c)
+ add_executable(opbxor opbxor.c ../util/mtest.c)
+ add_executable(op_commutative op_commutative.c ../util/mtest.c)
+ add_executable(opland opland.c ../util/mtest.c)
+ add_executable(oplor oplor.c ../util/mtest.c)
+ add_executable(oplxor oplxor.c ../util/mtest.c)
+ add_executable(opmax opmax.c ../util/mtest.c)
+ add_executable(opmaxloc opmaxloc.c ../util/mtest.c)
+ add_executable(opmin opmin.c ../util/mtest.c)
+ add_executable(opminloc opminloc.c ../util/mtest.c)
+ add_executable(opprod opprod.c ../util/mtest.c)
+ add_executable(opsum opsum.c ../util/mtest.c)
+ add_executable(red3 red3.c ../util/mtest.c)
+ add_executable(red4 red4.c ../util/mtest.c)
+ add_executable(redscat2 redscat2.c ../util/mtest.c)
+ add_executable(redscat3 redscat3.c ../util/mtest.c)
+ add_executable(redscatbkinter redscatbkinter.c ../util/mtest.c)
+ add_executable(redscatblk3 redscatblk3.c ../util/mtest.c)
+ add_executable(red_scat_block2 red_scat_block2.c ../util/mtest.c)
+ add_executable(red_scat_block red_scat_block.c ../util/mtest.c)
+ add_executable(redscat redscat.c ../util/mtest.c)
+ add_executable(redscatinter redscatinter.c ../util/mtest.c)
+ add_executable(reduce_mpich reduce.c ../util/mtest.c)
+ add_executable(reduce_local reduce_local.c ../util/mtest.c)
+ add_executable(scantst scantst.c ../util/mtest.c)
+ add_executable(scatter2 scatter2.c ../util/mtest.c)
+ add_executable(scatter3 scatter3.c ../util/mtest.c)
+ add_executable(scattern scattern.c ../util/mtest.c)
+ add_executable(scatterv scatterv.c ../util/mtest.c)
+ add_executable(uoplong uoplong.c ../util/mtest.c)
+
+
+
+ # All test executables link against the simgrid library (SMPI runtime).
+ target_link_libraries(allgather2 simgrid)
+ target_link_libraries(allgather3 simgrid)
+ target_link_libraries(allgatherv2 simgrid)
+ target_link_libraries(allgatherv3 simgrid)
+ target_link_libraries(allgatherv4 simgrid)
+ target_link_libraries(allred2 simgrid)
+ target_link_libraries(allred3 simgrid)
+ target_link_libraries(allred4 simgrid)
+ target_link_libraries(allred5 simgrid)
+ target_link_libraries(allred6 simgrid)
+ target_link_libraries(allred simgrid)
+ target_link_libraries(allredmany simgrid)
+ target_link_libraries(alltoall1 simgrid)
+ target_link_libraries(alltoallv0 simgrid)
+ target_link_libraries(alltoallv simgrid)
+ target_link_libraries(alltoallw1 simgrid)
+ target_link_libraries(alltoallw2 simgrid)
+ target_link_libraries(alltoallw_zeros simgrid)
+ target_link_libraries(bcast2 simgrid)
+ target_link_libraries(bcast3 simgrid)
+ target_link_libraries(bcasttest simgrid)
+ target_link_libraries(bcastzerotype simgrid)
+ target_link_libraries(coll10 simgrid)
+ target_link_libraries(coll11 simgrid)
+ target_link_libraries(coll12 simgrid)
+ target_link_libraries(coll13 simgrid)
+ target_link_libraries(coll2 simgrid)
+ target_link_libraries(coll3 simgrid)
+ target_link_libraries(coll4 simgrid)
+ target_link_libraries(coll5 simgrid)
+ target_link_libraries(coll6 simgrid)
+ target_link_libraries(coll7 simgrid)
+ target_link_libraries(coll8 simgrid)
+ target_link_libraries(coll9 simgrid)
+ target_link_libraries(exscan2 simgrid)
+ target_link_libraries(exscan simgrid)
+ target_link_libraries(gather2 simgrid)
+ target_link_libraries(gather2_save simgrid)
+ target_link_libraries(gather simgrid)
+ target_link_libraries(iallred simgrid)
+ target_link_libraries(ibarrier simgrid)
+ target_link_libraries(icallgather simgrid)
+ target_link_libraries(icallgatherv simgrid)
+ target_link_libraries(icallreduce simgrid)
+ target_link_libraries(icalltoall simgrid)
+ target_link_libraries(icalltoallv simgrid)
+ target_link_libraries(icalltoallw simgrid)
+ target_link_libraries(icbarrier simgrid)
+ target_link_libraries(icbcast simgrid)
+ target_link_libraries(icgather simgrid)
+ target_link_libraries(icgatherv simgrid)
+ target_link_libraries(icreduce simgrid)
+ target_link_libraries(icscatter simgrid)
+ target_link_libraries(icscatterv simgrid)
+ target_link_libraries(longuser simgrid)
+ target_link_libraries(nonblocking2 simgrid)
+ target_link_libraries(nonblocking3 simgrid)
+ target_link_libraries(nonblocking simgrid)
+ target_link_libraries(opband simgrid)
+ target_link_libraries(opbor simgrid)
+ target_link_libraries(opbxor simgrid)
+ target_link_libraries(op_commutative simgrid)
+ target_link_libraries(opland simgrid)
+ target_link_libraries(oplor simgrid)
+ target_link_libraries(oplxor simgrid)
+ target_link_libraries(opmax simgrid)
+ target_link_libraries(opmaxloc simgrid)
+ target_link_libraries(opmin simgrid)
+ target_link_libraries(opminloc simgrid)
+ target_link_libraries(opprod simgrid)
+ target_link_libraries(opsum simgrid)
+ target_link_libraries(red3 simgrid)
+ target_link_libraries(red4 simgrid)
+ target_link_libraries(redscat2 simgrid)
+ target_link_libraries(redscat3 simgrid)
+ target_link_libraries(redscatbkinter simgrid)
+ target_link_libraries(redscatblk3 simgrid)
+ target_link_libraries(red_scat_block2 simgrid)
+ target_link_libraries(red_scat_block simgrid)
+ target_link_libraries(redscat simgrid)
+ target_link_libraries(redscatinter simgrid)
+ target_link_libraries(reduce_mpich simgrid)
+ target_link_libraries(reduce_local simgrid)
+ target_link_libraries(scantst simgrid)
+ target_link_libraries(scatter2 simgrid)
+ target_link_libraries(scatter3 simgrid)
+ target_link_libraries(scattern simgrid)
+ target_link_libraries(scatterv simgrid)
+ target_link_libraries(uoplong simgrid)
+
+
+
+ # Apply the mpich feature-test defines (MPICH_FLAGS, set above) to
+ # every test target.
+ set_target_properties(allgather2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgather3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgatherv2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgatherv3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allgatherv4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred5 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred6 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allred PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(allredmany PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoall1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallv0 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallw1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallw2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(alltoallw_zeros PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcast2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcast3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcasttest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(bcastzerotype PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll10 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll11 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll12 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll13 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll5 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll6 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll7 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll8 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(coll9 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(exscan2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ # NOTE(review): apply the MPICH compatibility compile flags to every
+ # collective-test executable so the upstream MPICH sources build unmodified
+ # under SMPI; MPICH_FLAGS is presumably defined earlier in this file --
+ # confirm.
+ set_target_properties(exscan PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gather2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gather2_save PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(gather PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(iallred PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(ibarrier PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icallgather PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icallgatherv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icallreduce PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icalltoall PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icalltoallv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icalltoallw PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icbarrier PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icbcast PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icgather PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icgatherv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icreduce PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icscatter PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(icscatterv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(longuser PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(nonblocking2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(nonblocking3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(nonblocking PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opband PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opbor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opbxor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(op_commutative PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opland PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(oplor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(oplxor PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opmax PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opmaxloc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opmin PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opminloc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opprod PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(opsum PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscat2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscat3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscatbkinter PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscatblk3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red_scat_block2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(red_scat_block PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscat PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(redscatinter PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(reduce_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(reduce_local PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scantst PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scatter2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scatter3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scattern PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(scatterv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+ set_target_properties(uoplong PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
+
+endif()
+
+# Propagate the accumulated file lists to the parent directory's scope so the
+# top-level build can pick them up (tesh scenarios, XML deployment files, C
+# sources, binaries, and plain-text helpers such as the runtests driver).
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+# NOTE(review): the source list names reduce.c while the target above is
+# reduce_mpich -- presumably the executable is renamed where add_executable is
+# called; confirm.
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgather2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgather3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgatherv2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgatherv3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgatherv4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred5.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred6.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allred.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allredmany.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoall1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv0.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallw1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallw2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallw_zeros.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcasttest.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcastzerotype.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll10.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll11.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll12.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll13.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll5.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll6.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll7.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll8.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/coll9.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exscan2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/exscan.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gather2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gather2_save.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/gather.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/iallred.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ibarrier.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icallgather.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icallgatherv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icallreduce.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icalltoall.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icalltoallv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icalltoallw.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icbarrier.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icbcast.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icgather.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icgatherv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icreduce.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icscatter.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/icscatterv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/longuser.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/nonblocking.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opband.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opbor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opbxor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/op_commutative.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opland.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/oplor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/oplxor.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opmax.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opmaxloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opmin.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opminloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opprod.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/opsum.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscat2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscat3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscatbkinter.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscatblk3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red_scat_block2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/red_scat_block.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscat.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/redscatinter.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/reduce.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/reduce_local.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scantst.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scatter2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scatter3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scattern.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/scatterv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/uoplong.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/runtests
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE */
+
+/* Driver: each rank fills only its own slice of vecout, then an in-place
+   MPI_Allgather must fill the remaining slices; the full vector is verified
+   to equal 0..size*count-1. */
+int main( int argc, char **argv )
+{
+ double *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ /* The MTest harness hands back a sequence of intracommunicators with at
+    least minsize ranks; MPI_COMM_NULL entries are skipped. */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* Message sizes double from 1 up to (but not including) 9000 doubles. */
+ for (count = 1; count < 9000; count = count * 2) {
+ vecout = (double *)malloc( size * count * sizeof(double) );
+ /* NOTE(review): malloc result is not checked for NULL. */
+
+ for (i=0; i<count; i++) {
+ vecout[rank*count+i] = rank*count+i;
+ }
+ /* With MPI_IN_PLACE the send count/datatype are ignored per the MPI
+    standard, so the -1 / MPI_DATATYPE_NULL arguments are deliberate. */
+ MPI_Allgather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, count, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ /* Limit error output to the first few mismatches. */
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( vecout );
+ }
+
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. */
+
+/* Driver: rank r contributes values r*count .. r*count+count-1 from a
+   separate send buffer; after MPI_Allgather the receive vector must equal
+   0..size*count-1. Ends with a zero-byte in-place gather. */
+int main( int argc, char **argv )
+{
+ double *vecout, *invec;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ /* Iterate over the intracommunicators supplied by the MTest harness
+    (at least minsize ranks); MPI_COMM_NULL entries are skipped. */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (count = 1; count < 9000; count = count * 2) {
+ invec = (double *)malloc( count * sizeof(double) );
+ vecout = (double *)malloc( size * count * sizeof(double) );
+ /* NOTE(review): malloc results are not checked for NULL. */
+
+ for (i=0; i<count; i++) {
+ invec[i] = rank*count+i;
+ }
+ MPI_Allgather( invec, count, MPI_DOUBLE,
+ vecout, count, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ /* Limit error output to the first few mismatches. */
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( invec );
+ free( vecout );
+ }
+
+ MTestFreeComm( &comm );
+ }
+
+ /* Do a zero byte gather */
+ /* With MPI_IN_PLACE the send count/datatype are ignored per the MPI
+    standard; a recvcount of 0 makes the NULL receive buffer legal. */
+ MPI_Allgather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, MPI_COMM_WORLD );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE. This is
+ the trivial version based on the allgather test (allgatherv but with
+ constant data sizes) */
+
+/* Driver: exercises MPI_Allgatherv with MPI_IN_PLACE using constant
+   per-rank counts (so it mirrors the plain allgather test), verifying the
+   assembled vector equals 0..size*count-1. */
+int main( int argc, char **argv )
+{
+ double *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int *displs, *recvcounts;
+
+ MTest_Init( &argc, &argv );
+
+ /* Iterate over the intracommunicators supplied by the MTest harness
+    (at least minsize ranks); MPI_COMM_NULL entries are skipped. */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ displs = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ /* NOTE(review): malloc results are not checked for NULL. */
+
+ for (count = 1; count < 9000; count = count * 2) {
+ vecout = (double *)malloc( size * count * sizeof(double) );
+
+ /* Each rank seeds only its own slice; IN_PLACE allgatherv must
+    complete the rest. */
+ for (i=0; i<count; i++) {
+ vecout[rank*count+i] = rank*count+i;
+ }
+ /* Uniform counts and contiguous displacements -- the degenerate
+    allgatherv case. */
+ for (i=0; i<size; i++) {
+ recvcounts[i] = count;
+ displs[i] = i * count;
+ }
+ /* Send count/datatype are ignored with MPI_IN_PLACE (MPI standard). */
+ MPI_Allgatherv( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, recvcounts, displs, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( vecout );
+ }
+ free( displs );
+ free( recvcounts );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. This is
+ the trivial version based on the allgather test (allgatherv but with
+ constant data sizes) */
+
+/* Driver: exercises MPI_Allgatherv with separate send buffers and constant
+   per-rank counts (allgatherv reduced to the plain allgather case),
+   verifying the assembled vector equals 0..size*count-1. */
+int main( int argc, char **argv )
+{
+ double *vecout, *invec;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int *displs, *recvcounts;
+
+ MTest_Init( &argc, &argv );
+
+ /* Iterate over the intracommunicators supplied by the MTest harness
+    (at least minsize ranks); MPI_COMM_NULL entries are skipped. */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ displs = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ /* NOTE(review): malloc results are not checked for NULL. */
+
+ for (count = 1; count < 9000; count = count * 2) {
+ invec = (double *)malloc( count * sizeof(double) );
+ vecout = (double *)malloc( size * count * sizeof(double) );
+
+ for (i=0; i<count; i++) {
+ invec[i] = rank*count+i;
+ }
+ /* Uniform counts and contiguous displacements -- the degenerate
+    allgatherv case. */
+ for (i=0; i<size; i++) {
+ recvcounts[i] = count;
+ displs[i] = i * count;
+ }
+ MPI_Allgatherv( invec, count, MPI_DOUBLE,
+ vecout, recvcounts, displs, MPI_DOUBLE, comm );
+ for (i=0; i<count*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ /* Limit error output to the first few mismatches. */
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ free( invec );
+ free( vecout );
+ }
+ free( displs );
+ free( recvcounts );
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <time.h>
+#include <math.h>
+#include <assert.h>
+
+/* FIXME: What is this test supposed to accomplish? */
+
+/* Message-size sweep bounds (bytes): from START_BUF doubling up to LARGE_BUF. */
+#define START_BUF (1)
+#define LARGE_BUF (256 * 1024)
+
+/* FIXME: MAX_BUF is too large */
+#define MAX_BUF (128 * 1024 * 1024)
+#define LOOPS 10
+
+/* Per-thread globals; presumably __thread because SMPI can run MPI ranks as
+   threads of one process -- TODO confirm. */
+__thread char * sbuf, * rbuf;
+__thread int * recvcounts, * displs;
+int errs = 0;
+
+/* Debug output is compiled out by default; swap the two definitions to
+   re-enable it. */
+/* #define dprintf printf */
+#define dprintf(...)
+
+/* Receive-count distribution patterns exercised by run_test(). */
+typedef enum {
+ REGULAR,
+ BCAST,
+ SPIKE,
+ HALF_FULL,
+ LINEAR_DECREASE,
+ BELL_CURVE
+} test_t;
+
+void comm_tests(MPI_Comm comm);
+double run_test(long long msg_size, MPI_Comm comm, test_t test_type, double * max_time);
+
+/* Driver: times MPI_Allgatherv under several receive-count distributions on
+   MPI_COMM_WORLD, a world-minus-one split, and a randomly reordered
+   communicator. */
+int main(int argc, char ** argv)
+{
+ int comm_size, comm_rank;
+ MPI_Comm comm;
+
+ MTest_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank);
+
+ /* Skip the whole test when the aggregate receive buffer would exceed
+    MAX_BUF. */
+ if (LARGE_BUF * comm_size > MAX_BUF)
+ goto fn_exit;
+
+ sbuf = (void *) calloc(MAX_BUF, 1);
+ rbuf = (void *) calloc(MAX_BUF, 1);
+
+ srand(time(NULL));
+
+ recvcounts = (void *) malloc(comm_size * sizeof(int));
+ displs = (void *) malloc(comm_size * sizeof(int));
+ if (!recvcounts || !displs || !sbuf || !rbuf) {
+ fprintf(stderr, "Unable to allocate memory:\n");
+ if (!sbuf) fprintf(stderr,"\tsbuf of %d bytes\n", MAX_BUF );
+ if (!rbuf) fprintf(stderr,"\trbuf of %d bytes\n", MAX_BUF );
+ if (!recvcounts) fprintf(stderr,"\trecvcounts of %zd bytes\n", comm_size * sizeof(int) );
+ if (!displs) fprintf(stderr,"\tdispls of %zd bytes\n", comm_size * sizeof(int) );
+ fflush(stderr);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ if (!comm_rank) {
+ dprintf("Message Range: (%d, %d); System size: %d\n", START_BUF, LARGE_BUF, comm_size);
+ fflush(stdout);
+ }
+
+
+ /* COMM_WORLD tests */
+ if (!comm_rank) {
+ dprintf("\n\n==========================================================\n");
+ dprintf(" MPI_COMM_WORLD\n");
+ dprintf("==========================================================\n");
+ }
+ comm_tests(MPI_COMM_WORLD);
+
+ /* non-COMM_WORLD tests */
+ if (!comm_rank) {
+ dprintf("\n\n==========================================================\n");
+ dprintf(" non-COMM_WORLD\n");
+ dprintf("==========================================================\n");
+ }
+ /* Split off the last rank; only the remaining ranks run the tests. */
+ MPI_Comm_split(MPI_COMM_WORLD, (comm_rank == comm_size - 1) ? 0 : 1, 0, &comm);
+ if (comm_rank < comm_size - 1)
+ comm_tests(comm);
+ MPI_Comm_free(&comm);
+
+ /* Randomized communicator tests */
+ if (!comm_rank) {
+ dprintf("\n\n==========================================================\n");
+ dprintf(" Randomized Communicator\n");
+ dprintf("==========================================================\n");
+ }
+ /* Same color everywhere, random key: a world-sized communicator with a
+    randomly permuted rank order. */
+ MPI_Comm_split(MPI_COMM_WORLD, 0, rand(), &comm);
+ comm_tests(comm);
+ MPI_Comm_free(&comm);
+
+ /* NOTE(review): sbuf/rbuf frees are commented out, so both MAX_BUF-sized
+    buffers leak until process exit -- confirm whether this is deliberate
+    (e.g. a workaround for the __thread allocation). */
+ //free(sbuf);
+ //free(rbuf);
+ free(recvcounts);
+ free(displs);
+
+fn_exit:
+ MTest_Finalize(errs);
+ MPI_Finalize();
+
+ return 0;
+}
+
+/* For each message size (doubling from START_BUF to LARGE_BUF), run every
+   receive-count distribution pattern on the given communicator and report
+   the average/maximum times via dprintf (compiled out by default). */
+void comm_tests(MPI_Comm comm)
+{
+ int comm_size, comm_rank;
+ double rtime, max_time;
+ long long msg_size;
+
+ MPI_Comm_size(comm, &comm_size);
+ MPI_Comm_rank(comm, &comm_rank);
+
+ for (msg_size = START_BUF; msg_size <= LARGE_BUF; msg_size *= 2) {
+ if (!comm_rank) {
+ dprintf("\n====> MSG_SIZE: %d\n", (int) msg_size);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, REGULAR, &max_time);
+ if (!comm_rank) {
+ dprintf("REGULAR:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, BCAST, &max_time);
+ if (!comm_rank) {
+ dprintf("BCAST:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, SPIKE, &max_time);
+ if (!comm_rank) {
+ dprintf("SPIKE:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, HALF_FULL, &max_time);
+ if (!comm_rank) {
+ dprintf("HALF_FULL:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, LINEAR_DECREASE, &max_time);
+ if (!comm_rank) {
+ dprintf("LINEAR_DECREASE:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+
+ rtime = run_test(msg_size, comm, BELL_CURVE, &max_time);
+ if (!comm_rank) {
+ dprintf("BELL_CURVE:\tAVG: %.3f\tMAX: %.3f\n", rtime, max_time);
+ fflush(stdout);
+ }
+ }
+}
+
+/* Build the recvcounts/displs arrays for the requested distribution
+   pattern, run LOOPS timed MPI_Allgatherv calls, and return the per-call
+   average time in microseconds (max across ranks via *max_time; both are
+   only meaningful on rank 0 of comm). Returns 0 without running when a
+   computed count would exceed MAX_BUF. */
+double run_test(long long msg_size, MPI_Comm comm, test_t test_type,
+ double * max_time)
+{
+ int i, j;
+ int comm_size, comm_rank;
+ double start, end;
+ double total_time, avg_time;
+ MPI_Aint tmp;
+
+ MPI_Comm_size(comm, &comm_size);
+ MPI_Comm_rank(comm, &comm_rank);
+
+ displs[0] = 0;
+ for (i = 0; i < comm_size; i++) {
+ if (test_type == REGULAR)
+ recvcounts[i] = msg_size;
+ else if (test_type == BCAST)
+ recvcounts[i] = (!i) ? msg_size : 0;
+ else if (test_type == SPIKE)
+ recvcounts[i] = (!i) ? (msg_size / 2) : (msg_size / (2 * (comm_size - 1)));
+ else if (test_type == HALF_FULL)
+ recvcounts[i] = (i < (comm_size / 2)) ? (2 * msg_size) : 0;
+ else if (test_type == LINEAR_DECREASE) {
+ tmp = 2 * msg_size * (comm_size - 1 - i) / (comm_size - 1);
+ /* tmp is an MPI_Aint; the round-trip through (int) detects
+    truncation when the count does not fit in an int. */
+ if (tmp != (int)tmp) {
+ fprintf( stderr, "Integer overflow in variable tmp\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ recvcounts[i] = (int) tmp;
+
+ /* If the maximum message size is too large, don't run */
+ if (tmp > MAX_BUF) return 0;
+ }
+ else if (test_type == BELL_CURVE) {
+ /* NOTE(review): this pattern writes recvcounts/displs for indices
+    i-1+j and forces those displs to 0, so several ranks' data land
+    at the start of rbuf -- presumably acceptable for a pure timing
+    test, but confirm against the upstream MPICH source. */
+ for (j = 0; j < i; j++) {
+ if (i - 1 + j >= comm_size) continue;
+ tmp = msg_size * comm_size / (log(comm_size) * i);
+ recvcounts[i - 1 + j] = (int) tmp;
+ displs[i - 1 + j] = 0;
+
+ /* If the maximum message size is too large, don't run */
+ if (tmp > MAX_BUF) return 0;
+ }
+ }
+
+ /* Default: pack the slices contiguously. */
+ if (i < comm_size - 1)
+ displs[i+1] = displs[i] + recvcounts[i];
+ }
+
+ /* Test that:
+ 1: sbuf is large enough
+ 2: rbuf is large enough
+ 3: There were no failures (e.g., tmp nowhere > rbuf size
+ */
+ MPI_Barrier(comm);
+ start = MPI_Wtime();
+ for (i = 0; i < LOOPS; i++) {
+ MPI_Allgatherv(sbuf, recvcounts[comm_rank], MPI_CHAR,
+ rbuf, recvcounts, displs, MPI_CHAR, comm);
+ }
+ end = MPI_Wtime();
+ MPI_Barrier(comm);
+
+ /* Convert to microseconds (why?) */
+ total_time = 1.0e6 * (end - start);
+ MPI_Reduce(&total_time, &avg_time, 1, MPI_DOUBLE, MPI_SUM, 0, comm);
+ MPI_Reduce(&total_time, max_time, 1, MPI_DOUBLE, MPI_MAX, 0, comm);
+
+ return (avg_time / (LOOPS * comm_size));
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Warning - this test will fail for MPI_PROD & maybe MPI_SUM
+ * if more than 10 MPI processes are used. Loss of precision
+ * will occur as the number of processors is increased.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+
+/* Shared test state: element count per reduction, communicator size/rank,
+   and the cumulative error counter reported at the end. */
+int count, size, rank;
+int cerrcnt;
+
+/* Value/index pairs matching the MPI_{2INT,LONG_INT,...} pair datatypes
+   used by the MINLOC/MAXLOC tests. */
+struct int_test { int a; int b; };
+struct long_test { long a; int b; };
+struct short_test { short a; int b; };
+struct float_test { float a; int b; };
+struct double_test { double a; int b; };
+
+/* Map an MPI_Op handle to a printable name for error reports. */
+#define mpi_op2str(op) \
+ ((op == MPI_SUM) ? "MPI_SUM" : \
+ (op == MPI_PROD) ? "MPI_PROD" : \
+ (op == MPI_MAX) ? "MPI_MAX" : \
+ (op == MPI_MIN) ? "MPI_MIN" : \
+ (op == MPI_LOR) ? "MPI_LOR" : \
+ (op == MPI_LXOR) ? "MPI_LXOR" : \
+ (op == MPI_LAND) ? "MPI_LAND" : \
+ (op == MPI_BOR) ? "MPI_BOR" : \
+ (op == MPI_BAND) ? "MPI_BAND" : \
+ (op == MPI_BXOR) ? "MPI_BXOR" : \
+ (op == MPI_MAXLOC) ? "MPI_MAXLOC" : \
+ (op == MPI_MINLOC) ? "MPI_MINLOC" : \
+ "MPI_NO_OP")
+
+/* calloc to avoid spurious valgrind warnings when "type" has padding bytes */
+#define DECL_MALLOC_IN_OUT_SOL(type) \
+ type *in, *out, *sol; \
+ in = (type *) calloc(count, sizeof(type)); \
+ out = (type *) calloc(count, sizeof(type)); \
+ sol = (type *) calloc(count, sizeof(type));
+
+/* Fill arr[0..count-1] with the constant val. */
+#define SET_INDEX_CONST(arr, val) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i] = val; \
+ }
+
+/* Fill arr[i] = i + val. */
+#define SET_INDEX_SUM(arr, val) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i] = i + val; \
+ }
+
+/* Fill arr[i] = i * val. */
+#define SET_INDEX_FACTOR(arr, val) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i] = i * (val); \
+ }
+
+/* Fill arr[i] = i raised to the power val (expected result of MPI_PROD). */
+#define SET_INDEX_POWER(arr, val) \
+ { \
+ int i, j; \
+ for (i = 0; i < count; i++) { \
+ (arr)[i] = 1; \
+ for (j = 0; j < (val); j++) \
+ arr[i] *= i; \
+ } \
+ }
+
+/* Report the failing type/op pair (if any errors) and free the three
+   buffers declared by DECL_MALLOC_IN_OUT_SOL. */
+#define ERROR_CHECK_AND_FREE(lerrcnt, mpi_type, mpi_op) \
+ do { \
+ char name[MPI_MAX_OBJECT_NAME] = {0}; \
+ int len = 0; \
+ if (lerrcnt) { \
+ MPI_Type_get_name(mpi_type, name, &len); \
+ fprintf(stderr, "(%d) Error for type %s and op %s\n", \
+ rank, name, mpi_op2str(mpi_op)); \
+ } \
+ free(in); free(out); free(sol); \
+ } while(0)
+
+/* The logic on the error check on MPI_Allreduce assumes that all
+ MPI_Allreduce routines return a failure if any do - this is sufficient
+ for MPI implementations that reject some of the valid op/datatype pairs
+ (and motivated this addition, as some versions of the IBM MPI
+ failed in just this way).
+*/
+#define ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol) \
+ { \
+ int i, rc, lerrcnt = 0; \
+ rc = MPI_Allreduce(in, out, count, mpi_type, mpi_op, MPI_COMM_WORLD); \
+ if (rc) { lerrcnt++; cerrcnt++; MTestPrintError( rc ); } \
+ else { \
+ for (i = 0; i < count; i++) { \
+ if (out[i] != sol[i]) { \
+ cerrcnt++; \
+ lerrcnt++; \
+ } \
+ } \
+ } \
+ ERROR_CHECK_AND_FREE(lerrcnt, mpi_type, mpi_op); \
+ }
+
+/* Same as ALLREDUCE_AND_FREE but compares both members of value/index
+   pair structs (for MINLOC/MAXLOC). */
+#define STRUCT_ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol) \
+ { \
+ int i, rc, lerrcnt = 0; \
+ rc = MPI_Allreduce(in, out, count, mpi_type, mpi_op, MPI_COMM_WORLD); \
+ if (rc) { lerrcnt++; cerrcnt++; MTestPrintError( rc ); } \
+ else { \
+ for (i = 0; i < count; i++) { \
+ if ((out[i].a != sol[i].a) || (out[i].b != sol[i].b)) { \
+ cerrcnt++; \
+ lerrcnt++; \
+ } \
+ } \
+ } \
+ ERROR_CHECK_AND_FREE(lerrcnt, mpi_type, mpi_op); \
+ }
+
+/* Struct variants of the fill helpers, writing member el of each element. */
+#define SET_INDEX_STRUCT_CONST(arr, val, el) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i].el = val; \
+ }
+
+#define SET_INDEX_STRUCT_SUM(arr, val, el) \
+ { \
+ int i; \
+ for (i = 0; i < count; i++) \
+ arr[i].el = i + (val); \
+ }
+
+/* Per-operation test bodies: seed in/sol/out, then reduce and verify.
+   E.g. for MPI_SUM, in[i] = i on every rank, so sol[i] = i * size. */
+#define sum_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, 0); \
+ SET_INDEX_FACTOR(sol, size); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_SUM, in, out, sol); \
+ }
+
+#define prod_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, 0); \
+ SET_INDEX_POWER(sol, size); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_PROD, in, out, sol); \
+ }
+
+#define max_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, rank); \
+ SET_INDEX_SUM(sol, size - 1); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_MAX, in, out, sol); \
+ }
+
+#define min_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_SUM(in, rank); \
+ SET_INDEX_SUM(sol, 0); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_MIN, in, out, sol); \
+ }
+
+/* Constant-input reduction: every element of in is val1, expected result
+   val2, out pre-filled with val3. */
+#define const_test(type, mpi_type, mpi_op, val1, val2, val3) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_CONST(in, (val1)); \
+ SET_INDEX_CONST(sol, (val2)); \
+ SET_INDEX_CONST(out, (val3)); \
+ ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol); \
+ }
+
+/* Logical/bitwise op tests expressed as constant-input reductions. */
+#define lor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LOR, (rank & 0x1), (size > 1), 0)
+#define lor_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LOR, 0, 0, 0)
+#define lxor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LXOR, (rank == 1), (size > 1), 0)
+#define lxor_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LXOR, 0, 0, 0)
+#define lxor_test3(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LXOR, 1, (size & 0x1), 0)
+#define land_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LAND, (rank & 0x1), 0, 0)
+#define land_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_LAND, 1, 1, 0)
+#define bor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BOR, (rank & 0x3), ((size < 3) ? size - 1 : 0x3), 0)
+#define bxor_test1(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BXOR, (rank == 1) * 0xf0, (size > 1) * 0xf0, 0)
+#define bxor_test2(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BXOR, 0, 0, 0)
+#define bxor_test3(type, mpi_type) \
+ const_test(type, mpi_type, MPI_BXOR, ~0, (size &0x1) ? ~0 : 0, 0)
+
+/* BAND: one rank contributes i+0..count-1, the rest all-ones, so the AND
+   equals the single distinctive contribution. */
+#define band_test1(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ if (rank == size-1) { \
+ SET_INDEX_SUM(in, 0); \
+ } \
+ else { \
+ SET_INDEX_CONST(in, ~0); \
+ } \
+ SET_INDEX_SUM(sol, 0); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_BAND, in, out, sol); \
+ }
+
+#define band_test2(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ if (rank == size-1) { \
+ SET_INDEX_SUM(in, 0); \
+ } \
+ else { \
+ SET_INDEX_CONST(in, 0); \
+ } \
+ SET_INDEX_CONST(sol, 0); \
+ SET_INDEX_CONST(out, 0); \
+ ALLREDUCE_AND_FREE(mpi_type, MPI_BAND, in, out, sol); \
+ }
+
+/* MAXLOC/MINLOC: member a is the value, member b the contributing rank. */
+#define maxloc_test(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_STRUCT_SUM(in, rank, a); \
+ SET_INDEX_STRUCT_CONST(in, rank, b); \
+ SET_INDEX_STRUCT_SUM(sol, size - 1, a); \
+ SET_INDEX_STRUCT_CONST(sol, size - 1, b); \
+ SET_INDEX_STRUCT_CONST(out, 0, a); \
+ SET_INDEX_STRUCT_CONST(out, -1, b); \
+ STRUCT_ALLREDUCE_AND_FREE(mpi_type, MPI_MAXLOC, in, out, sol); \
+ }
+
+#define minloc_test(type, mpi_type) \
+ { \
+ DECL_MALLOC_IN_OUT_SOL(type); \
+ SET_INDEX_STRUCT_SUM(in, rank, a); \
+ SET_INDEX_STRUCT_CONST(in, rank, b); \
+ SET_INDEX_STRUCT_SUM(sol, 0, a); \
+ SET_INDEX_STRUCT_CONST(sol, 0, b); \
+ SET_INDEX_STRUCT_CONST(out, 0, a); \
+ SET_INDEX_STRUCT_CONST(out, -1, b); \
+ STRUCT_ALLREDUCE_AND_FREE(mpi_type, MPI_MINLOC, in, out, sol); \
+ }
+
+/* Extra fixed-width integer types available only from MPI 2.2 onward. */
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+#define test_types_set_mpi_2_2_integer(op,post) do { \
+ op##_test##post(int8_t, MPI_INT8_T); \
+ op##_test##post(int16_t, MPI_INT16_T); \
+ op##_test##post(int32_t, MPI_INT32_T); \
+ op##_test##post(int64_t, MPI_INT64_T); \
+ op##_test##post(uint8_t, MPI_UINT8_T); \
+ op##_test##post(uint16_t, MPI_UINT16_T); \
+ op##_test##post(uint32_t, MPI_UINT32_T); \
+ op##_test##post(uint64_t, MPI_UINT64_T); \
+ op##_test##post(MPI_Aint, MPI_AINT); \
+ op##_test##post(MPI_Offset, MPI_OFFSET); \
+ } while (0)
+#else
+#define test_types_set_mpi_2_2_integer(op,post) do { } while (0)
+#endif
+
+/* MPI_Count exists only from MPI 3.0 onward. */
+#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
+#define test_types_set_mpi_3_0_integer(op,post) do { \
+ op##_test##post(MPI_Count, MPI_COUNT); \
+ } while (0)
+#else
+#define test_types_set_mpi_3_0_integer(op,post) do { } while (0)
+#endif
+
+/* set1: integer types; set2: set1 plus floating point; set3: MPI_BYTE. */
+#define test_types_set1(op, post) \
+ { \
+ op##_test##post(int, MPI_INT); \
+ op##_test##post(long, MPI_LONG); \
+ op##_test##post(short, MPI_SHORT); \
+ op##_test##post(unsigned short, MPI_UNSIGNED_SHORT); \
+ op##_test##post(unsigned, MPI_UNSIGNED); \
+ op##_test##post(unsigned long, MPI_UNSIGNED_LONG); \
+ op##_test##post(unsigned char, MPI_UNSIGNED_CHAR); \
+ test_types_set_mpi_2_2_integer(op,post); \
+ test_types_set_mpi_3_0_integer(op,post); \
+ }
+
+#define test_types_set2(op, post) \
+ { \
+ test_types_set1(op, post); \
+ op##_test##post(float, MPI_FLOAT); \
+ op##_test##post(double, MPI_DOUBLE); \
+ }
+
+#define test_types_set3(op, post) \
+ { \
+ op##_test##post(unsigned char, MPI_BYTE); \
+ }
+
+/* Make sure that we test complex and double complex, even if long
+ double complex is not available */
+#if defined(USE_LONG_DOUBLE_COMPLEX)
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
+ && defined(HAVE_DOUBLE__COMPLEX) \
+ && defined(HAVE_LONG_DOUBLE__COMPLEX)
+#define test_types_set4(op, post) \
+ do { \
+ op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
+ op##_test##post(double _Complex, MPI_C_DOUBLE_COMPLEX); \
+ if (MPI_C_LONG_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) { \
+ op##_test##post(long double _Complex, MPI_C_LONG_DOUBLE_COMPLEX); \
+ } \
+ } while (0)
+
+#else
+#define test_types_set4(op, post) do { } while (0)
+#endif
+#else
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
+ && defined(HAVE_DOUBLE__COMPLEX)
+#define test_types_set4(op, post) \
+ do { \
+ op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
+ op##_test##post(double _Complex, MPI_C_DOUBLE_COMPLEX); \
+ } while (0)
+
+#else
+#define test_types_set4(op, post) do { } while (0)
+#endif
+
+#endif /* defined(USE_LONG_DOUBLE_COMPLEX) */
+
+/* set5: C99 _Bool, available only with MPI 2.2's MPI_C_BOOL. */
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE__BOOL)
+#define test_types_set5(op, post) \
+ do { \
+ op##_test##post(_Bool, MPI_C_BOOL); \
+ } while (0)
+
+#else
+#define test_types_set5(op, post) do { } while (0)
+#endif
+
+/* Driver: run every (operation, datatype) combination through the test
+   macros above; requires at least 2 ranks. An optional argv[1] overrides
+   the per-reduction element count (default 10). */
+int main( int argc, char **argv )
+{
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if (size < 2) {
+ fprintf( stderr, "At least 2 processes required\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ /* Set errors return so that we can provide better information
+ should a routine reject one of the operand/datatype pairs */
+ /* NOTE(review): MPI_Errhandler_set is deprecated in favor of
+    MPI_Comm_set_errhandler -- kept here as upstream MPICH wrote it. */
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+
+ count = 10;
+ /* Allow an argument to override the count.
+ Note that the product tests may fail if the count is very large.
+ */
+ if (argc >= 2) {
+ count = atoi( argv[1] );
+ if (count <= 0) {
+ fprintf( stderr, "Invalid count argument %s\n", argv[1] );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ }
+
+ /* Arithmetic ops over integer and floating types. */
+ test_types_set2(sum, 1);
+ test_types_set2(prod, 1);
+ test_types_set2(max, 1);
+ test_types_set2(min, 1);
+
+ /* Logical ops over integer types only. */
+ test_types_set1(lor, 1);
+ test_types_set1(lor, 2);
+
+ test_types_set1(lxor, 1);
+ test_types_set1(lxor, 2);
+ test_types_set1(lxor, 3);
+
+ test_types_set1(land, 1);
+ test_types_set1(land, 2);
+
+ /* Bitwise ops over integer types, then again over MPI_BYTE. */
+ test_types_set1(bor, 1);
+ test_types_set1(band, 1);
+ test_types_set1(band, 2);
+
+ test_types_set1(bxor, 1);
+ test_types_set1(bxor, 2);
+ test_types_set1(bxor, 3);
+
+ test_types_set3(bor, 1);
+ test_types_set3(band, 1);
+ test_types_set3(band, 2);
+
+ /* Sum/product over complex types (when available). */
+ test_types_set4(sum, 1);
+ test_types_set4(prod, 1);
+
+ /* Logical ops over _Bool (when available). */
+ test_types_set5(lor, 1);
+ test_types_set5(lor, 2);
+ test_types_set5(lxor, 1);
+ test_types_set5(lxor, 2);
+ test_types_set5(lxor, 3);
+ test_types_set5(land, 1);
+ test_types_set5(land, 2);
+
+ /* MINLOC/MAXLOC over the predefined value/index pair datatypes. */
+ maxloc_test(struct int_test, MPI_2INT);
+ maxloc_test(struct long_test, MPI_LONG_INT);
+ maxloc_test(struct short_test, MPI_SHORT_INT);
+ maxloc_test(struct float_test, MPI_FLOAT_INT);
+ maxloc_test(struct double_test, MPI_DOUBLE_INT);
+
+ minloc_test(struct int_test, MPI_2INT);
+ minloc_test(struct long_test, MPI_LONG_INT);
+ minloc_test(struct short_test, MPI_SHORT_INT);
+ minloc_test(struct float_test, MPI_FLOAT_INT);
+ minloc_test(struct double_test, MPI_DOUBLE_INT);
+
+ MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
+ MTest_Finalize( cerrcnt );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with MPI_IN_PLACE";
+*/
+
+/* Driver: in-place MPI_Allreduce(MPI_SUM) on contiguous int buffers.
+   Rank r contributes r+i at index i, so the reduced value must be
+   i*size + size*(size-1)/2. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ int *buf, i;
+
+ MTest_Init( &argc, &argv );
+
+ /* Iterate over the intracommunicators supplied by the MTest harness
+    (at least minsize ranks); MPI_COMM_NULL entries are skipped. */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+
+ for (count = 1; count < 65000; count = count * 2) {
+ /* Contiguous data */
+ buf = (int *)malloc( count * sizeof(int) );
+ /* NOTE(review): malloc result is not checked for NULL. */
+ for (i=0; i<count; i++) buf[i] = rank + i;
+ /* MPI_IN_PLACE: buf is both input and output. */
+ MPI_Allreduce( MPI_IN_PLACE, buf, count, MPI_INT, MPI_SUM, comm );
+ /* Check the results */
+ for (i=0; i<count; i++) {
+ int result = i * size + (size*(size-1))/2;
+ if (buf[i] != result) {
+ errs ++;
+ /* Limit error output to the first few mismatches. */
+ if (errs < 10) {
+ fprintf( stderr, "buf[%d] = %d expected %d\n",
+ i, buf[i], result );
+ }
+ }
+ }
+ free( buf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <assert.h>
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with non-commutative user-defined operations";
+*/
+
+/* We make the error count global so that we can easily control the output
+   of error information (in particular, limiting it after the first 10
+   errors) */
+int errs = 0;
+
+/* This implements a simple matrix-matrix multiply.  This is an associative
+   but not commutative operation.  The matrix size is set in matSize;
+   the number of matrices is the count argument. The matrix is stored
+   in C order, so that
+     c(i,j) is cin[j+i*matSize]
+ */
+#define MAXCOL 256
+static int matSize = 0;    /* Must be < MAXCOL */
+static int max_offset = 0; /* bound used by the assert() range checks below */
+void uop( void *, void *, int *, MPI_Datatype * );
+/* User-defined MPI reduction: cout = cin * cout (matrix product), applied
+   to each of *count matrices.  Signature matches MPI_User_function. */
+void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+    const int *cin = (const int *)cinPtr;
+    int *cout = (int *)coutPtr;
+    int i, j, k, nmat;
+    int tempcol[MAXCOL];   /* one column of the product, built in place */
+    int offset1, offset2;
+    int matsize2 = matSize*matSize;
+
+    for (nmat = 0; nmat < *count; nmat++) {
+        for (j=0; j<matSize; j++) {
+            for (i=0; i<matSize; i++) {
+                tempcol[i] = 0;
+                for (k=0; k<matSize; k++) {
+                    /* col[i] += cin(i,k) * cout(k,j) */
+                    offset1 = k+i*matSize;
+                    offset2 = j+k*matSize;
+                    assert(offset1 < max_offset);
+                    assert(offset2 < max_offset);
+                    tempcol[i] += cin[offset1] * cout[offset2];
+                }
+            }
+            /* Write the finished column back into cout */
+            for (i=0; i<matSize; i++) {
+                offset1 = j+i*matSize;
+                assert(offset1 < max_offset);
+                cout[offset1] = tempcol[i];
+            }
+        }
+        /* Advance both operands to the next matrix */
+        cin  += matsize2;
+        cout += matsize2;
+    }
+}
+
+/* Initialize the integer matrix as a permutation of rank with rank+1.
+   If we call this matrix P_r, we know that the product of P_0 P_1 ... P_{size-2}
+   is the matrix representing the permutation that shifts left by one.
+   As the final matrix (in the size-1 position), we use the matrix that
+   shifts RIGHT by one; the full product is therefore the identity.
+*/
+static void initMat( MPI_Comm comm, int mat[] )
+{
+    int i, j, size, rank;
+    int offset;
+
+    MPI_Comm_rank( comm, &rank );
+    MPI_Comm_size( comm, &size );
+
+    /* Zero the whole size x size matrix first */
+    for (i=0; i<size*size; i++) {
+        assert(i < max_offset);
+        mat[i] = 0;
+    }
+
+    if (rank < size-1) {
+        /* Create the permutation matrix that exchanges r with r+1 */
+        for (i=0; i<size; i++) {
+            if (i == rank) {
+                offset = ((i+1)%size) + i * size;
+                assert(offset < max_offset);
+                mat[offset] = 1;
+            }
+            else if (i == ((rank + 1)%size)) {
+                offset = ((i+size-1)%size) + i * size;
+                assert(offset < max_offset);
+                mat[offset] = 1;
+            }
+            else {
+                /* Identity on every other row */
+                offset = i+i*size;
+                assert(offset < max_offset);
+                mat[offset] = 1;
+            }
+        }
+    }
+    else {
+        /* Create the permutation matrix that shifts right by one */
+        for (i=0; i<size; i++) {
+            for (j=0; j<size; j++) {
+                offset = j + i * size; /* location of c(i,j) */
+                mat[offset] = 0;
+                if ( ((j-i+size)%size) == 1 ) mat[offset] = 1;
+            }
+        }
+
+    }
+}
+
+/* Compare a matrix with the identity matrix; returns the number of
+   mismatched entries (printing at most ~10 diagnostics overall) */
+static int isIdentity( MPI_Comm comm, int mat[] )
+{
+    int i, j, size, rank, lerrs = 0;
+    int offset;
+
+    MPI_Comm_rank( comm, &rank );
+    MPI_Comm_size( comm, &size );
+
+    for (i=0; i<size; i++) {
+        for (j=0; j<size; j++) {
+            if (i == j) {
+                /* Diagonal entries must be 1 */
+                offset = j+i*size;
+                assert(offset < max_offset);
+                if (mat[offset] != 1) {
+                    lerrs++;
+                    if (errs + lerrs< 10) {
+                        printf( "[%d] mat[%d,%d] = %d, expected 1 for comm %s\n",
+                                rank, i,j, mat[offset], MTestGetIntracommName() );
+                    }
+                }
+            }
+            else {
+                /* Off-diagonal entries must be 0 */
+                offset = j+i*size;
+                assert(offset < max_offset);
+                if (mat[offset] != 0) {
+                    lerrs++;
+                    if (errs + lerrs< 10) {
+                        printf( "[%d] mat[%d,%d] = %d, expected 0 for comm %s\n",
+                                rank, i,j, mat[offset], MTestGetIntracommName() );
+                    }
+                }
+            }
+        }
+    }
+    return lerrs;
+}
+
+/* Reduce one size x size permutation matrix per rank with the
+   non-commutative matrix-multiply op; the result must be the identity,
+   both with separate buffers and with MPI_IN_PLACE. */
+int main( int argc, char *argv[] )
+{
+    int size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    int *buf, *bufout;
+    MPI_Op op;
+    MPI_Datatype mattype;
+
+    MTest_Init( &argc, &argv );
+
+    /* commute = 0: the op is declared non-commutative */
+    MPI_Op_create( uop, 0, &op );
+
+    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+        if (comm == MPI_COMM_NULL) {
+            continue;
+        }
+        MPI_Comm_size( comm, &size );
+        matSize = size;
+
+        /* Only one matrix for now */
+        count = 1;
+
+        /* A single matrix, the size of the communicator */
+        MPI_Type_contiguous( size*size, MPI_INT, &mattype );
+        MPI_Type_commit( &mattype );
+
+        max_offset = count * size * size;
+        buf = (int *)malloc( max_offset * sizeof(int) );
+        if (!buf) {
+            MPI_Abort( MPI_COMM_WORLD, 1 );
+        }
+        bufout = (int *)malloc( max_offset * sizeof(int) );
+        if (!bufout) {
+            MPI_Abort( MPI_COMM_WORLD, 1 );
+        }
+
+        initMat( comm, buf );
+        MPI_Allreduce( buf, bufout, count, mattype, op, comm );
+        errs += isIdentity( comm, bufout );
+
+        /* Try the same test, but using MPI_IN_PLACE */
+        initMat( comm, bufout );
+        MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
+        errs += isIdentity( comm, bufout );
+
+        free( buf );
+        free( bufout );
+
+        /* NOTE(review): the frees below are deliberately commented out in
+           this port (mattype leaks each iteration) — presumably because
+           the target SMPI implementation mishandles them; confirm before
+           re-enabling. */
+        //MPI_Type_free( &mattype );
+        MTestFreeComm( &comm );
+    }
+
+    // MPI_Op_free( &op );
+
+    MTest_Finalize( errs );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2004 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <assert.h>
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with non-commutative user-defined operations using matrix rotations";
+*/
+
+/* This example is similar to allred3.c, but uses only 3x3 matrices with
+   integer-valued entries.  This is an associative but not commutative
+   operation.
+   The number of matrices is the count argument. The matrix is stored
+   in C order, so that
+     c(i,j) is cin[j+i*3]
+
+   Three different matrices are used:
+   I = identity matrix
+   A = (1 0 0    B = (0 1 0
+        0 0 1         1 0 0
+        0 1 0)        0 0 1)
+
+   The product
+
+         I^k A I^(p-2-k-j) B I^j
+
+   is
+
+   ( 0 1 0
+     0 0 1
+     1 0 0 )
+
+   for all values of k, p, and j.
+ */
+
+void matmult( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+
+/* User-defined MPI reduction: cout = cin * cout for each of *count 3x3
+   integer matrices.  Signature matches MPI_User_function. */
+void matmult( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+    const int *cin = (const int *)cinPtr;
+    int *cout = (int *)coutPtr;
+    int i, j, k, nmat;
+    int tempcol[3];
+    int offset1, offset2;
+
+    for (nmat = 0; nmat < *count; nmat++) {
+        for (j=0; j<3; j++) {
+            for (i=0; i<3; i++) {
+                tempcol[i] = 0;
+                for (k=0; k<3; k++) {
+                    /* col[i] += cin(i,k) * cout(k,j) */
+                    offset1 = k+i*3;
+                    offset2 = j+k*3;
+                    tempcol[i] += cin[offset1] * cout[offset2];
+                }
+            }
+            for (i=0; i<3; i++) {
+                offset1 = j+i*3;
+                cout[offset1] = tempcol[i];
+            }
+        }
+        /* Advance to the next matrix */
+        cin += 9;
+        cout += 9;
+    }
+}
+
+/* Initialize the integer matrix as one of the
+   above matrix entries, as a function of count.
+   We guarantee that both the A and B matrices are included.
+*/
+static void initMat( int rank, int size, int nmat, int mat[] )
+{
+    int i, kind;
+
+    /* Zero the matrix */
+    for (i=0; i<9; i++) {
+        mat[i] = 0;
+    }
+
+    /* Decide which matrix to create (I, A, or B) */
+    if ( size == 2) {
+        /* rank 0 is A, 1 is B */
+        kind = 1 + rank;
+    }
+    else {
+        int tmpA, tmpB;
+        /* Most ranks are identity matrices */
+        kind = 0;
+        /* Make sure exactly one rank gets the A matrix
+           and one the B matrix */
+        tmpA = size / 4;
+        tmpB = (3 * size) / 4;
+
+        if (rank == tmpA) kind = 1;
+        if (rank == tmpB) kind = 2;
+    }
+
+    switch (kind) {
+    case 0: /* Identity */
+        mat[0] = 1;
+        mat[4] = 1;
+        mat[8] = 1;
+        break;
+    case 1: /* A */
+        mat[0] = 1;
+        mat[5] = 1;
+        mat[7] = 1;
+        break;
+    case 2: /* B */
+        mat[1] = 1;
+        mat[3] = 1;
+        mat[8] = 1;
+        break;
+    }
+}
+
+/* Compare nmat consecutive matrices against the known rotation result;
+   returns the number of mismatched entries found */
+static int checkResult( int nmat, int mat[], const char *msg )
+{
+    int n, k, errs = 0, wrank;
+    static int solution[9] = { 0, 1, 0,
+                               0, 0, 1,
+                               1, 0, 0 };
+
+    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+
+    for (n=0; n<nmat; n++) {
+        for (k=0; k<9; k++) {
+            if (mat[k] != solution[k]) {
+                errs ++;
+                if (errs == 1) {
+                    printf( "Errors for communicators %s\n",
+                            MTestGetIntracommName() ); fflush(stdout);
+
+                }
+                if (errs < 10) {
+                    printf( "[%d]matrix #%d(%s): Expected mat[%d,%d] = %d, got %d\n",
+                            wrank, n, msg, k / 3, k % 3, solution[k], mat[k] );
+                    fflush(stdout);
+                }
+            }
+        }
+        /* Advance to the next matrix */
+        mat += 9;
+    }
+    return errs;
+}
+
+/* Reduce count 3x3 rotation matrices per rank with the non-commutative
+   matmult op and verify the known product, with and without MPI_IN_PLACE. */
+int main( int argc, char *argv[] )
+{
+    int errs = 0;
+    int size, rank;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    int *buf, *bufout;
+    MPI_Op op;
+    MPI_Datatype mattype;
+    int i;
+
+    MTest_Init( &argc, &argv );
+
+    /* commute = 0: the op is declared non-commutative */
+    MPI_Op_create( matmult, 0, &op );
+
+    /* A single rotation matrix (3x3, stored as 9 consecutive elements) */
+    MPI_Type_contiguous( 9, MPI_INT, &mattype );
+    MPI_Type_commit( &mattype );
+
+    /* Sanity check: test that our routines work properly */
+    { int one = 1;
+    buf = (int *)malloc( 4*9 * sizeof(int) );
+    initMat( 0, 4, 0, &buf[0] );
+    initMat( 1, 4, 0, &buf[9] );
+    initMat( 2, 4, 0, &buf[18] );
+    initMat( 3, 4, 0, &buf[27] );
+    matmult( &buf[0], &buf[9], &one, &mattype );
+    matmult( &buf[9], &buf[18], &one, &mattype );
+    matmult( &buf[18], &buf[27], &one, &mattype );
+    /* FIX(review): the original discarded checkResult's return value, so
+       a broken matmult would print diagnostics but still report success.
+       Count any sanity-check mismatches as test errors. */
+    errs += checkResult( 1, &buf[27], "Sanity Check" );
+    free(buf);
+    }
+
+    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+        if (comm == MPI_COMM_NULL) continue;
+
+        MPI_Comm_size( comm, &size );
+        MPI_Comm_rank( comm, &rank );
+
+        for (count = 1; count < size; count ++ ) {
+
+            /* Allocate the matrices */
+            buf = (int *)malloc( count * 9 * sizeof(int) );
+            if (!buf) {
+                MPI_Abort( MPI_COMM_WORLD, 1 );
+            }
+
+            bufout = (int *)malloc( count * 9 * sizeof(int) );
+            if (!bufout) {
+                MPI_Abort( MPI_COMM_WORLD, 1 );
+            }
+
+            for (i=0; i < count; i++) {
+                initMat( rank, size, i, &buf[i*9] );
+            }
+
+            MPI_Allreduce( buf, bufout, count, mattype, op, comm );
+            errs += checkResult( count, bufout, "" );
+
+            /* Try the same test, but using MPI_IN_PLACE */
+            for (i=0; i < count; i++) {
+                initMat( rank, size, i, &bufout[i*9] );
+            }
+            MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
+            errs += checkResult( count, bufout, "IN_PLACE" );
+
+            free( buf );
+            free( bufout );
+        }
+        MTestFreeComm( &comm );
+    }
+
+    MPI_Op_free( &op );
+    MPI_Type_free( &mattype );
+
+    MTest_Finalize( errs );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+#include <assert.h>
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with count greater than the number of processes";
+*/
+
+/* We make the error count global so that we can easily control the output
+   of error information (in particular, limiting it after the first 10
+   errors) */
+int errs = 0;
+
+/* Sums count = 2*size integers across all ranks; every rank supplies
+   bufin[i] = i, so the reduced value must be i*size everywhere. */
+int main( int argc, char *argv[] )
+{
+    MPI_Comm comm;
+    MPI_Datatype dtype;
+    int count, *bufin, *bufout, size, i, minsize=1;
+
+    MTest_Init( &argc, &argv );
+
+    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+        if (comm == MPI_COMM_NULL) {
+            continue;
+        }
+        MPI_Comm_size( comm, &size );
+        /* Count deliberately exceeds the number of processes */
+        count = size * 2;
+        bufin = (int *)malloc( count * sizeof(int) );
+        bufout = (int *)malloc( count * sizeof(int) );
+        if (!bufin || !bufout) {
+            fprintf( stderr, "Unable to allocated space for buffers (%d)\n",
+                     count );
+            MPI_Abort( MPI_COMM_WORLD, 1 );
+        }
+        for (i=0; i<count; i++) {
+            bufin[i] = i;
+            bufout[i] = -1;  /* sentinel: must be overwritten */
+        }
+
+        dtype = MPI_INT;
+        MPI_Allreduce( bufin, bufout, count, dtype, MPI_SUM, comm );
+        /* Check output: each entry is summed over all size ranks */
+        for (i=0; i<count; i++) {
+            if (bufout[i] != i * size) {
+                fprintf( stderr, "Expected bufout[%d] = %d but found %d\n",
+                         i, i * size, bufout[i] );
+                errs++;
+            }
+        }
+        free( bufin );
+        free( bufout );
+        MTestFreeComm( &comm );
+    }
+
+    MTest_Finalize( errs );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Allreduce with apparent non-commutative operators";
+*/
+/* While the operator is in fact commutative, this forces the MPI code to
+   run the code that is used for non-commutative operators, and for
+   various message lengths.  Other tests check truly non-commutative
+   operators */
+
+void mysum( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+
+/* User-defined reduction: elementwise integer sum (commutative in fact,
+   but registered as non-commutative below).  Matches MPI_User_function. */
+void mysum( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+{
+    const int *cin = (const int *)cinPtr;
+    int *cout = (int *)coutPtr;
+    int i, n = *count;
+    for (i=0; i<n; i++)
+        cout[i] += cin[i];
+}
+
+/* Same check as allred2 (MPI_IN_PLACE sum), but through the user-defined
+   op so the non-commutative reduction path is exercised. */
+int main( int argc, char *argv[] )
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    MPI_Op op;
+    int *buf, i;
+
+    MTest_Init( &argc, &argv );
+
+    /* commute = 0 forces the non-commutative algorithm */
+    MPI_Op_create( mysum, 0, &op );
+
+    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+        if (comm == MPI_COMM_NULL) continue;
+        MPI_Comm_size( comm, &size );
+        MPI_Comm_rank( comm, &rank );
+
+        for (count = 1; count < 65000; count = count * 2) {
+            /* Contiguous data */
+            buf = (int *)malloc( count * sizeof(int) );
+            for (i=0; i<count; i++) buf[i] = rank + i;
+            MPI_Allreduce( MPI_IN_PLACE, buf, count, MPI_INT, op, comm );
+            /* Check the results: sum of (rank + i) over all ranks */
+            for (i=0; i<count; i++) {
+                int result = i * size + (size*(size-1))/2;
+                if (buf[i] != result) {
+                    errs ++;
+                    if (errs < 10) {
+                        fprintf( stderr, "buf[%d] = %d expected %d\n",
+                                 i, buf[i], result );
+                    }
+                }
+            }
+            free( buf );
+        }
+        MTestFreeComm( &comm );
+    }
+    MPI_Op_free( &op );
+
+    MTest_Finalize( errs );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include "mpi.h"
+
+/*
+ * This example should be run with 2 processes and tests the ability of the
+ * implementation to handle a flood of one-way messages.
+ */
+
+/* Stress test: 10000 back-to-back MPI_Allreduce calls; passing means
+   simply completing without deadlock or error (no result checking). */
+int main( int argc, char **argv )
+{
+    double wscale = 10.0, scale;
+    int numprocs, myid,i;
+
+    MPI_Init(&argc,&argv);
+    MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
+    MPI_Comm_rank(MPI_COMM_WORLD,&myid);
+
+    for ( i=0; i<10000; i++) {
+        MPI_Allreduce(&wscale,&scale,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
+    }
+
+    if (myid == 0) {
+        /* If we get here at all, we're ok */
+        printf( " No Errors\n" );
+    }
+    MPI_Finalize();
+
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+#include <stdlib.h>
+
+/*
+static char MTEST_Descrip[] = "";
+*/
+
+/* Tests MPI_Alltoall over many communicators and counts; rank r sends
+   block {j*size + r + i : i<count} to rank j, so after the exchange rank r
+   must hold r*size + j + i in block j.  Also checks the MPI-2.2
+   MPI_IN_PLACE variant when the test framework advertises MPI >= 2.2. */
+int main( int argc, char *argv[] )
+{
+    int errs = 0;
+    int rank, size;
+    int minsize = 2, count;
+    MPI_Comm comm;
+    int *sendbuf, *recvbuf, *p;
+    int sendcount, recvcount;
+    int i, j;
+    MPI_Datatype sendtype, recvtype;
+
+    MTest_Init( &argc, &argv );
+
+    /* The following illustrates the use of the routines to
+       run through a selection of communicators and datatypes.
+       Use subsets of these for tests that do not involve combinations
+       of communicators, datatypes, and counts of datatypes */
+    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+        if (comm == MPI_COMM_NULL) continue;
+
+        /* Determine the sender and receiver */
+        MPI_Comm_rank( comm, &rank );
+        MPI_Comm_size( comm, &size );
+
+        /* printf( "Size of comm = %d\n", size ); */
+        for (count = 1; count < 65000; count = count * 2) {
+
+            /* Create a send buf and a receive buf suitable for testing
+               all to all.  */
+            sendcount = count;
+            recvcount = count;
+            sendbuf = (int *)malloc( count * size * sizeof(int) );
+            recvbuf = (int *)malloc( count * size * sizeof(int) );
+            sendtype = MPI_INT;
+            recvtype = MPI_INT;
+
+            if (!sendbuf || !recvbuf) {
+                errs++;
+                fprintf( stderr, "Failed to allocate sendbuf and/or recvbuf\n" );
+                MPI_Abort( MPI_COMM_WORLD, 1 );
+            }
+            for (i=0; i<count*size; i++)
+                recvbuf[i] = -1;  /* sentinel: must be overwritten */
+            /* Block destined for rank j holds j*size + rank + i */
+            p = sendbuf;
+            for (j=0; j<size; j++) {
+                for (i=0; i<count; i++) {
+                    *p++ = j * size + rank + i;
+                }
+            }
+
+            MPI_Alltoall( sendbuf, sendcount, sendtype,
+                          recvbuf, recvcount, recvtype, comm );
+
+            /* Block received from rank j must hold rank*size + j + i */
+            p = recvbuf;
+            for (j=0; j<size; j++) {
+                for (i=0; i<count; i++) {
+                    if (*p != rank * size + j + i) {
+                        errs++;
+                        if (errs < 10) {
+                            fprintf( stderr, "Error with communicator %s and size=%d count=%d\n",
+                                     MTestGetIntracommName(), size, count );
+                            fprintf( stderr, "recvbuf[%d,%d] = %d, should %d\n",
+                                     j,i, *p, rank * size + j + i );
+                        }
+                    }
+                    p++;
+                }
+            }
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+            /* check MPI_IN_PLACE, added in MPI-2.2: send data is taken
+               from recvbuf itself; sendcount/sendtype are ignored */
+            p = recvbuf;
+            for (j=0; j<size; j++) {
+                for (i=0; i<count; i++) {
+                    *p++ = j * size + rank + i;
+                }
+            }
+            MPI_Alltoall( MPI_IN_PLACE, -1/*ignored*/, MPI_DATATYPE_NULL/*ignored*/,
+                          recvbuf, recvcount, recvtype, comm );
+            p = recvbuf;
+            for (j=0; j<size; j++) {
+                for (i=0; i<count; i++) {
+                    if (*p != rank * size + j + i) {
+                        errs++;
+                        if (errs < 10) {
+                            fprintf( stderr, "Error (MPI_IN_PLACE) with communicator %s and size=%d count=%d\n",
+                                     MTestGetIntracommName(), size, count );
+                            fprintf(stderr, "recvbuf[%d,%d] = %d, should be %d\n",
+                                    j,i, *p, rank * size + j + i );
+                        }
+                    }
+                    p++;
+                }
+            }
+#endif
+
+            free( recvbuf );
+            free( sendbuf );
+        }
+        MTestFreeComm( &comm );
+    }
+
+    MTest_Finalize( errs );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/*
+  This program tests MPI_Alltoallv by having processor i send different
+  amounts of data to each processor.
+
+  Because there are separate send and receive types to alltoallv,
+  there need to be tests to rearrange data on the fly.  Not done yet.
+
+  The first test sends i items to processor i from all processors.
+
+  Currently, the test uses only MPI_INT; this is adequate for testing systems
+  that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+    MPI_Comm comm;
+    int      *sbuf, *rbuf;
+    int      rank, size;
+    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
+    int      i, j, *p, err;
+
+    MTest_Init( &argc, &argv );
+    err = 0;
+
+    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
+        if (comm == MPI_COMM_NULL) continue;
+
+        /* Create the buffer */
+        MPI_Comm_size( comm, &size );
+        MPI_Comm_rank( comm, &rank );
+        sbuf = (int *)malloc( size * size * sizeof(int) );
+        rbuf = (int *)malloc( size * size * sizeof(int) );
+        if (!sbuf || !rbuf) {
+            fprintf( stderr, "Could not allocated buffers!\n" );
+            MPI_Abort( comm, 1 );
+        }
+
+        /* Load up the buffers: sbuf entry k on rank r is k + 100*r */
+        for (i=0; i<size*size; i++) {
+            sbuf[i] = i + 100*rank;
+            rbuf[i] = -i;  /* sentinel values */
+        }
+
+        /* Create and load the arguments to alltoallv: rank r sends i
+           items to rank i (from offset i*(i+1)/2) and receives r items
+           from everyone (into offset i*r) */
+        sendcounts = (int *)malloc( size * sizeof(int) );
+        recvcounts = (int *)malloc( size * sizeof(int) );
+        rdispls    = (int *)malloc( size * sizeof(int) );
+        sdispls    = (int *)malloc( size * sizeof(int) );
+        if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+            fprintf( stderr, "Could not allocate arg items!\n" );
+            MPI_Abort( comm, 1 );
+        }
+        for (i=0; i<size; i++) {
+            sendcounts[i] = i;
+            recvcounts[i] = rank;
+            rdispls[i]    = i * rank;
+            sdispls[i]    = (i * (i+1))/2;
+        }
+        MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
+                       rbuf, recvcounts, rdispls, MPI_INT, comm );
+
+        /* Check rbuf: block i came from rank i's sbuf at offset
+           rank*(rank+1)/2, whose values are 100*i + that offset + j */
+        for (i=0; i<size; i++) {
+            p = rbuf + rdispls[i];
+            for (j=0; j<rank; j++) {
+                if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+                    /* FIX(review): the original message printed
+                       (i*(i+1))/2 + j as the expected value, which is not
+                       the value the comparison above uses; print the real
+                       expected value so failures are diagnosable. */
+                    fprintf( stderr, "[%d] got %d expected %d for %dth\n",
+                             rank, p[j], i * 100 + (rank*(rank+1))/2 + j, j );
+                    err++;
+                }
+            }
+        }
+
+        free( sdispls );
+        free( sendcounts );
+        free( sbuf );
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+        /* check MPI_IN_PLACE, added in MPI-2.2: data is sent from and
+           received into rbuf; send arguments are ignored */
+        free( rbuf );
+        rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
+        if (!rbuf) {
+            fprintf( stderr, "Could not reallocate rbuf!\n" );
+            MPI_Abort( comm, 1 );
+        }
+
+        /* Load up the buffers */
+        for (i = 0; i < size; i++) {
+            recvcounts[i] = i + rank;
+            rdispls[i]    = i * (2 * size);
+        }
+        memset(rbuf, -1, size * (2 * size) * sizeof(int));
+        for (i=0; i < size; i++) {
+            p = rbuf + rdispls[i];
+            for (j = 0; j < recvcounts[i]; ++j) {
+                p[j] = 100 * rank + 10 * i + j;
+            }
+        }
+        MPI_Alltoallv( MPI_IN_PLACE, NULL, NULL, MPI_INT,
+                       rbuf, recvcounts, rdispls, MPI_INT, comm );
+        /* Check rbuf */
+        for (i=0; i<size; i++) {
+            p = rbuf + rdispls[i];
+            for (j=0; j<recvcounts[i]; j++) {
+                int expected = 100 * i + 10 * rank + j;
+                if (p[j] != expected) {
+                    fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
+                            rank, p[j], expected, i, j);
+                    ++err;
+                }
+            }
+        }
+#endif
+
+        free( rdispls );
+        free( recvcounts );
+        free( rbuf );
+        MTestFreeComm( &comm );
+    }
+
+    MTest_Finalize( err );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/*
+  This program tests MPI_Alltoallv by having each process
+  send data to two neighbors only, using counts of 0 for the other processes.
+  This idiom is sometimes used for halo exchange operations.
+
+  Because there are separate send and receive types to alltoallv,
+  there need to be tests to rearrange data on the fly.  Not done yet.
+
+  Currently, the test uses only MPI_INT; this is adequate for testing systems
+  that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+    MPI_Comm comm;
+    int      *sbuf, *rbuf;
+    int      rank, size;
+    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
+    int      i, *p, err;
+    int      left, right, length;
+
+    MTest_Init( &argc, &argv );
+    err = 0;
+
+    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
+        if (comm == MPI_COMM_NULL) continue;
+
+        MPI_Comm_size( comm, &size );
+        MPI_Comm_rank( comm, &rank );
+
+        if (size < 3) {
+            /* FIX(review): the original continued without releasing the
+               communicator obtained from MTestGetIntracommGeneral, leaking
+               one comm per skipped iteration.  MTestFreeComm is safe here:
+               it ignores predefined communicators. */
+            MTestFreeComm( &comm );
+            continue;
+        }
+
+        /* Create and load the arguments to alltoallv */
+        sendcounts = (int *)malloc( size * sizeof(int) );
+        recvcounts = (int *)malloc( size * sizeof(int) );
+        rdispls    = (int *)malloc( size * sizeof(int) );
+        sdispls    = (int *)malloc( size * sizeof(int) );
+        if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+            fprintf( stderr, "Could not allocate arg items!\n" );
+            MPI_Abort( comm, 1 );
+        }
+
+        /* Get the ring neighbors */
+        left  = (rank - 1 + size) % size;
+        right = (rank + 1) % size;
+
+        /* Set the defaults: no data exchanged with non-neighbors */
+        for (i=0; i<size; i++) {
+            sendcounts[i] = 0;
+            recvcounts[i] = 0;
+            rdispls[i]    = 0;
+            sdispls[i]    = 0;
+        }
+
+        for (length=1; length < 66000; length = length*2+1 ) {
+            /* Get the buffers */
+            sbuf = (int *)malloc( 2 * length * sizeof(int) );
+            rbuf = (int *)malloc( 2 * length * sizeof(int) );
+            if (!sbuf || !rbuf) {
+                fprintf( stderr, "Could not allocate buffers!\n" );
+                MPI_Abort( comm, 1 );
+            }
+
+            /* Load up the buffers: both halves of sbuf carry
+               i + 100000*rank; rbuf gets sentinel values */
+            for (i=0; i<length; i++) {
+                sbuf[i]        = i + 100000*rank;
+                sbuf[i+length] = i + 100000*rank;
+                rbuf[i]        = -i;
+                rbuf[i+length] = -i-length;
+            }
+            sendcounts[left]  = length;
+            sendcounts[right] = length;
+            recvcounts[left]  = length;
+            recvcounts[right] = length;
+            rdispls[left]     = 0;
+            rdispls[right]    = length;
+            sdispls[left]     = 0;
+            sdispls[right]    = length;
+
+            MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
+                           rbuf, recvcounts, rdispls, MPI_INT, comm );
+
+            /* Check rbuf: first half came from the left neighbor */
+            p = rbuf;          /* left */
+
+            for (i=0; i<length; i++) {
+                if (p[i] != i + 100000 * left) {
+                    if (err < 10) {
+                        fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n",
+                                 rank, left, p[i], i + 100000 * left, i );
+                    }
+                    err++;
+                }
+            }
+
+            /* Second half came from the right neighbor */
+            p = rbuf + length; /* right */
+            for (i=0; i<length; i++) {
+                if (p[i] != i + 100000 * right) {
+                    if (err < 10) {
+                        fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n",
+                                 rank, right, p[i], i + 100000 * right, i );
+                    }
+                    err++;
+                }
+            }
+
+            free( rbuf );
+            free( sbuf );
+        }
+
+        free( sdispls );
+        free( rdispls );
+        free( recvcounts );
+        free( sendcounts );
+        MTestFreeComm( &comm );
+    }
+
+    MTest_Finalize( err );
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  Changes to this example
+ *  (C) 2001 by Argonne National Laboratory.
+ *  See COPYRIGHT in top-level directory.
+ */
+
+/*
+ * This example is taken from MPI-The complete reference, Vol 1,
+ * pages 222-224.
+ *
+ * Lines after the "--CUT HERE--" were added to make this into a complete
+ * test program.
+ */
+
+/* Specify the maximum number of errors to report. */
+#define MAX_ERRORS 10
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+#define MAX_SIZE 64
+
+MPI_Datatype transpose_type(int M, int m, int n, MPI_Datatype type);
+MPI_Datatype submatrix_type(int N, int m, int n, MPI_Datatype type);
+void Transpose(float *localA, float *localB, int M, int N, MPI_Comm comm);
+void Transpose(float *localA, float *localB, int M, int N, MPI_Comm comm)
+/* transpose MxN matrix A that is block distributed (1-D) on
+   processes of comm onto block distributed matrix B  */
+{
+    int i, j, extent, myrank, p, n[2], m[2];
+    int lasti, lastj;
+    int *sendcounts, *recvcounts;
+    int *sdispls, *rdispls;
+    MPI_Datatype xtype[2][2], stype[2][2], *sendtypes, *recvtypes;
+
+    MTestPrintfMsg( 2, "M = %d, N = %d\n", M, N );
+
+    /* compute parameters */
+    MPI_Comm_size(comm, &p);
+    MPI_Comm_rank(comm, &myrank);
+    /* NOTE(review): extent is computed as sizeof(float) rather than via
+       MPI_Type_get_extent; this works because the element type used here
+       is MPI_FLOAT, but it would be wrong for padded/resized types */
+    extent = sizeof(float);
+
+    /* allocate arrays */
+    sendcounts = (int *)malloc(p*sizeof(int));
+    recvcounts = (int *)malloc(p*sizeof(int));
+    sdispls    = (int *)malloc(p*sizeof(int));
+    rdispls    = (int *)malloc(p*sizeof(int));
+    sendtypes  = (MPI_Datatype *)malloc(p*sizeof(MPI_Datatype));
+    recvtypes  = (MPI_Datatype *)malloc(p*sizeof(MPI_Datatype));
+
+    /* compute block sizes: index 0 = regular block, index 1 = the
+       (possibly larger) block owned by the last process */
+    m[0] = M/p;
+    m[1] = M - (p-1)*(M/p);
+    n[0] = N/p;
+    n[1] = N - (p-1)*(N/p);
+
+    /* compute types for every (row-block, column-block) size combination */
+    for (i=0; i <= 1; i++)
+	for (j=0; j <= 1; j++) {
+	    xtype[i][j] = transpose_type(N, m[i], n[j], MPI_FLOAT);
+	    stype[i][j] = submatrix_type(M, m[i], n[j], MPI_FLOAT);
+	}
+
+    /* prepare collective operation arguments; alltoallw displacements
+       are in BYTES, hence the multiplication by extent */
+    lasti = myrank == p-1;
+    for (j=0;  j < p; j++) {
+	lastj	      = j == p-1;
+	sendcounts[j] = 1;
+	sdispls[j]    = j*n[0]*extent;
+	sendtypes[j]  = xtype[lasti][lastj];
+	recvcounts[j] = 1;
+	rdispls[j]    = j*m[0]*extent;
+	recvtypes[j]  = stype[lastj][lasti];
+    }
+
+    /* communicate */
+    MTestPrintfMsg( 2, "Begin Alltoallw...\n" );
+    /* -- Note that the book incorrectly uses &localA and &localB
+       as arguments to MPI_Alltoallw */
+    MPI_Alltoallw(localA, sendcounts, sdispls, sendtypes,
+                  localB, recvcounts, rdispls, recvtypes, comm);
+    MTestPrintfMsg( 2, "Done with Alltoallw\n" );
+
+    /* Free buffers */
+    free( sendcounts );
+    free( recvcounts );
+    free( sdispls );
+    free( rdispls );
+    free( sendtypes );
+    free( recvtypes );
+
+    /* Free datatypes */
+    for (i=0; i <= 1; i++)
+	for (j=0; j <= 1; j++) {
+	    MPI_Type_free( &xtype[i][j] );
+	    MPI_Type_free( &stype[i][j] );
+	}
+}
+
+
+/* Define an n x m submatrix in a n x M local matrix (this is the
+   destination in the transpose matrix */
+MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
+/* computes a datatype for an mxn submatrix within an MxN matrix
+   with entries of type type */
+{
+    /* MPI_Datatype subrow; */
+    MPI_Datatype submatrix;
+
+    /* The book, MPI: The Complete Reference, has the wrong type constructor
+       here.  Since the stride in the vector type is relative to the input
+       type, the stride in the book's code is n times as long as is intended.
+       Since n may not exactly divide N, it is better to simply use the
+       blocklength argument in Type_vector */
+    /*
+    MPI_Type_contiguous(n, type, &subrow);
+    MPI_Type_vector(m, 1, N, subrow, &submatrix);
+    */
+    MPI_Type_vector(n, m, M, type, &submatrix );
+    MPI_Type_commit(&submatrix);
+
+    /* Add a consistency test: the size of submatrix should be
+       n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
+    {
+	int      tsize;
+	MPI_Aint textent, lb;
+	MPI_Type_size( type, &tsize );
+	MPI_Type_get_extent( submatrix, &lb, &textent );
+
+	if (textent != tsize * (M * (n-1)+m)) {
+	    fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
+		     (long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
+	}
+    }
+    return(submatrix);
+}
+
+/* Extract an m x n submatrix within an m x N matrix and transpose it.
+   Assume storage by rows; the defined datatype accesses by columns */
+MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
+/* computes a datatype for the transpose of an mxn matrix
+   with entries of type type */
+{
+    MPI_Datatype subrow, subrow1, submatrix;
+    MPI_Aint lb, extent;
+
+    /* One column of the source, resized so consecutive columns are
+       adjacent, then n of them back to back */
+    MPI_Type_vector(m, 1, N, type, &subrow);
+    MPI_Type_get_extent(type, &lb, &extent);
+    MPI_Type_create_resized(subrow, 0, extent, &subrow1);
+    MPI_Type_contiguous(n, subrow1, &submatrix);
+    MPI_Type_commit(&submatrix);
+    MPI_Type_free( &subrow );
+    MPI_Type_free( &subrow1 );
+
+    /* Add a consistency test: the size of submatrix should be
+       n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
+    {
+	int      tsize;
+	MPI_Aint textent, llb;
+	MPI_Type_size( type, &tsize );
+	MPI_Type_get_true_extent( submatrix, &llb, &textent );
+
+	if (textent != tsize * (N * (m-1)+n)) {
+	    fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
+		     (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
+	}
+    }
+
+    return(submatrix);
+}
+
+/* -- CUT HERE -- */
+
+/* Distributes a gM x gN matrix by row blocks, transposes it with
+   Alltoallw via the routines above, and verifies the column-block result */
+int main( int argc, char *argv[] )
+{
+    int gM, gN, lm, lmlast, ln, lnlast, i, j, errs = 0;
+    int size, rank;
+    float *localA, *localB;
+    MPI_Comm comm;
+
+    MTest_Init( &argc, &argv );
+    comm = MPI_COMM_WORLD;
+
+    MPI_Comm_size( comm, &size );
+    MPI_Comm_rank( comm, &rank );
+
+    gM = 20;
+    gN = 30;
+
+    /* Each block is lm x ln in size, except for the last process,
+       which has lmlast x lnlast */
+    lm     = gM/size;
+    lmlast = gM - (size - 1)*lm;
+    ln     = gN/size;
+    lnlast = gN - (size - 1)*ln;
+
+    /* Create the local matrices.
+       Initialize the input matrix so that the entries are
+       consecutive integers, by row, starting at 0.
+     */
+    if (rank == size - 1) {
+	localA = (float *)malloc( gN * lmlast * sizeof(float) );
+	localB = (float *)malloc( gM * lnlast * sizeof(float) );
+	for (i=0; i<lmlast; i++) {
+	    for (j=0; j<gN; j++) {
+		localA[i*gN+j] = (float)(i*gN+j + rank * gN * lm);
+	    }
+	}
+
+    }
+    else {
+	localA = (float *)malloc( gN * lm * sizeof(float) );
+	localB = (float *)malloc( gM * ln * sizeof(float) );
+	for (i=0; i<lm; i++) {
+	    for (j=0; j<gN; j++) {
+		localA[i*gN+j] = (float)(i*gN+j + rank * gN * lm);
+	    }
+	}
+    }
+
+    MTestPrintfMsg( 2, "Allocated local arrays\n" );
+    /* Transpose */
+    Transpose( localA, localB, gM, gN, comm );
+
+    /* check the transposed matrix
+       In the global matrix, the transpose has consecutive integers,
+       organized by columns.
+     */
+    if (rank == size - 1) {
+	for (i=0; i<lnlast; i++) {
+	    for (j=0; j<gM; j++) {
+		int expected = i+gN*j + rank * ln;
+		if ((int)localB[i*gM+j] != expected) {
+		    if (errs < MAX_ERRORS)
+			printf( "Found %d but expected %d\n",
+				(int)localB[i*gM+j], expected );
+		    errs++;
+		}
+	    }
+	}
+
+    }
+    else {
+	for (i=0; i<ln; i++) {
+	    for (j=0; j<gM; j++) {
+		int expected = i+gN*j + rank * ln;
+		if ((int)localB[i*gM+j] != expected) {
+		    if (errs < MAX_ERRORS)
+			printf( "Found %d but expected %d\n",
+				(int)localB[i*gM+j], expected );
+		    errs++;
+		}
+	    }
+	}
+    }
+
+    /* Free storage */
+    free( localA );
+    free( localB );
+
+    MTest_Finalize( errs );
+
+    MPI_Finalize();
+
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/*
+ This program tests MPI_Alltoallw by having processor i send different
+ amounts of data to each processor. This is just the MPI_Alltoallv test,
+ but with displacements in bytes rather than units of the datatype.
+
+ Because there are separate send and receive types to alltoallw,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ The first test sends i items to processor i from all processors.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+ MPI_Datatype *sendtypes, *recvtypes;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Create the buffer */
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ sbuf = (int *)malloc( size * size * sizeof(int) );
+ rbuf = (int *)malloc( size * size * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocated buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<size*size; i++) {
+ sbuf[i] = i + 100*rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ sendtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
+ recvtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+ /* Note that process 0 sends no data (sendcounts[0] = 0).
+ Alltoallw displacements are in BYTES, unlike alltoallv. */
+ for (i=0; i<size; i++) {
+ sendcounts[i] = i;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank * sizeof(int);
+ sdispls[i] = (((i+1) * (i))/2) * sizeof(int);
+ sendtypes[i] = recvtypes[i] = MPI_INT;
+ }
+ MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
+ rbuf, recvcounts, rdispls, recvtypes, comm );
+
+ /* Check rbuf: block i holds rank ints from process i, whose values
+ start at i*100 + (sum of the first rank integers) */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i]/sizeof(int);
+ for (j=0; j<rank; j++) {
+ if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+ /* Report the same value the comparison above uses
+ (the old diagnostic printed an unrelated expression) */
+ fprintf( stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j], i * 100 + (rank*(rank+1))/2 + j, j );
+ err++;
+ }
+ }
+ }
+
+ free(sendtypes);
+ free(sdispls);
+ free(sendcounts);
+ free(sbuf);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* check MPI_IN_PLACE, added in MPI-2.2 */
+ free( rbuf );
+ rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
+ if (!rbuf) {
+ fprintf( stderr, "Could not reallocate rbuf!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < size; i++) {
+ /* alltoallw displs are in bytes, not in type extents */
+ rdispls[i] = i * (2 * size) * sizeof(int);
+ recvtypes[i] = MPI_INT;
+ recvcounts[i] = i + rank;
+ }
+ memset(rbuf, -1, size * (2 * size) * sizeof(int));
+ for (i=0; i < size; i++) {
+ p = rbuf + (rdispls[i] / sizeof(int));
+ for (j = 0; j < recvcounts[i]; ++j) {
+ p[j] = 100 * rank + 10 * i + j;
+ }
+ }
+
+ MPI_Alltoallw( MPI_IN_PLACE, NULL, NULL, NULL,
+ rbuf, recvcounts, rdispls, recvtypes, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + (rdispls[i] / sizeof(int));
+ for (j=0; j<recvcounts[i]; j++) {
+ int expected = 100 * i + 10 * rank + j;
+ if (p[j] != expected) {
+ fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
+ rank, p[j], expected, i, j);
+ ++err;
+ }
+ }
+ }
+#endif
+
+ free(recvtypes);
+ free(rdispls);
+ free(recvcounts);
+ free(rbuf);
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( err );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2009 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+/* Based on a test case contributed by Michael Hofmann.
+ *
+ * This test makes sure that zero counts with non-zero-sized types on the
+ * send (recv) side match and don't cause a problem with non-zero counts and
+ * zero-sized types on the recv (send) side when using MPI_Alltoallw and
+ * MPI_Alltoallv. */
+
+/* TODO test intercommunicators as well */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <mpi.h>
+
+#include "mpitest.h"
+
+int main(int argc, char *argv[])
+{
+ int sendbuf, recvbuf;
+ int *sendcounts;
+ int *recvcounts;
+ int *sdispls;
+ int *rdispls;
+ MPI_Datatype sendtype;
+ MPI_Datatype *sendtypes;
+ MPI_Datatype *recvtypes;
+ int rank = -1;
+ int size = -1;
+ int i;
+
+
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ sendtypes = malloc(size * sizeof(MPI_Datatype));
+ recvtypes = malloc(size * sizeof(MPI_Datatype));
+ sendcounts = malloc(size * sizeof(int));
+ recvcounts = malloc(size * sizeof(int));
+ sdispls = malloc(size * sizeof(int));
+ rdispls = malloc(size * sizeof(int));
+ if (!sendtypes || !recvtypes ||
+ !sendcounts || !recvcounts ||
+ !sdispls || !rdispls)
+ {
+ printf("error, unable to allocate memory\n");
+ goto fn_exit;
+ }
+
+ /* sendtype has zero size; sending count=1 of it transfers no data */
+ MPI_Type_contiguous(0, MPI_INT, &sendtype);
+ MPI_Type_commit(&sendtype);
+
+ for (i = 0; i < size; ++i) {
+ sendtypes[i] = sendtype;
+ sendcounts[i] = 1;
+ sdispls[i] = 0;
+
+ recvtypes[i] = MPI_INT;
+ recvcounts[i] = 0;
+ rdispls[i] = 0;
+ }
+
+
+ /* try zero-counts on both the send and recv side in case only one direction is broken for some reason */
+ MPI_Alltoallw(&sendbuf, sendcounts, sdispls, sendtypes, &recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD);
+ MPI_Alltoallw(&sendbuf, recvcounts, rdispls, recvtypes, &recvbuf, sendcounts, sdispls, sendtypes, MPI_COMM_WORLD);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* pass MPI_IN_PLACE, with different but compatible recv arguments
+ depending on whether the rank is even or odd */
+ if (rank % 2)
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD);
+ else
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, sendcounts, sdispls, sendtypes, MPI_COMM_WORLD);
+#endif
+
+ /* now the same for Alltoallv instead of Alltoallw */
+ MPI_Alltoallv(&sendbuf, sendcounts, sdispls, sendtypes[0], &recvbuf, recvcounts, rdispls, recvtypes[0], MPI_COMM_WORLD);
+ MPI_Alltoallv(&sendbuf, recvcounts, rdispls, recvtypes[0], &recvbuf, sendcounts, sdispls, sendtypes[0], MPI_COMM_WORLD);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ if (rank % 2)
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, recvcounts, rdispls, recvtypes[0], MPI_COMM_WORLD);
+ else
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, sendcounts, sdispls, sendtypes[0], MPI_COMM_WORLD);
+#endif
+
+ MPI_Type_free(&sendtype);
+
+ if (rank == 0)
+ printf(" No Errors\n");
+
+fn_exit:
+ /* pointers are NULL-checked: malloc may have partially failed above */
+ if (rdispls) free(rdispls);
+ if (sdispls) free(sdispls);
+ if (recvcounts) free(recvcounts);
+ if (sendcounts) free(sendcounts);
+ if (recvtypes) free(recvtypes);
+ if (sendtypes) free(sendtypes);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ /* The max value of count must be very large to ensure that we
+ reach the long message algorithms */
+ for (count = 1; count < 280000; count = count * 4) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
+ for (root=0; root<size; root++) {
+ if (rank == root) {
+ sendtype.InitBuf( &sendtype );
+ err = MPI_Bcast( sendtype.buf, sendtype.count,
+ sendtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ recvtype.InitBuf( &recvtype );
+ err = MPI_Bcast( recvtype.buf, recvtype.count,
+ recvtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ fprintf( stderr, "Error with communicator %s and datatype %s\n",
+ MTestGetIntracommName(),
+ MTestGetDatatypeName( &recvtype ) );
+ MTestPrintError( err );
+ }
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ /* Accumulate the verification failures reported by
+ MTestCheckRecv. (Was "errs += errs;", which stays
+ at zero and silently drops these errors.) */
+ errs += err;
+ }
+ }
+ }
+ MTestFreeDatatype( &recvtype );
+ MTestFreeDatatype( &sendtype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes and sizes that are not powers of two";
+*/
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* This must be very large to ensure that we reach the long message
+ algorithms; count-1 keeps the sizes away from powers of two */
+ for (count = 4; count < 66000; count = count * 4) {
+ while (MTestGetDatatypes( &sendtype, &recvtype, count-1 )) {
+ for (root=0; root<size; root++) {
+ if (rank == root) {
+ sendtype.InitBuf( &sendtype );
+ err = MPI_Bcast( sendtype.buf, sendtype.count,
+ sendtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ recvtype.InitBuf( &recvtype );
+ err = MPI_Bcast( recvtype.buf, recvtype.count,
+ recvtype.datatype, root, comm );
+ if (err) {
+ errs++;
+ fprintf( stderr, "Error with communicator %s and datatype %s\n",
+ MTestGetIntracommName(),
+ MTestGetDatatypeName( &recvtype ) );
+ MTestPrintError( err );
+ }
+ err = MTestCheckRecv( 0, &recvtype );
+ if (err) {
+ /* Accumulate the verification failures reported by
+ MTestCheckRecv. (Was "errs += errs;", which stays
+ at zero and silently drops these errors.) */
+ errs += err;
+ }
+ }
+ }
+ MTestFreeDatatype( &recvtype );
+ MTestFreeDatatype( &sendtype );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "mpitest.h"
+
+#define ROOT 0
+#define NUM_REPS 5
+#define NUM_SIZES 4
+
+int main( int argc, char **argv)
+{
+ int *buf;
+ int i, rank, reps, n;
+ int bVerify = 1;
+ int sizes[NUM_SIZES] = { 100, 64*1024, 128*1024, 1024*1024 };
+ int num_errors=0;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ if (argc > 1)
+ {
+ if (strcmp(argv[1], "-novalidate") == 0 || strcmp(argv[1], "-noverify") == 0)
+ bVerify = 0;
+ }
+
+ /* NOTE(review): allocation result is unchecked — assumes the
+ largest size (1M ints) always succeeds */
+ buf = (int *) malloc(sizes[NUM_SIZES-1]*sizeof(int));
+ memset(buf, 0, sizes[NUM_SIZES-1]*sizeof(int));
+
+ for (n=0; n<NUM_SIZES; n++)
+ {
+#ifdef DEBUG
+ if (rank == ROOT)
+ {
+ printf("bcasting %d MPI_INTs %d times\n", sizes[n], NUM_REPS);
+ fflush(stdout);
+ }
+#endif
+ for (reps=0; reps < NUM_REPS; reps++)
+ {
+ if (bVerify)
+ {
+ /* Root fills the buffer with values unique to (n, reps);
+ everyone else poisons it with negative sentinels */
+ if (rank == ROOT)
+ {
+ for (i=0; i<sizes[n]; i++)
+ {
+ buf[i] = 1000000 * (n * NUM_REPS + reps) + i;
+ }
+ }
+ else
+ {
+ for (i=0; i<sizes[n]; i++)
+ {
+ buf[i] = -1 - (n * NUM_REPS + reps);
+ }
+ }
+ }
+
+# ifdef DEBUG
+ {
+ printf("rank=%d, n=%d, reps=%d\n", rank, n, reps);
+ }
+# endif
+
+ MPI_Bcast(buf, sizes[n], MPI_INT, ROOT, MPI_COMM_WORLD);
+
+ if (bVerify)
+ {
+ /* num_errors is reset per iteration; only the last
+ iteration's count reaches MTest_Finalize below */
+ num_errors = 0;
+ for (i=0; i<sizes[n]; i++)
+ {
+ if (buf[i] != 1000000 * (n * NUM_REPS + reps) + i)
+ {
+ num_errors++;
+ if (num_errors < 10)
+ {
+ printf("Error: Rank=%d, n=%d, reps=%d, i=%d, buf[i]=%d expected=%d\n", rank, n, reps, i, buf[i],
+ 1000000 * (n * NUM_REPS + reps) +i);
+ fflush(stdout);
+ }
+ }
+ }
+ if (num_errors >= 10)
+ {
+ printf("Error: Rank=%d, num_errors = %d\n", rank, num_errors);
+ fflush(stdout);
+ }
+ }
+ }
+ }
+
+ free(buf);
+
+ MTest_Finalize( num_errors );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include <mpi.h>
+
+/* test broadcast behavior with non-zero counts but zero-sized types */
+
+int main(int argc, char *argv[])
+{
+ int i, type_size;
+ MPI_Datatype type = MPI_DATATYPE_NULL;
+ /* Buffer of ints: it is allocated and indexed as an int array below.
+ (Was "char *buf", which truncated values >= 128 into a signed char
+ and could trip the post-bcast asserts at higher ranks.) */
+ int *buf = NULL;
+ int wrank, wsize;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+
+ /* a random non-zero sized buffer */
+#define NELEM (10)
+ buf = malloc(NELEM*sizeof(int));
+ assert(buf);
+
+ for (i = 0; i < NELEM; i++) {
+ buf[i] = wrank * NELEM + i;
+ }
+
+ /* create a zero-size type */
+ MPI_Type_contiguous(0, MPI_INT, &type);
+ MPI_Type_commit(&type);
+ MPI_Type_size(type, &type_size);
+ assert(type_size == 0);
+
+ /* do the broadcast, which will break on some MPI implementations */
+ MPI_Bcast(buf, NELEM, type, 0, MPI_COMM_WORLD);
+
+ /* check that the buffer remains unmolested */
+ for (i = 0; i < NELEM; i++) {
+ assert(buf[i] == wrank * NELEM + i);
+ }
+
+ MPI_Type_free(&type);
+ MPI_Finalize();
+
+ if (wrank == 0) {
+ printf(" No errors\n");
+ }
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define BAD_ANSWER 100000
+
+int assoc ( int *, int *, int *, MPI_Datatype * );
+
+/*
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ (see 4.9.4). The order is important.
+
+ Note that the computation is in process rank (in the communicator)
+ order, independent of the root.
+ */
+/* User-defined reduction op: flags BAD_ANSWER whenever the combine order
+ would violate inout[i] > in[i]; otherwise propagates invec[i].
+ NOTE(review): declared to return int although MPI_User_function returns
+ void — the cast at MPI_Op_create relies on this being harmless; confirm. */
+int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ ) {
+ if (inoutvec[i] <= invec[i] ) {
+ int rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
+ rank, inoutvec[0], invec[0] );
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
+ return (1);
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size;
+ int data;
+ int errors=0;
+ int result = -100;
+ MPI_Op op;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+
+ /* commute = 0: the op is declared non-commutative, so MPI must apply
+ it in rank order; assoc() flags any out-of-order combination */
+ MPI_Op_create( (MPI_User_function*)assoc, 0, &op );
+ MPI_Reduce ( &data, &result, 1, MPI_INT, op, size-1, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, size-1, MPI_COMM_WORLD );
+ MPI_Op_free( &op );
+ if (result == BAD_ANSWER) errors++;
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+void addem ( int *, int *, int *, MPI_Datatype * );
+void assoc ( int *, int *, int *, MPI_Datatype * );
+
+/* User-defined commutative reduction op: elementwise sum (same as MPI_SUM) */
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ )
+ inoutvec[i] += invec[i];
+}
+
+#define BAD_ANSWER 100000
+
+/*
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ (see 4.9.4). The order is important.
+
+ Note that the computation is in process rank (in the communicator)
+ order, independent of the root.
+ */
+/* User-defined non-commutative op: flags BAD_ANSWER whenever the combine
+ order would violate inout[i] > in[i]; otherwise propagates invec[i] */
+void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ ) {
+ if (inoutvec[i] <= invec[i] ) {
+ int rank;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
+ rank, inoutvec[0], invec[0] );
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
+}
+
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op_assoc, op_addem;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+
+ /* MPI_Scan with MPI_SUM on rank yields the partial sum 0+1+...+rank */
+ correct_result = 0;
+ for (i=0;i<=rank;i++)
+ correct_result += i;
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ /* fixed diagnostic typo: was "Error suming" */
+ fprintf( stderr, "[%d] Error summing ints with scan\n", rank );
+ errors++;
+ }
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank );
+ errors++;
+ }
+
+ data = rank;
+ result = -100;
+ /* op_assoc: non-commutative (commute=0); op_addem: commutative sum */
+ MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc );
+ MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem );
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (userop)\n",
+ rank );
+ errors++;
+ }
+
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD );
+ if (result != correct_result) {
+ fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n",
+ rank );
+ errors++;
+ }
+ result = -100;
+ data = rank;
+ MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, MPI_COMM_WORLD );
+ if (result == BAD_ANSWER) {
+ fprintf( stderr, "[%d] Error scanning with non-commutative op\n",
+ rank );
+ errors++;
+ }
+
+ MPI_Op_free( &op_assoc );
+ MPI_Op_free( &op_addem );
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define TABLE_SIZE 2
+
+int main( int argc, char **argv )
+{
+ int rank, size;
+ double a[TABLE_SIZE];
+ struct { double a; int b; } in[TABLE_SIZE], out[TABLE_SIZE];
+ int i;
+ int errors = 0;
+
+ /* Initialize the environment and some variables */
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* Initialize the maxloc data: entry i peaks at rank i (value rank+1) */
+ for ( i=0; i<TABLE_SIZE; i++ ) a[i] = 0;
+ for ( i=rank; i<TABLE_SIZE; i++ ) a[i] = (double)rank + 1.0;
+
+ /* Copy data to the "in" buffer */
+ for (i=0; i<TABLE_SIZE; i++) {
+ in[i].a = a[i];
+ in[i].b = rank;
+ }
+
+ /* Reduce it! */
+ MPI_Reduce( in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MAXLOC, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( out, TABLE_SIZE, MPI_DOUBLE_INT, 0, MPI_COMM_WORLD );
+
+ /* Check to see that we got the right answers: each rank validates
+ only the entries it is responsible for (i % size == rank) */
+ for (i=0; i<TABLE_SIZE; i++)
+ if (i % size == rank)
+ if (out[i].b != rank) {
+ printf("MAX (ranks[%d] = %d != %d\n", i, out[i].b, rank );
+ errors++;
+ }
+
+ /* Initialize the minloc data: negated values, so the minimum is at rank i */
+ for ( i=0; i<TABLE_SIZE; i++ ) a[i] = 0;
+ for ( i=rank; i<TABLE_SIZE; i++ ) a[i] = -(double)rank - 1.0;
+
+ /* Copy data to the "in" buffer */
+ for (i=0; i<TABLE_SIZE; i++) {
+ in[i].a = a[i];
+ in[i].b = rank;
+ }
+
+ /* Reduce it! */
+ MPI_Allreduce( in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD );
+
+ /* Check to see that we got the right answers */
+ for (i=0; i<TABLE_SIZE; i++)
+ if (i % size == rank)
+ if (out[i].b != rank) {
+ printf("MIN (ranks[%d] = %d != %d\n", i, out[i].b, rank );
+ errors++;
+ }
+
+ /* Finish up! */
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * Changes to the original code
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+
+/*
+From: hook@nas.nasa.gov (Edward C. Hook)
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "mpitest.h"
+
+#include <string.h>
+#include <errno.h>
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+#endif
+
+int main( int argc, char *argv[] )
+{
+ int rank, size;
+ int chunk = 128; /* default per-destination message size; override with -m N */
+ int i;
+ int *sb;
+ int *rb;
+ int status;
+
+ MTest_Init(&argc,&argv);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ MPI_Comm_size(MPI_COMM_WORLD,&size);
+
+ for ( i=1 ; i < argc ; ++i ) {
+ if ( argv[i][0] != '-' )
+ continue;
+ switch(argv[i][1]) {
+ case 'm':
+ chunk = atoi(argv[++i]);
+ break;
+ default:
+ fprintf(stderr,"Unrecognized argument %s\n",
+ argv[i]);
+ MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
+ }
+ }
+
+ sb = (int *)malloc(size*chunk*sizeof(int));
+ if ( !sb ) {
+ perror( "can't allocate send buffer" );
+ MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
+ }
+ rb = (int *)malloc(size*chunk*sizeof(int));
+ if ( !rb ) {
+ perror( "can't allocate recv buffer");
+ free(sb);
+ MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
+ }
+ for ( i=0 ; i < size*chunk ; ++i ) {
+ sb[i] = rank + 1;
+ rb[i] = 0;
+ }
+
+ /* fputs("Before MPI_Alltoall\n",stdout); */
+
+ /* This should really send MPI_CHAR, but since sb and rb were allocated
+ as chunk*size*sizeof(int), the buffers are large enough */
+ status = MPI_Alltoall(sb,chunk,MPI_INT,rb,chunk,MPI_INT,
+ MPI_COMM_WORLD);
+
+ /* fputs("Before MPI_Allreduce\n",stdout); */
+
+ /* status is the MPI return code (MPI_SUCCESS == 0), reported as the
+ error count; received data itself is not validated */
+ MTest_Finalize( status );
+
+ free(sb);
+ free(rb);
+
+ MPI_Finalize();
+
+ return MTestReturnValue( status );
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ if ( (rank < participants) ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+ int recv_count = send_count;
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Gather everybody's result together - sort of like an */
+ /* inefficient allgather */
+ for (i=0; i<participants; i++) {
+ /* the root of each gather must pass MPI_IN_PLACE as its sendbuf */
+ void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
+ MPI_Gather(sendbuf, send_count, MPI_INT,
+ &table[0][0], recv_count, MPI_INT, i,
+ MPI_COMM_WORLD );
+ }
+
+ /* Everybody should have the same table now, */
+ /* This test does not in any way guarantee there are no errors */
+ /* Print out a table or devise a smart test to make sure it's correct */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ /* weak check: each row must be constant across its columns */
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ }
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int recv_counts[MAX_PROCESSES];
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+ /* while (MAX_PROCESSES % participants) participants--; */
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ if ( (rank < participants) ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+
+ /* Fill in the displacements and recv_counts (all blocks equal-sized) */
+ for (i=0; i<participants; i++) {
+ displs[i] = i * block_size * MAX_PROCESSES;
+ recv_counts[i] = send_count;
+ }
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Gather everybody's result together - sort of like an */
+ /* inefficient allgather */
+ for (i=0; i<participants; i++) {
+ /* the root of each gatherv must pass MPI_IN_PLACE as its sendbuf */
+ void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
+ MPI_Gatherv(sendbuf, send_count, MPI_INT,
+ &table[0][0], recv_counts, displs, MPI_INT,
+ i, MPI_COMM_WORLD);
+ }
+
+
+ /* Everybody should have the same table now.
+
+ The entries are:
+ Table[i][j] = (i/block_size) + 10;
+ */
+ for (i=0; i<MAX_PROCESSES;i++)
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ for (i=0; i<MAX_PROCESSES;i++) {
+ for (j=0; j<MAX_PROCESSES;j++) {
+ if (table[i][j] != (i/block_size) + 10) errors++;
+ }
+ }
+ if (errors) {
+ /* Print out table if there are any errors */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ printf("\n");
+ for (j=0; j<MAX_PROCESSES; j++)
+ printf(" %d",table[i][j]);
+ }
+ printf("\n");
+ }
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int row[MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ comm = MPI_COMM_WORLD;
+
+ /* A maximum of MAX_PROCESSES processes can participate:
+ split off the first MAX_PROCESSES ranks into their own comm */
+ if ( size > MAX_PROCESSES ) {
+ participants = MAX_PROCESSES;
+ MPI_Comm_split( MPI_COMM_WORLD, rank < MAX_PROCESSES, rank, &comm );
+ }
+ else {
+ participants = size;
+ MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+ }
+ if ( (rank < participants) ) {
+ int send_count = MAX_PROCESSES;
+ int recv_count = MAX_PROCESSES;
+
+ /* If I'm the root (process 0), then fill out the big table */
+ if (rank == 0)
+ for ( i=0; i<participants; i++)
+ for ( j=0; j<MAX_PROCESSES; j++ )
+ table[i][j] = i+j;
+
+ /* Scatter the big table to everybody's little table */
+ MPI_Scatter(&table[0][0], send_count, MPI_INT,
+ &row[0] , recv_count, MPI_INT, 0, comm );
+
+ /* Now see if our row looks right: row i of the table is i+j */
+ for (i=0; i<MAX_PROCESSES; i++)
+ if ( row[i] != i+rank ) errors++;
+ }
+
+ /* every rank got a communicator from split or dup above */
+ MPI_Comm_free( &comm );
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int row[MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int send_counts[MAX_PROCESSES];
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+ if ( (rank < participants) ) {
+ int recv_count = MAX_PROCESSES;
+
+ /* If I'm the root (process 0), then fill out the big table */
+ /* and setup send_counts and displs arrays */
+ /* (send_counts/displs are only significant at the root, per MPI) */
+ if (rank == 0)
+ for ( i=0; i<participants; i++) {
+ send_counts[i] = recv_count;
+ displs[i] = i * MAX_PROCESSES;
+ for ( j=0; j<MAX_PROCESSES; j++ )
+ table[i][j] = i+j;
+ }
+
+ /* Scatter the big table to everybody's little table */
+ MPI_Scatterv(&table[0][0], send_counts, displs, MPI_INT,
+ &row[0] , recv_count, MPI_INT, 0, MPI_COMM_WORLD);
+
+ /* Now see if our row looks right: row i of the table is i+j */
+ for (i=0; i<MAX_PROCESSES; i++)
+ if ( row[i] != i+rank ) errors++;
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int recv_counts[MAX_PROCESSES];
+ MPI_Comm test_comm;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ participants = ( size > MAX_PROCESSES ) ? MAX_PROCESSES : size;
+
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ /* color = (rank < participants); non-participants get their own comm */
+ MPI_Comm_split(MPI_COMM_WORLD, rank<participants, rank, &test_comm);
+
+ if ( rank < participants ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+
+ /* Fill in the displacements and recv_counts (all blocks equal-sized) */
+ for (i=0; i<participants; i++) {
+ displs[i] = i * block_size * MAX_PROCESSES;
+ recv_counts[i] = send_count;
+ }
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Everybody gets the gathered data */
+ MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
+ &table[0][0], recv_counts, displs,
+ MPI_INT, test_comm);
+
+ /* Everybody should have the same table now.
+
+ The entries are:
+ Table[i][j] = (i/block_size) + 10;
+ */
+ for (i=0; i<MAX_PROCESSES;i++)
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ for (i=0; i<MAX_PROCESSES;i++) {
+ for (j=0; j<MAX_PROCESSES;j++) {
+ if (table[i][j] != (i/block_size) + 10) errors++;
+ }
+ }
+ if (errors) {
+ /* Print out table if there are any errors */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ printf("\n");
+ for (j=0; j<MAX_PROCESSES; j++)
+ printf(" %d",table[i][j]);
+ }
+ printf("\n");
+ }
+ }
+
+ MTest_Finalize( errors );
+
+ MPI_Comm_free(&test_comm);
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+#define MAX_PROCESSES 10
+
+/* Test MPI_Allgather with MPI_IN_PLACE: each participant paints its row
+   block of the shared table in place, then the allgather distributes the
+   other blocks.  Only a weak consistency check is performed (each row
+   must be uniform); see the comment in the body. */
+int main( int argc, char **argv )
+{
+ int rank, size, i,j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors=0;
+ int participants;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ /* A maximum of MAX_PROCESSES processes can participate */
+ if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
+ else participants = size;
+ if (MAX_PROCESSES % participants) {
+ fprintf( stderr, "Number of processors must divide %d\n",
+ MAX_PROCESSES );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ /* while (MAX_PROCESSES % participants) participants--; */
+ /* NOTE(review): unlike the allgatherv variant, the collective below is
+    on MPI_COMM_WORLD but only ranks < participants enter it -- this
+    looks like it would hang when size > MAX_PROCESSES; confirm the test
+    is only run with at most MAX_PROCESSES ranks. */
+ if ( (rank < participants) ) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank+1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+ int recv_count = send_count;
+
+ /* Paint my rows my color */
+ for (i=begin_row; i<end_row ;i++)
+ for (j=0; j<MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Everybody gets the gathered table */
+ /* MPI_IN_PLACE: my own contribution is already in table, so sendbuf,
+    sendcount and sendtype are ignored per the MPI standard */
+ MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
+ &table[0][0], recv_count, MPI_INT, MPI_COMM_WORLD);
+
+ /* Everybody should have the same table now, */
+ /* This test does not in any way guarantee there are no errors */
+ /* Print out a table or devise a smart test to make sure it's correct */
+ for (i=0; i<MAX_PROCESSES;i++) {
+ if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
+ errors++;
+ }
+ }
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/* Test MPI_Reduce with the built-in MPI_SUM, MPI_MIN and MPI_MAX ops.
+   Each rank contributes its own rank number; root 0 reduces, then the
+   result is broadcast so every rank can verify it locally. */
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+
+ /* SUM of 0..size-1 must equal size*(size-1)/2 (computed by the loop) */
+ MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ correct_result = 0;
+ for(i=0;i<size;i++)
+ correct_result += i;
+ if (result != correct_result) errors++;
+
+ /* MIN over ranks is always 0 */
+ MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ if (result != 0) errors++;
+
+ /* MAX over ranks is always size-1 */
+ MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ if (result != (size-1)) errors++;
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/* User-defined reduction op: elementwise integer addition, with the
+   MPI_User_function signature (invec combined into inoutvec; len gives
+   the element count; dtype is unused here). */
+void addem ( int *, int *, int *, MPI_Datatype * );
+
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+{
+ int i;
+ for ( i=0; i<*len; i++ )
+ inoutvec[i] += invec[i];
+}
+
+/* Test MPI_Op_create/MPI_Op_free: reduce with the user-defined addem op
+   (commutative flag set) and verify against the arithmetic sum of the
+   rank numbers.  Result is broadcast so every rank checks it. */
+int main( int argc, char **argv )
+{
+ int rank, size, i;
+ int data;
+ int errors=0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op;
+
+ MTest_Init( &argc, &argv );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ data = rank;
+ /* second arg 1 = operation is commutative */
+ MPI_Op_create( (MPI_User_function *)addem, 1, &op );
+ MPI_Reduce ( &data, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD );
+ MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ MPI_Op_free( &op );
+ /* expected: sum of 0..size-1 */
+ correct_result = 0;
+ for(i=0;i<size;i++)
+ correct_result += i;
+ if (result != correct_result) errors++;
+
+ MTest_Finalize( errors );
+ MPI_Finalize();
+ return MTestReturnValue( errors );
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Exscan";
+*/
+
+/* Test MPI_Exscan (exclusive prefix sum) over a range of communicators
+   and counts.  sendbuf[i] = rank + i*size, so the expected exclusive
+   scan at rank r is sum over ranks 0..r-1, i.e.
+   r*i*size + r*(r-1)/2 for element i.  Rank 0's recvbuf is undefined by
+   the standard and is not checked.  The MPI_IN_PLACE variant is also
+   exercised when the MPI version supports it (2.2+). */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
+ int *sendbuf, *recvbuf, i;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ /* The following illustrates the use of the routines to
+ run through a selection of communicators and datatypes.
+ Use subsets of these for tests that do not involve combinations
+ of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* doubling counts: 1, 2, 4, ... up to 32768 */
+ for (count = 1; count < 65000; count = count * 2) {
+
+ sendbuf = (int *)malloc( count * sizeof(int) );
+ recvbuf = (int *)malloc( count * sizeof(int) );
+
+ for (i=0; i<count; i++) {
+ sendbuf[i] = rank + i * size;
+ recvbuf[i] = -1;
+ }
+
+ MPI_Exscan( sendbuf, recvbuf, count, MPI_INT, MPI_SUM, comm );
+
+ /* Check the results. rank 0 has no data */
+ if (rank > 0) {
+ int result;
+ for (i=0; i<count; i++) {
+ result = rank * i * size + ((rank) * (rank-1))/2;
+ if (recvbuf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
+ i, recvbuf[i], rank, result );
+ }
+ }
+ }
+ }
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* now try the MPI_IN_PLACE flavor */
+ for (i=0; i<count; i++) {
+ sendbuf[i] = -1; /* unused */
+ recvbuf[i] = rank + i * size;
+ }
+
+ MPI_Exscan( MPI_IN_PLACE, recvbuf, count, MPI_INT, MPI_SUM, comm );
+
+ /* Check the results. rank 0's data must remain unchanged */
+ for (i=0; i<count; i++) {
+ int result;
+ if (rank == 0)
+ result = rank + i * size;
+ else
+ result = rank * i * size + ((rank) * (rank-1))/2;
+ if (recvbuf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
+ i, recvbuf[i], rank, result );
+ }
+ }
+ }
+#endif
+
+ free( sendbuf );
+ free( recvbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test MPI_Exscan (simple test)";
+*/
+
+/* Simple single-element MPI_Exscan test on MPI_COMM_WORLD: each rank
+   contributes its rank, so rank r (r > 0) must receive r*(r-1)/2.
+   Rank 0's receive buffer is undefined by the standard, but this test
+   checks that the sentinel value (-2) was left untouched. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int rank, size;
+ int sendbuf[1], recvbuf[1];
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ sendbuf[0] = rank;
+ recvbuf[0] = -2;
+
+ MPI_Exscan( sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm );
+
+ /* Check the results. rank 0 has no data. Input is
+ 0 1 2 3 4 5 6 7 8 ...
+ Output is
+ - 0 1 3 6 10 15 21 28 36
+ (scan, not counting the contribution from the calling process)
+ */
+ if (rank > 0) {
+ int result = (((rank) * (rank-1))/2);
+ /* printf( "%d: %d\n", rank, result ); */
+ if (recvbuf[0] != result) {
+ errs++;
+ fprintf( stderr, "Error in recvbuf = %d on %d, expected %d\n",
+ recvbuf[0], rank, result );
+ }
+ }
+ else if (recvbuf[0] != -2) {
+ errs++;
+ fprintf( stderr, "Error in recvbuf on zero, is %d\n", recvbuf[0] );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous */
+
+/* Test MPI_Gather with a strided (vector) send type and a contiguous
+   receive: each rank sends one vector of n doubles with stride 10,
+   which the root receives packed; the gathered array must then read
+   0, 1, 2, ... n*size-1.  Every rank takes a turn as root. */
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ MPI_Comm comm;
+ double *vecin, *vecout;
+ int minsize = 2, count;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (root=0; root<size; root++) {
+ /* NOTE(review): count is not used inside the loop body, so each
+    iteration repeats the identical n=12/stride=10 exchange -- the
+    loop only adds repetition, not coverage */
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( size * n * sizeof(double) );
+
+ /* vec = n doubles, one every `stride` elements */
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ /* -2 fills the gaps between strided elements */
+ for (i=0; i<n*stride; i++) vecin[i] =-2;
+ for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
+
+ /* send 1 vector datatype; root receives n contiguous doubles
+    from each rank */
+ MPI_Gather( vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm );
+
+ if (rank == root) {
+ for (i=0; i<n*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d\n",
+ i, (int)vecout[i] );
+ }
+ }
+ }
+ }
+ MPI_Type_free( &vec );
+ free( vecin );
+ free( vecout );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ /* do a zero length gather */
+ MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE */
+
+/* Test MPI_Gather with MPI_IN_PLACE at the root: the root pre-fills its
+   own slot of vecout and passes MPI_IN_PLACE (count/type ignored per the
+   standard), while the other ranks send one strided vector each.  The
+   gathered array must read 0 .. n*size-1.  Every rank takes a turn as
+   root. */
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (root=0; root<size; root++) {
+ /* NOTE(review): count is unused in the body; the loop repeats the
+    same fixed-size exchange */
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( size * n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ for (i=0; i<n*stride; i++) vecin[i] =-2;
+ for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
+ int errorcode = MPI_SUCCESS;
+ if (rank == root) {
+ /* root's contribution is placed directly in vecout;
+    MPI_IN_PLACE tells Gather not to touch that slot */
+ for (i=0; i<n; i++) {
+ vecout[rank*n+i] = rank*n+i;
+ }
+ errorcode = MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, n, MPI_DOUBLE, root, comm );
+ }
+ else {
+ /* non-roots: recv args are ignored, so NULL/-1 are legal here */
+ errorcode = MPI_Gather( vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL,
+ root, comm );
+ }
+
+ if (rank == root) {
+ for (i=0; i<n*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d, err=%d\n",
+ i, (int)vecout[i], errorcode );
+ }
+ }
+ }
+ }
+ MPI_Type_free( &vec );
+ free( vecin );
+ free( vecout );
+ }
+ }
+ MTestFreeComm( &comm );
+ }
+
+ /* do a zero length gather */
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if ( rank == 0 ) {
+ MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0,
+ MPI_COMM_WORLD );
+ } else {
+ MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Gather data from a vector to contiguous. Use IN_PLACE */
+
+/* Near-duplicate of the previous in-place MPI_Gather test; the only
+   visible difference is the extra printf after each communicator's
+   loop.  Root pre-fills its own slot, passes MPI_IN_PLACE, and checks
+   the gathered array reads 0 .. n*size-1. */
+int main( int argc, char **argv )
+{
+ MPI_Datatype vec;
+ double *vecin, *vecout;
+ MPI_Comm comm;
+ int count, minsize = 2;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
+
+ MTest_Init( &argc, &argv );
+
+ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ for (root=0; root<size; root++) {
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *)malloc( n * stride * size * sizeof(double) );
+ vecout = (double *)malloc( size * n * sizeof(double) );
+
+ MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
+ MPI_Type_commit( &vec );
+
+ for (i=0; i<n*stride; i++) vecin[i] =-2;
+ for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
+ int errorcode = MPI_SUCCESS;
+ if (rank == root) {
+ for (i=0; i<n; i++) {
+ vecout[rank*n+i] = rank*n+i;
+ }
+ errorcode = MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, n, MPI_DOUBLE, root, comm );
+ }
+ else {
+ errorcode = MPI_Gather( vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL,
+ root, comm );
+ }
+
+ if (rank == root) {
+ for (i=0; i<n*size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "vecout[%d]=%d, err=%d\n",
+ i, (int)vecout[i], errorcode );
+ }
+ }
+ }
+ }
+ MPI_Type_free( &vec );
+ free( vecin );
+ free( vecout );
+ }
+ }
+ /* NOTE(review): looks like leftover debug/progress output -- every
+    rank prints this for every communicator; confirm it is intended
+    (it will clutter the expected test output) */
+ printf("end with comm size : %d\n", size);
+ MTestFreeComm( &comm );
+ }
+
+ /* do a zero length gather */
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if ( rank == 0 ) {
+ MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0,
+ MPI_COMM_WORLD );
+ } else {
+ MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
+
+
--- /dev/null
+#include <stdio.h>
+#include <assert.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* Since MPICH is currently the only NBC implementation in existence, just use
+ * this quick-and-dirty #ifdef to decide whether to test the nonblocking
+ * collectives. Eventually we can add a configure option or configure test, or
+ * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
+ * check */
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NBC_ROUTINES 1
+#endif
+
+/* Test overlap of a nonblocking MPI_Iallreduce with a blocking
+   MPI_Allreduce issued before the wait.  Requires exactly 2 ranks:
+   isum must be 1+1=2 and sum 2+2=4.  The NBC calls are compiled only
+   when TEST_NBC_ROUTINES is defined (see the #ifdef above). */
+int main(int argc, char *argv[])
+{
+ MPI_Request request;
+ int size, rank;
+ int one = 1, two = 2, isum, sum;
+ MPI_Init(&argc,&argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ /* test is hard-wired for 2 processes */
+ assert(size == 2);
+#if defined(TEST_NBC_ROUTINES)
+ /* blocking collective deliberately issued while the nonblocking one
+    is still outstanding */
+ MPI_Iallreduce(&one,&isum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD,&request);
+ MPI_Allreduce(&two,&sum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
+ MPI_Wait(&request,MPI_STATUS_IGNORE);
+
+ assert(isum == 2);
+ assert(sum == 4);
+ if (rank == 0)
+ printf(" No errors\n");
+#endif
+
+ MPI_Finalize();
+ return 0;
+}
+
--- /dev/null
+/* -*- Mode: c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2013 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* Regression test for ticket #1785, contributed by Jed Brown. The test was
+ * hanging indefinitely under a buggy version of ch3:sock. */
+
+#include <mpi.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NBC_ROUTINES 1
+#endif
+
+/* Regression test (MPICH ticket #1785): poll a nonblocking barrier to
+   completion with MPI_Test, sleeping 1ms between probes.  A buggy
+   progress engine (ch3:sock) used to hang here. */
+int main(int argc, char *argv[])
+{
+ MPI_Request barrier;
+ int rank,i,done;
+
+ MPI_Init(&argc,&argv);
+ MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ #if defined(TEST_NBC_ROUTINES)
+ MPI_Ibarrier(MPI_COMM_WORLD,&barrier);
+ /* spin on MPI_Test rather than MPI_Wait to exercise the progress
+    engine repeatedly */
+ for (i=0,done=0; !done; i++) {
+ usleep(1000);
+ /*printf("[%d] MPI_Test: %d\n",rank,i);*/
+ MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);
+ }
+ #endif
+ if (rank == 0)
+ printf(" No Errors\n");
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm allgather test";
+*/
+
+/* Intercommunicator MPI_Allgather test.  In an intercomm allgather each
+   group receives the concatenated contributions of the REMOTE group:
+   the left group sends values i+rank*count, the right group sends their
+   negations, so each side expects the other side's pattern.  A second
+   round uses count 0 on one side to exercise the unidirectional case. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *rbuf = 0, *sbuf = 0;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* The left group will send rank to the right group;
+ The right group will send -rank to the left group */
+ rbuf = (int *)malloc( count * rsize * sizeof(int) );
+ sbuf = (int *)malloc( count * sizeof(int) );
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ if (leftGroup) {
+ for (i=0; i<count; i++) sbuf[i] = i + rank*count;
+ }
+ else {
+ for (i=0; i<count; i++) sbuf[i] = -(i + rank*count);
+ }
+ err = MPI_Allgather( sbuf, count, datatype,
+ rbuf, count, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* each side must see the remote side's data: negatives on the
+    left, positives on the right */
+ if (leftGroup) {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != i) {
+ errs++;
+ }
+ }
+ }
+
+ /* Use Allgather in a unidirectional way */
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ if (leftGroup) {
+ /* left sends nothing but still receives the right group's data */
+ err = MPI_Allgather( sbuf, 0, datatype,
+ rbuf, count, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ /* right sends but receives nothing: rbuf must keep its -1 fill */
+ err = MPI_Allgather( sbuf, count, datatype,
+ rbuf, 0, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free( rbuf );
+ free( sbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm allgatherv test";
+*/
+
+/* Intercommunicator MPI_Allgatherv test, the allgatherv analogue of the
+   previous test: uniform recvcounts/displs describe the remote group's
+   contributions; left group sends positive values, right group sends
+   their negations; each side checks the remote pattern.  A second round
+   uses zero counts to test the unidirectional case. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *rbuf = 0, *sbuf = 0;
+ int *recvcounts, *recvdispls;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* The left group will send rank to the right group;
+ The right group will send -rank to the left group */
+ rbuf = (int *)malloc( count * rsize * sizeof(int) );
+ sbuf = (int *)malloc( count * sizeof(int) );
+ /* counts/displs are sized by the REMOTE group (rsize) */
+ recvcounts = (int *) malloc( rsize * sizeof(int) );
+ recvdispls = (int *) malloc( rsize * sizeof(int) );
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ for (i=0; i<rsize; i++) {
+ recvcounts[i] = count;
+ recvdispls[i] = i * count;
+ }
+ if (leftGroup) {
+ for (i=0; i<count; i++) sbuf[i] = i + rank*count;
+ }
+ else {
+ for (i=0; i<count; i++) sbuf[i] = -(i + rank*count);
+ }
+ err = MPI_Allgatherv( sbuf, count, datatype,
+ rbuf, recvcounts, recvdispls, datatype,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* each side must see the remote side's values */
+ if (leftGroup) {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != i) {
+ errs++;
+ }
+ }
+ }
+
+ /* Use Allgather in a unidirectional way */
+ for (i=0; i<count*rsize; i++) rbuf[i] = -1;
+ if (leftGroup) {
+ /* left: send count 0, still receive from the right */
+ err = MPI_Allgatherv( sbuf, 0, datatype,
+ rbuf, recvcounts, recvdispls, datatype,
+ comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ /* right: receive counts all 0, so rbuf must keep its -1 fill */
+ for (i=0; i<rsize; i++) {
+ recvcounts[i] = 0;
+ recvdispls[i] = 0;
+ }
+ err = MPI_Allgatherv( sbuf, count, datatype,
+ rbuf, recvcounts, recvdispls, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ for (i=0; i<count*rsize; i++) {
+ if (rbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free( rbuf );
+ free( sbuf );
+ free( recvcounts );
+ free( recvdispls );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm allreduce test";
+*/
+
+/* Intercommunicator MPI_Allreduce test.  In an intercomm allreduce each
+   group receives the reduction of the REMOTE group's contributions:
+   left ranks all send i, right ranks all send -i, so with MPI_SUM the
+   left side expects -i*rsize and the right side i*rsize. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *sendbuf = 0, *recvbuf = 0;
+ int leftGroup, i, count, rank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_remote_size( comm, &rsize );
+
+ /* To improve reporting of problems about operations, we
+ change the error handler to errors return */
+ MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* printf( "rank = %d(%d)\n", rank, leftGroup ); fflush(stdout); */
+ sendbuf = (int *)malloc( count * sizeof(int) );
+ recvbuf = (int *)malloc( count * sizeof(int) );
+ if (leftGroup) {
+ for (i=0; i<count; i++) sendbuf[i] = i;
+ }
+ else {
+ for (i=0; i<count; i++) sendbuf[i] = -i;
+ }
+ for (i=0; i<count; i++) recvbuf[i] = 0;
+ err = MPI_Allreduce( sendbuf, recvbuf, count, datatype,
+ MPI_SUM, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* In each process should be the sum of the values from the
+ other process */
+ /* all rsize remote ranks contributed the same vector, hence the
+    factor rsize */
+ if (leftGroup) {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != -i * rsize) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "recvbuf[%d] = %d\n", i, recvbuf[i] );
+ }
+ }
+ }
+ }
+ else {
+ for (i=0; i<count; i++) {
+ if (recvbuf[i] != i * rsize) {
+ errs++;
+ if (errs < 10) {
+ fprintf( stderr, "recvbuf[%d] = %d\n", i, recvbuf[i] );
+ }
+ }
+ }
+ }
+ free( sendbuf );
+ free( recvbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Simple intercomm alltoall test";
+*/
+
+/* Intercommunicator MPI_Alltoall test, used unidirectionally: the left
+   group sends (count 0 on receive), the right group receives (count 0
+   on send).  Left rank r sends values i+r to each remote rank, so right
+   ranks expect recvbuf[j*count+i] == i+j. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, err;
+ int *sendbuf = 0, *recvbuf = 0;
+ int leftGroup, i, j, idx, count, rrank, rsize;
+ MPI_Comm comm;
+ MPI_Datatype datatype;
+
+ MTest_Init( &argc, &argv );
+
+ datatype = MPI_INT;
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+ for (count = 1; count < 66000; count = 2 * count) {
+ /* Get an intercommunicator */
+ MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_rank( comm, &rrank );
+ sendbuf = (int *)malloc( rsize * count * sizeof(int) );
+ recvbuf = (int *)malloc( rsize * count * sizeof(int) );
+ for (i=0; i<rsize*count; i++) recvbuf[i] = -1;
+ if (leftGroup) {
+ /* every remote destination j gets the same block i + rrank */
+ idx = 0;
+ for (j=0; j<rsize; j++) {
+ for (i=0; i<count; i++) {
+ sendbuf[idx++] = i + rrank;
+ }
+ }
+ /* recv count 0: this side only sends */
+ err = MPI_Alltoall( sendbuf, count, datatype,
+ NULL, 0, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ }
+ else {
+ int rank, size;
+
+ MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size( comm, &size );
+
+ /* In the right group */
+ /* send count 0: this side only receives */
+ err = MPI_Alltoall( NULL, 0, datatype,
+ recvbuf, count, datatype, comm );
+ if (err) {
+ errs++;
+ MTestPrintError( err );
+ }
+ /* Check that we have received the correct data */
+ idx = 0;
+ for (j=0; j<rsize; j++) {
+ for (i=0; i<count; i++) {
+ if (recvbuf[idx++] != i + j) {
+ errs++;
+ if (errs < 10)
+ fprintf( stderr, "buf[%d] = %d on %d\n",
+ i, recvbuf[i], rank );
+ }
+ }
+ }
+ }
+ free( recvbuf );
+ free( sendbuf );
+ }
+ MTestFreeComm( &comm );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2001 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+/*
+ This program tests MPI_Alltoallv by having processor i send different
+ amounts of data to each processor.
+
+ Because there are separate send and receive types to alltoallv,
+ there need to be tests to rearrange data on the fly. Not done yet.
+
+ The first test sends i items to processor i from all processors.
+
+ Currently, the test uses only MPI_INT; this is adequate for testing systems
+ that use point-to-point operations
+ */
+
+int main( int argc, char **argv )
+{
+ MPI_Comm comm;
+ int *sbuf, *rbuf;
+ int rank, size, lsize, asize;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+ int leftGroup;
+
+ MTest_Init( &argc, &argv );
+ err = 0;
+
+ while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ if (comm == MPI_COMM_NULL) continue;
+
+ /* Create the buffer */
+ MPI_Comm_size( comm, &lsize );
+ MPI_Comm_remote_size( comm, &size );
+ asize = (lsize > size) ? lsize : size;
+ MPI_Comm_rank( comm, &rank );
+ sbuf = (int *)malloc( size * size * sizeof(int) );
+ rbuf = (int *)malloc( asize * asize * sizeof(int) );
+ if (!sbuf || !rbuf) {
+ fprintf( stderr, "Could not allocated buffers!\n" );
+ MPI_Abort( comm, 1 );
+ }
+
+ /* Load up the buffers */
+ for (i=0; i<size*size; i++) {
+ sbuf[i] = i + 100*rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *)malloc( size * sizeof(int) );
+ recvcounts = (int *)malloc( size * sizeof(int) );
+ rdispls = (int *)malloc( size * sizeof(int) );
+ sdispls = (int *)malloc( size * sizeof(int) );
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf( stderr, "Could not allocate arg items!\n" );
+ MPI_Abort( comm, 1 );
+ }
+ for (i=0; i<size; i++) {
+ sendcounts[i] = i;
+ sdispls[i] = (i * (i+1))/2;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank;
+ }
+ MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm );
+
+ /* Check rbuf */
+ for (i=0; i<size; i++) {
+ p = rbuf + rdispls[i];
+ for (j=0; j<rank; j++) {
+ if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
+ fprintf( stderr, "[%d] got&nb