add_executable(allgather2 allgather2.c)
add_executable(allgather3 allgather3.c)
+ add_executable(allgather_struct allgather_struct.c)
add_executable(allgatherv2 allgatherv2.c)
add_executable(allgatherv3 allgatherv3.c)
if(HAVE_PRIVATIZATION)
# add_executable(alltoallw1 alltoallw1.c)
# add_executable(alltoallw2 alltoallw2.c)
# add_executable(alltoallw_zeros alltoallw_zeros.c)
- add_executable(bcast2 bcast2.c)
- add_executable(bcast3 bcast3.c)
+ add_executable(bcast_full bcast.c)
+ add_executable(bcast_min_datatypes bcast.c)
+ add_executable(bcast_comm_world bcast.c)
add_executable(bcasttest bcasttest.c)
add_executable(bcastzerotype bcastzerotype.c)
add_executable(coll10 coll10.c)
add_executable(exscan2 exscan2.c)
add_executable(exscan exscan.c)
add_executable(gather2 gather2.c)
-# add_executable(gather2_save gather2_save.c)
add_executable(gather gather.c)
# add_executable(iallred iallred.c)
# add_executable(ibarrier ibarrier.c)
target_link_libraries(allgatherv2 simgrid mtest_c)
target_link_libraries(allgatherv3 simgrid mtest_c)
target_link_libraries(allgatherv4 simgrid mtest_c)
+ target_link_libraries(allgather_struct simgrid mtest_c)
target_link_libraries(allred2 simgrid mtest_c)
target_link_libraries(allred3 simgrid mtest_c)
target_link_libraries(allred4 simgrid mtest_c)
# target_link_libraries(alltoallw1 simgrid mtest_c)
# target_link_libraries(alltoallw2 simgrid mtest_c)
# target_link_libraries(alltoallw_zeros simgrid mtest_c)
- target_link_libraries(bcast2 simgrid mtest_c)
- target_link_libraries(bcast3 simgrid mtest_c)
+ target_link_libraries(bcast_full simgrid mtest_c)
+ target_link_libraries(bcast_min_datatypes simgrid mtest_c)
+ target_link_libraries(bcast_comm_world simgrid mtest_c)
target_link_libraries(bcasttest simgrid mtest_c)
target_link_libraries(bcastzerotype simgrid mtest_c)
target_link_libraries(coll10 simgrid mtest_c)
target_link_libraries(exscan2 simgrid mtest_c)
target_link_libraries(exscan simgrid mtest_c)
target_link_libraries(gather2 simgrid mtest_c)
-# target_link_libraries(gather2_save simgrid mtest_c)
target_link_libraries(gather simgrid mtest_c)
# target_link_libraries(iallred simgrid mtest_c)
# target_link_libraries(ibarrier simgrid mtest_c)
# target_link_libraries(uoplong simgrid mtest_c)
set_target_properties(allred PROPERTIES COMPILE_FLAGS "-O0" LINK_FLAGS "-O0")
+ set_target_properties(bcast_min_datatypes PROPERTIES COMPILE_FLAGS "-DBCAST_MIN_DATATYPES_ONLY" LINK_FLAGS "-DBCAST_MIN_DATATYPES_ONLY")
+ set_target_properties(bcast_comm_world PROPERTIES COMPILE_FLAGS "-DBCAST_COMM_WORLD_ONLY" LINK_FLAGS "-DBCAST_COMM_WORLD_ONLY")
if(HAVE_THREAD_CONTEXTS)
ADD_TEST(test-smpi-mpich3-coll-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
${CMAKE_CURRENT_SOURCE_DIR}/alltoallw1.c
${CMAKE_CURRENT_SOURCE_DIR}/alltoallw2.c
${CMAKE_CURRENT_SOURCE_DIR}/alltoallw_zeros.c
- ${CMAKE_CURRENT_SOURCE_DIR}/bcast2.c
- ${CMAKE_CURRENT_SOURCE_DIR}/bcast3.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast.c
${CMAKE_CURRENT_SOURCE_DIR}/bcasttest.c
${CMAKE_CURRENT_SOURCE_DIR}/bcastzerotype.c
${CMAKE_CURRENT_SOURCE_DIR}/coll10.c
${CMAKE_CURRENT_SOURCE_DIR}/exscan2.c
${CMAKE_CURRENT_SOURCE_DIR}/exscan.c
${CMAKE_CURRENT_SOURCE_DIR}/gather2.c
- ${CMAKE_CURRENT_SOURCE_DIR}/gather2_save.c
${CMAKE_CURRENT_SOURCE_DIR}/gather.c
${CMAKE_CURRENT_SOURCE_DIR}/iallred.c
${CMAKE_CURRENT_SOURCE_DIR}/ibarrier.c
/* Gather data from a vector to contiguous. Use IN_PLACE */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
double *vecout;
MPI_Comm comm;
- int count, minsize = 2;
- int i, errs = 0;
- int rank, size;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
+
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
for (count = 1; count < 9000; count = count * 2) {
- vecout = (double *)malloc( size * count * sizeof(double) );
-
- for (i=0; i<count; i++) {
- vecout[rank*count+i] = rank*count+i;
+ vecout = (double *) malloc(size * count * sizeof(double));
+
+ for (i = 0; i < count; i++) {
+ vecout[rank * count + i] = rank * count + i;
}
- MPI_Allgather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
- vecout, count, MPI_DOUBLE, comm );
- for (i=0; i<count*size; i++) {
+ MPI_Allgather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, vecout, count, MPI_DOUBLE, comm);
+ for (i = 0; i < count * size; i++) {
if (vecout[i] != i) {
errs++;
if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d\n",
- i, (int)vecout[i] );
+ fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
}
}
}
- free( vecout );
+ free(vecout);
}
- MTestFreeComm( &comm );
+ MTestFreeComm(&comm);
}
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- vecout = (double *) malloc(size * sizeof(double));
- if (MPI_SUCCESS == MPI_Allgather(&vecout[rank], 1, MPI_DOUBLE,
- vecout, 1, MPI_DOUBLE, MPI_COMM_WORLD))
- errs++;
- free(vecout);
-#endif
-
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
-
/* Gather data from a vector to contiguous. */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
double *vecout, *invec;
MPI_Comm comm;
- int count, minsize = 2;
- int i, errs = 0;
- int rank, size;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
+
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
for (count = 1; count < 9000; count = count * 2) {
- invec = (double *)malloc( count * sizeof(double) );
- vecout = (double *)malloc( size * count * sizeof(double) );
-
- for (i=0; i<count; i++) {
- invec[i] = rank*count+i;
+ invec = (double *) malloc(count * sizeof(double));
+ vecout = (double *) malloc(size * count * sizeof(double));
+
+ for (i = 0; i < count; i++) {
+ invec[i] = rank * count + i;
}
- MPI_Allgather( invec, count, MPI_DOUBLE,
- vecout, count, MPI_DOUBLE, comm );
- for (i=0; i<count*size; i++) {
+ MPI_Allgather(invec, count, MPI_DOUBLE, vecout, count, MPI_DOUBLE, comm);
+ for (i = 0; i < count * size; i++) {
if (vecout[i] != i) {
errs++;
if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d\n",
- i, (int)vecout[i] );
+ fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
}
}
}
- free( invec );
- free( vecout );
+ free(invec);
+ free(vecout);
}
- MTestFreeComm( &comm );
+ MTestFreeComm(&comm);
}
/* Do a zero byte gather */
- MPI_Allgather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, MPI_COMM_WORLD );
+ MPI_Allgather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, MPI_COMM_WORLD);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
-
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdlib.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+/* Allgather a two-field struct datatype. This test
+ may trigger bugs such as when the implementation
+ does not handle well misaligned types.*/
+
+typedef struct {
+ int first;
+ long second;
+} int_long_t;
+
+int main(int argc, char **argv)
+{
+ MPI_Comm comm;
+ int minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int_long_t object;
+ MPI_Datatype type;
+ MPI_Aint begin;
+ MPI_Aint displacements[2];
+ MPI_Datatype types[] = { MPI_INT, MPI_LONG };
+ int blocklength[2] = { 1, 1 };
+ int_long_t* gathered_objects;
+
+ MTest_Init(&argc, &argv);
+
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ gathered_objects = (int_long_t*) malloc (size*sizeof(int_long_t));
+
+ /* Local object */
+ object.first = rank;
+ object.second = rank * 10;
+
+ /* Datatype creation */
+ MPI_Get_address(&object, &begin);
+ MPI_Get_address(&object.first, &displacements[0]);
+ MPI_Get_address(&object.second, &displacements[1]);
+
+ for (i = 0; i != 2; ++i)
+ displacements[i] -= begin;
+
+ MPI_Type_create_struct(2, &blocklength[0], &displacements[0], &types[0], &type);
+ MPI_Type_commit(&type);
+
+ MPI_Allgather(&object, 1, type, gathered_objects, 1, type, comm);
+
+ for (i = 0; i < size; i++) {
+ if (gathered_objects[i].first != i || gathered_objects[i].second != i * 10)
+ errs++;
+ }
+
+ MPI_Type_free(&type);
+ MTestFreeComm(&comm);
+ free(gathered_objects);
+ }
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
#include <stdlib.h>
#include <stdio.h>
-/* Gather data from a vector to contiguous. Use IN_PLACE. This is
+/* Gather data from a vector to contiguous. Use IN_PLACE. This is
the trivial version based on the allgather test (allgatherv but with
constant data sizes) */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
double *vecout;
MPI_Comm comm;
- int count, minsize = 2;
- int i, errs = 0;
- int rank, size;
- int *displs, *recvcounts;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int *displs, *recvcounts;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ displs = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
- displs = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
-
for (count = 1; count < 9000; count = count * 2) {
- vecout = (double *)malloc( size * count * sizeof(double) );
-
- for (i=0; i<count; i++) {
- vecout[rank*count+i] = rank*count+i;
+ vecout = (double *) malloc(size * count * sizeof(double));
+
+ for (i = 0; i < count; i++) {
+ vecout[rank * count + i] = rank * count + i;
}
- for (i=0; i<size; i++) {
+ for (i = 0; i < size; i++) {
recvcounts[i] = count;
- displs[i] = i * count;
+ displs[i] = i * count;
}
- MPI_Allgatherv( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
- vecout, recvcounts, displs, MPI_DOUBLE, comm );
- for (i=0; i<count*size; i++) {
+ MPI_Allgatherv(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, recvcounts, displs, MPI_DOUBLE, comm);
+ for (i = 0; i < count * size; i++) {
if (vecout[i] != i) {
errs++;
if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d\n",
- i, (int)vecout[i] );
+ fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
}
}
}
- free( vecout );
+ free(vecout);
}
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- vecout = (double *) malloc(size * sizeof(double));
- if (MPI_SUCCESS == MPI_Allgatherv(&vecout[rank * recvcounts[rank]], recvcounts[rank], MPI_DOUBLE,
- vecout, recvcounts, displs, MPI_DOUBLE, comm))
- errs++;
- free(vecout);
-#endif
-
- free( displs );
- free( recvcounts );
- MTestFreeComm( &comm );
+ free(displs);
+ free(recvcounts);
+ MTestFreeComm(&comm);
}
-
- MTest_Finalize( errs );
+
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
-
#include <stdlib.h>
#include <stdio.h>
-/* Gather data from a vector to contiguous. This is
+/* Gather data from a vector to contiguous. This is
the trivial version based on the allgather test (allgatherv but with
constant data sizes) */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
double *vecout, *invec;
MPI_Comm comm;
- int count, minsize = 2;
- int i, errs = 0;
- int rank, size;
- int *displs, *recvcounts;
+ int count, minsize = 2;
+ int i, errs = 0;
+ int rank, size;
+ int *displs, *recvcounts;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ displs = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
- displs = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
-
for (count = 1; count < 9000; count = count * 2) {
- invec = (double *)malloc( count * sizeof(double) );
- vecout = (double *)malloc( size * count * sizeof(double) );
-
- for (i=0; i<count; i++) {
- invec[i] = rank*count+i;
+ invec = (double *) malloc(count * sizeof(double));
+ vecout = (double *) malloc(size * count * sizeof(double));
+
+ for (i = 0; i < count; i++) {
+ invec[i] = rank * count + i;
}
- for (i=0; i<size; i++) {
+ for (i = 0; i < size; i++) {
recvcounts[i] = count;
- displs[i] = i * count;
+ displs[i] = i * count;
}
- MPI_Allgatherv( invec, count, MPI_DOUBLE,
- vecout, recvcounts, displs, MPI_DOUBLE, comm );
- for (i=0; i<count*size; i++) {
+ MPI_Allgatherv(invec, count, MPI_DOUBLE, vecout, recvcounts, displs, MPI_DOUBLE, comm);
+ for (i = 0; i < count * size; i++) {
if (vecout[i] != i) {
errs++;
if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d\n",
- i, (int)vecout[i] );
+ fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
}
}
}
- free( invec );
- free( vecout );
+ free(invec);
+ free(vecout);
}
- free( displs );
- free( recvcounts );
- MTestFreeComm( &comm );
+ free(displs);
+ free(recvcounts);
+ MTestFreeComm(&comm);
}
-
- MTest_Finalize( errs );
+
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
-
#include <stdint.h>
#endif
-SMPI_VARINIT_GLOBAL(count, int);
-SMPI_VARINIT_GLOBAL(size, int);
-SMPI_VARINIT_GLOBAL(rank, int);
-SMPI_VARINIT_GLOBAL(cerrcnt, int);
-
-struct int_test { int a; int b; };
-struct long_test { long a; int b; };
-struct short_test { short a; int b; };
-struct float_test { float a; int b; };
-struct double_test { double a; int b; };
+int count, size, rank;
+int cerrcnt;
+
+struct int_test {
+ int a;
+ int b;
+};
+struct long_test {
+ long a;
+ int b;
+};
+struct short_test {
+ short a;
+ int b;
+};
+struct float_test {
+ float a;
+ int b;
+};
+struct double_test {
+ double a;
+ int b;
+};
#define mpi_op2str(op) \
((op == MPI_SUM) ? "MPI_SUM" : \
/* calloc to avoid spurious valgrind warnings when "type" has padding bytes */
#define DECL_MALLOC_IN_OUT_SOL(type) \
type *in, *out, *sol; \
- in = (type *) calloc(SMPI_VARGET_GLOBAL(count), sizeof(type)); \
- out = (type *) calloc(SMPI_VARGET_GLOBAL(count), sizeof(type)); \
- sol = (type *) calloc(SMPI_VARGET_GLOBAL(count), sizeof(type));
+ in = (type *) calloc(count, sizeof(type)); \
+ out = (type *) calloc(count, sizeof(type)); \
+ sol = (type *) calloc(count, sizeof(type));
#define SET_INDEX_CONST(arr, val) \
{ \
int i; \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) \
+ for (i = 0; i < count; i++) \
arr[i] = val; \
}
#define SET_INDEX_SUM(arr, val) \
{ \
int i; \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) \
+ for (i = 0; i < count; i++) \
arr[i] = i + val; \
}
#define SET_INDEX_FACTOR(arr, val) \
{ \
int i; \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) \
+ for (i = 0; i < count; i++) \
arr[i] = i * (val); \
}
#define SET_INDEX_POWER(arr, val) \
{ \
int i, j; \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) { \
+ for (i = 0; i < count; i++) { \
(arr)[i] = 1; \
for (j = 0; j < (val); j++) \
arr[i] *= i; \
if (lerrcnt) { \
MPI_Type_get_name(mpi_type, name, &len); \
fprintf(stderr, "(%d) Error for type %s and op %s\n", \
- SMPI_VARGET_GLOBAL(rank), name, mpi_op2str(mpi_op)); \
+ rank, name, mpi_op2str(mpi_op)); \
} \
free(in); free(out); free(sol); \
- } while(0)
+ } while (0)
-/* The logic on the error check on MPI_Allreduce assumes that all
+/* The logic on the error check on MPI_Allreduce assumes that all
MPI_Allreduce routines return a failure if any do - this is sufficient
for MPI implementations that reject some of the valid op/datatype pairs
- (and motivated this addition, as some versions of the IBM MPI
+ (and motivated this addition, as some versions of the IBM MPI
failed in just this way).
*/
#define ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol) \
{ \
int i, rc, lerrcnt = 0; \
- rc = MPI_Allreduce(in, out, SMPI_VARGET_GLOBAL(count), mpi_type, mpi_op, MPI_COMM_WORLD); \
- if (rc) { lerrcnt++; SMPI_VARGET_GLOBAL(cerrcnt)++; MTestPrintError( rc ); } \
+ rc = MPI_Allreduce(in, out, count, mpi_type, mpi_op, MPI_COMM_WORLD); \
+ if (rc) { lerrcnt++; cerrcnt++; MTestPrintError(rc); } \
else { \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) { \
+ for (i = 0; i < count; i++) { \
if (out[i] != sol[i]) { \
- SMPI_VARGET_GLOBAL(cerrcnt)++; \
+ cerrcnt++; \
lerrcnt++; \
} \
} \
#define STRUCT_ALLREDUCE_AND_FREE(mpi_type, mpi_op, in, out, sol) \
{ \
int i, rc, lerrcnt = 0; \
- rc = MPI_Allreduce(in, out, SMPI_VARGET_GLOBAL(count), mpi_type, mpi_op, MPI_COMM_WORLD); \
- if (rc) { lerrcnt++; SMPI_VARGET_GLOBAL(cerrcnt)++; MTestPrintError( rc ); } \
+ rc = MPI_Allreduce(in, out, count, mpi_type, mpi_op, MPI_COMM_WORLD); \
+ if (rc) { lerrcnt++; cerrcnt++; MTestPrintError(rc); } \
else { \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) { \
+ for (i = 0; i < count; i++) { \
if ((out[i].a != sol[i].a) || (out[i].b != sol[i].b)) { \
- SMPI_VARGET_GLOBAL(cerrcnt)++; \
+ cerrcnt++; \
lerrcnt++; \
} \
} \
#define SET_INDEX_STRUCT_CONST(arr, val, el) \
{ \
int i; \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) \
+ for (i = 0; i < count; i++) \
arr[i].el = val; \
}
#define SET_INDEX_STRUCT_SUM(arr, val, el) \
{ \
int i; \
- for (i = 0; i < SMPI_VARGET_GLOBAL(count); i++) \
+ for (i = 0; i < count; i++) \
arr[i].el = i + (val); \
}
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
SET_INDEX_SUM(in, 0); \
- SET_INDEX_FACTOR(sol, SMPI_VARGET_GLOBAL(size)); \
+ SET_INDEX_FACTOR(sol, size); \
SET_INDEX_CONST(out, 0); \
ALLREDUCE_AND_FREE(mpi_type, MPI_SUM, in, out, sol); \
}
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
SET_INDEX_SUM(in, 0); \
- SET_INDEX_POWER(sol, SMPI_VARGET_GLOBAL(size)); \
+ SET_INDEX_POWER(sol, size); \
SET_INDEX_CONST(out, 0); \
ALLREDUCE_AND_FREE(mpi_type, MPI_PROD, in, out, sol); \
}
#define max_test1(type, mpi_type) \
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
- SET_INDEX_SUM(in, SMPI_VARGET_GLOBAL(rank)); \
- SET_INDEX_SUM(sol, SMPI_VARGET_GLOBAL(size) - 1); \
+ SET_INDEX_SUM(in, rank); \
+ SET_INDEX_SUM(sol, size - 1); \
SET_INDEX_CONST(out, 0); \
ALLREDUCE_AND_FREE(mpi_type, MPI_MAX, in, out, sol); \
}
#define min_test1(type, mpi_type) \
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
- SET_INDEX_SUM(in, SMPI_VARGET_GLOBAL(rank)); \
+ SET_INDEX_SUM(in, rank); \
SET_INDEX_SUM(sol, 0); \
SET_INDEX_CONST(out, 0); \
ALLREDUCE_AND_FREE(mpi_type, MPI_MIN, in, out, sol); \
}
#define lor_test1(type, mpi_type) \
- const_test(type, mpi_type, MPI_LOR, (SMPI_VARGET_GLOBAL(rank) & 0x1), (SMPI_VARGET_GLOBAL(size) > 1), 0)
+ const_test(type, mpi_type, MPI_LOR, (rank & 0x1), (size > 1), 0)
#define lor_test2(type, mpi_type) \
const_test(type, mpi_type, MPI_LOR, 0, 0, 0)
#define lxor_test1(type, mpi_type) \
- const_test(type, mpi_type, MPI_LXOR, (SMPI_VARGET_GLOBAL(rank) == 1), (SMPI_VARGET_GLOBAL(size) > 1), 0)
+ const_test(type, mpi_type, MPI_LXOR, (rank == 1), (size > 1), 0)
#define lxor_test2(type, mpi_type) \
const_test(type, mpi_type, MPI_LXOR, 0, 0, 0)
#define lxor_test3(type, mpi_type) \
- const_test(type, mpi_type, MPI_LXOR, 1, (SMPI_VARGET_GLOBAL(size) & 0x1), 0)
+ const_test(type, mpi_type, MPI_LXOR, 1, (size & 0x1), 0)
#define land_test1(type, mpi_type) \
- const_test(type, mpi_type, MPI_LAND, (SMPI_VARGET_GLOBAL(rank) & 0x1), 0, 0)
+ const_test(type, mpi_type, MPI_LAND, (rank & 0x1), 0, 0)
#define land_test2(type, mpi_type) \
const_test(type, mpi_type, MPI_LAND, 1, 1, 0)
#define bor_test1(type, mpi_type) \
- const_test(type, mpi_type, MPI_BOR, (SMPI_VARGET_GLOBAL(rank) & 0x3), ((SMPI_VARGET_GLOBAL(size) < 3) ? SMPI_VARGET_GLOBAL(size) - 1 : 0x3), 0)
+ const_test(type, mpi_type, MPI_BOR, (rank & 0x3), ((size < 3) ? size - 1 : 0x3), 0)
#define bxor_test1(type, mpi_type) \
- const_test(type, mpi_type, MPI_BXOR, (SMPI_VARGET_GLOBAL(rank) == 1) * 0xf0, (SMPI_VARGET_GLOBAL(size) > 1) * 0xf0, 0)
+ const_test(type, mpi_type, MPI_BXOR, (rank == 1) * 0xf0, (size > 1) * 0xf0, 0)
#define bxor_test2(type, mpi_type) \
const_test(type, mpi_type, MPI_BXOR, 0, 0, 0)
#define bxor_test3(type, mpi_type) \
- const_test(type, mpi_type, MPI_BXOR, ~0, (SMPI_VARGET_GLOBAL(size) &0x1) ? ~0 : 0, 0)
+ const_test(type, mpi_type, MPI_BXOR, ~0, (size &0x1) ? ~0 : 0, 0)
#define band_test1(type, mpi_type) \
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
- if (SMPI_VARGET_GLOBAL(rank) == SMPI_VARGET_GLOBAL(size)-1) { \
+ if (rank == size-1) { \
SET_INDEX_SUM(in, 0); \
} \
else { \
#define band_test2(type, mpi_type) \
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
- if (SMPI_VARGET_GLOBAL(rank) == SMPI_VARGET_GLOBAL(size)-1) { \
+ if (rank == size-1) { \
SET_INDEX_SUM(in, 0); \
} \
else { \
#define maxloc_test(type, mpi_type) \
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
- SET_INDEX_STRUCT_SUM(in, SMPI_VARGET_GLOBAL(rank), a); \
- SET_INDEX_STRUCT_CONST(in, SMPI_VARGET_GLOBAL(rank), b); \
- SET_INDEX_STRUCT_SUM(sol, SMPI_VARGET_GLOBAL(size) - 1, a); \
- SET_INDEX_STRUCT_CONST(sol, SMPI_VARGET_GLOBAL(size) - 1, b); \
+ SET_INDEX_STRUCT_SUM(in, rank, a); \
+ SET_INDEX_STRUCT_CONST(in, rank, b); \
+ SET_INDEX_STRUCT_SUM(sol, size - 1, a); \
+ SET_INDEX_STRUCT_CONST(sol, size - 1, b); \
SET_INDEX_STRUCT_CONST(out, 0, a); \
SET_INDEX_STRUCT_CONST(out, -1, b); \
STRUCT_ALLREDUCE_AND_FREE(mpi_type, MPI_MAXLOC, in, out, sol); \
#define minloc_test(type, mpi_type) \
{ \
DECL_MALLOC_IN_OUT_SOL(type); \
- SET_INDEX_STRUCT_SUM(in, SMPI_VARGET_GLOBAL(rank), a); \
- SET_INDEX_STRUCT_CONST(in, SMPI_VARGET_GLOBAL(rank), b); \
+ SET_INDEX_STRUCT_SUM(in, rank, a); \
+ SET_INDEX_STRUCT_CONST(in, rank, b); \
SET_INDEX_STRUCT_SUM(sol, 0, a); \
SET_INDEX_STRUCT_CONST(sol, 0, b); \
SET_INDEX_STRUCT_CONST(out, 0, a); \
#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
#define test_types_set_mpi_3_0_integer(op,post) do { \
- op##_test##post(MPI_SMPI_VARGET_GLOBAL(count), MPI_SMPI_VARGET_GLOBAL(count)); \
+ op##_test##post(MPI_Count, MPI_COUNT); \
} while (0)
#else
#define test_types_set_mpi_3_0_integer(op,post) do { } while (0)
op##_test##post(unsigned char, MPI_BYTE); \
}
-/* Make sure that we test complex and double complex, even if long
+/* Make sure that we test complex and double complex, even if long
double complex is not available */
#if defined(USE_LONG_DOUBLE_COMPLEX)
#else
#if MTEST_HAVE_MIN_MPI_VERSION(2,2) && defined(HAVE_FLOAT__COMPLEX) \
- && defined(HAVE_DOUBLE__COMPLEX)
+ && defined(HAVE_DOUBLE__COMPLEX)
#define test_types_set4(op, post) \
do { \
op##_test##post(float _Complex, MPI_C_FLOAT_COMPLEX); \
#define test_types_set5(op, post) do { } while (0)
#endif
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &SMPI_VARGET_GLOBAL(size));
- MPI_Comm_rank(MPI_COMM_WORLD, &SMPI_VARGET_GLOBAL(rank));
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (SMPI_VARGET_GLOBAL(size) < 2) {
- fprintf( stderr, "At least 2 processes required\n" );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ if (size < 2) {
+ fprintf(stderr, "At least 2 processes required\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- /* Set errors return so that we can provide better information
- should a routine reject one of the operand/datatype pairs */
- MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+ /* Set errors return so that we can provide better information
+ * should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- SMPI_VARGET_GLOBAL(count) = 10;
+ count = 10;
/* Allow an argument to override the count.
- Note that the product tests may fail if the count is very large.
+ * Note that the product tests may fail if the count is very large.
*/
if (argc >= 2) {
- SMPI_VARGET_GLOBAL(count) = atoi( argv[1] );
- if (SMPI_VARGET_GLOBAL(count) <= 0) {
- fprintf( stderr, "Invalid count argument %s\n", argv[1] );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
+ count = atoi(argv[1]);
+ if (count <= 0) {
+ fprintf(stderr, "Invalid count argument %s\n", argv[1]);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
}
test_types_set2(sum, 1);
minloc_test(struct float_test, MPI_FLOAT_INT);
minloc_test(struct double_test, MPI_DOUBLE_INT);
- MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( SMPI_VARGET_GLOBAL(cerrcnt) );
+ MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(cerrcnt);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test MPI_Allreduce with MPI_IN_PLACE";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
int *buf, i;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
-
- for (count = 1; count < 65000; count = count * 2) {
- /* Contiguous data */
- buf = (int *)malloc( count * sizeof(int) );
- for (i=0; i<count; i++) buf[i] = rank + i;
- MPI_Allreduce( MPI_IN_PLACE, buf, count, MPI_INT, MPI_SUM, comm );
- /* Check the results */
- for (i=0; i<count; i++) {
- int result = i * size + (size*(size-1))/2;
- if (buf[i] != result) {
- errs ++;
- if (errs < 10) {
- fprintf( stderr, "buf[%d] = %d expected %d\n",
- i, buf[i], result );
- }
- }
- }
- free( buf );
- }
- MTestFreeComm( &comm );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+ for (count = 1; count < 65000; count = count * 2) {
+ /* Contiguous data */
+ buf = (int *) malloc(count * sizeof(int));
+ for (i = 0; i < count; i++)
+ buf[i] = rank + i;
+ MPI_Allreduce(MPI_IN_PLACE, buf, count, MPI_INT, MPI_SUM, comm);
+ /* Check the results */
+ for (i = 0; i < count; i++) {
+ int result = i * size + (size * (size - 1)) / 2;
+ if (buf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "buf[%d] = %d expected %d\n", i, buf[i], result);
+ }
+ }
+ }
+ free(buf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/* We make the error count global so that we can easily control the output
- of error information (in particular, limiting it after the first 10
+ of error information (in particular, limiting it after the first 10
errors */
int errs = 0;
c(i,j) is cin[j+i*matSize]
*/
#define MAXCOL 256
-static int matSize = 0; /* Must be < MAXCOL */
+static int matSize = 0; /* Must be < MAXCOL */
static int max_offset = 0;
-void uop( void *, void *, int *, MPI_Datatype * );
-void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+void uop(void *, void *, int *, MPI_Datatype *);
+void uop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
- const int *cin = (const int *)cinPtr;
- int *cout = (int *)coutPtr;
+ const int *cin = (const int *) cinPtr;
+ int *cout = (int *) coutPtr;
int i, j, k, nmat;
int tempcol[MAXCOL];
int offset1, offset2;
- int matsize2 = matSize*matSize;
+ int matsize2 = matSize * matSize;
for (nmat = 0; nmat < *count; nmat++) {
- for (j=0; j<matSize; j++) {
- for (i=0; i<matSize; i++) {
- tempcol[i] = 0;
- for (k=0; k<matSize; k++) {
- /* col[i] += cin(i,k) * cout(k,j) */
- offset1 = k+i*matSize;
- offset2 = j+k*matSize;
- assert(offset1 < max_offset);
- assert(offset2 < max_offset);
- tempcol[i] += cin[offset1] * cout[offset2];
- }
- }
- for (i=0; i<matSize; i++) {
- offset1 = j+i*matSize;
- assert(offset1 < max_offset);
- cout[offset1] = tempcol[i];
- }
- }
- cin += matsize2;
- cout += matsize2;
+ for (j = 0; j < matSize; j++) {
+ for (i = 0; i < matSize; i++) {
+ tempcol[i] = 0;
+ for (k = 0; k < matSize; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ offset1 = k + i * matSize;
+ offset2 = j + k * matSize;
+ assert(offset1 < max_offset);
+ assert(offset2 < max_offset);
+ tempcol[i] += cin[offset1] * cout[offset2];
+ }
+ }
+ for (i = 0; i < matSize; i++) {
+ offset1 = j + i * matSize;
+ assert(offset1 < max_offset);
+ cout[offset1] = tempcol[i];
+ }
+ }
+ cin += matsize2;
+ cout += matsize2;
}
}
is the the matrix representing the permutation that shifts left by one.
As the final matrix (in the size-1 position), we use the matrix that
shifts RIGHT by one
-*/
-static void initMat( MPI_Comm comm, int mat[] )
+*/
+static void initMat(MPI_Comm comm, int mat[])
{
int i, j, size, rank;
int offset;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
- for (i=0; i<size*size; i++) {
- assert(i < max_offset);
- mat[i] = 0;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (i = 0; i < size * size; i++) {
+ assert(i < max_offset);
+ mat[i] = 0;
}
- if (rank < size-1) {
- /* Create the permutation matrix that exchanges r with r+1 */
- for (i=0; i<size; i++) {
- if (i == rank) {
- offset = ((i+1)%size) + i * size;
- assert(offset < max_offset);
- mat[offset] = 1;
- }
- else if (i == ((rank + 1)%size)) {
- offset = ((i+size-1)%size) + i * size;
- assert(offset < max_offset);
- mat[offset] = 1;
- }
- else {
- offset = i+i*size;
- assert(offset < max_offset);
- mat[offset] = 1;
- }
- }
+ if (rank < size - 1) {
+ /* Create the permutation matrix that exchanges r with r+1 */
+ for (i = 0; i < size; i++) {
+ if (i == rank) {
+ offset = ((i + 1) % size) + i * size;
+ assert(offset < max_offset);
+ mat[offset] = 1;
+ }
+ else if (i == ((rank + 1) % size)) {
+ offset = ((i + size - 1) % size) + i * size;
+ assert(offset < max_offset);
+ mat[offset] = 1;
+ }
+ else {
+ offset = i + i * size;
+ assert(offset < max_offset);
+ mat[offset] = 1;
+ }
+ }
}
else {
- /* Create the permutation matrix that shifts right by one */
- for (i=0; i<size; i++) {
- for (j=0; j<size; j++) {
- offset = j + i * size; /* location of c(i,j) */
- mat[offset] = 0;
- if ( ((j-i+size)%size) == 1 ) mat[offset] = 1;
- }
- }
-
+ /* Create the permutation matrix that shifts right by one */
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ offset = j + i * size; /* location of c(i,j) */
+ mat[offset] = 0;
+ if (((j - i + size) % size) == 1)
+ mat[offset] = 1;
+ }
+ }
+
}
}
/* Compare a matrix with the identity matrix */
-static int isIdentity( MPI_Comm comm, int mat[] )
+static int isIdentity(MPI_Comm comm, int mat[])
{
int i, j, size, rank, lerrs = 0;
int offset;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (i=0; i<size; i++) {
- for (j=0; j<size; j++) {
- if (i == j) {
- offset = j+i*size;
- assert(offset < max_offset);
- if (mat[offset] != 1) {
- lerrs++;
- if (errs + lerrs< 10) {
- printf( "[%d] mat[%d,%d] = %d, expected 1 for comm %s\n",
- rank, i,j, mat[offset], MTestGetIntracommName() );
- }
- }
- }
- else {
- offset = j+i*size;
- assert(offset < max_offset);
- if (mat[offset] != 0) {
- lerrs++;
- if (errs + lerrs< 10) {
- printf( "[%d] mat[%d,%d] = %d, expected 0 for comm %s\n",
- rank, i,j, mat[offset], MTestGetIntracommName() );
- }
- }
- }
- }
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ if (i == j) {
+ offset = j + i * size;
+ assert(offset < max_offset);
+ if (mat[offset] != 1) {
+ lerrs++;
+ if (errs + lerrs < 10) {
+ printf("[%d] mat[%d,%d] = %d, expected 1 for comm %s\n",
+ rank, i, j, mat[offset], MTestGetIntracommName());
+ }
+ }
+ }
+ else {
+ offset = j + i * size;
+ assert(offset < max_offset);
+ if (mat[offset] != 0) {
+ lerrs++;
+ if (errs + lerrs < 10) {
+ printf("[%d] mat[%d,%d] = %d, expected 0 for comm %s\n",
+ rank, i, j, mat[offset], MTestGetIntracommName());
+ }
+ }
+ }
+ }
}
return lerrs;
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int size;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
int *buf, *bufout;
MPI_Op op;
MPI_Datatype mattype;
- MTest_Init( &argc, &argv );
-
- MPI_Op_create( uop, 0, &op );
-
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) {
- continue;
- }
- MPI_Comm_size( comm, &size );
- matSize = size;
-
- /* Only one matrix for now */
- count = 1;
-
- /* A single matrix, the size of the communicator */
- MPI_Type_contiguous( size*size, MPI_INT, &mattype );
- MPI_Type_commit( &mattype );
-
- max_offset = count * size * size;
- buf = (int *)malloc( max_offset * sizeof(int) );
- if (!buf) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- bufout = (int *)malloc( max_offset * sizeof(int) );
- if (!bufout) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
-
- initMat( comm, buf );
- MPI_Allreduce( buf, bufout, count, mattype, op, comm );
- errs += isIdentity( comm, bufout );
-
- /* Try the same test, but using MPI_IN_PLACE */
- initMat( comm, bufout );
- MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
- errs += isIdentity( comm, bufout );
-
- free( buf );
- free( bufout );
-
- MPI_Type_free( &mattype );
- MTestFreeComm( &comm );
+ MTest_Init(&argc, &argv);
+
+ MPI_Op_create(uop, 0, &op);
+
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL) {
+ continue;
+ }
+ MPI_Comm_size(comm, &size);
+ matSize = size;
+
+ /* Only one matrix for now */
+ count = 1;
+
+ /* A single matrix, the size of the communicator */
+ MPI_Type_contiguous(size * size, MPI_INT, &mattype);
+ MPI_Type_commit(&mattype);
+
+ max_offset = count * size * size;
+ buf = (int *) malloc(max_offset * sizeof(int));
+ if (!buf) {
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ bufout = (int *) malloc(max_offset * sizeof(int));
+ if (!bufout) {
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ initMat(comm, buf);
+ MPI_Allreduce(buf, bufout, count, mattype, op, comm);
+ errs += isIdentity(comm, bufout);
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ initMat(comm, bufout);
+ MPI_Allreduce(MPI_IN_PLACE, bufout, count, mattype, op, comm);
+ errs += isIdentity(comm, bufout);
+
+ free(buf);
+ free(bufout);
+
+ MPI_Type_free(&mattype);
+ MTestFreeComm(&comm);
}
- MPI_Op_free( &op );
+ MPI_Op_free(&op);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test MPI_Allreduce with non-commutative user-defined operations using matrix rotations";
*/
-/* This example is similar to allred3.c, but uses only 3x3 matrics with
+/* This example is similar to allred3.c, but uses only 3x3 matrics with
integer-valued entries. This is an associative but not commutative
operation.
The number of matrices is the count argument. The matrix is stored
0 0 1 1 0 0
0 1 0) 0 0 1)
- The product
+ The product
I^k A I^(p-2-k-j) B I^j
- is
+ is
- ( 0 1 0
+ (0 1 0
0 0 1
- 1 0 0 )
+ 1 0 0)
- for all values of k, p, and j.
+ for all values of k, p, and j.
*/
-void matmult( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+void matmult(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype);
-void matmult( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+void matmult(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
- const int *cin = (const int *)cinPtr;
- int *cout = (int *)coutPtr;
+ const int *cin = (const int *) cinPtr;
+ int *cout = (int *) coutPtr;
int i, j, k, nmat;
int tempcol[3];
int offset1, offset2;
for (nmat = 0; nmat < *count; nmat++) {
- for (j=0; j<3; j++) {
- for (i=0; i<3; i++) {
- tempcol[i] = 0;
- for (k=0; k<3; k++) {
- /* col[i] += cin(i,k) * cout(k,j) */
- offset1 = k+i*3;
- offset2 = j+k*3;
- tempcol[i] += cin[offset1] * cout[offset2];
- }
- }
- for (i=0; i<3; i++) {
- offset1 = j+i*3;
- cout[offset1] = tempcol[i];
- }
- }
- /* Advance to the next matrix */
- cin += 9;
- cout += 9;
+ for (j = 0; j < 3; j++) {
+ for (i = 0; i < 3; i++) {
+ tempcol[i] = 0;
+ for (k = 0; k < 3; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ offset1 = k + i * 3;
+ offset2 = j + k * 3;
+ tempcol[i] += cin[offset1] * cout[offset2];
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ offset1 = j + i * 3;
+ cout[offset1] = tempcol[i];
+ }
+ }
+ /* Advance to the next matrix */
+ cin += 9;
+ cout += 9;
}
}
-/* Initialize the integer matrix as one of the
+/* Initialize the integer matrix as one of the
above matrix entries, as a function of count.
We guarantee that both the A and B matrices are included.
-*/
-static void initMat( int rank, int size, int nmat, int mat[] )
+*/
+static void initMat(int rank, int size, int nmat, int mat[])
{
int i, kind;
/* Zero the matrix */
- for (i=0; i<9; i++) {
- mat[i] = 0;
+ for (i = 0; i < 9; i++) {
+ mat[i] = 0;
}
/* Decide which matrix to create (I, A, or B) */
- if ( size == 2) {
- /* rank 0 is A, 1 is B */
- kind = 1 + rank;
+ if (size == 2) {
+ /* rank 0 is A, 1 is B */
+ kind = 1 + rank;
}
else {
- int tmpA, tmpB;
- /* Most ranks are identity matrices */
- kind = 0;
- /* Make sure exactly one rank gets the A matrix
- and one the B matrix */
- tmpA = size / 4;
- tmpB = (3 * size) / 4;
-
- if (rank == tmpA) kind = 1;
- if (rank == tmpB) kind = 2;
+ int tmpA, tmpB;
+ /* Most ranks are identity matrices */
+ kind = 0;
+ /* Make sure exactly one rank gets the A matrix
+ * and one the B matrix */
+ tmpA = size / 4;
+ tmpB = (3 * size) / 4;
+
+ if (rank == tmpA)
+ kind = 1;
+ if (rank == tmpB)
+ kind = 2;
}
-
+
switch (kind) {
- case 0: /* Identity */
- mat[0] = 1;
- mat[4] = 1;
- mat[8] = 1;
- break;
- case 1: /* A */
- mat[0] = 1;
- mat[5] = 1;
- mat[7] = 1;
- break;
- case 2: /* B */
- mat[1] = 1;
- mat[3] = 1;
- mat[8] = 1;
- break;
+ case 0: /* Identity */
+ mat[0] = 1;
+ mat[4] = 1;
+ mat[8] = 1;
+ break;
+ case 1: /* A */
+ mat[0] = 1;
+ mat[5] = 1;
+ mat[7] = 1;
+ break;
+ case 2: /* B */
+ mat[1] = 1;
+ mat[3] = 1;
+ mat[8] = 1;
+ break;
}
}
/* Compare a matrix with the known result */
-static int checkResult( int nmat, int mat[], const char *msg )
+static int checkResult(int nmat, int mat[], const char *msg)
{
int n, k, errs = 0, wrank;
- static int solution[9] = { 0, 1, 0,
- 0, 0, 1,
- 1, 0, 0 };
-
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
-
- for (n=0; n<nmat; n++) {
- for (k=0; k<9; k++) {
- if (mat[k] != solution[k]) {
- errs ++;
- if (errs == 1) {
- printf( "Errors for communicators %s\n",
- MTestGetIntracommName() ); fflush(stdout);
-
- }
- if (errs < 10) {
- printf( "[%d]matrix #%d(%s): Expected mat[%d,%d] = %d, got %d\n",
- wrank, n, msg, k / 3, k % 3, solution[k], mat[k] );
- fflush(stdout);
- }
- }
- }
- /* Advance to the next matrix */
- mat += 9;
+ static int solution[9] = { 0, 1, 0,
+ 0, 0, 1,
+ 1, 0, 0
+ };
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+
+ for (n = 0; n < nmat; n++) {
+ for (k = 0; k < 9; k++) {
+ if (mat[k] != solution[k]) {
+ errs++;
+ if (errs == 1) {
+ printf("Errors for communicators %s\n", MTestGetIntracommName());
+ fflush(stdout);
+
+ }
+ if (errs < 10) {
+ printf("[%d]matrix #%d(%s): Expected mat[%d,%d] = %d, got %d\n",
+ wrank, n, msg, k / 3, k % 3, solution[k], mat[k]);
+ fflush(stdout);
+ }
+ }
+ }
+ /* Advance to the next matrix */
+ mat += 9;
}
return errs;
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int size, rank;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
int *buf, *bufout;
MPI_Op op;
MPI_Datatype mattype;
int i;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
+
+ MPI_Op_create(matmult, 0, &op);
- MPI_Op_create( matmult, 0, &op );
-
/* A single rotation matrix (3x3, stored as 9 consequetive elements) */
- MPI_Type_contiguous( 9, MPI_INT, &mattype );
- MPI_Type_commit( &mattype );
+ MPI_Type_contiguous(9, MPI_INT, &mattype);
+ MPI_Type_commit(&mattype);
/* Sanity check: test that our routines work properly */
- { int one = 1;
- buf = (int *)malloc( 4*9 * sizeof(int) );
- initMat( 0, 4, 0, &buf[0] );
- initMat( 1, 4, 0, &buf[9] );
- initMat( 2, 4, 0, &buf[18] );
- initMat( 3, 4, 0, &buf[27] );
- matmult( &buf[0], &buf[9], &one, &mattype );
- matmult( &buf[9], &buf[18], &one, &mattype );
- matmult( &buf[18], &buf[27], &one, &mattype );
- checkResult( 1, &buf[27], "Sanity Check" );
- free(buf);
+ {
+ int one = 1;
+ buf = (int *) malloc(4 * 9 * sizeof(int));
+ initMat(0, 4, 0, &buf[0]);
+ initMat(1, 4, 0, &buf[9]);
+ initMat(2, 4, 0, &buf[18]);
+ initMat(3, 4, 0, &buf[27]);
+ matmult(&buf[0], &buf[9], &one, &mattype);
+ matmult(&buf[9], &buf[18], &one, &mattype);
+ matmult(&buf[18], &buf[27], &one, &mattype);
+ checkResult(1, &buf[27], "Sanity Check");
+ free(buf);
}
-
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
-
- for (count = 1; count < size; count ++ ) {
-
- /* Allocate the matrices */
- buf = (int *)malloc( count * 9 * sizeof(int) );
- if (!buf) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
-
- bufout = (int *)malloc( count * 9 * sizeof(int) );
- if (!bufout) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
-
- for (i=0; i < count; i++) {
- initMat( rank, size, i, &buf[i*9] );
- }
-
- MPI_Allreduce( buf, bufout, count, mattype, op, comm );
- errs += checkResult( count, bufout, "" );
-
- /* Try the same test, but using MPI_IN_PLACE */
- for (i=0; i < count; i++) {
- initMat( rank, size, i, &bufout[i*9] );
- }
- MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
- errs += checkResult( count, bufout, "IN_PLACE" );
-
- free( buf );
- free( bufout );
- }
- MTestFreeComm( &comm );
+
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+ for (count = 1; count < size; count++) {
+
+ /* Allocate the matrices */
+ buf = (int *) malloc(count * 9 * sizeof(int));
+ if (!buf) {
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ bufout = (int *) malloc(count * 9 * sizeof(int));
+ if (!bufout) {
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ for (i = 0; i < count; i++) {
+ initMat(rank, size, i, &buf[i * 9]);
+ }
+
+ MPI_Allreduce(buf, bufout, count, mattype, op, comm);
+ errs += checkResult(count, bufout, "");
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ for (i = 0; i < count; i++) {
+ initMat(rank, size, i, &bufout[i * 9]);
+ }
+ MPI_Allreduce(MPI_IN_PLACE, bufout, count, mattype, op, comm);
+ errs += checkResult(count, bufout, "IN_PLACE");
+
+ free(buf);
+ free(bufout);
+ }
+ MTestFreeComm(&comm);
}
-
- MPI_Op_free( &op );
- MPI_Type_free( &mattype );
- MTest_Finalize( errs );
+ MPI_Op_free(&op);
+ MPI_Type_free(&mattype);
+
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/* We make the error count global so that we can easily control the output
- of error information (in particular, limiting it after the first 10
+ of error information (in particular, limiting it after the first 10
errors */
int errs = 0;
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
MPI_Comm comm;
MPI_Datatype dtype;
- int count, *bufin, *bufout, size, i, minsize=1;
+ int count, *bufin, *bufout, size, i, minsize = 1;
- MTest_Init( &argc, &argv );
-
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) {
- continue;
- }
- MPI_Comm_size( comm, &size );
- count = size * 2;
- bufin = (int *)malloc( count * sizeof(int) );
- bufout = (int *)malloc( count * sizeof(int) );
- if (!bufin || !bufout) {
- fprintf( stderr, "Unable to allocated space for buffers (%d)\n",
- count );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- for (i=0; i<count; i++) {
- bufin[i] = i;
- bufout[i] = -1;
- }
+ MTest_Init(&argc, &argv);
- dtype = MPI_INT;
- MPI_Allreduce( bufin, bufout, count, dtype, MPI_SUM, comm );
- /* Check output */
- for (i=0; i<count; i++) {
- if (bufout[i] != i * size) {
- fprintf( stderr, "Expected bufout[%d] = %d but found %d\n",
- i, i * size, bufout[i] );
- errs++;
- }
- }
- free( bufin );
- free( bufout );
- MTestFreeComm( &comm );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL) {
+ continue;
+ }
+ MPI_Comm_size(comm, &size);
+ count = size * 2;
+ bufin = (int *) malloc(count * sizeof(int));
+ bufout = (int *) malloc(count * sizeof(int));
+ if (!bufin || !bufout) {
+ fprintf(stderr, "Unable to allocated space for buffers (%d)\n", count);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i = 0; i < count; i++) {
+ bufin[i] = i;
+ bufout[i] = -1;
+ }
+
+ dtype = MPI_INT;
+ MPI_Allreduce(bufin, bufout, count, dtype, MPI_SUM, comm);
+ /* Check output */
+ for (i = 0; i < count; i++) {
+ if (bufout[i] != i * size) {
+ fprintf(stderr, "Expected bufout[%d] = %d but found %d\n", i, i * size, bufout[i]);
+ errs++;
+ }
+ }
+ free(bufin);
+ free(bufout);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test MPI_Allreduce with apparent non-commutative operators";
*/
/* While the operator is in fact commutative, this forces the MPI code to
- run the code that is used for non-commutative operators, and for
- various message lengths. Other tests check truly non-commutative
+ run the code that is used for non-commutative operators, and for
+ various message lengths. Other tests check truly non-commutative
operators */
-void mysum( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+void mysum(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype);
-void mysum( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+void mysum(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
- const int *cin = (const int *)cinPtr;
- int *cout = (int *)coutPtr;
- int i, n = *count;
- for (i=0; i<n; i++)
- cout[i] += cin[i];
+ const int *cin = (const int *) cinPtr;
+ int *cout = (int *) coutPtr;
+ int i, n = *count;
+ for (i = 0; i < n; i++)
+ cout[i] += cin[i];
}
-int main( int argc, char *argv[] )
+
+int main(int argc, char *argv[])
{
- int errs = 0;
- int rank, size;
- int minsize = 2, count;
+ int errs = 0;
+ int rank, size;
+ int minsize = 2, count;
MPI_Comm comm;
- MPI_Op op;
- int *buf, i;
+ MPI_Op op;
+ int *buf, i;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Op_create( mysum, 0, &op );
+ MPI_Op_create(mysum, 0, &op);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
-
- for (count = 1; count < 65000; count = count * 2) {
- /* Contiguous data */
- buf = (int *)malloc( count * sizeof(int) );
- for (i=0; i<count; i++) buf[i] = rank + i;
- MPI_Allreduce( MPI_IN_PLACE, buf, count, MPI_INT, op, comm );
- /* Check the results */
- for (i=0; i<count; i++) {
- int result = i * size + (size*(size-1))/2;
- if (buf[i] != result) {
- errs ++;
- if (errs < 10) {
- fprintf( stderr, "buf[%d] = %d expected %d\n",
- i, buf[i], result );
- }
- }
- }
- free( buf );
- }
- MTestFreeComm( &comm );
- }
- MPI_Op_free( &op );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- /* Check to make sure that aliasing is disallowed correctly */
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (MPI_SUCCESS == MPI_Allreduce(&rank, &rank, 1, MPI_INT, MPI_OP_NULL, MPI_COMM_WORLD))
- errs++;
-#endif
+ for (count = 1; count < 65000; count = count * 2) {
+ /* Contiguous data */
+ buf = (int *) malloc(count * sizeof(int));
+ for (i = 0; i < count; i++)
+ buf[i] = rank + i;
+ MPI_Allreduce(MPI_IN_PLACE, buf, count, MPI_INT, op, comm);
+ /* Check the results */
+ for (i = 0; i < count; i++) {
+ int result = i * size + (size * (size - 1)) / 2;
+ if (buf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "buf[%d] = %d expected %d\n", i, buf[i], result);
+ }
+ }
+ }
+ free(buf);
+ }
+ MTestFreeComm(&comm);
+ }
+ MPI_Op_free(&op);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
* implementation to handle a flood of one-way messages.
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- double wscale = 10.0, scale;
- int numprocs, myid,i;
+ double wscale = 10.0, scale;
+ int numprocs, myid, i;
- MPI_Init(&argc,&argv);
- MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
- MPI_Comm_rank(MPI_COMM_WORLD,&myid);
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
+ MPI_Comm_rank(MPI_COMM_WORLD, &myid);
- for ( i=0; i<10000; i++) {
- MPI_Allreduce(&wscale,&scale,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
- }
+ for (i = 0; i < 10000; i++) {
+ MPI_Allreduce(&wscale, &scale, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+ }
- if (myid == 0) {
- /* If we get here at all, we're ok */
- printf( " No Errors\n" );
- }
- MPI_Finalize();
-
- return 0;
+ if (myid == 0) {
+ /* If we get here at all, we're ok */
+ printf(" No Errors\n");
+ }
+ MPI_Finalize();
+
+ return 0;
}
#include <stdio.h>
#include "mpitest.h"
#include <stdlib.h>
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
int *sendbuf, *recvbuf, *p;
int sendcount, recvcount;
int i, j;
MPI_Datatype sendtype, recvtype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- /* The following illustrates the use of the routines to
- run through a selection of communicators and datatypes.
- Use subsets of these for tests that do not involve combinations
- of communicators, datatypes, and counts of datatypes */
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
+ /* The following illustrates the use of the routines to
+ * run through a selection of communicators and datatypes.
+ * Use subsets of these for tests that do not involve combinations
+ * of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- /* printf( "Size of comm = %d\n", size ); */
- for (count = 1; count < 65000; count = count * 2) {
-
- /* Create a send buf and a receive buf suitable for testing
- all to all. */
- sendcount = count;
- recvcount = count;
- sendbuf = (int *)malloc( count * size * sizeof(int) );
- recvbuf = (int *)malloc( count * size * sizeof(int) );
- sendtype = MPI_INT;
- recvtype = MPI_INT;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- if (!sendbuf || !recvbuf) {
- errs++;
- fprintf( stderr, "Failed to allocate sendbuf and/or recvbuf\n" );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- for (i=0; i<count*size; i++)
- recvbuf[i] = -1;
- p = sendbuf;
- for (j=0; j<size; j++) {
- for (i=0; i<count; i++) {
- *p++ = j * size + rank + i;
- }
- }
+ /* printf("Size of comm = %d\n", size); */
+ for (count = 1; count < 65000; count = count * 2) {
- MPI_Alltoall( sendbuf, sendcount, sendtype,
- recvbuf, recvcount, recvtype, comm );
+ /* Create a send buf and a receive buf suitable for testing
+ * all to all. */
+ sendcount = count;
+ recvcount = count;
+ sendbuf = (int *) malloc(count * size * sizeof(int));
+ recvbuf = (int *) malloc(count * size * sizeof(int));
+ sendtype = MPI_INT;
+ recvtype = MPI_INT;
- p = recvbuf;
- for (j=0; j<size; j++) {
- for (i=0; i<count; i++) {
- if (*p != rank * size + j + i) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "Error with communicator %s and size=%d count=%d\n",
- MTestGetIntracommName(), size, count );
- fprintf( stderr, "recvbuf[%d,%d] = %d, should %d\n",
- j,i, *p, rank * size + j + i );
- }
- }
- p++;
- }
- }
+ if (!sendbuf || !recvbuf) {
+ errs++;
+ fprintf(stderr, "Failed to allocate sendbuf and/or recvbuf\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i = 0; i < count * size; i++)
+ recvbuf[i] = -1;
+ p = sendbuf;
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < count; i++) {
+ *p++ = j * size + rank + i;
+ }
+ }
+
+ MTest_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+
+ p = recvbuf;
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < count; i++) {
+ if (*p != rank * size + j + i) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "Error with communicator %s and size=%d count=%d\n",
+ MTestGetIntracommName(), size, count);
+ fprintf(stderr, "recvbuf[%d,%d] = %d, should %d\n",
+ j, i, *p, rank * size + j + i);
+ }
+ }
+ p++;
+ }
+ }
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
/* check MPI_IN_PLACE, added in MPI-2.2 */
p = recvbuf;
- for (j=0; j<size; j++) {
- for (i=0; i<count; i++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < count; i++) {
*p++ = j * size + rank + i;
}
}
- MPI_Alltoall( MPI_IN_PLACE, -1/*ignored*/, MPI_DATATYPE_NULL/*ignored*/,
- recvbuf, recvcount, recvtype, comm );
+ MPI_Alltoall(MPI_IN_PLACE, -1 /*ignored */ , MPI_DATATYPE_NULL /*ignored */ ,
+ recvbuf, recvcount, recvtype, comm);
p = recvbuf;
- for (j=0; j<size; j++) {
- for (i=0; i<count; i++) {
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < count; i++) {
if (*p != rank * size + j + i) {
errs++;
if (errs < 10) {
- fprintf( stderr, "Error (MPI_IN_PLACE) with communicator %s and size=%d count=%d\n",
- MTestGetIntracommName(), size, count );
- fprintf(stderr, "recvbuf[%d,%d] = %d, should be %d\n",
- j,i, *p, rank * size + j + i );
+ fprintf(stderr,
+ "Error (MPI_IN_PLACE) with communicator %s and size=%d count=%d\n",
+ MTestGetIntracommName(), size, count);
+ fprintf(stderr, "recvbuf[%d,%d] = %d, should be %d\n", j, i, *p,
+ rank * size + j + i);
}
}
p++;
}
#endif
- free( recvbuf );
- free( sendbuf );
- }
- MTestFreeComm( &comm );
+ free(recvbuf);
+ free(sendbuf);
+ }
+ MTestFreeComm(&comm);
}
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- /* Check to make sure that aliasing is disallowed correctly */
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (MPI_SUCCESS == MPI_Alltoall(&rank, 1, MPI_INT, &rank, 1, MPI_INT, MPI_COMM_WORLD))
- errs++;
-#endif
-
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
Because there are separate send and receive types to alltoallv,
there need to be tests to rearrange data on the fly. Not done yet.
-
+
The first test sends i items to processor i from all processors.
Currently, the test uses only MPI_INT; this is adequate for testing systems
that use point-to-point operations
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Comm comm;
- int *sbuf, *rbuf;
- int rank, size;
- int *sendcounts, *recvcounts, *rdispls, *sdispls;
- int i, j, *p, err;
-
- MTest_Init( &argc, &argv );
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+
+ MTest_Init(&argc, &argv);
err = 0;
-
- while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- /* Create the buffer */
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
- sbuf = (int *)malloc( size * size * sizeof(int) );
- rbuf = (int *)malloc( size * size * sizeof(int) );
- if (!sbuf || !rbuf) {
- fprintf( stderr, "Could not allocated buffers!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Load up the buffers */
- for (i=0; i<size*size; i++) {
- sbuf[i] = i + 100*rank;
- rbuf[i] = -i;
- }
-
- /* Create and load the arguments to alltoallv */
- sendcounts = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
- rdispls = (int *)malloc( size * sizeof(int) );
- sdispls = (int *)malloc( size * sizeof(int) );
- if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
- fprintf( stderr, "Could not allocate arg items!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
- for (i=0; i<size; i++) {
- sendcounts[i] = i;
- recvcounts[i] = rank;
- rdispls[i] = i * rank;
- sdispls[i] = (i * (i+1))/2;
- }
- MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
- rbuf, recvcounts, rdispls, MPI_INT, comm );
-
- /* Check rbuf */
- for (i=0; i<size; i++) {
- p = rbuf + rdispls[i];
- for (j=0; j<rank; j++) {
- if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
- fprintf( stderr, "[%d] got %d expected %d for %dth\n",
- rank, p[j],(i*(i+1))/2 + j, j );
- err++;
- }
- }
- }
-
- free( sdispls );
- free( sendcounts );
- free( sbuf );
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- /* check MPI_IN_PLACE, added in MPI-2.2 */
- free( rbuf );
- rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
- if (!rbuf) {
- fprintf( stderr, "Could not reallocate rbuf!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Load up the buffers */
- for (i = 0; i < size; i++) {
- recvcounts[i] = i + rank;
- rdispls[i] = i * (2 * size);
- }
- memset(rbuf, -1, size * (2 * size) * sizeof(int));
- for (i=0; i < size; i++) {
- p = rbuf + rdispls[i];
- for (j = 0; j < recvcounts[i]; ++j) {
- p[j] = 100 * rank + 10 * i + j;
+ while (MTestGetIntracommGeneral(&comm, 2, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ /* Create the buffer */
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+ sbuf = (int *) malloc(size * size * sizeof(int));
+ rbuf = (int *) malloc(size * size * sizeof(int));
+ if (!sbuf || !rbuf) {
+ fprintf(stderr, "Could not allocated buffers!\n");
+ MPI_Abort(comm, 1);
}
- }
- MPI_Alltoallv( MPI_IN_PLACE, NULL, NULL, MPI_INT,
- rbuf, recvcounts, rdispls, MPI_INT, comm );
- /* Check rbuf */
- for (i=0; i<size; i++) {
- p = rbuf + rdispls[i];
- for (j=0; j<recvcounts[i]; j++) {
- int expected = 100 * i + 10 * rank + j;
- if (p[j] != expected) {
- fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
- rank, p[j], expected, i, j);
- ++err;
- }
+
+ /* Load up the buffers */
+ for (i = 0; i < size * size; i++) {
+ sbuf[i] = i + 100 * rank;
+ rbuf[i] = -i;
}
- }
- /* Check to make sure that aliasing is disallowed correctly */
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Alltoallv(rbuf, recvcounts, rdispls, MPI_INT,
- rbuf, recvcounts, rdispls, MPI_INT, comm))
- err++;
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
+ rdispls = (int *) malloc(size * sizeof(int));
+ sdispls = (int *) malloc(size * sizeof(int));
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf(stderr, "Could not allocate arg items!\n");
+ MPI_Abort(comm, 1);
+ }
+ for (i = 0; i < size; i++) {
+ sendcounts[i] = i;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank;
+ sdispls[i] = (i * (i + 1)) / 2;
+ }
+ MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, comm);
+
+ /* Check rbuf */
+ for (i = 0; i < size; i++) {
+ p = rbuf + rdispls[i];
+ for (j = 0; j < rank; j++) {
+ if (p[j] != i * 100 + (rank * (rank + 1)) / 2 + j) {
+ fprintf(stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j], (i * (i + 1)) / 2 + j, j);
+ err++;
+ }
+ }
+ }
+
+ free(sdispls);
+ free(sendcounts);
+ free(sbuf);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* check MPI_IN_PLACE, added in MPI-2.2 */
+ free(rbuf);
+ rbuf = (int *) malloc(size * (2 * size) * sizeof(int));
+ if (!rbuf) {
+ fprintf(stderr, "Could not reallocate rbuf!\n");
+ MPI_Abort(comm, 1);
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < size; i++) {
+ recvcounts[i] = i + rank;
+ rdispls[i] = i * (2 * size);
+ }
+ memset(rbuf, -1, size * (2 * size) * sizeof(int));
+ for (i = 0; i < size; i++) {
+ p = rbuf + rdispls[i];
+ for (j = 0; j < recvcounts[i]; ++j) {
+ p[j] = 100 * rank + 10 * i + j;
+ }
+ }
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, comm);
+ /* Check rbuf */
+ for (i = 0; i < size; i++) {
+ p = rbuf + rdispls[i];
+ for (j = 0; j < recvcounts[i]; j++) {
+ int expected = 100 * i + 10 * rank + j;
+ if (p[j] != expected) {
+ fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
+ rank, p[j], expected, i, j);
+ ++err;
+ }
+ }
+ }
#endif
- free( rdispls );
- free( recvcounts );
- free( rbuf );
- MTestFreeComm( &comm );
+ free(rdispls);
+ free(recvcounts);
+ free(rbuf);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
MPI_Finalize();
return 0;
}
#include <stdio.h>
/*
- This program tests MPI_Alltoallv by having processor each process
+   This program tests MPI_Alltoallv by having each process
send data to two neighbors only, using counts of 0 for the other processes.
This idiom is sometimes used for halo exchange operations.
Because there are separate send and receive types to alltoallv,
there need to be tests to rearrange data on the fly. Not done yet.
-
+
Currently, the test uses only MPI_INT; this is adequate for testing systems
that use point-to-point operations
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Comm comm;
- int *sbuf, *rbuf;
- int rank, size;
- int *sendcounts, *recvcounts, *rdispls, *sdispls;
- int i, *p, err;
- int left, right, length;
-
- MTest_Init( &argc, &argv );
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, *p, err;
+ int left, right, length;
+
+ MTest_Init(&argc, &argv);
err = 0;
-
- while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
-
- if (size < 3) continue;
-
- /* Create and load the arguments to alltoallv */
- sendcounts = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
- rdispls = (int *)malloc( size * sizeof(int) );
- sdispls = (int *)malloc( size * sizeof(int) );
- if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
- fprintf( stderr, "Could not allocate arg items!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Get the neighbors */
- left = (rank - 1 + size) % size;
- right = (rank + 1) % size;
-
- /* Set the defaults */
- for (i=0; i<size; i++) {
- sendcounts[i] = 0;
- recvcounts[i] = 0;
- rdispls[i] = 0;
- sdispls[i] = 0;
- }
-
- for (length=1; length < 66000; length = length*2+1 ) {
- /* Get the buffers */
- sbuf = (int *)malloc( 2 * length * sizeof(int) );
- rbuf = (int *)malloc( 2 * length * sizeof(int) );
- if (!sbuf || !rbuf) {
- fprintf( stderr, "Could not allocate buffers!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Load up the buffers */
- for (i=0; i<length; i++) {
- sbuf[i] = i + 100000*rank;
- sbuf[i+length] = i + 100000*rank;
- rbuf[i] = -i;
- rbuf[i+length] = -i-length;
- }
- sendcounts[left] = length;
- sendcounts[right] = length;
- recvcounts[left] = length;
- recvcounts[right] = length;
- rdispls[left] = 0;
- rdispls[right] = length;
- sdispls[left] = 0;
- sdispls[right] = length;
-
- MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
- rbuf, recvcounts, rdispls, MPI_INT, comm );
-
- /* Check rbuf */
- p = rbuf; /* left */
-
- for (i=0; i<length; i++) {
- if (p[i] != i + 100000 * left) {
- if (err < 10) {
- fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n",
- rank, left, p[i], i + 100000 * left, i );
- }
- err++;
- }
- }
-
- p = rbuf + length; /* right */
- for (i=0; i<length; i++) {
- if (p[i] != i + 100000 * right) {
- if (err < 10) {
- fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n",
- rank, right, p[i], i + 100000 * right, i );
- }
- err++;
- }
- }
-
- free( rbuf );
- free( sbuf );
- }
-
- free( sdispls );
- free( rdispls );
- free( recvcounts );
- free( sendcounts );
- MTestFreeComm( &comm );
+
+ while (MTestGetIntracommGeneral(&comm, 2, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+ if (size < 3)
+ continue;
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
+ rdispls = (int *) malloc(size * sizeof(int));
+ sdispls = (int *) malloc(size * sizeof(int));
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf(stderr, "Could not allocate arg items!\n");
+ MPI_Abort(comm, 1);
+ }
+
+ /* Get the neighbors */
+ left = (rank - 1 + size) % size;
+ right = (rank + 1) % size;
+
+ /* Set the defaults */
+ for (i = 0; i < size; i++) {
+ sendcounts[i] = 0;
+ recvcounts[i] = 0;
+ rdispls[i] = 0;
+ sdispls[i] = 0;
+ }
+
+ for (length = 1; length < 66000; length = length * 2 + 1) {
+ /* Get the buffers */
+ sbuf = (int *) malloc(2 * length * sizeof(int));
+ rbuf = (int *) malloc(2 * length * sizeof(int));
+ if (!sbuf || !rbuf) {
+ fprintf(stderr, "Could not allocate buffers!\n");
+ MPI_Abort(comm, 1);
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < length; i++) {
+ sbuf[i] = i + 100000 * rank;
+ sbuf[i + length] = i + 100000 * rank;
+ rbuf[i] = -i;
+ rbuf[i + length] = -i - length;
+ }
+ sendcounts[left] = length;
+ sendcounts[right] = length;
+ recvcounts[left] = length;
+ recvcounts[right] = length;
+ rdispls[left] = 0;
+ rdispls[right] = length;
+ sdispls[left] = 0;
+ sdispls[right] = length;
+
+ MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm);
+
+ /* Check rbuf */
+ p = rbuf; /* left */
+
+ for (i = 0; i < length; i++) {
+ if (p[i] != i + 100000 * left) {
+ if (err < 10) {
+ fprintf(stderr, "[%d from %d] got %d expected %d for %dth\n",
+ rank, left, p[i], i + 100000 * left, i);
+ }
+ err++;
+ }
+ }
+
+ p = rbuf + length; /* right */
+ for (i = 0; i < length; i++) {
+ if (p[i] != i + 100000 * right) {
+ if (err < 10) {
+ fprintf(stderr, "[%d from %d] got %d expected %d for %dth\n",
+ rank, right, p[i], i + 100000 * right, i);
+ }
+ err++;
+ }
+ }
+
+ free(rbuf);
+ free(sbuf);
+ }
+
+ free(sdispls);
+ free(rdispls);
+ free(recvcounts);
+ free(sendcounts);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
MPI_Finalize();
return 0;
}
*/
/*
- * This example is taken from MPI-The complete reference, Vol 1,
+ * This example is taken from MPI-The complete reference, Vol 1,
* pages 222-224.
- *
- * Lines after the "--CUT HERE--" were added to make this into a complete
+ *
+ * Lines after the "--CUT HERE--" were added to make this into a complete
* test program.
*/
MPI_Datatype submatrix_type(int N, int m, int n, MPI_Datatype type);
void Transpose(float *localA, float *localB, int M, int N, MPI_Comm comm);
void Transpose(float *localA, float *localB, int M, int N, MPI_Comm comm)
-/* transpose MxN matrix A that is block distributed (1-D) on
+/* transpose MxN matrix A that is block distributed (1-D) on
processes of comm onto block distributed matrix B */
{
- int i, j, extent, myrank, p, n[2], m[2];
- int lasti, lastj;
- int *sendcounts, *recvcounts;
- int *sdispls, *rdispls;
- MPI_Datatype xtype[2][2], stype[2][2], *sendtypes, *recvtypes;
-
- MTestPrintfMsg( 2, "M = %d, N = %d\n", M, N );
-
- /* compute parameters */
- MPI_Comm_size(comm, &p);
- MPI_Comm_rank(comm, &myrank);
- extent = sizeof(float);
-
- /* allocate arrays */
- sendcounts = (int *)malloc(p*sizeof(int));
- recvcounts = (int *)malloc(p*sizeof(int));
- sdispls = (int *)malloc(p*sizeof(int));
- rdispls = (int *)malloc(p*sizeof(int));
- sendtypes = (MPI_Datatype *)malloc(p*sizeof(MPI_Datatype));
- recvtypes = (MPI_Datatype *)malloc(p*sizeof(MPI_Datatype));
-
- /* compute block sizes */
- m[0] = M/p;
- m[1] = M - (p-1)*(M/p);
- n[0] = N/p;
- n[1] = N - (p-1)*(N/p);
-
- /* compute types */
- for (i=0; i <= 1; i++)
- for (j=0; j <= 1; j++) {
- xtype[i][j] = transpose_type(N, m[i], n[j], MPI_FLOAT);
- stype[i][j] = submatrix_type(M, m[i], n[j], MPI_FLOAT);
- }
-
- /* prepare collective operation arguments */
- lasti = myrank == p-1;
- for (j=0; j < p; j++) {
- lastj = j == p-1;
- sendcounts[j] = 1;
- sdispls[j] = j*n[0]*extent;
- sendtypes[j] = xtype[lasti][lastj];
- recvcounts[j] = 1;
- rdispls[j] = j*m[0]*extent;
- recvtypes[j] = stype[lastj][lasti];
- }
-
- /* communicate */
- MTestPrintfMsg( 2, "Begin Alltoallw...\n" );
- /* -- Note that the book incorrectly uses &localA and &localB
- as arguments to MPI_Alltoallw */
- MPI_Alltoallw(localA, sendcounts, sdispls, sendtypes,
- localB, recvcounts, rdispls, recvtypes, comm);
- MTestPrintfMsg( 2, "Done with Alltoallw\n" );
-
- /* Free buffers */
- free( sendcounts );
- free( recvcounts );
- free( sdispls );
- free( rdispls );
- free( sendtypes );
- free( recvtypes );
-
- /* Free datatypes */
- for (i=0; i <= 1; i++)
- for (j=0; j <= 1; j++) {
- MPI_Type_free( &xtype[i][j] );
- MPI_Type_free( &stype[i][j] );
- }
+ int i, j, extent, myrank, p, n[2], m[2];
+ int lasti, lastj;
+ int *sendcounts, *recvcounts;
+ int *sdispls, *rdispls;
+ MPI_Datatype xtype[2][2], stype[2][2], *sendtypes, *recvtypes;
+
+ MTestPrintfMsg(2, "M = %d, N = %d\n", M, N);
+
+ /* compute parameters */
+ MPI_Comm_size(comm, &p);
+ MPI_Comm_rank(comm, &myrank);
+ extent = sizeof(float);
+
+ /* allocate arrays */
+ sendcounts = (int *) malloc(p * sizeof(int));
+ recvcounts = (int *) malloc(p * sizeof(int));
+ sdispls = (int *) malloc(p * sizeof(int));
+ rdispls = (int *) malloc(p * sizeof(int));
+ sendtypes = (MPI_Datatype *) malloc(p * sizeof(MPI_Datatype));
+ recvtypes = (MPI_Datatype *) malloc(p * sizeof(MPI_Datatype));
+
+ /* compute block sizes */
+ m[0] = M / p;
+ m[1] = M - (p - 1) * (M / p);
+ n[0] = N / p;
+ n[1] = N - (p - 1) * (N / p);
+
+ /* compute types */
+ for (i = 0; i <= 1; i++)
+ for (j = 0; j <= 1; j++) {
+ xtype[i][j] = transpose_type(N, m[i], n[j], MPI_FLOAT);
+ stype[i][j] = submatrix_type(M, m[i], n[j], MPI_FLOAT);
+ }
+
+ /* prepare collective operation arguments */
+ lasti = myrank == p - 1;
+ for (j = 0; j < p; j++) {
+ lastj = j == p - 1;
+ sendcounts[j] = 1;
+ sdispls[j] = j * n[0] * extent;
+ sendtypes[j] = xtype[lasti][lastj];
+ recvcounts[j] = 1;
+ rdispls[j] = j * m[0] * extent;
+ recvtypes[j] = stype[lastj][lasti];
+ }
+
+ /* communicate */
+ MTestPrintfMsg(2, "Begin Alltoallw...\n");
+ /* -- Note that the book incorrectly uses &localA and &localB
+ * as arguments to MPI_Alltoallw */
+ MPI_Alltoallw(localA, sendcounts, sdispls, sendtypes,
+ localB, recvcounts, rdispls, recvtypes, comm);
+ MTestPrintfMsg(2, "Done with Alltoallw\n");
+
+ /* Free buffers */
+ free(sendcounts);
+ free(recvcounts);
+ free(sdispls);
+ free(rdispls);
+ free(sendtypes);
+ free(recvtypes);
+
+ /* Free datatypes */
+ for (i = 0; i <= 1; i++)
+ for (j = 0; j <= 1; j++) {
+ MPI_Type_free(&xtype[i][j]);
+ MPI_Type_free(&stype[i][j]);
+ }
}
-/* Define an n x m submatrix in a n x M local matrix (this is the
+/* Define an n x m submatrix in a n x M local matrix (this is the
destination in the transpose matrix */
MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
-/* computes a datatype for an mxn submatrix within an MxN matrix
+/* computes a datatype for an mxn submatrix within an MxN matrix
with entries of type type */
{
- /* MPI_Datatype subrow; */
- MPI_Datatype submatrix;
-
- /* The book, MPI: The Complete Reference, has the wrong type constructor
- here. Since the stride in the vector type is relative to the input
- type, the stride in the book's code is n times as long as is intended.
- Since n may not exactly divide N, it is better to simply use the
- blocklength argument in Type_vector */
- /*
- MPI_Type_contiguous(n, type, &subrow);
- MPI_Type_vector(m, 1, N, subrow, &submatrix);
- */
- MPI_Type_vector(n, m, M, type, &submatrix );
- MPI_Type_commit(&submatrix);
-
- /* Add a consistency test: the size of submatrix should be
- n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
- {
- int tsize;
- MPI_Aint textent, lb;
- MPI_Type_size( type, &tsize );
- MPI_Type_get_extent( submatrix, &lb, &textent );
-
- if (textent != tsize * (M * (n-1)+m)) {
- fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
- (long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
- }
- }
- return(submatrix);
+ /* MPI_Datatype subrow; */
+ MPI_Datatype submatrix;
+
+ /* The book, MPI: The Complete Reference, has the wrong type constructor
+ * here. Since the stride in the vector type is relative to the input
+ * type, the stride in the book's code is n times as long as is intended.
+ * Since n may not exactly divide N, it is better to simply use the
+ * blocklength argument in Type_vector */
+ /*
+ * MPI_Type_contiguous(n, type, &subrow);
+ * MPI_Type_vector(m, 1, N, subrow, &submatrix);
+ */
+ MPI_Type_vector(n, m, M, type, &submatrix);
+ MPI_Type_commit(&submatrix);
+
+ /* Add a consistency test: the size of submatrix should be
+ * n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
+ {
+ int tsize;
+ MPI_Aint textent, lb;
+ MPI_Type_size(type, &tsize);
+ MPI_Type_get_extent(submatrix, &lb, &textent);
+
+ if (textent != tsize * (M * (n - 1) + m)) {
+ fprintf(stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
+ (long) textent, (long) (tsize * (M * (n - 1) + m)), M, n, m);
+ }
+ }
+ return (submatrix);
}
/* Extract an m x n submatrix within an m x N matrix and transpose it.
Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
-/* computes a datatype for the transpose of an mxn matrix
+/* computes a datatype for the transpose of an mxn matrix
with entries of type type */
{
- MPI_Datatype subrow, subrow1, submatrix;
- MPI_Aint lb, extent;
-
- MPI_Type_vector(m, 1, N, type, &subrow);
- MPI_Type_get_extent(type, &lb, &extent);
- MPI_Type_create_resized(subrow, 0, extent, &subrow1);
- MPI_Type_contiguous(n, subrow1, &submatrix);
- MPI_Type_commit(&submatrix);
- MPI_Type_free( &subrow );
- MPI_Type_free( &subrow1 );
-
- /* Add a consistency test: the size of submatrix should be
- n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
- {
- int tsize;
- MPI_Aint textent, llb;
- MPI_Type_size( type, &tsize );
- MPI_Type_get_true_extent( submatrix, &llb, &textent );
-
- if (textent != tsize * (N * (m-1)+n)) {
- fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
- (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
- }
- }
-
- return(submatrix);
+ MPI_Datatype subrow, subrow1, submatrix;
+ MPI_Aint lb, extent;
+
+ MPI_Type_vector(m, 1, N, type, &subrow);
+ MPI_Type_get_extent(type, &lb, &extent);
+ MPI_Type_create_resized(subrow, 0, extent, &subrow1);
+ MPI_Type_contiguous(n, subrow1, &submatrix);
+ MPI_Type_commit(&submatrix);
+ MPI_Type_free(&subrow);
+ MPI_Type_free(&subrow1);
+
+ /* Add a consistency test: the size of submatrix should be
+ * n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
+ {
+ int tsize;
+ MPI_Aint textent, llb;
+ MPI_Type_size(type, &tsize);
+ MPI_Type_get_true_extent(submatrix, &llb, &textent);
+
+ if (textent != tsize * (N * (m - 1) + n)) {
+ fprintf(stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
+ (long) textent, (long) (tsize * (N * (m - 1) + n)), N, n, m);
+ }
+ }
+
+ return (submatrix);
}
/* -- CUT HERE -- */
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int gM, gN, lm, lmlast, ln, lnlast, i, j, errs = 0;
int size, rank;
float *localA, *localB;
MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
-
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
+
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
gM = 20;
gN = 30;
- /* Each block is lm x ln in size, except for the last process,
- which has lmlast x lnlast */
- lm = gM/size;
- lmlast = gM - (size - 1)*lm;
- ln = gN/size;
- lnlast = gN - (size - 1)*ln;
+ /* Each block is lm x ln in size, except for the last process,
+ * which has lmlast x lnlast */
+ lm = gM / size;
+ lmlast = gM - (size - 1) * lm;
+ ln = gN / size;
+ lnlast = gN - (size - 1) * ln;
/* Create the local matrices.
- Initialize the input matrix so that the entries are
- consequtive integers, by row, starting at 0.
+ * Initialize the input matrix so that the entries are
+     * consecutive integers, by row, starting at 0.
*/
if (rank == size - 1) {
- localA = (float *)malloc( gN * lmlast * sizeof(float) );
- localB = (float *)malloc( gM * lnlast * sizeof(float) );
- for (i=0; i<lmlast; i++) {
- for (j=0; j<gN; j++) {
- localA[i*gN+j] = (float)(i*gN+j + rank * gN * lm);
- }
- }
-
+ localA = (float *) malloc(gN * lmlast * sizeof(float));
+ localB = (float *) malloc(gM * lnlast * sizeof(float));
+ for (i = 0; i < lmlast; i++) {
+ for (j = 0; j < gN; j++) {
+ localA[i * gN + j] = (float) (i * gN + j + rank * gN * lm);
+ }
+ }
+
}
else {
- localA = (float *)malloc( gN * lm * sizeof(float) );
- localB = (float *)malloc( gM * ln * sizeof(float) );
- for (i=0; i<lm; i++) {
- for (j=0; j<gN; j++) {
- localA[i*gN+j] = (float)(i*gN+j + rank * gN * lm);
- }
- }
+ localA = (float *) malloc(gN * lm * sizeof(float));
+ localB = (float *) malloc(gM * ln * sizeof(float));
+ for (i = 0; i < lm; i++) {
+ for (j = 0; j < gN; j++) {
+ localA[i * gN + j] = (float) (i * gN + j + rank * gN * lm);
+ }
+ }
}
- MTestPrintfMsg( 2, "Allocated local arrays\n" );
+ MTestPrintfMsg(2, "Allocated local arrays\n");
/* Transpose */
- Transpose( localA, localB, gM, gN, comm );
+ Transpose(localA, localB, gM, gN, comm);
/* check the transposed matrix
- In the global matrix, the transpose has consequtive integers,
- organized by columns.
+     * In the global matrix, the transpose has consecutive integers,
+ * organized by columns.
*/
if (rank == size - 1) {
- for (i=0; i<lnlast; i++) {
- for (j=0; j<gM; j++) {
- int expected = i+gN*j + rank * ln;
- if ((int)localB[i*gM+j] != expected) {
- if (errs < MAX_ERRORS)
- printf( "Found %d but expected %d\n",
- (int)localB[i*gM+j], expected );
- errs++;
- }
- }
- }
-
+ for (i = 0; i < lnlast; i++) {
+ for (j = 0; j < gM; j++) {
+ int expected = i + gN * j + rank * ln;
+ if ((int) localB[i * gM + j] != expected) {
+ if (errs < MAX_ERRORS)
+ printf("Found %d but expected %d\n", (int) localB[i * gM + j], expected);
+ errs++;
+ }
+ }
+ }
+
}
else {
- for (i=0; i<ln; i++) {
- for (j=0; j<gM; j++) {
- int expected = i+gN*j + rank * ln;
- if ((int)localB[i*gM+j] != expected) {
- if (errs < MAX_ERRORS)
- printf( "Found %d but expected %d\n",
- (int)localB[i*gM+j], expected );
- errs++;
- }
- }
- }
+ for (i = 0; i < ln; i++) {
+ for (j = 0; j < gM; j++) {
+ int expected = i + gN * j + rank * ln;
+ if ((int) localB[i * gM + j] != expected) {
+ if (errs < MAX_ERRORS)
+ printf("Found %d but expected %d\n", (int) localB[i * gM + j], expected);
+ errs++;
+ }
+ }
+ }
}
/* Free storage */
- free( localA );
- free( localB );
+ free(localA);
+ free(localB);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
Because there are separate send and receive types to alltoallw,
there need to be tests to rearrange data on the fly. Not done yet.
-
+
The first test sends i items to processor i from all processors.
Currently, the test uses only MPI_INT; this is adequate for testing systems
that use point-to-point operations
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Comm comm;
- int *sbuf, *rbuf;
- int rank, size;
- int *sendcounts, *recvcounts, *rdispls, *sdispls;
- int i, j, *p, err;
+ int *sbuf, *rbuf;
+ int rank, size;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
MPI_Datatype *sendtypes, *recvtypes;
-
- MTest_Init( &argc, &argv );
+
+ MTest_Init(&argc, &argv);
err = 0;
-
- while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- /* Create the buffer */
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
- sbuf = (int *)malloc( size * size * sizeof(int) );
- rbuf = (int *)malloc( size * size * sizeof(int) );
- if (!sbuf || !rbuf) {
- fprintf( stderr, "Could not allocated buffers!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Load up the buffers */
- for (i=0; i<size*size; i++) {
- sbuf[i] = i + 100*rank;
- rbuf[i] = -i;
- }
-
- /* Create and load the arguments to alltoallv */
- sendcounts = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
- rdispls = (int *)malloc( size * sizeof(int) );
- sdispls = (int *)malloc( size * sizeof(int) );
- sendtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
- recvtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
- if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
- fprintf( stderr, "Could not allocate arg items!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
- /* Note that process 0 sends no data (sendcounts[0] = 0) */
- for (i=0; i<size; i++) {
- sendcounts[i] = i;
- recvcounts[i] = rank;
- rdispls[i] = i * rank * sizeof(int);
- sdispls[i] = (((i+1) * (i))/2) * sizeof(int);
- sendtypes[i] = recvtypes[i] = MPI_INT;
- }
- MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
- rbuf, recvcounts, rdispls, recvtypes, comm );
-
- /* Check rbuf */
- for (i=0; i<size; i++) {
- p = rbuf + rdispls[i]/sizeof(int);
- for (j=0; j<rank; j++) {
- if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
- fprintf( stderr, "[%d] got %d expected %d for %dth\n",
- rank, p[j],(i*(i+1))/2 + j, j );
- err++;
- }
- }
- }
-
- free(sendtypes);
- free(sdispls);
- free(sendcounts);
- free(sbuf);
+
+ while (MTestGetIntracommGeneral(&comm, 2, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ /* Create the buffer */
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+ sbuf = (int *) malloc(size * size * sizeof(int));
+ rbuf = (int *) malloc(size * size * sizeof(int));
+ if (!sbuf || !rbuf) {
+ fprintf(stderr, "Could not allocated buffers!\n");
+ MPI_Abort(comm, 1);
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < size * size; i++) {
+ sbuf[i] = i + 100 * rank;
+ rbuf[i] = -i;
+ }
+
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
+ rdispls = (int *) malloc(size * sizeof(int));
+ sdispls = (int *) malloc(size * sizeof(int));
+ sendtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
+ recvtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
+ fprintf(stderr, "Could not allocate arg items!\n");
+ MPI_Abort(comm, 1);
+ }
+ /* Note that process 0 sends no data (sendcounts[0] = 0) */
+ for (i = 0; i < size; i++) {
+ sendcounts[i] = i;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank * sizeof(int);
+ sdispls[i] = (((i + 1) * (i)) / 2) * sizeof(int);
+ sendtypes[i] = recvtypes[i] = MPI_INT;
+ }
+ MPI_Alltoallw(sbuf, sendcounts, sdispls, sendtypes,
+ rbuf, recvcounts, rdispls, recvtypes, comm);
+
+ /* Check rbuf */
+ for (i = 0; i < size; i++) {
+ p = rbuf + rdispls[i] / sizeof(int);
+ for (j = 0; j < rank; j++) {
+ if (p[j] != i * 100 + (rank * (rank + 1)) / 2 + j) {
+ fprintf(stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j], (i * (i + 1)) / 2 + j, j);
+ err++;
+ }
+ }
+ }
+
+ free(sendtypes);
+ free(sdispls);
+ free(sendcounts);
+ free(sbuf);
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- /* check MPI_IN_PLACE, added in MPI-2.2 */
- free( rbuf );
- rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
- if (!rbuf) {
- fprintf( stderr, "Could not reallocate rbuf!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Load up the buffers */
- for (i = 0; i < size; i++) {
- /* alltoallw displs are in bytes, not in type extents */
- rdispls[i] = i * (2 * size) * sizeof(int);
- recvtypes[i] = MPI_INT;
- recvcounts[i] = i + rank;
- }
- memset(rbuf, -1, size * (2 * size) * sizeof(int));
- for (i=0; i < size; i++) {
- p = rbuf + (rdispls[i] / sizeof(int));
- for (j = 0; j < recvcounts[i]; ++j) {
- p[j] = 100 * rank + 10 * i + j;
+ /* check MPI_IN_PLACE, added in MPI-2.2 */
+ free(rbuf);
+ rbuf = (int *) malloc(size * (2 * size) * sizeof(int));
+ if (!rbuf) {
+ fprintf(stderr, "Could not reallocate rbuf!\n");
+ MPI_Abort(comm, 1);
}
- }
-
- MPI_Alltoallw( MPI_IN_PLACE, NULL, NULL, NULL,
- rbuf, recvcounts, rdispls, recvtypes, comm );
-
- /* Check rbuf */
- for (i=0; i<size; i++) {
- p = rbuf + (rdispls[i] / sizeof(int));
- for (j=0; j<recvcounts[i]; j++) {
- int expected = 100 * i + 10 * rank + j;
- if (p[j] != expected) {
- fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
- rank, p[j], expected, i, j);
- ++err;
- }
+
+ /* Load up the buffers */
+ for (i = 0; i < size; i++) {
+ /* alltoallw displs are in bytes, not in type extents */
+ rdispls[i] = i * (2 * size) * sizeof(int);
+ recvtypes[i] = MPI_INT;
+ recvcounts[i] = i + rank;
+ }
+ memset(rbuf, -1, size * (2 * size) * sizeof(int));
+ for (i = 0; i < size; i++) {
+ p = rbuf + (rdispls[i] / sizeof(int));
+ for (j = 0; j < recvcounts[i]; ++j) {
+ p[j] = 100 * rank + 10 * i + j;
+ }
}
- }
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Alltoallw(rbuf, recvcounts, rdispls, recvtypes,
- rbuf, recvcounts, rdispls, recvtypes, comm))
- err++;
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, rbuf, recvcounts, rdispls, recvtypes, comm);
+
+ /* Check rbuf */
+ for (i = 0; i < size; i++) {
+ p = rbuf + (rdispls[i] / sizeof(int));
+ for (j = 0; j < recvcounts[i]; j++) {
+ int expected = 100 * i + 10 * rank + j;
+ if (p[j] != expected) {
+ fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
+ rank, p[j], expected, i, j);
+ ++err;
+ }
+ }
+ }
#endif
- free(recvtypes);
- free(rdispls);
- free(recvcounts);
- free(rbuf);
- MTestFreeComm( &comm );
+ free(recvtypes);
+ free(rdispls);
+ free(recvcounts);
+ free(rbuf);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
MPI_Finalize();
return 0;
}
recvcounts = malloc(size * sizeof(int));
sdispls = malloc(size * sizeof(int));
rdispls = malloc(size * sizeof(int));
- if (!sendtypes || !recvtypes ||
- !sendcounts || !recvcounts ||
- !sdispls || !rdispls)
- {
+ if (!sendtypes || !recvtypes || !sendcounts || !recvcounts || !sdispls || !rdispls) {
printf("error, unable to allocate memory\n");
goto fn_exit;
}
/* try zero-counts on both the send and recv side in case only one direction is broken for some reason */
- MPI_Alltoallw(&sendbuf, sendcounts, sdispls, sendtypes, &recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD);
- MPI_Alltoallw(&sendbuf, recvcounts, rdispls, recvtypes, &recvbuf, sendcounts, sdispls, sendtypes, MPI_COMM_WORLD);
+ MPI_Alltoallw(&sendbuf, sendcounts, sdispls, sendtypes, &recvbuf, recvcounts, rdispls,
+ recvtypes, MPI_COMM_WORLD);
+ MPI_Alltoallw(&sendbuf, recvcounts, rdispls, recvtypes, &recvbuf, sendcounts, sdispls,
+ sendtypes, MPI_COMM_WORLD);
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
/* pass MPI_IN_PLACE and different but compatible types rank is even/odd */
if (rank % 2)
- MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD);
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, recvcounts, rdispls, recvtypes,
+ MPI_COMM_WORLD);
else
- MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, sendcounts, sdispls, sendtypes, MPI_COMM_WORLD);
+ MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL, &recvbuf, sendcounts, sdispls, sendtypes,
+ MPI_COMM_WORLD);
#endif
/* now the same for Alltoallv instead of Alltoallw */
- MPI_Alltoallv(&sendbuf, sendcounts, sdispls, sendtypes[0], &recvbuf, recvcounts, rdispls, recvtypes[0], MPI_COMM_WORLD);
- MPI_Alltoallv(&sendbuf, recvcounts, rdispls, recvtypes[0], &recvbuf, sendcounts, sdispls, sendtypes[0], MPI_COMM_WORLD);
+ MPI_Alltoallv(&sendbuf, sendcounts, sdispls, sendtypes[0], &recvbuf, recvcounts, rdispls,
+ recvtypes[0], MPI_COMM_WORLD);
+ MPI_Alltoallv(&sendbuf, recvcounts, rdispls, recvtypes[0], &recvbuf, sendcounts, sdispls,
+ sendtypes[0], MPI_COMM_WORLD);
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
if (rank % 2)
- MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, recvcounts, rdispls, recvtypes[0], MPI_COMM_WORLD);
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, recvcounts, rdispls,
+ recvtypes[0], MPI_COMM_WORLD);
else
- MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, sendcounts, sdispls, sendtypes[0], MPI_COMM_WORLD);
+ MPI_Alltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, &recvbuf, sendcounts, sdispls,
+ sendtypes[0], MPI_COMM_WORLD);
#endif
MPI_Type_free(&sendtype);
if (rank == 0)
printf(" No Errors\n");
-fn_exit:
- if (rdispls) free(rdispls);
- if (sdispls) free(sdispls);
- if (recvcounts) free(recvcounts);
- if (sendcounts) free(sendcounts);
- if (recvtypes) free(recvtypes);
- if (sendtypes) free(sendtypes);
+ fn_exit:
+ if (rdispls)
+ free(rdispls);
+ if (sdispls)
+ free(sdispls);
+ if (recvcounts)
+ free(recvcounts);
+ if (sendcounts)
+ free(sendcounts);
+ if (recvtypes)
+ free(recvtypes);
+ if (sendtypes)
+ free(sendtypes);
MPI_Finalize();
return 0;
}
-
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes";
+*/
+
+int main(int argc, char *argv[])
+{
+ int errs = 0, err;
+ int rank, size, root;
+ int minsize = 2, count;
+ MPI_Comm comm;
+ MTestDatatype sendtype, recvtype;
+
+ MTest_Init(&argc, &argv);
+
+ /* The following illustrates the use of the routines to
+ * run through a selection of communicators and datatypes.
+ * Use subsets of these for tests that do not involve combinations
+ * of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+#if defined BCAST_COMM_WORLD_ONLY
+ if (comm != MPI_COMM_WORLD) {
+ MTestFreeComm(&comm);
+ continue;
+ }
+#endif /* BCAST_COMM_WORLD_ONLY */
+
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
+
+ MTEST_DATATYPE_FOR_EACH_COUNT(count) {
+
+ /* To shorten test time, only run the default version of datatype tests
+ * for comm world and run the minimum version for other communicators. */
+#if defined BCAST_MIN_DATATYPES_ONLY
+ MTestInitMinDatatypes();
+#endif /* BCAST_MIN_DATATYPES_ONLY */
+
+ while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
+ for (root = 0; root < size; root++) {
+ if (rank == root) {
+ sendtype.InitBuf(&sendtype);
+ err = MPI_Bcast(sendtype.buf, sendtype.count,
+ sendtype.datatype, root, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ }
+ else {
+ recvtype.InitBuf(&recvtype);
+ err = MPI_Bcast(recvtype.buf, recvtype.count,
+ recvtype.datatype, root, comm);
+ if (err) {
+ errs++;
+ fprintf(stderr, "Error with communicator %s and datatype %s\n",
+ MTestGetIntracommName(), MTestGetDatatypeName(&recvtype));
+ MTestPrintError(err);
+ }
+ err = MTestCheckRecv(0, &recvtype);
+ if (err) {
+ errs += errs;
+ }
+ }
+ }
+ MTestFreeDatatype(&recvtype);
+ MTestFreeDatatype(&sendtype);
+ }
+ }
+ MTestFreeComm(&comm);
+ }
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
+++ /dev/null
-/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
-/*
- *
- * (C) 2003 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-#include "mpi.h"
-#include <stdio.h>
-#include "mpitest.h"
-
-/*
-static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes";
-*/
-
-int main( int argc, char *argv[] )
-{
- int errs = 0, err;
- int rank, size, root;
- int minsize = 2, count;
- MPI_Comm comm;
- MTestDatatype sendtype, recvtype;
-
- MTest_Init( &argc, &argv );
-
- /* The following illustrates the use of the routines to
- run through a selection of communicators and datatypes.
- Use subsets of these for tests that do not involve combinations
- of communicators, datatypes, and counts of datatypes */
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
-
- /* The max value of count must be very large to ensure that we
- reach the long message algorithms */
- for (count = 1; count < 2800; count = count * 4) {
- while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
- for (root=0; root<size; root++) {
- if (rank == root) {
- sendtype.InitBuf( &sendtype );
- err = MPI_Bcast( sendtype.buf, sendtype.count,
- sendtype.datatype, root, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- else {
- recvtype.InitBuf( &recvtype );
- err = MPI_Bcast( recvtype.buf, recvtype.count,
- recvtype.datatype, root, comm );
- if (err) {
- errs++;
- fprintf( stderr, "Error with communicator %s and datatype %s\n",
- MTestGetIntracommName(),
- MTestGetDatatypeName( &recvtype ) );
- MTestPrintError( err );
- }
- err = MTestCheckRecv( 0, &recvtype );
- if (err) {
- errs += errs;
- }
- }
- }
- MTestFreeDatatype( &recvtype );
- MTestFreeDatatype( &sendtype );
- }
- }
- MTestFreeComm( &comm );
- }
-
- MTest_Finalize( errs );
- MPI_Finalize();
- return 0;
-}
+++ /dev/null
-/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
-/*
- *
- * (C) 2003 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-#include "mpi.h"
-#include <stdio.h>
-#include "mpitest.h"
-
-/*
-static char MTEST_Descrip[] = "Test of broadcast with various roots and datatypes and sizes that are not powers of two";
-*/
-
-int main( int argc, char *argv[] )
-{
- int errs = 0, err;
- int rank, size, root;
- int minsize = 2, count;
- MPI_Comm comm;
- MTestDatatype sendtype, recvtype;
-
- MTest_Init( &argc, &argv );
-
- /* The following illustrates the use of the routines to
- run through a selection of communicators and datatypes.
- Use subsets of these for tests that do not involve combinations
- of communicators, datatypes, and counts of datatypes */
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- count = 1;
- /* This must be very large to ensure that we reach the long message
- algorithms */
- for (count = 4; count < 6600; count = count * 4) {
- while (MTestGetDatatypes( &sendtype, &recvtype, count-1 )) {
- for (root=0; root<size; root++) {
- if (rank == root) {
- sendtype.InitBuf( &sendtype );
- err = MPI_Bcast( sendtype.buf, sendtype.count,
- sendtype.datatype, root, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- else {
- recvtype.InitBuf( &recvtype );
- err = MPI_Bcast( recvtype.buf, recvtype.count,
- recvtype.datatype, root, comm );
- if (err) {
- errs++;
- fprintf( stderr, "Error with communicator %s and datatype %s\n",
- MTestGetIntracommName(),
- MTestGetDatatypeName( &recvtype ) );
- MTestPrintError( err );
- }
- err = MTestCheckRecv( 0, &recvtype );
- if (err) {
- errs += errs;
- }
- }
- }
- MTestFreeDatatype( &recvtype );
- MTestFreeDatatype( &sendtype );
- }
- }
- MTestFreeComm( &comm );
- }
-
- MTest_Finalize( errs );
- MPI_Finalize();
- return 0;
-}
#define NUM_REPS 5
#define NUM_SIZES 4
-int main( int argc, char **argv)
+int main(int argc, char **argv)
{
int *buf;
int i, rank, reps, n;
int bVerify = 1;
- int sizes[NUM_SIZES] = { 100, 64*1024, 128*1024, 1024*1024 };
- int num_errors=0;
-
- MTest_Init( &argc, &argv );
+ int sizes[NUM_SIZES] = { 100, 64 * 1024, 128 * 1024, 1024 * 1024 };
+ int num_errors = 0;
+
+ MTest_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (argc > 1)
- {
- if (strcmp(argv[1], "-novalidate") == 0 || strcmp(argv[1], "-noverify") == 0)
- bVerify = 0;
+ if (argc > 1) {
+ if (strcmp(argv[1], "-novalidate") == 0 || strcmp(argv[1], "-noverify") == 0)
+ bVerify = 0;
}
- buf = (int *) malloc(sizes[NUM_SIZES-1]*sizeof(int));
- memset(buf, 0, sizes[NUM_SIZES-1]*sizeof(int));
+ buf = (int *) malloc(sizes[NUM_SIZES - 1] * sizeof(int));
+ memset(buf, 0, sizes[NUM_SIZES - 1] * sizeof(int));
- for (n=0; n<NUM_SIZES; n++)
- {
+ for (n = 0; n < NUM_SIZES; n++) {
#ifdef DEBUG
- if (rank == ROOT)
- {
- printf("bcasting %d MPI_INTs %d times\n", sizes[n], NUM_REPS);
- fflush(stdout);
- }
+ if (rank == ROOT) {
+ printf("bcasting %d MPI_INTs %d times\n", sizes[n], NUM_REPS);
+ fflush(stdout);
+ }
#endif
- for (reps=0; reps < NUM_REPS; reps++)
- {
- if (bVerify)
- {
- if (rank == ROOT)
- {
- for (i=0; i<sizes[n]; i++)
- {
- buf[i] = 1000000 * (n * NUM_REPS + reps) + i;
- }
- }
- else
- {
- for (i=0; i<sizes[n]; i++)
- {
+ for (reps = 0; reps < NUM_REPS; reps++) {
+ if (bVerify) {
+ if (rank == ROOT) {
+ for (i = 0; i < sizes[n]; i++) {
+ buf[i] = 1000000 * (n * NUM_REPS + reps) + i;
+ }
+ }
+ else {
+ for (i = 0; i < sizes[n]; i++) {
buf[i] = -1 - (n * NUM_REPS + reps);
- }
- }
- }
+ }
+ }
+ }
# ifdef DEBUG
- {
- printf("rank=%d, n=%d, reps=%d\n", rank, n, reps);
- }
+ {
+ printf("rank=%d, n=%d, reps=%d\n", rank, n, reps);
+ }
# endif
-
- MPI_Bcast(buf, sizes[n], MPI_INT, ROOT, MPI_COMM_WORLD);
- if (bVerify)
- {
- num_errors = 0;
- for (i=0; i<sizes[n]; i++)
- {
- if (buf[i] != 1000000 * (n * NUM_REPS + reps) + i)
- {
- num_errors++;
- if (num_errors < 10)
- {
- printf("Error: Rank=%d, n=%d, reps=%d, i=%d, buf[i]=%d expected=%d\n", rank, n, reps, i, buf[i],
- 1000000 * (n * NUM_REPS + reps) +i);
- fflush(stdout);
- }
- }
- }
- if (num_errors >= 10)
- {
- printf("Error: Rank=%d, num_errors = %d\n", rank, num_errors);
- fflush(stdout);
- }
- }
- }
+ MPI_Bcast(buf, sizes[n], MPI_INT, ROOT, MPI_COMM_WORLD);
+
+ if (bVerify) {
+ num_errors = 0;
+ for (i = 0; i < sizes[n]; i++) {
+ if (buf[i] != 1000000 * (n * NUM_REPS + reps) + i) {
+ num_errors++;
+ if (num_errors < 10) {
+ printf("Error: Rank=%d, n=%d, reps=%d, i=%d, buf[i]=%d expected=%d\n",
+ rank, n, reps, i, buf[i], 1000000 * (n * NUM_REPS + reps) + i);
+ fflush(stdout);
+ }
+ }
+ }
+ if (num_errors >= 10) {
+ printf("Error: Rank=%d, num_errors = %d\n", rank, num_errors);
+ fflush(stdout);
+ }
+ }
+ }
}
-
+
free(buf);
- MTest_Finalize( num_errors );
+ MTest_Finalize(num_errors);
MPI_Finalize();
return 0;
}
/* a random non-zero sized buffer */
#define NELEM (10)
- buf = malloc(NELEM*sizeof(int));
- assert(buf!=NULL);
+ buf = malloc(NELEM * sizeof(int));
+ assert(buf);
for (i = 0; i < NELEM; i++) {
buf[i] = wrank * NELEM + i;
assert(buf[i] == wrank * NELEM + i);
}
+ free(buf);
+
MPI_Type_free(&type);
MPI_Finalize();
#define BAD_ANSWER 100000
-int assoc ( int *, int *, int *, MPI_Datatype * );
+int assoc(int *, int *, int *, MPI_Datatype *);
/*
- The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
   order, independent of the root.
*/
-int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
- int i;
- for ( i=0; i<*len; i++ ) {
- if (inoutvec[i] <= invec[i] ) {
- int rank;
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
- rank, inoutvec[0], invec[0] );
- inoutvec[i] = BAD_ANSWER;
- }
- else
- inoutvec[i] = invec[i];
- }
- return (1);
+ int i;
+ for (i = 0; i < *len; i++) {
+ if (inoutvec[i] <= invec[i]) {
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ fprintf(stderr, "[%d] inout[0] = %d, in[0] = %d\n", rank, inoutvec[0], invec[0]);
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
+ return (1);
}
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size;
- int data;
- int errors=0;
- int result = -100;
- MPI_Op op;
+ int rank, size;
+ int data;
+ int errors = 0;
+ int result = -100;
+ MPI_Op op;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
data = rank;
- MPI_Op_create( (MPI_User_function*)assoc, 0, &op );
- MPI_Reduce ( &data, &result, 1, MPI_INT, op, size-1, MPI_COMM_WORLD );
- MPI_Bcast ( &result, 1, MPI_INT, size-1, MPI_COMM_WORLD );
- MPI_Op_free( &op );
- if (result == BAD_ANSWER) errors++;
+ MPI_Op_create((MPI_User_function *) assoc, 0, &op);
+ MPI_Reduce(&data, &result, 1, MPI_INT, op, size - 1, MPI_COMM_WORLD);
+ MPI_Bcast(&result, 1, MPI_INT, size - 1, MPI_COMM_WORLD);
+ MPI_Op_free(&op);
+ if (result == BAD_ANSWER)
+ errors++;
- MTest_Finalize( errors );
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#include <stdio.h>
#include "mpitest.h"
-void addem ( int *, int *, int *, MPI_Datatype * );
-void assoc ( int *, int *, int *, MPI_Datatype * );
+void addem(int *, int *, int *, MPI_Datatype *);
+void assoc(int *, int *, int *, MPI_Datatype *);
-void addem(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
- int i;
- for ( i=0; i<*len; i++ )
- inoutvec[i] += invec[i];
+ int i;
+ for (i = 0; i < *len; i++)
+ inoutvec[i] += invec[i];
}
#define BAD_ANSWER 100000
/*
- The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
   order, independent of the root.
*/
-void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
- int i;
- for ( i=0; i<*len; i++ ) {
- if (inoutvec[i] <= invec[i] ) {
- int rank;
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
- rank, inoutvec[0], invec[0] );
- inoutvec[i] = BAD_ANSWER;
- }
- else
- inoutvec[i] = invec[i];
- }
+ int i;
+ for (i = 0; i < *len; i++) {
+ if (inoutvec[i] <= invec[i]) {
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ fprintf(stderr, "[%d] inout[0] = %d, in[0] = %d\n", rank, inoutvec[0], invec[0]);
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
}
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i;
- int data;
- int errors=0;
- int result = -100;
- int correct_result;
- MPI_Op op_assoc, op_addem;
+ int rank, size, i;
+ int data;
+ int errors = 0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op_assoc, op_addem;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
data = rank;
correct_result = 0;
- for (i=0;i<=rank;i++)
- correct_result += i;
+ for (i = 0; i <= rank; i++)
+ correct_result += i;
- MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ MPI_Scan(&data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error suming ints with scan\n", rank );
- errors++;
- }
+ fprintf(stderr, "[%d] Error suming ints with scan\n", rank);
+ errors++;
+ }
- MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ MPI_Scan(&data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank );
- errors++;
- }
+ fprintf(stderr, "[%d] Error summing ints with scan (2)\n", rank);
+ errors++;
+ }
data = rank;
result = -100;
- MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc );
- MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem );
- MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD );
+ MPI_Op_create((MPI_User_function *) assoc, 0, &op_assoc);
+ MPI_Op_create((MPI_User_function *) addem, 1, &op_addem);
+ MPI_Scan(&data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error summing ints with scan (userop)\n",
- rank );
- errors++;
- }
+ fprintf(stderr, "[%d] Error summing ints with scan (userop)\n", rank);
+ errors++;
+ }
- MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD );
+ MPI_Scan(&data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n",
- rank );
- errors++;
- }
+ fprintf(stderr, "[%d] Error summing ints with scan (userop2)\n", rank);
+ errors++;
+ }
result = -100;
data = rank;
- MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, MPI_COMM_WORLD );
+ MPI_Scan(&data, &result, 1, MPI_INT, op_assoc, MPI_COMM_WORLD);
if (result == BAD_ANSWER) {
- fprintf( stderr, "[%d] Error scanning with non-commutative op\n",
- rank );
- errors++;
- }
+ fprintf(stderr, "[%d] Error scanning with non-commutative op\n", rank);
+ errors++;
+ }
- MPI_Op_free( &op_assoc );
- MPI_Op_free( &op_addem );
+ MPI_Op_free(&op_assoc);
+ MPI_Op_free(&op_addem);
- MTest_Finalize( errors );
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#define TABLE_SIZE 2
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size;
- double a[TABLE_SIZE];
- struct { double a; int b; } in[TABLE_SIZE], out[TABLE_SIZE];
- int i;
- int errors = 0;
+ int rank, size;
+ double a[TABLE_SIZE];
+ struct {
+ double a;
+ int b;
+ } in[TABLE_SIZE], out[TABLE_SIZE];
+ int i;
+ int errors = 0, toterrors;
- /* Initialize the environment and some variables */
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ /* Initialize the environment and some variables */
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
- /* Initialize the maxloc data */
- for ( i=0; i<TABLE_SIZE; i++ ) a[i] = 0;
- for ( i=rank; i<TABLE_SIZE; i++ ) a[i] = (double)rank + 1.0;
+ /* Initialize the maxloc data */
+ for (i = 0; i < TABLE_SIZE; i++)
+ a[i] = 0;
+ for (i = rank; i < TABLE_SIZE; i++)
+ a[i] = (double) rank + 1.0;
- /* Copy data to the "in" buffer */
- for (i=0; i<TABLE_SIZE; i++) {
- in[i].a = a[i];
- in[i].b = rank;
- }
+ /* Copy data to the "in" buffer */
+ for (i = 0; i < TABLE_SIZE; i++) {
+ in[i].a = a[i];
+ in[i].b = rank;
+ }
- /* Reduce it! */
- MPI_Reduce( in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MAXLOC, 0, MPI_COMM_WORLD );
- MPI_Bcast ( out, TABLE_SIZE, MPI_DOUBLE_INT, 0, MPI_COMM_WORLD );
+ /* Reduce it! */
+ MPI_Reduce(in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MAXLOC, 0, MPI_COMM_WORLD);
+ MPI_Bcast(out, TABLE_SIZE, MPI_DOUBLE_INT, 0, MPI_COMM_WORLD);
- /* Check to see that we got the right answers */
- for (i=0; i<TABLE_SIZE; i++)
- if (i % size == rank)
- if (out[i].b != rank) {
- printf("MAX (ranks[%d] = %d != %d\n", i, out[i].b, rank );
- errors++;
- }
+ /* Check to see that we got the right answers */
+ for (i = 0; i < TABLE_SIZE; i++)
+ if (i % size == rank)
+ if (out[i].b != rank) {
+ printf("MAX (ranks[%d] = %d != %d\n", i, out[i].b, rank);
+ errors++;
+ }
- /* Initialize the minloc data */
- for ( i=0; i<TABLE_SIZE; i++ ) a[i] = 0;
- for ( i=rank; i<TABLE_SIZE; i++ ) a[i] = -(double)rank - 1.0;
+ /* Initialize the minloc data */
+ for (i = 0; i < TABLE_SIZE; i++)
+ a[i] = 0;
+ for (i = rank; i < TABLE_SIZE; i++)
+ a[i] = -(double) rank - 1.0;
- /* Copy data to the "in" buffer */
- for (i=0; i<TABLE_SIZE; i++) {
- in[i].a = a[i];
- in[i].b = rank;
- }
+ /* Copy data to the "in" buffer */
+ for (i = 0; i < TABLE_SIZE; i++) {
+ in[i].a = a[i];
+ in[i].b = rank;
+ }
- /* Reduce it! */
- MPI_Allreduce( in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD );
+ /* Reduce it! */
+ MPI_Allreduce(in, out, TABLE_SIZE, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD);
- /* Check to see that we got the right answers */
- for (i=0; i<TABLE_SIZE; i++)
- if (i % size == rank)
- if (out[i].b != rank) {
- printf("MIN (ranks[%d] = %d != %d\n", i, out[i].b, rank );
- errors++;
- }
+ /* Check to see that we got the right answers */
+ for (i = 0; i < TABLE_SIZE; i++)
+ if (i % size == rank)
+ if (out[i].b != rank) {
+ printf("MIN (ranks[%d] = %d != %d\n", i, out[i].b, rank);
+ errors++;
+ }
- /* Finish up! */
- MTest_Finalize( errors );
- MPI_Finalize();
- return MTestReturnValue( errors );
+ /* Finish up! */
+ MTest_Finalize(errors);
+ MPI_Finalize();
+ return MTestReturnValue(errors);
}
*/
#include "mpi.h"
-/*
+/*
From: hook@nas.nasa.gov (Edward C. Hook)
*/
#define EXIT_FAILURE 1
#endif
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int rank, size;
int chunk = 128;
int i;
int *sb;
int *rb;
- int status;
+ int status, gstatus;
- MTest_Init(&argc,&argv);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- MPI_Comm_size(MPI_COMM_WORLD,&size);
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
- for ( i=1 ; i < argc ; ++i ) {
- if ( argv[i][0] != '-' )
- continue;
- switch(argv[i][1]) {
- case 'm':
- chunk = atoi(argv[++i]);
- break;
- default:
- fprintf(stderr,"Unrecognized argument %s\n",
- argv[i]);
- MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
- exit(EXIT_FAILURE);
- }
+ for (i = 1; i < argc; ++i) {
+ if (argv[i][0] != '-')
+ continue;
+ switch (argv[i][1]) {
+ case 'm':
+ chunk = atoi(argv[++i]);
+ break;
+ default:
+ fprintf(stderr, "Unrecognized argument %s\n", argv[i]);
+ MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+ }
}
- sb = (int *)malloc(size*chunk*sizeof(int));
- if ( !sb ) {
- perror( "can't allocate send buffer" );
- MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
- exit(EXIT_FAILURE);
+ sb = (int *) malloc(size * chunk * sizeof(int));
+ if (!sb) {
+ perror("can't allocate send buffer");
+ MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
- rb = (int *)malloc(size*chunk*sizeof(int));
- if ( !rb ) {
- perror( "can't allocate recv buffer");
- free(sb);
- MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE);
- exit(EXIT_FAILURE);
+ rb = (int *) malloc(size * chunk * sizeof(int));
+ if (!rb) {
+ perror("can't allocate recv buffer");
+ free(sb);
+ MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
- for ( i=0 ; i < size*chunk ; ++i ) {
- sb[i] = rank + 1;
- rb[i] = 0;
+ for (i = 0; i < size * chunk; ++i) {
+ sb[i] = rank + 1;
+ rb[i] = 0;
}
/* fputs("Before MPI_Alltoall\n",stdout); */
/* This should really send MPI_CHAR, but since sb and rb were allocated
- as chunk*size*sizeof(int), the buffers are large enough */
- status = MPI_Alltoall(sb,chunk,MPI_INT,rb,chunk,MPI_INT,
- MPI_COMM_WORLD);
+ * as chunk*size*sizeof(int), the buffers are large enough */
+ status = MPI_Alltoall(sb, chunk, MPI_INT, rb, chunk, MPI_INT, MPI_COMM_WORLD);
/* fputs("Before MPI_Allreduce\n",stdout); */
- MTest_Finalize( status );
+ MTest_Finalize(status);
free(sb);
free(rb);
MPI_Finalize();
- return MTestReturnValue( status );
+ return MTestReturnValue(status);
}
-
#include "mpi.h"
#include <stdio.h>
#include "mpitest.h"
+#include "mpicolltest.h"
#define MAX_PROCESSES 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i,j;
- int table[MAX_PROCESSES][MAX_PROCESSES];
- int errors=0;
- int participants;
+ int rank, size, i, j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors = 0;
+ int participants;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
/* A maximum of MAX_PROCESSES processes can participate */
- if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
- else participants = size;
+ if (size > MAX_PROCESSES)
+ participants = MAX_PROCESSES;
+ else
+ participants = size;
if (MAX_PROCESSES % participants) {
- fprintf( stderr, "Number of processors must divide %d\n",
- MAX_PROCESSES );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- if ( (rank < participants) ) {
+ fprintf(stderr, "Number of processors must divide %d\n", MAX_PROCESSES);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ if ((rank < participants)) {
- /* Determine what rows are my responsibility */
- int block_size = MAX_PROCESSES / participants;
- int begin_row = rank * block_size;
- int end_row = (rank+1) * block_size;
- int send_count = block_size * MAX_PROCESSES;
- int recv_count = send_count;
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank + 1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+ int recv_count = send_count;
- /* Paint my rows my color */
- for (i=begin_row; i<end_row ;i++)
- for (j=0; j<MAX_PROCESSES; j++)
- table[i][j] = rank + 10;
+ /* Paint my rows my color */
+ for (i = begin_row; i < end_row; i++)
+ for (j = 0; j < MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
- /* Gather everybody's result together - sort of like an */
- /* inefficient allgather */
- for (i=0; i<participants; i++) {
- void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
- MPI_Gather(sendbuf, send_count, MPI_INT,
- &table[0][0], recv_count, MPI_INT, i,
- MPI_COMM_WORLD );
- }
+ /* Gather everybody's result together - sort of like an */
+ /* inefficient allgather */
+ for (i = 0; i < participants; i++) {
+ void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
+ MTest_Gather(sendbuf, send_count, MPI_INT,
+ &table[0][0], recv_count, MPI_INT, i, MPI_COMM_WORLD);
+ }
- /* Everybody should have the same table now, */
- /* This test does not in any way guarantee there are no errors */
- /* Print out a table or devise a smart test to make sure it's correct */
- for (i=0; i<MAX_PROCESSES;i++) {
- if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
- errors++;
- }
- }
+ /* Everybody should have the same table now, */
+ /* This test does not in any way guarantee there are no errors */
+ /* Print out a table or devise a smart test to make sure it's correct */
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ if ((table[i][0] - table[i][MAX_PROCESSES - 1] != 0))
+ errors++;
+ }
+ }
- MTest_Finalize( errors );
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#define MAX_PROCESSES 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i,j;
- int table[MAX_PROCESSES][MAX_PROCESSES];
- int errors=0;
- int participants;
- int displs[MAX_PROCESSES];
- int recv_counts[MAX_PROCESSES];
+ int rank, size, i, j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors = 0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int recv_counts[MAX_PROCESSES];
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
/* A maximum of MAX_PROCESSES processes can participate */
- if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
- else participants = size;
+ if (size > MAX_PROCESSES)
+ participants = MAX_PROCESSES;
+ else
+ participants = size;
/* while (MAX_PROCESSES % participants) participants--; */
if (MAX_PROCESSES % participants) {
- fprintf( stderr, "Number of processors must divide %d\n",
- MAX_PROCESSES );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- if ( (rank < participants) ) {
+ fprintf(stderr, "Number of processors must divide %d\n", MAX_PROCESSES);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ if ((rank < participants)) {
- /* Determine what rows are my responsibility */
- int block_size = MAX_PROCESSES / participants;
- int begin_row = rank * block_size;
- int end_row = (rank+1) * block_size;
- int send_count = block_size * MAX_PROCESSES;
-
- /* Fill in the displacements and recv_counts */
- for (i=0; i<participants; i++) {
- displs[i] = i * block_size * MAX_PROCESSES;
- recv_counts[i] = send_count;
- }
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank + 1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
- /* Paint my rows my color */
- for (i=begin_row; i<end_row ;i++)
- for (j=0; j<MAX_PROCESSES; j++)
- table[i][j] = rank + 10;
-
- /* Gather everybody's result together - sort of like an */
- /* inefficient allgather */
- for (i=0; i<participants; i++) {
- void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
- MPI_Gatherv(sendbuf, send_count, MPI_INT,
- &table[0][0], recv_counts, displs, MPI_INT,
- i, MPI_COMM_WORLD);
- }
+ /* Fill in the displacements and recv_counts */
+ for (i = 0; i < participants; i++) {
+ displs[i] = i * block_size * MAX_PROCESSES;
+ recv_counts[i] = send_count;
+ }
+ /* Paint my rows my color */
+ for (i = begin_row; i < end_row; i++)
+ for (j = 0; j < MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
- /* Everybody should have the same table now.
+ /* Gather everybody's result together - sort of like an */
+ /* inefficient allgather */
+ for (i = 0; i < participants; i++) {
+ void *sendbuf = (i == rank ? MPI_IN_PLACE : &table[begin_row][0]);
+ MPI_Gatherv(sendbuf, send_count, MPI_INT,
+ &table[0][0], recv_counts, displs, MPI_INT, i, MPI_COMM_WORLD);
+ }
- The entries are:
- Table[i][j] = (i/block_size) + 10;
- */
- for (i=0; i<MAX_PROCESSES;i++)
- if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
- errors++;
- for (i=0; i<MAX_PROCESSES;i++) {
- for (j=0; j<MAX_PROCESSES;j++) {
- if (table[i][j] != (i/block_size) + 10) errors++;
- }
- }
- if (errors) {
- /* Print out table if there are any errors */
- for (i=0; i<MAX_PROCESSES;i++) {
- printf("\n");
- for (j=0; j<MAX_PROCESSES; j++)
- printf(" %d",table[i][j]);
- }
- printf("\n");
- }
- }
- MTest_Finalize( errors );
+ /* Everybody should have the same table now.
+ *
+ * The entries are:
+ * Table[i][j] = (i/block_size) + 10;
+ */
+ for (i = 0; i < MAX_PROCESSES; i++)
+ if ((table[i][0] - table[i][MAX_PROCESSES - 1] != 0))
+ errors++;
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ for (j = 0; j < MAX_PROCESSES; j++) {
+ if (table[i][j] != (i / block_size) + 10)
+ errors++;
+ }
+ }
+ if (errors) {
+ /* Print out table if there are any errors */
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ printf("\n");
+ for (j = 0; j < MAX_PROCESSES; j++)
+ printf(" %d", table[i][j]);
+ }
+ printf("\n");
+ }
+ }
+
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#define MAX_PROCESSES 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i,j;
- int table[MAX_PROCESSES][MAX_PROCESSES];
- int row[MAX_PROCESSES];
- int errors=0;
- int participants;
- MPI_Comm comm;
+ int rank, size, i, j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int row[MAX_PROCESSES];
+ int errors = 0;
+ int participants;
+ MPI_Comm comm;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
comm = MPI_COMM_WORLD;
/* A maximum of MAX_PROCESSES processes can participate */
- if ( size > MAX_PROCESSES ) {
- participants = MAX_PROCESSES;
- MPI_Comm_split( MPI_COMM_WORLD, rank < MAX_PROCESSES, rank, &comm );
+ if (size > MAX_PROCESSES) {
+ participants = MAX_PROCESSES;
+ MPI_Comm_split(MPI_COMM_WORLD, rank < MAX_PROCESSES, rank, &comm);
}
- else {
- participants = size;
- MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+ else {
+ participants = size;
+ MPI_Comm_dup(MPI_COMM_WORLD, &comm);
}
- if ( (rank < participants) ) {
- int send_count = MAX_PROCESSES;
- int recv_count = MAX_PROCESSES;
-
- /* If I'm the root (process 0), then fill out the big table */
- if (rank == 0)
- for ( i=0; i<participants; i++)
- for ( j=0; j<MAX_PROCESSES; j++ )
- table[i][j] = i+j;
-
- /* Scatter the big table to everybody's little table */
- MPI_Scatter(&table[0][0], send_count, MPI_INT,
- &row[0] , recv_count, MPI_INT, 0, comm );
-
- /* Now see if our row looks right */
- for (i=0; i<MAX_PROCESSES; i++)
- if ( row[i] != i+rank ) errors++;
- }
-
- MPI_Comm_free( &comm );
-
- MTest_Finalize( errors );
+ if ((rank < participants)) {
+ int send_count = MAX_PROCESSES;
+ int recv_count = MAX_PROCESSES;
+
+ /* If I'm the root (process 0), then fill out the big table */
+ if (rank == 0)
+ for (i = 0; i < participants; i++)
+ for (j = 0; j < MAX_PROCESSES; j++)
+ table[i][j] = i + j;
+
+ /* Scatter the big table to everybody's little table */
+ MPI_Scatter(&table[0][0], send_count, MPI_INT, &row[0], recv_count, MPI_INT, 0, comm);
+
+ /* Now see if our row looks right */
+ for (i = 0; i < MAX_PROCESSES; i++)
+ if (row[i] != i + rank)
+ errors++;
+ }
+
+ MPI_Comm_free(&comm);
+
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#define MAX_PROCESSES 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i,j;
- int table[MAX_PROCESSES][MAX_PROCESSES];
- int row[MAX_PROCESSES];
- int errors=0;
- int participants;
- int displs[MAX_PROCESSES];
- int send_counts[MAX_PROCESSES];
-
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ int rank, size, i, j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int row[MAX_PROCESSES];
+ int errors = 0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int send_counts[MAX_PROCESSES];
+
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
/* A maximum of MAX_PROCESSES processes can participate */
- if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
- else participants = size;
- if ( (rank < participants) ) {
- int recv_count = MAX_PROCESSES;
-
- /* If I'm the root (process 0), then fill out the big table */
- /* and setup send_counts and displs arrays */
- if (rank == 0)
- for ( i=0; i<participants; i++) {
- send_counts[i] = recv_count;
- displs[i] = i * MAX_PROCESSES;
- for ( j=0; j<MAX_PROCESSES; j++ )
- table[i][j] = i+j;
- }
-
- /* Scatter the big table to everybody's little table */
- MPI_Scatterv(&table[0][0], send_counts, displs, MPI_INT,
- &row[0] , recv_count, MPI_INT, 0, MPI_COMM_WORLD);
-
- /* Now see if our row looks right */
- for (i=0; i<MAX_PROCESSES; i++)
- if ( row[i] != i+rank ) errors++;
- }
-
- MTest_Finalize( errors );
+ if (size > MAX_PROCESSES)
+ participants = MAX_PROCESSES;
+ else
+ participants = size;
+ if ((rank < participants)) {
+ int recv_count = MAX_PROCESSES;
+
+ /* If I'm the root (process 0), then fill out the big table */
+ /* and setup send_counts and displs arrays */
+ if (rank == 0)
+ for (i = 0; i < participants; i++) {
+ send_counts[i] = recv_count;
+ displs[i] = i * MAX_PROCESSES;
+ for (j = 0; j < MAX_PROCESSES; j++)
+ table[i][j] = i + j;
+ }
+
+ /* Scatter the big table to everybody's little table */
+ MPI_Scatterv(&table[0][0], send_counts, displs, MPI_INT,
+ &row[0], recv_count, MPI_INT, 0, MPI_COMM_WORLD);
+
+ /* Now see if our row looks right */
+ for (i = 0; i < MAX_PROCESSES; i++)
+ if (row[i] != i + rank)
+ errors++;
+ }
+
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#define MAX_PROCESSES 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i,j;
- int table[MAX_PROCESSES][MAX_PROCESSES];
- int errors=0;
- int participants;
- int displs[MAX_PROCESSES];
- int recv_counts[MAX_PROCESSES];
- MPI_Comm test_comm;
+ int rank, size, i, j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors = 0;
+ int participants;
+ int displs[MAX_PROCESSES];
+ int recv_counts[MAX_PROCESSES];
+ MPI_Comm test_comm;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
/* A maximum of MAX_PROCESSES processes can participate */
- participants = ( size > MAX_PROCESSES ) ? MAX_PROCESSES : size;
+ participants = (size > MAX_PROCESSES) ? MAX_PROCESSES : size;
if (MAX_PROCESSES % participants) {
- fprintf( stderr, "Number of processors must divide %d\n",
- MAX_PROCESSES );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- MPI_Comm_split(MPI_COMM_WORLD, rank<participants, rank, &test_comm);
+ fprintf(stderr, "Number of processors must divide %d\n", MAX_PROCESSES);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ MPI_Comm_split(MPI_COMM_WORLD, rank < participants, rank, &test_comm);
- if ( rank < participants ) {
+ if (rank < participants) {
- /* Determine what rows are my responsibility */
- int block_size = MAX_PROCESSES / participants;
- int begin_row = rank * block_size;
- int end_row = (rank+1) * block_size;
- int send_count = block_size * MAX_PROCESSES;
-
- /* Fill in the displacements and recv_counts */
- for (i=0; i<participants; i++) {
- displs[i] = i * block_size * MAX_PROCESSES;
- recv_counts[i] = send_count;
- }
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank + 1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
- /* Paint my rows my color */
- for (i=begin_row; i<end_row ;i++)
- for (j=0; j<MAX_PROCESSES; j++)
- table[i][j] = rank + 10;
-
- /* Everybody gets the gathered data */
- if ((char *) &table[begin_row][0] != (char *) table + displs[rank]*sizeof(int))
- MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
- &table[0][0], recv_counts, displs,
- MPI_INT, test_comm);
- else
- MPI_Allgatherv(MPI_IN_PLACE, send_count, MPI_INT,
- &table[0][0], recv_counts, displs,
- MPI_INT, test_comm);
+ /* Fill in the displacements and recv_counts */
+ for (i = 0; i < participants; i++) {
+ displs[i] = i * block_size * MAX_PROCESSES;
+ recv_counts[i] = send_count;
+ }
- /* Everybody should have the same table now.
+ /* Paint my rows my color */
+ for (i = begin_row; i < end_row; i++)
+ for (j = 0; j < MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
- The entries are:
- Table[i][j] = (i/block_size) + 10;
- */
- for (i=0; i<MAX_PROCESSES;i++)
- if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
- errors++;
- for (i=0; i<MAX_PROCESSES;i++) {
- for (j=0; j<MAX_PROCESSES;j++) {
- if (table[i][j] != (i/block_size) + 10) errors++;
- }
- }
- if (errors) {
- /* Print out table if there are any errors */
- for (i=0; i<MAX_PROCESSES;i++) {
- printf("\n");
- for (j=0; j<MAX_PROCESSES; j++)
- printf(" %d",table[i][j]);
- }
- printf("\n");
- }
- }
+ /* Everybody gets the gathered data */
+ if ((char *) &table[begin_row][0] != (char *) table + displs[rank] * sizeof(int))
+ MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
+ &table[0][0], recv_counts, displs, MPI_INT, test_comm);
+ else
+ MPI_Allgatherv(MPI_IN_PLACE, send_count, MPI_INT,
+ &table[0][0], recv_counts, displs, MPI_INT, test_comm);
- MTest_Finalize( errors );
+ /* Everybody should have the same table now.
+ *
+ * The entries are:
+ * Table[i][j] = (i/block_size) + 10;
+ */
+ for (i = 0; i < MAX_PROCESSES; i++)
+ if ((table[i][0] - table[i][MAX_PROCESSES - 1] != 0))
+ errors++;
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ for (j = 0; j < MAX_PROCESSES; j++) {
+ if (table[i][j] != (i / block_size) + 10)
+ errors++;
+ }
+ }
+ if (errors) {
+ /* Print out table if there are any errors */
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ printf("\n");
+ for (j = 0; j < MAX_PROCESSES; j++)
+ printf(" %d", table[i][j]);
+ }
+ printf("\n");
+ }
+ }
+
+ MTest_Finalize(errors);
MPI_Comm_free(&test_comm);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#define MAX_PROCESSES 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i,j;
- int table[MAX_PROCESSES][MAX_PROCESSES];
- int errors=0;
- int participants;
+ int rank, size, i, j;
+ int table[MAX_PROCESSES][MAX_PROCESSES];
+ int errors = 0;
+ int participants;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
/* A maximum of MAX_PROCESSES processes can participate */
- if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES;
- else participants = size;
+ if (size > MAX_PROCESSES)
+ participants = MAX_PROCESSES;
+ else
+ participants = size;
if (MAX_PROCESSES % participants) {
- fprintf( stderr, "Number of processors must divide %d\n",
- MAX_PROCESSES );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
+ fprintf(stderr, "Number of processors must divide %d\n", MAX_PROCESSES);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
/* while (MAX_PROCESSES % participants) participants--; */
- if ( (rank < participants) ) {
-
- /* Determine what rows are my responsibility */
- int block_size = MAX_PROCESSES / participants;
- int begin_row = rank * block_size;
- int end_row = (rank+1) * block_size;
- int send_count = block_size * MAX_PROCESSES;
- int recv_count = send_count;
-
- /* Paint my rows my color */
- for (i=begin_row; i<end_row ;i++)
- for (j=0; j<MAX_PROCESSES; j++)
- table[i][j] = rank + 10;
-
- /* Everybody gets the gathered table */
- MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
- &table[0][0], recv_count, MPI_INT, MPI_COMM_WORLD);
-
- /* Everybody should have the same table now, */
- /* This test does not in any way guarantee there are no errors */
- /* Print out a table or devise a smart test to make sure it's correct */
- for (i=0; i<MAX_PROCESSES;i++) {
- if ( (table[i][0] - table[i][MAX_PROCESSES-1] !=0) )
- errors++;
- }
- }
-
- MTest_Finalize( errors );
+ if ((rank < participants)) {
+
+ /* Determine what rows are my responsibility */
+ int block_size = MAX_PROCESSES / participants;
+ int begin_row = rank * block_size;
+ int end_row = (rank + 1) * block_size;
+ int send_count = block_size * MAX_PROCESSES;
+ int recv_count = send_count;
+
+ /* Paint my rows my color */
+ for (i = begin_row; i < end_row; i++)
+ for (j = 0; j < MAX_PROCESSES; j++)
+ table[i][j] = rank + 10;
+
+ /* Everybody gets the gathered table */
+ MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
+ &table[0][0], recv_count, MPI_INT, MPI_COMM_WORLD);
+
+ /* Everybody should have the same table now, */
+ /* This test does not in any way guarantee there are no errors */
+ /* Print out a table or devise a smart test to make sure it's correct */
+ for (i = 0; i < MAX_PROCESSES; i++) {
+ if ((table[i][0] - table[i][MAX_PROCESSES - 1] != 0))
+ errors++;
+ }
+ }
+
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#include <stdio.h>
#include "mpitest.h"
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i;
- int data;
- int errors=0;
- int result = -100;
- int correct_result;
+ int rank, size, i;
+ int data;
+ int errors = 0;
+ int result = -100;
+ int correct_result;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
data = rank;
- MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
- MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
+ MPI_Reduce(&data, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
correct_result = 0;
- for(i=0;i<size;i++)
- correct_result += i;
- if (result != correct_result) errors++;
+ for (i = 0; i < size; i++)
+ correct_result += i;
+ if (result != correct_result)
+ errors++;
- MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD );
- MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
- if (result != 0) errors++;
+ MPI_Reduce(&data, &result, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
+ MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
+ if (result != 0)
+ errors++;
- MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD );
- MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
- if (result != (size-1)) errors++;
+ MPI_Reduce(&data, &result, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+ MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
+ if (result != (size - 1))
+ errors++;
- MTest_Finalize( errors );
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
#include <stdio.h>
#include "mpitest.h"
-void addem ( int *, int *, int *, MPI_Datatype * );
+void addem(int *, int *, int *, MPI_Datatype *);
-void addem(int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
- int i;
- for ( i=0; i<*len; i++ )
- inoutvec[i] += invec[i];
+ int i;
+ for (i = 0; i < *len; i++)
+ inoutvec[i] += invec[i];
}
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i;
- int data;
- int errors=0;
- int result = -100;
- int correct_result;
- MPI_Op op;
+ int rank, size, i;
+ int data;
+ int errors = 0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op;
- MTest_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
data = rank;
- MPI_Op_create( (MPI_User_function *)addem, 1, &op );
- MPI_Reduce ( &data, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD );
- MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD );
- MPI_Op_free( &op );
+ MPI_Op_create((MPI_User_function *) addem, 1, &op);
+ MPI_Reduce(&data, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD);
+ MPI_Bcast(&result, 1, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Op_free(&op);
correct_result = 0;
- for(i=0;i<size;i++)
- correct_result += i;
- if (result != correct_result) errors++;
+ for (i = 0; i < size; i++)
+ correct_result += i;
+ if (result != correct_result)
+ errors++;
- MTest_Finalize( errors );
+ MTest_Finalize(errors);
MPI_Finalize();
- return MTestReturnValue( errors );
+ return MTestReturnValue(errors);
}
static char MTEST_Descrip[] = "Test MPI_Exscan";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- int minsize = 2, count;
+ int minsize = 2, count;
int *sendbuf, *recvbuf, i;
- MPI_Comm comm;
+ MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- /* The following illustrates the use of the routines to
- run through a selection of communicators and datatypes.
- Use subsets of these for tests that do not involve combinations
- of communicators, datatypes, and counts of datatypes */
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
+ /* The following illustrates the use of the routines to
+ * run through a selection of communicators and datatypes.
+ * Use subsets of these for tests that do not involve combinations
+ * of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (count = 1; count < 65000; count = count * 2) {
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- sendbuf = (int *)malloc( count * sizeof(int) );
- recvbuf = (int *)malloc( count * sizeof(int) );
+ for (count = 1; count < 65000; count = count * 2) {
- for (i=0; i<count; i++) {
- sendbuf[i] = rank + i * size;
- recvbuf[i] = -1;
- }
-
- MPI_Exscan( sendbuf, recvbuf, count, MPI_INT, MPI_SUM, comm );
+ sendbuf = (int *) malloc(count * sizeof(int));
+ recvbuf = (int *) malloc(count * sizeof(int));
- /* Check the results. rank 0 has no data */
- if (rank > 0) {
- int result;
- for (i=0; i<count; i++) {
- result = rank * i * size + ((rank) * (rank-1))/2;
- if (recvbuf[i] != result) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
- i, recvbuf[i], rank, result );
- }
- }
- }
- }
+ for (i = 0; i < count; i++) {
+ sendbuf[i] = rank + i * size;
+ recvbuf[i] = -1;
+ }
+
+ MPI_Exscan(sendbuf, recvbuf, count, MPI_INT, MPI_SUM, comm);
+
+ /* Check the results. rank 0 has no data */
+ if (rank > 0) {
+ int result;
+ for (i = 0; i < count; i++) {
+ result = rank * i * size + ((rank) * (rank - 1)) / 2;
+ if (recvbuf[i] != result) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
+ i, recvbuf[i], rank, result);
+ }
+ }
+ }
+ }
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
/* now try the MPI_IN_PLACE flavor */
- for (i=0; i<count; i++) {
- sendbuf[i] = -1; /* unused */
+ for (i = 0; i < count; i++) {
+ sendbuf[i] = -1; /* unused */
recvbuf[i] = rank + i * size;
}
- MPI_Exscan( MPI_IN_PLACE, recvbuf, count, MPI_INT, MPI_SUM, comm );
+ MPI_Exscan(MPI_IN_PLACE, recvbuf, count, MPI_INT, MPI_SUM, comm);
/* Check the results. rank 0's data must remain unchanged */
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
int result;
if (rank == 0)
result = rank + i * size;
else
- result = rank * i * size + ((rank) * (rank-1))/2;
+ result = rank * i * size + ((rank) * (rank - 1)) / 2;
if (recvbuf[i] != result) {
errs++;
if (errs < 10) {
- fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
- i, recvbuf[i], rank, result );
+ fprintf(stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
+ i, recvbuf[i], rank, result);
}
}
}
-
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- /* Make sure that we check for buffer aliasing properly */
- if (MPI_SUCCESS == MPI_Exscan( recvbuf, recvbuf, count, MPI_INT, MPI_SUM, comm ))
- errs++;
#endif
- free( sendbuf );
- free( recvbuf );
- }
- MTestFreeComm( &comm );
+ free(sendbuf);
+ free(recvbuf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test MPI_Exscan (simple test)";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
int sendbuf[1], recvbuf[1];
- MPI_Comm comm;
+ MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
sendbuf[0] = rank;
recvbuf[0] = -2;
-
- MPI_Exscan( sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm );
+
+ MPI_Exscan(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm);
/* Check the results. rank 0 has no data. Input is
- 0 1 2 3 4 5 6 7 8 ...
- Output is
- - 0 1 3 6 10 15 21 28 36
- (scan, not counting the contribution from the calling process)
- */
+ * 0 1 2 3 4 5 6 7 8 ...
+ * Output is
+ * - 0 1 3 6 10 15 21 28 36
+ * (scan, not counting the contribution from the calling process)
+ */
if (rank > 0) {
- int result = (((rank) * (rank-1))/2);
- /* printf( "%d: %d\n", rank, result ); */
- if (recvbuf[0] != result) {
- errs++;
- fprintf( stderr, "Error in recvbuf = %d on %d, expected %d\n",
- recvbuf[0], rank, result );
- }
+ int result = (((rank) * (rank - 1)) / 2);
+ /* printf("%d: %d\n", rank, result); */
+ if (recvbuf[0] != result) {
+ errs++;
+ fprintf(stderr, "Error in recvbuf = %d on %d, expected %d\n", recvbuf[0], rank, result);
+ }
}
else if (recvbuf[0] != -2) {
- errs++;
- fprintf( stderr, "Error in recvbuf on zero, is %d\n", recvbuf[0] );
+ errs++;
+ fprintf(stderr, "Error in recvbuf on zero, is %d\n", recvbuf[0]);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
/* Gather data from a vector to contiguous */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Datatype vec;
- MPI_Comm comm;
+ MPI_Comm comm;
double *vecin, *vecout;
- int minsize = 2, count;
- int root, i, n, stride, errs = 0;
- int rank, size;
+ int minsize = 2, count;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (root=0; root<size; root++) {
- for (count = 1; count < 65000; count = count * 2) {
- n = 12;
- stride = 10;
- vecin = (double *)malloc( n * stride * size * sizeof(double) );
- vecout = (double *)malloc( size * n * sizeof(double) );
-
- MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
- MPI_Type_commit( &vec );
-
- for (i=0; i<n*stride; i++) vecin[i] =-2;
- for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
-
- MPI_Gather( vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm );
-
- if (rank == root) {
- for (i=0; i<n*size; i++) {
- if (vecout[i] != i) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d\n",
- i, (int)vecout[i] );
- }
- }
- }
- }
- MPI_Type_free( &vec );
- free( vecin );
- free( vecout );
- }
- }
- MTestFreeComm( &comm );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (root = 0; root < size; root++) {
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *) malloc(n * stride * size * sizeof(double));
+ vecout = (double *) malloc(size * n * sizeof(double));
+
+ MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
+ MPI_Type_commit(&vec);
+
+ for (i = 0; i < n * stride; i++)
+ vecin[i] = -2;
+ for (i = 0; i < n; i++)
+ vecin[i * stride] = rank * n + i;
+
+ MPI_Gather(vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm);
+
+ if (rank == root) {
+ for (i = 0; i < n * size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
+ }
+ }
+ }
+ }
+ MPI_Type_free(&vec);
+ free(vecin);
+ free(vecout);
+ }
+ }
+ MTestFreeComm(&comm);
}
/* do a zero length gather */
- MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
-
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- /* Check to make sure that aliasing is disallowed correctly */
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- if (0 == rank)
- if (MPI_SUCCESS == MPI_Gather(&rank, 1, MPI_INT,
- &rank, 1, MPI_INT, 0, MPI_COMM_WORLD))
- errs++;
-#endif
+ MPI_Gather(NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
-
/* Gather data from a vector to contiguous. Use IN_PLACE */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Datatype vec;
double *vecin, *vecout;
MPI_Comm comm;
- int count, minsize = 2;
- int root, i, n, stride, errs = 0;
- int rank, size;
+ int count, minsize = 2;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (root=0; root<size; root++) {
- for (count = 1; count < 65000; count = count * 2) {
- n = 12;
- stride = 10;
- vecin = (double *)malloc( n * stride * size * sizeof(double) );
- vecout = (double *)malloc( size * n * sizeof(double) );
-
- MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
- MPI_Type_commit( &vec );
-
- for (i=0; i<n*stride; i++) vecin[i] =-2;
- for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
- int errorcode = MPI_SUCCESS;
- if (rank == root) {
- for (i=0; i<n; i++) {
- vecout[rank*n+i] = rank*n+i;
- }
- errorcode = MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
- vecout, n, MPI_DOUBLE, root, comm );
- }
- else {
- errorcode = MPI_Gather( vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL,
- root, comm );
- }
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- if (rank == root) {
- for (i=0; i<n*size; i++) {
- if (vecout[i] != i) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d, err=%d\n",
- i, (int)vecout[i], errorcode );
- }
- }
- }
- }
- MPI_Type_free( &vec );
- free( vecin );
- free( vecout );
- }
- }
- MTestFreeComm( &comm );
+ for (root = 0; root < size; root++) {
+ for (count = 1; count < 65000; count = count * 2) {
+ n = 12;
+ stride = 10;
+ vecin = (double *) malloc(n * stride * size * sizeof(double));
+ vecout = (double *) malloc(size * n * sizeof(double));
+
+ MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
+ MPI_Type_commit(&vec);
+
+ for (i = 0; i < n * stride; i++)
+ vecin[i] = -2;
+ for (i = 0; i < n; i++)
+ vecin[i * stride] = rank * n + i;
+
+ if (rank == root) {
+ for (i = 0; i < n; i++) {
+ vecout[rank * n + i] = rank * n + i;
+ }
+ MPI_Gather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
+ vecout, n, MPI_DOUBLE, root, comm);
+ }
+ else {
+ MPI_Gather(vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL, root, comm);
+ }
+ if (rank == root) {
+ for (i = 0; i < n * size; i++) {
+ if (vecout[i] != i) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
+ }
+ }
+ }
+ }
+ MPI_Type_free(&vec);
+ free(vecin);
+ free(vecout);
+ }
+ }
+ MTestFreeComm(&comm);
}
/* do a zero length gather */
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- if ( rank == 0 ) {
- MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0,
- MPI_COMM_WORLD );
- } else {
- MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (rank == 0) {
+ MPI_Gather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);
+ }
+ else {
+ MPI_Gather(NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
-
+++ /dev/null
-/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
-/*
- *
- * (C) 2003 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "mpi.h"
-#include "mpitest.h"
-#include <stdlib.h>
-#include <stdio.h>
-
-/* Gather data from a vector to contiguous. Use IN_PLACE */
-
-int main( int argc, char **argv )
-{
- MPI_Datatype vec;
- double *vecin, *vecout;
- MPI_Comm comm;
- int count, minsize = 2;
- int root, i, n, stride, errs = 0;
- int rank, size;
-
- MTest_Init( &argc, &argv );
-
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (root=0; root<size; root++) {
- for (count = 1; count < 65000; count = count * 2) {
- n = 12;
- stride = 10;
- vecin = (double *)malloc( n * stride * size * sizeof(double) );
- vecout = (double *)malloc( size * n * sizeof(double) );
-
- MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
- MPI_Type_commit( &vec );
-
- for (i=0; i<n*stride; i++) vecin[i] =-2;
- for (i=0; i<n; i++) vecin[i*stride] = rank * n + i;
- int errorcode = MPI_SUCCESS;
- if (rank == root) {
- for (i=0; i<n; i++) {
- vecout[rank*n+i] = rank*n+i;
- }
- errorcode = MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
- vecout, n, MPI_DOUBLE, root, comm );
- }
- else {
- errorcode = MPI_Gather( vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL,
- root, comm );
- }
-
- if (rank == root) {
- for (i=0; i<n*size; i++) {
- if (vecout[i] != i) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "vecout[%d]=%d, err=%d\n",
- i, (int)vecout[i], errorcode );
- }
- }
- }
- }
- MPI_Type_free( &vec );
- free( vecin );
- free( vecout );
- }
- }
- printf("end with comm size : %d\n", size);
- MTestFreeComm( &comm );
- }
-
- /* do a zero length gather */
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- if ( rank == 0 ) {
- MPI_Gather( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0,
- MPI_COMM_WORLD );
- } else {
- MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
- }
-
- MTest_Finalize( errs );
- MPI_Finalize();
- return 0;
-}
-
-
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include "mpitest.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+#define ROOT 7
+#if 0
+/* Following should always work for -n 8 256, -N 32, using longs */
+#define COUNT 1048576*32
+#endif
+#if 1
+/* Following will fail for -n 8 unless gather path is 64 bit clean */
+#define COUNT (1024*1024*128+1)
+#endif
+#define VERIFY_CONST 100000000L
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ int i, j;
+ long *sendbuf = NULL;
+ long *recvbuf = NULL;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ if (size < (ROOT+1)) {
+ fprintf(stderr, "At least %d processes required\n", ROOT+1);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ sendbuf = malloc(COUNT * sizeof(long));
+ if (sendbuf == NULL) {
+ fprintf(stderr, "PE %d:ERROR: malloc of sendbuf failed\n", rank);
+ }
+ for (i = 0; i < COUNT; i++) {
+ sendbuf[i] = (long) i + (long) rank *VERIFY_CONST;
+ }
+
+ if (rank == ROOT) {
+ recvbuf = malloc(COUNT * sizeof(long) * size);
+ if (recvbuf == NULL) {
+ fprintf(stderr, "PE %d:ERROR: malloc of recvbuf failed\n", rank);
+ }
+ for (i = 0; i < COUNT * size; i++) {
+ recvbuf[i] = -456789L;
+ }
+ }
+
+ MPI_Gather(sendbuf, COUNT, MPI_LONG, recvbuf, COUNT, MPI_LONG, ROOT, MPI_COMM_WORLD);
+
+ int lerr = 0;
+ if (rank == ROOT) {
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < COUNT; j++) {
+ if (recvbuf[i * COUNT + j] != i * VERIFY_CONST + j) {
+ printf("PE 0: mis-match error");
+ printf(" recbuf[%d * %d + %d] = ", i, COUNT, j);
+ printf(" %ld,", recvbuf[i * COUNT + j]);
+ printf(" should be %ld\n", i * VERIFY_CONST + j);
+ lerr++;
+ if (lerr > 10) {
+ j = COUNT;
+ }
+ }
+ }
+ }
+ MTest_Finalize(lerr);
+ free(recvbuf);
+ }
+ else {
+ MTest_Finalize(lerr);
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ MPI_Finalize();
+
+ free(sendbuf);
+ return 0;
+}
MPI_Request request;
int size, rank;
int one = 1, two = 2, isum, sum;
- int errs = 0;
- MPI_Init(&argc,&argv);
+ MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
assert(size == 2);
- MPI_Iallreduce(&one,&isum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD,&request);
- MPI_Allreduce(&two,&sum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
- MPI_Wait(&request,MPI_STATUS_IGNORE);
+ MPI_Iallreduce(&one, &isum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &request);
+ MPI_Allreduce(&two, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+ MPI_Wait(&request, MPI_STATUS_IGNORE);
assert(isum == 2);
assert(sum == 4);
-
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Iallreduce(&one, &one, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &request))
- errs++;
-
- if (rank == 0 && errs == 0)
+ if (rank == 0)
printf(" No errors\n");
MPI_Finalize();
return 0;
}
-
int main(int argc, char *argv[])
{
MPI_Request barrier;
- int rank,i,done;
+ int rank, i, done;
- MPI_Init(&argc,&argv);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
- MPI_Ibarrier(MPI_COMM_WORLD,&barrier);
- for (i=0,done=0; !done; i++) {
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Ibarrier(MPI_COMM_WORLD, &barrier);
+ for (i = 0, done = 0; !done; i++) {
usleep(1000);
- /*printf("[%d] MPI_Test: %d\n",rank,i);*/
- MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);
+ /*printf("[%d] MPI_Test: %d\n",rank,i); */
+ MPI_Test(&barrier, &done, MPI_STATUS_IGNORE);
}
if (rank == 0)
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm allgather test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *rbuf = 0, *sbuf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- /* The left group will send rank to the right group;
- The right group will send -rank to the left group */
- rbuf = (int *)malloc( count * rsize * sizeof(int) );
- sbuf = (int *)malloc( count * sizeof(int) );
- for (i=0; i<count*rsize; i++) rbuf[i] = -1;
- if (leftGroup) {
- for (i=0; i<count; i++) sbuf[i] = i + rank*count;
- }
- else {
- for (i=0; i<count; i++) sbuf[i] = -(i + rank*count);
- }
- err = MPI_Allgather( sbuf, count, datatype,
- rbuf, count, datatype, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- if (leftGroup) {
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != -i) {
- errs++;
- }
- }
- }
- else {
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != i) {
- errs++;
- }
- }
- }
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* The left group will send rank to the right group;
+ * The right group will send -rank to the left group */
+ rbuf = (int *) malloc(count * rsize * sizeof(int));
+ sbuf = (int *) malloc(count * sizeof(int));
+ for (i = 0; i < count * rsize; i++)
+ rbuf[i] = -1;
+ if (leftGroup) {
+ for (i = 0; i < count; i++)
+ sbuf[i] = i + rank * count;
+ }
+ else {
+ for (i = 0; i < count; i++)
+ sbuf[i] = -(i + rank * count);
+ }
+ err = MTest_Allgather(sbuf, count, datatype, rbuf, count, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ if (leftGroup) {
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != i) {
+ errs++;
+ }
+ }
+ }
- /* Use Allgather in a unidirectional way */
- for (i=0; i<count*rsize; i++) rbuf[i] = -1;
- if (leftGroup) {
- err = MPI_Allgather( sbuf, 0, datatype,
- rbuf, count, datatype, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != -i) {
- errs++;
- }
- }
- }
- else {
- err = MPI_Allgather( sbuf, count, datatype,
- rbuf, 0, datatype, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != -1) {
- errs++;
- }
- }
- }
- free( rbuf );
- free( sbuf );
- }
- MTestFreeComm( &comm );
+ /* Use Allgather in a unidirectional way */
+ for (i = 0; i < count * rsize; i++)
+ rbuf[i] = -1;
+ if (leftGroup) {
+ err = MTest_Allgather(sbuf, 0, datatype, rbuf, count, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ err = MTest_Allgather(sbuf, count, datatype, rbuf, 0, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free(rbuf);
+ free(sbuf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm allgatherv test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *rbuf = 0, *sbuf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
- /* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
+ /* Get an intercommunicator */
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- /* The left group will send rank to the right group;
- The right group will send -rank to the left group */
- rbuf = (int *)malloc( count * rsize * sizeof(int) );
- sbuf = (int *)malloc( count * sizeof(int) );
- recvcounts = (int *) malloc( rsize * sizeof(int) );
- recvdispls = (int *) malloc( rsize * sizeof(int) );
- for (i=0; i<count*rsize; i++) rbuf[i] = -1;
- for (i=0; i<rsize; i++) {
- recvcounts[i] = count;
- recvdispls[i] = i * count;
- }
- if (leftGroup) {
- for (i=0; i<count; i++) sbuf[i] = i + rank*count;
- }
- else {
- for (i=0; i<count; i++) sbuf[i] = -(i + rank*count);
- }
- err = MPI_Allgatherv( sbuf, count, datatype,
- rbuf, recvcounts, recvdispls, datatype,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- if (leftGroup) {
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != -i) {
- errs++;
- }
- }
- }
- else {
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != i) {
- errs++;
- }
- }
- }
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* The left group will send rank to the right group;
+ * The right group will send -rank to the left group */
+ rbuf = (int *) malloc(count * rsize * sizeof(int));
+ sbuf = (int *) malloc(count * sizeof(int));
+ recvcounts = (int *) malloc(rsize * sizeof(int));
+ recvdispls = (int *) malloc(rsize * sizeof(int));
+ for (i = 0; i < count * rsize; i++)
+ rbuf[i] = -1;
+ for (i = 0; i < rsize; i++) {
+ recvcounts[i] = count;
+ recvdispls[i] = i * count;
+ }
+ if (leftGroup) {
+ for (i = 0; i < count; i++)
+ sbuf[i] = i + rank * count;
+ }
+ else {
+ for (i = 0; i < count; i++)
+ sbuf[i] = -(i + rank * count);
+ }
+ err = MTest_Allgatherv(sbuf, count, datatype,
+ rbuf, recvcounts, recvdispls, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ if (leftGroup) {
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != i) {
+ errs++;
+ }
+ }
+ }
- /* Use Allgather in a unidirectional way */
- for (i=0; i<count*rsize; i++) rbuf[i] = -1;
- if (leftGroup) {
- err = MPI_Allgatherv( sbuf, 0, datatype,
- rbuf, recvcounts, recvdispls, datatype,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != -i) {
- errs++;
- }
- }
- }
- else {
- for (i=0; i<rsize; i++) {
+ /* Use Allgather in a unidirectional way */
+ for (i = 0; i < count * rsize; i++)
+ rbuf[i] = -1;
+ if (leftGroup) {
+ err = MTest_Allgatherv(sbuf, 0, datatype,
+ rbuf, recvcounts, recvdispls, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != -i) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i = 0; i < rsize; i++) {
recvcounts[i] = 0;
recvdispls[i] = 0;
}
- err = MPI_Allgatherv( sbuf, count, datatype,
- rbuf, recvcounts, recvdispls, datatype, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- for (i=0; i<count*rsize; i++) {
- if (rbuf[i] != -1) {
- errs++;
- }
- }
- }
- free( rbuf );
- free( sbuf );
- free( recvcounts );
- free( recvdispls );
+ err = MTest_Allgatherv(sbuf, count, datatype,
+ rbuf, recvcounts, recvdispls, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ for (i = 0; i < count * rsize; i++) {
+ if (rbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free(rbuf);
+ free(sbuf);
+ free(recvcounts);
+ free(recvdispls);
}
- MTestFreeComm( &comm );
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm allreduce test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *sendbuf = 0, *recvbuf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- /* printf( "rank = %d(%d)\n", rank, leftGroup ); fflush(stdout); */
- sendbuf = (int *)malloc( count * sizeof(int) );
- recvbuf = (int *)malloc( count * sizeof(int) );
- if (leftGroup) {
- for (i=0; i<count; i++) sendbuf[i] = i;
- }
- else {
- for (i=0; i<count; i++) sendbuf[i] = -i;
- }
- for (i=0; i<count; i++) recvbuf[i] = 0;
- err = MPI_Allreduce( sendbuf, recvbuf, count, datatype,
- MPI_SUM, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* In each process should be the sum of the values from the
- other process */
- if (leftGroup) {
- for (i=0; i<count; i++) {
- if (recvbuf[i] != -i * rsize) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "recvbuf[%d] = %d\n", i, recvbuf[i] );
- }
- }
- }
- }
- else {
- for (i=0; i<count; i++) {
- if (recvbuf[i] != i * rsize) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "recvbuf[%d] = %d\n", i, recvbuf[i] );
- }
- }
- }
- }
- free( sendbuf );
- free( recvbuf );
- }
- MTestFreeComm( &comm );
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* printf("rank = %d(%d)\n", rank, leftGroup); fflush(stdout); */
+ sendbuf = (int *) malloc(count * sizeof(int));
+ recvbuf = (int *) malloc(count * sizeof(int));
+ if (leftGroup) {
+ for (i = 0; i < count; i++)
+ sendbuf[i] = i;
+ }
+ else {
+ for (i = 0; i < count; i++)
+ sendbuf[i] = -i;
+ }
+ for (i = 0; i < count; i++)
+ recvbuf[i] = 0;
+ err = MTest_Allreduce(sendbuf, recvbuf, count, datatype, MPI_SUM, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+            /* Each process should now hold the sum of the values contributed
+             * by the processes in the remote group */
+ if (leftGroup) {
+ for (i = 0; i < count; i++) {
+ if (recvbuf[i] != -i * rsize) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "recvbuf[%d] = %d\n", i, recvbuf[i]);
+ }
+ }
+ }
+ }
+ else {
+ for (i = 0; i < count; i++) {
+ if (recvbuf[i] != i * rsize) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "recvbuf[%d] = %d\n", i, recvbuf[i]);
+ }
+ }
+ }
+ }
+ free(sendbuf);
+ free(recvbuf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm alltoall test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *sendbuf = 0, *recvbuf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- for (count = 1; count < 66000; count = 2 * count) {
- /* Get an intercommunicator */
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_rank( comm, &rrank );
- sendbuf = (int *)malloc( rsize * count * sizeof(int) );
- recvbuf = (int *)malloc( rsize * count * sizeof(int) );
- for (i=0; i<rsize*count; i++) recvbuf[i] = -1;
- if (leftGroup) {
- idx = 0;
- for (j=0; j<rsize; j++) {
- for (i=0; i<count; i++) {
- sendbuf[idx++] = i + rrank;
- }
- }
- err = MPI_Alltoall( sendbuf, count, datatype,
- NULL, 0, datatype, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- else {
- int rank, size;
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ for (count = 1; count < 66000; count = 2 * count) {
+ /* Get an intercommunicator */
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_rank(comm, &rrank);
+ sendbuf = (int *) malloc(rsize * count * sizeof(int));
+ recvbuf = (int *) malloc(rsize * count * sizeof(int));
+ for (i = 0; i < rsize * count; i++)
+ recvbuf[i] = -1;
+ if (leftGroup) {
+ idx = 0;
+ for (j = 0; j < rsize; j++) {
+ for (i = 0; i < count; i++) {
+ sendbuf[idx++] = i + rrank;
+ }
+ }
+ err = MTest_Alltoall(sendbuf, count, datatype, NULL, 0, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ }
+ else {
+ int rank, size;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- /* In the right group */
- err = MPI_Alltoall( NULL, 0, datatype,
- recvbuf, count, datatype, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Check that we have received the correct data */
- idx = 0;
- for (j=0; j<rsize; j++) {
- for (i=0; i<count; i++) {
- if (recvbuf[idx++] != i + j) {
- errs++;
- if (errs < 10)
- fprintf( stderr, "buf[%d] = %d on %d\n",
- i, recvbuf[i], rank );
- }
- }
- }
- }
- free( recvbuf );
- free( sendbuf );
- }
- MTestFreeComm( &comm );
+ /* In the right group */
+ err = MTest_Alltoall(NULL, 0, datatype, recvbuf, count, datatype, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Check that we have received the correct data */
+ idx = 0;
+ for (j = 0; j < rsize; j++) {
+ for (i = 0; i < count; i++) {
+ if (recvbuf[idx++] != i + j) {
+ errs++;
+ if (errs < 10)
+ fprintf(stderr, "buf[%d] = %d on %d\n", i, recvbuf[i], rank);
+ }
+ }
+ }
+ }
+ free(recvbuf);
+ free(sendbuf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include "mpitest.h"
#include <stdlib.h>
#include <stdio.h>
+#include "mpicolltest.h"
/*
This program tests MPI_Alltoallv by having processor i send different
Because there are separate send and receive types to alltoallv,
there need to be tests to rearrange data on the fly. Not done yet.
-
+
The first test sends i items to processor i from all processors.
Currently, the test uses only MPI_INT; this is adequate for testing systems
that use point-to-point operations
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Comm comm;
- int *sbuf, *rbuf;
- int rank, size, lsize, asize;
- int *sendcounts, *recvcounts, *rdispls, *sdispls;
- int i, j, *p, err;
- int leftGroup;
+ int *sbuf, *rbuf;
+ int rank, size, lsize, asize;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
+ int leftGroup;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
err = 0;
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
- /* Create the buffer */
- MPI_Comm_size( comm, &lsize );
- MPI_Comm_remote_size( comm, &size );
- asize = (lsize > size) ? lsize : size;
- MPI_Comm_rank( comm, &rank );
- sbuf = (int *)malloc( size * size * sizeof(int) );
- rbuf = (int *)malloc( asize * asize * sizeof(int) );
- if (!sbuf || !rbuf) {
- fprintf( stderr, "Could not allocated buffers!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
+ /* Create the buffer */
+ MPI_Comm_size(comm, &lsize);
+ MPI_Comm_remote_size(comm, &size);
+ asize = (lsize > size) ? lsize : size;
+ MPI_Comm_rank(comm, &rank);
+ sbuf = (int *) malloc(size * size * sizeof(int));
+ rbuf = (int *) malloc(asize * asize * sizeof(int));
+ if (!sbuf || !rbuf) {
+ fprintf(stderr, "Could not allocated buffers!\n");
+ MPI_Abort(comm, 1);
+ }
- /* Load up the buffers */
- for (i=0; i<size*size; i++) {
- sbuf[i] = i + 100*rank;
- rbuf[i] = -i;
- }
+ /* Load up the buffers */
+ for (i = 0; i < size * size; i++) {
+ sbuf[i] = i + 100 * rank;
+ rbuf[i] = -i;
+ }
- /* Create and load the arguments to alltoallv */
- sendcounts = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
- rdispls = (int *)malloc( size * sizeof(int) );
- sdispls = (int *)malloc( size * sizeof(int) );
- if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
- fprintf( stderr, "Could not allocate arg items!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
- for (i=0; i<size; i++) {
- sendcounts[i] = i;
- sdispls[i] = (i * (i+1))/2;
- recvcounts[i] = rank;
- rdispls[i] = i * rank;
- }
- MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
- rbuf, recvcounts, rdispls, MPI_INT, comm );
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
+ rdispls = (int *) malloc(size * sizeof(int));
+ sdispls = (int *) malloc(size * sizeof(int));
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
+ fprintf(stderr, "Could not allocate arg items!\n");
+ MPI_Abort(comm, 1);
+ }
+ for (i = 0; i < size; i++) {
+ sendcounts[i] = i;
+ sdispls[i] = (i * (i + 1)) / 2;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank;
+ }
+ MTest_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm);
- /* Check rbuf */
- for (i=0; i<size; i++) {
- p = rbuf + rdispls[i];
- for (j=0; j<rank; j++) {
- if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
- fprintf( stderr, "[%d] got %d expected %d for %dth\n",
- rank, p[j],(i*(i+1))/2 + j, j );
- err++;
- }
- }
- }
+ /* Check rbuf */
+ for (i = 0; i < size; i++) {
+ p = rbuf + rdispls[i];
+ for (j = 0; j < rank; j++) {
+ if (p[j] != i * 100 + (rank * (rank + 1)) / 2 + j) {
+ fprintf(stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j], (i * (i + 1)) / 2 + j, j);
+ err++;
+ }
+ }
+ }
- free( sdispls );
- free( rdispls );
- free( recvcounts );
- free( sendcounts );
- free( rbuf );
- free( sbuf );
- MTestFreeComm( &comm );
+ free(sdispls);
+ free(rdispls);
+ free(recvcounts);
+ free(sendcounts);
+ free(rbuf);
+ free(sbuf);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
MPI_Finalize();
return 0;
}
#include "mpitest.h"
#include <stdlib.h>
#include <stdio.h>
+#include "mpicolltest.h"
/*
This program tests MPI_Alltoallw by having processor i send different
Because there are separate send and receive types to alltoallw,
there need to be tests to rearrange data on the fly. Not done yet.
-
+
The first test sends i items to processor i from all processors.
Currently, the test uses only MPI_INT; this is adequate for testing systems
that use point-to-point operations
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Comm comm;
- int *sbuf, *rbuf;
- int rank, size, lsize, asize;
- int *sendcounts, *recvcounts, *rdispls, *sdispls;
- int i, j, *p, err;
+ int *sbuf, *rbuf;
+ int rank, size, lsize, asize;
+ int *sendcounts, *recvcounts, *rdispls, *sdispls;
+ int i, j, *p, err;
MPI_Datatype *sendtypes, *recvtypes;
- int leftGroup;
+ int leftGroup;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
err = 0;
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ /* Create the buffer */
+ MPI_Comm_size(comm, &lsize);
+ MPI_Comm_remote_size(comm, &size);
+ asize = (lsize > size) ? lsize : size;
+ MPI_Comm_rank(comm, &rank);
+ sbuf = (int *) malloc(size * size * sizeof(int));
+ rbuf = (int *) malloc(asize * asize * sizeof(int));
+ if (!sbuf || !rbuf) {
+ fprintf(stderr, "Could not allocated buffers!\n");
+ MPI_Abort(comm, 1);
+ }
+
+ /* Load up the buffers */
+ for (i = 0; i < size * size; i++) {
+ sbuf[i] = i + 100 * rank;
+ rbuf[i] = -i;
+ }
- /* Create the buffer */
- MPI_Comm_size( comm, &lsize );
- MPI_Comm_remote_size( comm, &size );
- asize = (lsize > size) ? lsize : size;
- MPI_Comm_rank( comm, &rank );
- sbuf = (int *)malloc( size * size * sizeof(int) );
- rbuf = (int *)malloc( asize * asize * sizeof(int) );
- if (!sbuf || !rbuf) {
- fprintf( stderr, "Could not allocated buffers!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
-
- /* Load up the buffers */
- for (i=0; i<size*size; i++) {
- sbuf[i] = i + 100*rank;
- rbuf[i] = -i;
- }
+ /* Create and load the arguments to alltoallv */
+ sendcounts = (int *) malloc(size * sizeof(int));
+ recvcounts = (int *) malloc(size * sizeof(int));
+ rdispls = (int *) malloc(size * sizeof(int));
+ sdispls = (int *) malloc(size * sizeof(int));
+ sendtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
+ recvtypes = (MPI_Datatype *) malloc(size * sizeof(MPI_Datatype));
+ if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
+ fprintf(stderr, "Could not allocate arg items!\n");
+ MPI_Abort(comm, 1);
+ }
+ /* Note that process 0 sends no data (sendcounts[0] = 0) */
+ for (i = 0; i < size; i++) {
+ sendcounts[i] = i;
+ sdispls[i] = (((i + 1) * (i)) / 2) * sizeof(int);
+ sendtypes[i] = MPI_INT;
+ recvcounts[i] = rank;
+ rdispls[i] = i * rank * sizeof(int);
+ recvtypes[i] = MPI_INT;
+ }
+ MTest_Alltoallw(sbuf, sendcounts, sdispls, sendtypes,
+ rbuf, recvcounts, rdispls, recvtypes, comm);
- /* Create and load the arguments to alltoallv */
- sendcounts = (int *)malloc( size * sizeof(int) );
- recvcounts = (int *)malloc( size * sizeof(int) );
- rdispls = (int *)malloc( size * sizeof(int) );
- sdispls = (int *)malloc( size * sizeof(int) );
- sendtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
- recvtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
- if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
- fprintf( stderr, "Could not allocate arg items!\n" );
- MPI_Abort( comm, 1 );
- exit(1);
- }
- /* Note that process 0 sends no data (sendcounts[0] = 0) */
- for (i=0; i<size; i++) {
- sendcounts[i] = i;
- sdispls[i] = (((i+1) * (i))/2) * sizeof(int);
- sendtypes[i] = MPI_INT;
- recvcounts[i] = rank;
- rdispls[i] = i * rank * sizeof(int);
- recvtypes[i] = MPI_INT;
- }
- MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
- rbuf, recvcounts, rdispls, recvtypes, comm );
-
- /* Check rbuf */
- for (i=0; i<size; i++) {
- p = rbuf + rdispls[i]/sizeof(int);
- for (j=0; j<rank; j++) {
- if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
- fprintf( stderr, "[%d] got %d expected %d for %dth\n",
- rank, p[j],(i*(i+1))/2 + j, j );
- err++;
- }
- }
- }
+ /* Check rbuf */
+ for (i = 0; i < size; i++) {
+ p = rbuf + rdispls[i] / sizeof(int);
+ for (j = 0; j < rank; j++) {
+ if (p[j] != i * 100 + (rank * (rank + 1)) / 2 + j) {
+ fprintf(stderr, "[%d] got %d expected %d for %dth\n",
+ rank, p[j], (i * (i + 1)) / 2 + j, j);
+ err++;
+ }
+ }
+ }
- free(sendtypes);
- free(recvtypes);
- free( sdispls );
- free( rdispls );
- free( recvcounts );
- free( sendcounts );
- free( rbuf );
- free( sbuf );
- MTestFreeComm( &comm );
+ free(sendtypes);
+ free(recvtypes);
+ free(sdispls);
+ free(rdispls);
+ free(recvcounts);
+ free(sendcounts);
+ free(rbuf);
+ free(sbuf);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm barrier test";
/* This only checks that the Barrier operation accepts intercommunicators.
It does not check for the semantics of a intercomm barrier (all processes
- in the local group can exit when (but not before) all processes in the
+ in the local group can exit when (but not before) all processes in the
remote group enter the barrier */
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int leftGroup;
MPI_Comm comm;
- /* MPI_Datatype datatype; */
+ MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- /* datatype = MPI_INT; */
+ datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
if (comm == MPI_COMM_NULL)
continue;
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
- if (leftGroup) {
- err = MPI_Barrier( comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- else {
- /* In the right group */
- err = MPI_Barrier( comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- MTestFreeComm( &comm );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
+ if (leftGroup) {
+ err = MTest_Barrier(comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ }
+ else {
+ /* In the right group */
+ err = MTest_Barrier(comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm broadcast test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *buf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
if (comm == MPI_COMM_NULL)
continue;
- MPI_Comm_rank( comm, &rank );
+ MPI_Comm_rank(comm, &rank);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- buf = (int *)malloc( count * sizeof(int) );
- if (leftGroup) {
- if (rank == 0) {
- for (i=0; i<count; i++) buf[i] = i;
- }
- else {
- for (i=0; i<count; i++) buf[i] = -1;
- }
- err = MPI_Bcast( buf, count, datatype,
- (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Test that no other process in this group received the
- broadcast */
- if (rank != 0) {
- for (i=0; i<count; i++) {
- if (buf[i] != -1) {
- errs++;
- }
- }
- }
- }
- else {
- /* In the right group */
- for (i=0; i<count; i++) buf[i] = -1;
- err = MPI_Bcast( buf, count, datatype, 0, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Check that we have received the correct data */
- for (i=0; i<count; i++) {
- if (buf[i] != i) {
- errs++;
- }
- }
- }
- free( buf );
- }
- MTestFreeComm( &comm );
+ for (count = 1; count < 65000; count = 2 * count) {
+ buf = (int *) malloc(count * sizeof(int));
+ if (leftGroup) {
+ if (rank == 0) {
+ for (i = 0; i < count; i++)
+ buf[i] = i;
+ }
+ else {
+ for (i = 0; i < count; i++)
+ buf[i] = -1;
+ }
+ err = MTest_Bcast(buf, count, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Test that no other process in this group received the
+ * broadcast */
+ if (rank != 0) {
+ for (i = 0; i < count; i++) {
+ if (buf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ for (i = 0; i < count; i++)
+ buf[i] = -1;
+ err = MTest_Bcast(buf, count, datatype, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Check that we have received the correct data */
+ for (i = 0; i < count; i++) {
+ if (buf[i] != i) {
+ errs++;
+ }
+ }
+ }
+ free(buf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm gather test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *buf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_size( comm, &size );
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
- for (count = 1; count < 65000; count = 2 * count) {
- if (leftGroup) {
- buf = (int *)malloc( count * rsize * sizeof(int) );
- for (i=0; i<count*rsize; i++) buf[i] = -1;
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_size(comm, &size);
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
+ for (count = 1; count < 65000; count = 2 * count) {
+ if (leftGroup) {
+ buf = (int *) malloc(count * rsize * sizeof(int));
+ for (i = 0; i < count * rsize; i++)
+ buf[i] = -1;
- err = MPI_Gather( NULL, 0, datatype,
- buf, count, datatype,
- (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Test that no other process in this group received the
- broadcast */
- if (rank != 0) {
- for (i=0; i<count; i++) {
- if (buf[i] != -1) {
- errs++;
- }
- }
- }
- else {
- /* Check for the correct data */
- for (i=0; i<count*rsize; i++) {
- if (buf[i] != i) {
- errs++;
- }
- }
- }
- }
- else {
- /* In the right group */
- buf = (int *)malloc( count * sizeof(int) );
- for (i=0; i<count; i++) buf[i] = rank * count + i;
- err = MPI_Gather( buf, count, datatype,
- NULL, 0, datatype, 0, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- free( buf );
- }
- MTestFreeComm( &comm );
+ err = MTest_Gather(NULL, 0, datatype,
+ buf, count, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+                /* Test that no process other than the root in this group
+                 * received the gathered data */
+ if (rank != 0) {
+ for (i = 0; i < count; i++) {
+ if (buf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ else {
+ /* Check for the correct data */
+ for (i = 0; i < count * rsize; i++) {
+ if (buf[i] != i) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ buf = (int *) malloc(count * sizeof(int));
+ for (i = 0; i < count; i++)
+ buf[i] = rank * count + i;
+ err = MTest_Gather(buf, count, datatype, NULL, 0, datatype, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ }
+ free(buf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm gatherv test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *buf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_size( comm, &size );
-
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_size(comm, &size);
- for (count = 1; count < 65000; count = 2 * count) {
- /* Get an intercommunicator */
- recvcounts = (int *)malloc( rsize * sizeof(int) );
- recvdispls = (int *)malloc( rsize * sizeof(int) );
- /* This simple test duplicates the Gather test,
- using the same lengths for all messages */
- for (i=0; i<rsize; i++) {
- recvcounts[i] = count;
- recvdispls[i] = count * i;
- }
- if (leftGroup) {
- buf = (int *)malloc( count * rsize * sizeof(int) );
- for (i=0; i<count*rsize; i++) buf[i] = -1;
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
- err = MPI_Gatherv( NULL, 0, datatype,
- buf, recvcounts, recvdispls, datatype,
- (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Test that no other process in this group received the
- broadcast */
- if (rank != 0) {
- for (i=0; i<count; i++) {
- if (buf[i] != -1) {
- errs++;
- }
- }
- }
- else {
- /* Check for the correct data */
- for (i=0; i<count*rsize; i++) {
- if (buf[i] != i) {
- errs++;
- }
- }
- }
- }
- else {
- /* In the right group */
- buf = (int *)malloc( count * sizeof(int) );
- for (i=0; i<count; i++) buf[i] = rank * count + i;
- err = MPI_Gatherv( buf, count, datatype,
- NULL, 0, 0, datatype, 0, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- }
- free( buf );
- free( recvcounts );
- free( recvdispls );
- }
- MTestFreeComm( &comm );
+ for (count = 1; count < 65000; count = 2 * count) {
+ /* Get an intercommunicator */
+ recvcounts = (int *) malloc(rsize * sizeof(int));
+ recvdispls = (int *) malloc(rsize * sizeof(int));
+ /* This simple test duplicates the Gather test,
+ * using the same lengths for all messages */
+ for (i = 0; i < rsize; i++) {
+ recvcounts[i] = count;
+ recvdispls[i] = count * i;
+ }
+ if (leftGroup) {
+ buf = (int *) malloc(count * rsize * sizeof(int));
+ for (i = 0; i < count * rsize; i++)
+ buf[i] = -1;
+
+ err = MTest_Gatherv(NULL, 0, datatype,
+ buf, recvcounts, recvdispls, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+                /* Test that no process other than the root in this group
+                 * received the gathered data */
+ if (rank != 0) {
+ for (i = 0; i < count; i++) {
+ if (buf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ else {
+ /* Check for the correct data */
+ for (i = 0; i < count * rsize; i++) {
+ if (buf[i] != i) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ buf = (int *) malloc(count * sizeof(int));
+ for (i = 0; i < count; i++)
+ buf[i] = rank * count + i;
+ err = MTest_Gatherv(buf, count, datatype, NULL, 0, 0, datatype, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ }
+ free(buf);
+ free(recvcounts);
+ free(recvdispls);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm reduce test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
- int *sendbuf = 0, *recvbuf=0;
+ int *sendbuf = 0, *recvbuf = 0;
int leftGroup, i, count, rank, rsize;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
if (comm == MPI_COMM_NULL)
continue;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- sendbuf = (int *)malloc( count * sizeof(int) );
- recvbuf = (int *)malloc( count * sizeof(int) );
- for (i=0; i<count; i++) {
- sendbuf[i] = -1;
- recvbuf[i] = -1;
- }
- if (leftGroup) {
- err = MPI_Reduce( sendbuf, recvbuf, count, datatype, MPI_SUM,
- (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Test that no other process in this group received the
- broadcast, and that we got the right answers */
- if (rank == 0) {
- for (i=0; i<count; i++) {
- if (recvbuf[i] != i * rsize) {
- errs++;
- }
- }
- }
- else {
- for (i=0; i<count; i++) {
- if (recvbuf[i] != -1) {
- errs++;
- }
- }
- }
- }
- else {
- /* In the right group */
- for (i=0; i<count; i++) sendbuf[i] = i;
- err = MPI_Reduce( sendbuf, recvbuf, count, datatype, MPI_SUM,
- 0, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Check that we have received no data */
- for (i=0; i<count; i++) {
- if (recvbuf[i] != -1) {
- errs++;
- }
- }
- }
- free( sendbuf );
- free( recvbuf );
- }
- MTestFreeComm( &comm );
+ for (count = 1; count < 65000; count = 2 * count) {
+ sendbuf = (int *) malloc(count * sizeof(int));
+ recvbuf = (int *) malloc(count * sizeof(int));
+ for (i = 0; i < count; i++) {
+ sendbuf[i] = -1;
+ recvbuf[i] = -1;
+ }
+ if (leftGroup) {
+ err = MTest_Reduce(sendbuf, recvbuf, count, datatype, MPI_SUM,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+            /* Test that no process other than the root in this group received
+             * the reduction result, and that the root got the right answers */
+ if (rank == 0) {
+ for (i = 0; i < count; i++) {
+ if (recvbuf[i] != i * rsize) {
+ errs++;
+ }
+ }
+ }
+ else {
+ for (i = 0; i < count; i++) {
+ if (recvbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ /* In the right group */
+ for (i = 0; i < count; i++)
+ sendbuf[i] = i;
+ err = MTest_Reduce(sendbuf, recvbuf, count, datatype, MPI_SUM, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Check that we have received no data */
+ for (i = 0; i < count; i++) {
+ if (recvbuf[i] != -1) {
+ errs++;
+ }
+ }
+ }
+ free(sendbuf);
+ free(recvbuf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm scatter test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *buf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- buf = 0;
- if (leftGroup) {
- buf = (int *)malloc( count * rsize * sizeof(int) );
- if (rank == 0) {
- for (i=0; i<count*rsize; i++) buf[i] = i;
- }
- else {
- for (i=0; i<count*rsize; i++) buf[i] = -1;
- }
- err = MPI_Scatter( buf, count, datatype,
- NULL, 0, datatype,
- (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Test that no other process in this group received the
- scatter */
- if (rank != 0) {
- for (i=0; i<count*rsize; i++) {
- if (buf[i] != -1) {
- if (errs < 10) {
- fprintf( stderr, "Received data on root group!\n" );
- }
- errs++;
- }
- }
- }
- }
- else {
- buf = (int *)malloc( count * sizeof(int) );
- /* In the right group */
- for (i=0; i<count; i++) buf[i] = -1;
- err = MPI_Scatter( NULL, 0, datatype,
- buf, count, datatype, 0, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Check that we have received the correct data */
- for (i=0; i<count; i++) {
- if (buf[i] != i + rank * count) {
- if (errs < 10)
- fprintf( stderr, "buf[%d] = %d on %d\n",
- i, buf[i], rank );
- errs++;
- }
- }
- }
- free( buf );
- }
- MTestFreeComm( &comm );
+ for (count = 1; count < 65000; count = 2 * count) {
+ buf = 0;
+ if (leftGroup) {
+ buf = (int *) malloc(count * rsize * sizeof(int));
+ if (rank == 0) {
+ for (i = 0; i < count * rsize; i++)
+ buf[i] = i;
+ }
+ else {
+ for (i = 0; i < count * rsize; i++)
+ buf[i] = -1;
+ }
+ err = MTest_Scatter(buf, count, datatype,
+ NULL, 0, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Test that no other process in this group received the
+ * scatter */
+ if (rank != 0) {
+ for (i = 0; i < count * rsize; i++) {
+ if (buf[i] != -1) {
+ if (errs < 10) {
+ fprintf(stderr, "Received data on root group!\n");
+ }
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ buf = (int *) malloc(count * sizeof(int));
+ /* In the right group */
+ for (i = 0; i < count; i++)
+ buf[i] = -1;
+ err = MTest_Scatter(NULL, 0, datatype, buf, count, datatype, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Check that we have received the correct data */
+ for (i = 0; i < count; i++) {
+ if (buf[i] != i + rank * count) {
+ if (errs < 10)
+ fprintf(stderr, "buf[%d] = %d on %d\n", i, buf[i], rank);
+ errs++;
+ }
+ }
+ }
+ free(buf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/*
static char MTEST_Descrip[] = "Simple intercomm scatterv test";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int *buf = 0;
MPI_Comm comm;
MPI_Datatype datatype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
datatype = MPI_INT;
/* Get an intercommunicator */
- while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ while (MTestGetIntercomm(&comm, &leftGroup, 4)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
- /* To improve reporting of problems about operations, we
- change the error handler to errors return */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ /* To improve reporting of problems about operations, we
+ * change the error handler to errors return */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
- for (count = 1; count < 65000; count = 2 * count) {
- buf = 0;
- sendcounts = (int *)malloc( rsize * sizeof(int) );
- senddispls = (int *)malloc( rsize * sizeof(int) );
- for (i=0; i<rsize; i++) {
- sendcounts[i] = count;
- senddispls[i] = count * i;
- }
- if (leftGroup) {
- buf = (int *)malloc( count * rsize * sizeof(int) );
- if (rank == 0) {
- for (i=0; i<count*rsize; i++) buf[i] = i;
- }
- else {
- for (i=0; i<count*rsize; i++) buf[i] = -1;
- }
- err = MPI_Scatterv( buf, sendcounts, senddispls, datatype,
- NULL, 0, datatype,
- (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
- comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Test that no other process in this group received the
- scatter */
- if (rank != 0) {
- for (i=0; i<count*rsize; i++) {
- if (buf[i] != -1) {
- if (errs < 10) {
- fprintf( stderr, "Received data on root group!\n" );
- }
- errs++;
- }
- }
- }
- }
- else {
- buf = (int *)malloc( count * sizeof(int) );
- /* In the right group */
- for (i=0; i<count; i++) buf[i] = -1;
- err = MPI_Scatterv( NULL, 0, 0, datatype,
- buf, count, datatype, 0, comm );
- if (err) {
- errs++;
- MTestPrintError( err );
- }
- /* Check that we have received the correct data */
- for (i=0; i<count; i++) {
- if (buf[i] != i + rank * count) {
- if (errs < 10)
- fprintf( stderr, "buf[%d] = %d on %d\n",
- i, buf[i], rank );
- errs++;
- }
- }
- }
- free( sendcounts );
- free( senddispls );
- free( buf );
- }
- MTestFreeComm( &comm );
+ for (count = 1; count < 65000; count = 2 * count) {
+ buf = 0;
+ sendcounts = (int *) malloc(rsize * sizeof(int));
+ senddispls = (int *) malloc(rsize * sizeof(int));
+ for (i = 0; i < rsize; i++) {
+ sendcounts[i] = count;
+ senddispls[i] = count * i;
+ }
+ if (leftGroup) {
+ buf = (int *) malloc(count * rsize * sizeof(int));
+ if (rank == 0) {
+ for (i = 0; i < count * rsize; i++)
+ buf[i] = i;
+ }
+ else {
+ for (i = 0; i < count * rsize; i++)
+ buf[i] = -1;
+ }
+ err = MTest_Scatterv(buf, sendcounts, senddispls, datatype,
+ NULL, 0, datatype,
+ (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Test that no other process in this group received the
+ * scatter */
+ if (rank != 0) {
+ for (i = 0; i < count * rsize; i++) {
+ if (buf[i] != -1) {
+ if (errs < 10) {
+ fprintf(stderr, "Received data on root group!\n");
+ }
+ errs++;
+ }
+ }
+ }
+ }
+ else {
+ buf = (int *) malloc(count * sizeof(int));
+ /* In the right group */
+ for (i = 0; i < count; i++)
+ buf[i] = -1;
+ err = MTest_Scatterv(NULL, 0, 0, datatype, buf, count, datatype, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintError(err);
+ }
+ /* Check that we have received the correct data */
+ for (i = 0; i < count; i++) {
+ if (buf[i] != i + rank * count) {
+ if (errs < 10)
+ fprintf(stderr, "buf[%d] = %d on %d\n", i, buf[i], rank);
+ errs++;
+ }
+ }
+ }
+ free(sendcounts);
+ free(senddispls);
+ free(buf);
+ }
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
#include <stdio.h>
#include <stdlib.h>
-int add ( double *, double *, int *, MPI_Datatype * );
+int add(double *, double *, int *, MPI_Datatype *);
/*
* User-defined operation on a long value (tests proper handling of
* possible pipelining in the implementation of reductions with user-defined
* operations).
*/
-int add( double *invec, double *inoutvec, int *len, MPI_Datatype *dtype )
+int add(double *invec, double *inoutvec, int *len, MPI_Datatype * dtype)
{
int i, n = *len;
- for (i=0; i<n; i++) {
- inoutvec[i] = invec[i] + inoutvec[i];
+ for (i = 0; i < n; i++) {
+ inoutvec[i] = invec[i] + inoutvec[i];
}
return 0;
}
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Op op;
- int i, rank, size, bufsize, errcnt = 0, toterr;
+ int i, rank, size, bufsize, errcnt = 0, toterr;
double *inbuf, *outbuf, value;
-
- MPI_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
- MPI_Op_create( (MPI_User_function *)add, 1, &op );
-
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Op_create((MPI_User_function *) add, 1, &op);
+
bufsize = 1;
while (bufsize < 100000) {
- inbuf = (double *)malloc( bufsize * sizeof(double) );
- outbuf = (double *)malloc( bufsize * sizeof(double) );
- if (! inbuf || ! outbuf) {
- fprintf( stderr, "Could not allocate buffers for size %d\n",
- bufsize );
- errcnt++;
- break;
- }
+ inbuf = (double *) malloc(bufsize * sizeof(double));
+ outbuf = (double *) malloc(bufsize * sizeof(double));
+ if (!inbuf || !outbuf) {
+ fprintf(stderr, "Could not allocate buffers for size %d\n", bufsize);
+ errcnt++;
+ break;
+ }
- value = (rank & 0x1) ? 1.0 : -1.0;
- for (i=0; i<bufsize; i++) {
- inbuf[i] = value;
- outbuf[i] = 100.0;
- }
- MPI_Allreduce( inbuf, outbuf, bufsize, MPI_DOUBLE, op,
- MPI_COMM_WORLD );
- /* Check values */
- value = (size & 0x1) ? -1.0 : 0.0;
- for (i=0; i<bufsize; i++) {
- if (outbuf[i] != value) {
- if (errcnt < 10)
- printf( "outbuf[%d] = %f, should = %f\n", i, outbuf[i],
- value );
- errcnt ++;
- }
- }
- free( inbuf );
- free( outbuf );
- bufsize *= 2;
+ value = (rank & 0x1) ? 1.0 : -1.0;
+ for (i = 0; i < bufsize; i++) {
+ inbuf[i] = value;
+ outbuf[i] = 100.0;
+ }
+ MPI_Allreduce(inbuf, outbuf, bufsize, MPI_DOUBLE, op, MPI_COMM_WORLD);
+ /* Check values */
+ value = (size & 0x1) ? -1.0 : 0.0;
+ for (i = 0; i < bufsize; i++) {
+ if (outbuf[i] != value) {
+ if (errcnt < 10)
+ printf("outbuf[%d] = %f, should = %f\n", i, outbuf[i], value);
+ errcnt++;
+ }
+ }
+ free(inbuf);
+ free(outbuf);
+ bufsize *= 2;
}
-
- MPI_Allreduce( &errcnt, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ MPI_Allreduce(&errcnt, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0) {
- if (toterr == 0)
- printf( " No Errors\n" );
- else
- printf( "*! %d errors!\n", toterr );
+ if (toterr == 0)
+ printf(" No Errors\n");
+ else
+ printf("*! %d errors!\n", toterr);
}
- MPI_Op_free( &op );
- MPI_Finalize( );
+ MPI_Op_free(&op);
+ MPI_Finalize();
return 0;
}
-
if (!(cond_)) { \
fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
MPI_Abort(MPI_COMM_WORLD, 1); \
- exit(1); \
} \
} while (0)
int *rcounts = NULL;
int *sdispls = NULL;
int *rdispls = NULL;
- int *types = NULL;
+ MPI_Datatype *types = NULL;
MPI_Comm comm;
MPI_Request req;
/* enough space for every process to contribute at least NUM_INTS ints to any
* collective operation */
- sbuf = malloc(NUM_INTS*size*sizeof(int));
+ sbuf = malloc(NUM_INTS * size * sizeof(int));
my_assert(sbuf);
- rbuf = malloc(NUM_INTS*size*sizeof(int));
+ rbuf = malloc(NUM_INTS * size * sizeof(int));
my_assert(rbuf);
- scounts = malloc(size*sizeof(int));
+ scounts = malloc(size * sizeof(int));
my_assert(scounts);
- rcounts = malloc(size*sizeof(int));
+ rcounts = malloc(size * sizeof(int));
my_assert(rcounts);
- sdispls = malloc(size*sizeof(int));
+ sdispls = malloc(size * sizeof(int));
my_assert(sdispls);
- rdispls = malloc(size*sizeof(int));
+ rdispls = malloc(size * sizeof(int));
my_assert(rdispls);
- types = malloc(size*sizeof(int));
+ types = malloc(size * sizeof(MPI_Datatype));
my_assert(types);
for (i = 0; i < size; ++i) {
- sbuf[2*i] = i;
- sbuf[2*i+1] = i;
- rbuf[2*i] = i;
- rbuf[2*i+1] = i;
- scounts[i] = NUM_INTS;
- rcounts[i] = NUM_INTS;
- sdispls[i] = i * NUM_INTS;
- rdispls[i] = i * NUM_INTS;
- types[i] = MPI_INT;
+ sbuf[2 * i] = i;
+ sbuf[2 * i + 1] = i;
+ rbuf[2 * i] = i;
+ rbuf[2 * i + 1] = i;
+ scounts[i] = NUM_INTS;
+ rcounts[i] = NUM_INTS;
+ sdispls[i] = i * NUM_INTS;
+ rdispls[i] = i * NUM_INTS;
+ types[i] = MPI_INT;
}
MPI_Ibarrier(comm, &req);
MPI_Igather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (0 == rank)
+ MPI_Igather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ else
+ MPI_Igather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, 0, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (0 == rank)
+ MPI_Igatherv(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, rcounts, rdispls, MPI_INT, 0, comm,
+ &req);
+ else
+ MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (0 == rank)
+ MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, 0, comm, &req);
+ else
+ MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (0 == rank)
+ MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, 0, comm,
+ &req);
+ else
+ MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, rbuf, NUM_INTS, MPI_INT, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iallgather(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Iallgather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, NUM_INTS, MPI_INT, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iallgatherv(sbuf, NUM_INTS, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Iallgatherv(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, rcounts, rdispls, MPI_INT, comm,
+ &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Ialltoall(sbuf, NUM_INTS, MPI_INT, rbuf, NUM_INTS, MPI_INT, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Ialltoall(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, rbuf, NUM_INTS, MPI_INT, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Ialltoallv(sbuf, scounts, sdispls, MPI_INT, rbuf, rcounts, rdispls, MPI_INT, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Ialltoallv(MPI_IN_PLACE, NULL, NULL, MPI_DATATYPE_NULL, rbuf, rcounts, rdispls, MPI_INT,
+ comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Ialltoallw(sbuf, scounts, sdispls, types, rbuf, rcounts, rdispls, types, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Ialltoallw(MPI_IN_PLACE, NULL, NULL, NULL, rbuf, rcounts, rdispls, types, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Ireduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ if (0 == rank)
+ MPI_Ireduce(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
+ else
+ MPI_Ireduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iallreduce(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Iallreduce(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Ireduce_scatter(sbuf, rbuf, rcounts, MPI_INT, MPI_SUM, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Ireduce_scatter(MPI_IN_PLACE, rbuf, rcounts, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Ireduce_scatter_block(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Ireduce_scatter_block(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
+ MPI_Iscan(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
MPI_Iexscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
- if (sbuf) free(sbuf);
- if (rbuf) free(rbuf);
- if (scounts) free(scounts);
- if (rcounts) free(rcounts);
- if (sdispls) free(sdispls);
- if (rdispls) free(rdispls);
+ MPI_Iexscan(MPI_IN_PLACE, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
+ MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+ if (sbuf)
+ free(sbuf);
+ if (rbuf)
+ free(rbuf);
+ if (scounts)
+ free(scounts);
+ if (rcounts)
+ free(rcounts);
+ if (sdispls)
+ free(sdispls);
+ if (rdispls)
+ free(rdispls);
+ if (types)
+ free(types);
if (rank == 0) {
if (errs)
MPI_Finalize();
return 0;
}
-
if (!(cond_)) { \
fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
MPI_Abort(MPI_COMM_WORLD, 1); \
- exit(1); \
} \
} while (0)
-static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype * datatype)
{
int i;
int *in = invec;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
- buf = malloc(COUNT*size*sizeof(int));
- recvbuf = malloc(COUNT*size*sizeof(int));
- sendcounts = malloc(size*sizeof(int));
- recvcounts = malloc(size*sizeof(int));
- sdispls = malloc(size*sizeof(int));
- rdispls = malloc(size*sizeof(int));
- sendtypes = malloc(size*sizeof(MPI_Datatype));
- recvtypes = malloc(size*sizeof(MPI_Datatype));
+ buf = malloc(COUNT * size * sizeof(int));
+ recvbuf = malloc(COUNT * size * sizeof(int));
+ sendcounts = malloc(size * sizeof(int));
+ recvcounts = malloc(size * sizeof(int));
+ sdispls = malloc(size * sizeof(int));
+ rdispls = malloc(size * sizeof(int));
+ sendtypes = malloc(size * sizeof(MPI_Datatype));
+ recvtypes = malloc(size * sizeof(MPI_Datatype));
/* MPI_Ibcast */
for (i = 0; i < COUNT; ++i) {
}
/* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
- buf_alias = (signed char *)buf;
- my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
+ buf_alias = (signed char *) buf;
+ my_assert(COUNT * size * sizeof(int) > PRIME); /* sanity */
for (i = 0; i < PRIME; ++i) {
if (rank == 0)
buf_alias[i] = i;
MPI_Wait(&req, MPI_STATUS_IGNORE);
if (rank == 0) {
for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
}
/* same again, use a user op and free it before the wait */
{
MPI_Op op = MPI_OP_NULL;
- MPI_Op_create(sum_fn, /*commute=*/1, &op);
+ MPI_Op_create(sum_fn, /*commute= */ 1, &op);
for (i = 0; i < COUNT; ++i) {
buf[i] = rank + i;
MPI_Wait(&req, MPI_STATUS_IGNORE);
if (rank == 0) {
for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
}
}
MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
/* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
sdispls[i] = COUNT * i;
rdispls[i] = COUNT * i;
for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
- MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD, &req);
+ MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT,
+ MPI_COMM_WORLD, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
}
/* MPI_Igather */
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
}
else {
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
my_assert(recvbuf[i] == 0xdeadbeef);
}
}
MPI_Datatype type = MPI_DATATYPE_NULL;
MPI_Type_dup(MPI_INT, &type);
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, MPI_COMM_WORLD, &req);
- MPI_Type_free(&type); /* should cause implementations that don't refcount
- correctly to blow up or hang in the wait */
+ MPI_Type_free(&type); /* should cause implementations that don't refcount
+ * correctly to blow up or hang in the wait */
MPI_Wait(&req, MPI_STATUS_IGNORE);
if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
}
else {
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
my_assert(recvbuf[i] == 0xdeadbeef);
}
}
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
if (rank == 0)
- buf[i*COUNT+j] = i + j;
+ buf[i * COUNT + j] = i + j;
else
- buf[i*COUNT+j] = 0xdeadbeef;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, MPI_COMM_WORLD, &req);
my_assert(recvbuf[j] == rank + j);
}
if (rank != 0) {
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
/* check we didn't corrupt the sendbuf somehow */
my_assert(buf[i] == 0xdeadbeef);
}
sdispls[i] = i * COUNT;
for (j = 0; j < COUNT; ++j) {
if (rank == 0)
- buf[i*COUNT+j] = i + j;
+ buf[i * COUNT + j] = i + j;
else
- buf[i*COUNT+j] = 0xdeadbeef;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
- MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, MPI_COMM_WORLD,
+ &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (j = 0; j < COUNT; ++j) {
my_assert(recvbuf[j] == rank + j);
}
if (rank != 0) {
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
/* check we didn't corrupt the sendbuf somehow */
my_assert(buf[i] == 0xdeadbeef);
}
for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
/* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
}
for (i = 0; i < size; ++i) {
recvcounts[i] = COUNT;
for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + i;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = rank + i;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
/* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
}
/* MPI_Ireduce_scatter_block */
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + i;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = rank + i;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
/* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
}
/* MPI_Igatherv */
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
buf[i] = 0xdeadbeef;
recvbuf[i] = 0xdeadbeef;
}
recvcounts[i] = COUNT;
rdispls[i] = i * COUNT;
}
- MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, MPI_COMM_WORLD, &req);
+ MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, MPI_COMM_WORLD,
+ &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
}
else {
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
my_assert(recvbuf[i] == 0xdeadbeef);
}
}
/* MPI_Ialltoall */
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, MPI_COMM_WORLD, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
}
/* MPI_Iallgather */
- for (i = 0; i < size*COUNT; ++i) {
+ for (i = 0; i < size * COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
/* MPI_Iallgatherv */
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
recvcounts[i] = COUNT;
rdispls[i] = i * COUNT;
}
for (i = 0; i < COUNT; ++i)
buf[i] = rank + i;
- MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD, &req);
+ MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD,
+ &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < COUNT; ++i) {
- my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1))));
+ my_assert(recvbuf[i] == ((rank * (rank + 1) / 2) + (i * (rank + 1))));
}
/* MPI_Iexscan */
if (rank == 0)
my_assert(recvbuf[i] == 0xdeadbeef);
else
- my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1)) - (rank + i)));
+ my_assert(recvbuf[i] == ((rank * (rank + 1) / 2) + (i * (rank + 1)) - (rank + i)));
}
/* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
sendtypes[i] = MPI_INT;
recvtypes[i] = MPI_INT;
for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
}
- MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, MPI_COMM_WORLD, &req);
+ MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes,
+ MPI_COMM_WORLD, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
}
free(sendtypes);
return 0;
}
-
#include <string.h>
#include <assert.h>
-#if HAVE_UNISTD_H
+#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
/* we sleep with probability 1/CHANCE_OF_SLEEP */
#define CHANCE_OF_SLEEP (1000)
/* JITTER_DELAY is denominated in microseconds (us) */
-#define JITTER_DELAY (50000) /* 0.05 seconds */
+#define JITTER_DELAY (50000) /* 0.05 seconds */
/* NUM_COMMS is the number of communicators on which ops will be posted */
#define NUM_COMMS (4)
* a=279470273, good primitive root of m from "TABLES OF LINEAR
* CONGRUENTIAL GENERATORS OF DIFFERENT SIZES AND GOOD
 * LATTICE STRUCTURE", by Pierre L'Ecuyer */
- return (279470273UL * (unsigned long)x) % 4294967291UL;
+ return (279470273UL * (unsigned long) x) % 4294967291UL;
}
/* given a random unsigned int value "rndval_" from gen_prn, this evaluates to a
((unsigned int)((min_) + ((rndval_) * (1.0 / (GEN_PRN_MAX+1.0)) * ((max_) - (min_)))))
-static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype * datatype)
{
int i;
int *in = invec;
/* used to keep track of buffers that should be freed after the corresponding
* operation has completed */
struct laundry {
- int case_num; /* which test case initiated this req/laundry */
+ int case_num; /* which test case initiated this req/laundry */
MPI_Comm comm;
int *buf;
int *recvbuf;
{
l->case_num = -1;
l->comm = MPI_COMM_NULL;
- if (l->buf) free(l->buf);
- if (l->recvbuf) free(l->recvbuf);
- if (l->sendcounts) free(l->sendcounts);
- if (l->recvcounts) free(l->recvcounts);
- if (l->sdispls) free(l->sdispls);
- if (l->rdispls) free(l->rdispls);
- if (l->sendtypes) free(l->sendtypes);
- if (l->recvtypes) free(l->recvtypes);
+ if (l->buf)
+ free(l->buf);
+ if (l->recvbuf)
+ free(l->recvbuf);
+ if (l->sendcounts)
+ free(l->sendcounts);
+ if (l->recvcounts)
+ free(l->recvcounts);
+ if (l->sdispls)
+ free(l->sdispls);
+ if (l->rdispls)
+ free(l->rdispls);
+ if (l->sendtypes)
+ free(l->sendtypes);
+ if (l->recvtypes)
+ free(l->recvtypes);
}
/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
 * in (*req) a request handle corresponding to that operation. This call should
* be considered collective over comm (with a consistent value for "rndnum"),
* even though the operation may only be a point-to-point request. */
-static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request *req, struct laundry *l)
+static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request * req,
+ struct laundry *l)
{
int i, j;
int rank, size;
l->case_num = -1;
l->comm = comm;
- l->buf = buf = malloc(COUNT*size*sizeof(int));
- l->recvbuf = recvbuf = malloc(COUNT*size*sizeof(int));
- l->sendcounts = sendcounts = malloc(size*sizeof(int));
- l->recvcounts = recvcounts = malloc(size*sizeof(int));
- l->sdispls = sdispls = malloc(size*sizeof(int));
- l->rdispls = rdispls = malloc(size*sizeof(int));
- l->sendtypes = sendtypes = malloc(size*sizeof(MPI_Datatype));
- l->recvtypes = recvtypes = malloc(size*sizeof(MPI_Datatype));
+ l->buf = buf = malloc(COUNT * size * sizeof(int));
+ l->recvbuf = recvbuf = malloc(COUNT * size * sizeof(int));
+ l->sendcounts = sendcounts = malloc(size * sizeof(int));
+ l->recvcounts = recvcounts = malloc(size * sizeof(int));
+ l->sdispls = sdispls = malloc(size * sizeof(int));
+ l->rdispls = rdispls = malloc(size * sizeof(int));
+ l->sendtypes = sendtypes = malloc(size * sizeof(MPI_Datatype));
+ l->recvtypes = recvtypes = malloc(size * sizeof(MPI_Datatype));
#define NUM_CASES (21)
l->case_num = rand_range(rndnum, 0, NUM_CASES);
switch (l->case_num) {
- case 0: /* MPI_Ibcast */
- for (i = 0; i < COUNT; ++i) {
- if (rank == 0) {
- buf[i] = i;
- }
- else {
- buf[i] = 0xdeadbeef;
- }
- }
- MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
- break;
-
- case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
- /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
- buf_alias = (signed char *)buf;
- my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
- for (i = 0; i < PRIME; ++i) {
- if (rank == 0)
- buf_alias[i] = i;
- else
- buf_alias[i] = 0xdb;
+ case 0: /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0) {
+ buf[i] = i;
}
- for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
- buf_alias[i] = 0xbf;
+ else {
+ buf[i] = 0xdeadbeef;
}
- MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
- break;
-
- case 2: /* MPI_Ibarrier */
- MPI_Ibarrier(comm, req);
- break;
+ }
+ MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
+ buf_alias = (signed char *) buf;
+ my_assert(COUNT * size * sizeof(int) > PRIME); /* sanity */
+ for (i = 0; i < PRIME; ++i) {
+ if (rank == 0)
+ buf_alias[i] = i;
+ else
+ buf_alias[i] = 0xdb;
+ }
+ for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
+ buf_alias[i] = 0xbf;
+ }
+ MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
+ break;
- case 3: /* MPI_Ireduce */
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
- break;
+ case 2: /* MPI_Ibarrier */
+ MPI_Ibarrier(comm, req);
+ break;
- case 4: /* same again, use a user op and free it before the wait */
- {
- MPI_Op op = MPI_OP_NULL;
- MPI_Op_create(sum_fn, /*commute=*/1, &op);
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
- MPI_Op_free(&op);
- }
- break;
+ case 3: /* MPI_Ireduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
+ break;
- case 5: /* MPI_Iallreduce */
+ case 4: /* same again, use a user op and free it before the wait */
+ {
+ MPI_Op op = MPI_OP_NULL;
+ MPI_Op_create(sum_fn, /*commute= */ 1, &op);
for (i = 0; i < COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
- MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
+ MPI_Op_free(&op);
+ }
+ break;
- case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
- for (i = 0; i < size; ++i) {
- sendcounts[i] = COUNT;
- recvcounts[i] = COUNT;
- sdispls[i] = COUNT * i;
- rdispls[i] = COUNT * i;
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 5: /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i;
+ rdispls[i] = COUNT * i;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
- break;
-
- case 7: /* MPI_Igather */
- for (i = 0; i < size*COUNT; ++i) {
+ }
+ MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT,
+ comm, req);
+ break;
+
+ case 7: /* MPI_Igather */
+ for (i = 0; i < size * COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
+ {
+ MPI_Datatype type = MPI_DATATYPE_NULL;
+ MPI_Type_dup(MPI_INT, &type);
+ for (i = 0; i < size * COUNT; ++i) {
buf[i] = rank + i;
recvbuf[i] = 0xdeadbeef;
}
- MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
- break;
-
- case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
- {
- MPI_Datatype type = MPI_DATATYPE_NULL;
- MPI_Type_dup(MPI_INT, &type);
- for (i = 0; i < size*COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
- MPI_Type_free(&type); /* should cause implementations that don't refcount
- correctly to blow up or hang in the wait */
- }
- break;
-
- case 9: /* MPI_Iscatter */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- if (rank == 0)
- buf[i*COUNT+j] = i + j;
- else
- buf[i*COUNT+j] = 0xdeadbeef;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
- }
- MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
- break;
-
- case 10: /* MPI_Iscatterv */
- for (i = 0; i < size; ++i) {
- /* weak test, just test the regular case where all counts are equal */
- sendcounts[i] = COUNT;
- sdispls[i] = i * COUNT;
- for (j = 0; j < COUNT; ++j) {
- if (rank == 0)
- buf[i*COUNT+j] = i + j;
- else
- buf[i*COUNT+j] = 0xdeadbeef;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
- }
- MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
- break;
+ MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
+ MPI_Type_free(&type); /* should cause implementations that don't refcount
+ * correctly to blow up or hang in the wait */
+ }
+ break;
- case 11: /* MPI_Ireduce_scatter */
- for (i = 0; i < size; ++i) {
- recvcounts[i] = COUNT;
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + i;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 9: /* MPI_Iscatter */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i * COUNT + j] = i + j;
+ else
+ buf[i * COUNT + j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
- break;
-
- case 12: /* MPI_Ireduce_scatter_block */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + i;
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ }
+ MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
+
+ case 10: /* MPI_Iscatterv */
+ for (i = 0; i < size; ++i) {
+ /* weak test, just test the regular case where all counts are equal */
+ sendcounts[i] = COUNT;
+ sdispls[i] = i * COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ if (rank == 0)
+ buf[i * COUNT + j] = i + j;
+ else
+ buf[i * COUNT + j] = 0xdeadbeef;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ }
+ MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
+ break;
- case 13: /* MPI_Igatherv */
- for (i = 0; i < size*COUNT; ++i) {
- buf[i] = 0xdeadbeef;
- recvbuf[i] = 0xdeadbeef;
- }
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- }
- for (i = 0; i < size; ++i) {
- recvcounts[i] = COUNT;
- rdispls[i] = i * COUNT;
+ case 11: /* MPI_Ireduce_scatter */
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + i;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
- break;
+ }
+ MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
+ break;
- case 14: /* MPI_Ialltoall */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 12: /* MPI_Ireduce_scatter_block */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + i;
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
- break;
+ }
+ MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
- case 15: /* MPI_Iallgather */
- for (i = 0; i < size*COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
- break;
+ case 13: /* MPI_Igatherv */
+ for (i = 0; i < size * COUNT; ++i) {
+ buf[i] = 0xdeadbeef;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ }
+ for (i = 0; i < size; ++i) {
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
+ break;
- case 16: /* MPI_Iallgatherv */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
- recvcounts[i] = COUNT;
- rdispls[i] = i * COUNT;
+ case 14: /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- for (i = 0; i < COUNT; ++i)
- buf[i] = rank + i;
- MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
- break;
+ }
+ MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
+ break;
- case 17: /* MPI_Iscan */
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
- }
- MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ case 15: /* MPI_Iallgather */
+ for (i = 0; i < size * COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
+ break;
- case 18: /* MPI_Iexscan */
- for (i = 0; i < COUNT; ++i) {
- buf[i] = rank + i;
- recvbuf[i] = 0xdeadbeef;
+ case 16: /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
- break;
+ recvcounts[i] = COUNT;
+ rdispls[i] = i * COUNT;
+ }
+ for (i = 0; i < COUNT; ++i)
+ buf[i] = rank + i;
+ MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
+ break;
+
+ case 17: /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
- case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
- for (i = 0; i < size; ++i) {
- sendcounts[i] = COUNT;
- recvcounts[i] = COUNT;
- sdispls[i] = COUNT * i * sizeof(int);
- rdispls[i] = COUNT * i * sizeof(int);
- sendtypes[i] = MPI_INT;
- recvtypes[i] = MPI_INT;
- for (j = 0; j < COUNT; ++j) {
- buf[i*COUNT+j] = rank + (i * j);
- recvbuf[i*COUNT+j] = 0xdeadbeef;
- }
+ case 18: /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ buf[i] = rank + i;
+ recvbuf[i] = 0xdeadbeef;
+ }
+ MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
+ break;
+
+ case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ sendcounts[i] = COUNT;
+ recvcounts[i] = COUNT;
+ sdispls[i] = COUNT * i * sizeof(int);
+ rdispls[i] = COUNT * i * sizeof(int);
+ sendtypes[i] = MPI_INT;
+ recvtypes[i] = MPI_INT;
+ for (j = 0; j < COUNT; ++j) {
+ buf[i * COUNT + j] = rank + (i * j);
+ recvbuf[i * COUNT + j] = 0xdeadbeef;
}
- MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, req);
- break;
+ }
+ MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes,
+ comm, req);
+ break;
- case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
- /* even ranks send to odd ranks, but only if we have a full pair */
- if ((rank % 2 != 0) || (rank != size-1)) {
- for (j = 0; j < COUNT; ++j) {
- buf[j] = j;
- recvbuf[j] = 0xdeadbeef;
- }
- if (rank % 2 == 0)
- MPI_Isend(buf, COUNT, MPI_INT, rank+1, 5, comm, req);
- else
- MPI_Irecv(recvbuf, COUNT, MPI_INT, rank-1, 5, comm, req);
+ case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
+ /* even ranks send to odd ranks, but only if we have a full pair */
+ if ((rank % 2 != 0) || (rank != size - 1)) {
+ for (j = 0; j < COUNT; ++j) {
+ buf[j] = j;
+ recvbuf[j] = 0xdeadbeef;
}
- break;
+ if (rank % 2 == 0)
+ MPI_Isend(buf, COUNT, MPI_INT, rank + 1, 5, comm, req);
+ else
+ MPI_Irecv(recvbuf, COUNT, MPI_INT, rank - 1, 5, comm, req);
+ }
+ break;
- default:
- fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
- MPI_Abort(comm, 1);
- exit(1);
- break;
+ default:
+ fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
+ MPI_Abort(comm, 1);
+ break;
}
}
{
int i, j;
int rank, size;
- MPI_Comm comm = l->comm;
- int *buf = l->buf;
- int *recvbuf = l->recvbuf;
+ MPI_Comm comm = l->comm;
+ int *buf = l->buf;
+ int *recvbuf = l->recvbuf;
int *sendcounts = l->sendcounts;
int *recvcounts = l->recvcounts;
- int *sdispls = l->sdispls;
- int *rdispls = l->rdispls;
- int *sendtypes = l->sendtypes;
- int *recvtypes = l->recvtypes;
- char *buf_alias = (char *)buf;
+ int *sdispls = l->sdispls;
+ int *rdispls = l->rdispls;
+ int *sendtypes = l->sendtypes;
+ int *recvtypes = l->recvtypes;
+ char *buf_alias = (char *) buf;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
/* these cases all correspond to cases in start_random_nonblocking */
switch (l->case_num) {
- case 0: /* MPI_Ibcast */
- for (i = 0; i < COUNT; ++i) {
- if (buf[i] != i)
- printf("buf[%d]=%d i=%d\n", i, buf[i], i);
- my_assert(buf[i] == i);
- }
- break;
+ case 0: /* MPI_Ibcast */
+ for (i = 0; i < COUNT; ++i) {
+ if (buf[i] != i)
+ printf("buf[%d]=%d i=%d\n", i, buf[i], i);
+ my_assert(buf[i] == i);
+ }
+ break;
- case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
- for (i = 0; i < PRIME; ++i) {
- if (buf_alias[i] != i)
- printf("buf_alias[%d]=%d i=%d\n", i, buf_alias[i], i);
- my_assert(buf_alias[i] == i);
- }
- break;
+ case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
+ for (i = 0; i < PRIME; ++i) {
+ if (buf_alias[i] != i)
+ printf("buf_alias[%d]=%d i=%d\n", i, buf_alias[i], i);
+ my_assert(buf_alias[i] == i);
+ }
+ break;
- case 2: /* MPI_Ibarrier */
- /* nothing to check */
- break;
+ case 2: /* MPI_Ibarrier */
+ /* nothing to check */
+ break;
- case 3: /* MPI_Ireduce */
- if (rank == 0) {
- for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
- }
+ case 3: /* MPI_Ireduce */
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
- break;
+ }
+ break;
- case 4: /* same again, use a user op and free it before the wait */
- if (rank == 0) {
- for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
- }
+ case 4: /* same again, use a user op and free it before the wait */
+ if (rank == 0) {
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
}
- break;
+ }
+ break;
+
+ case 5: /* MPI_Iallreduce */
+ for (i = 0; i < COUNT; ++i) {
+ if (recvbuf[i] != ((size * (size - 1) / 2) + (i * size)))
+ printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i],
+ ((size * (size - 1) / 2) + (i * size)));
+ my_assert(recvbuf[i] == ((size * (size - 1) / 2) + (i * size)));
+ }
+ break;
- case 5: /* MPI_Iallreduce */
- for (i = 0; i < COUNT; ++i) {
- if (recvbuf[i] != ((size * (size-1) / 2) + (i * size)))
- printf("got recvbuf[%d]=%d, expected %d\n", i, recvbuf[i], ((size * (size-1) / 2) + (i * size)));
- my_assert(recvbuf[i] == ((size * (size-1) / 2) + (i * size)));
+ case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
- break;
+ }
+ break;
- case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
+ case 7: /* MPI_Igather */
+ if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
- break;
-
- case 7: /* MPI_Igather */
- if (rank == 0) {
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
- }
- }
- else {
- for (i = 0; i < size*COUNT; ++i) {
- my_assert(recvbuf[i] == 0xdeadbeef);
- }
+ }
+ else {
+ for (i = 0; i < size * COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
- if (rank == 0) {
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
+ if (rank == 0) {
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
- else {
- for (i = 0; i < size*COUNT; ++i) {
- my_assert(recvbuf[i] == 0xdeadbeef);
- }
+ }
+ else {
+ for (i = 0; i < size * COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 9: /* MPI_Iscatter */
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == rank + j);
- }
- if (rank != 0) {
- for (i = 0; i < size*COUNT; ++i) {
- /* check we didn't corrupt the sendbuf somehow */
- my_assert(buf[i] == 0xdeadbeef);
- }
+ case 9: /* MPI_Iscatter */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size * COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 10: /* MPI_Iscatterv */
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == rank + j);
- }
- if (rank != 0) {
- for (i = 0; i < size*COUNT; ++i) {
- /* check we didn't corrupt the sendbuf somehow */
- my_assert(buf[i] == 0xdeadbeef);
- }
- }
- for (i = 1; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
- }
+ case 10: /* MPI_Iscatterv */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == rank + j);
+ }
+ if (rank != 0) {
+ for (i = 0; i < size * COUNT; ++i) {
+ /* check we didn't corrupt the sendbuf somehow */
+ my_assert(buf[i] == 0xdeadbeef);
}
- break;
-
- case 11: /* MPI_Ireduce_scatter */
+ }
+ for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
- for (i = 1; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
- }
- }
- break;
+ }
+ break;
- case 12: /* MPI_Ireduce_scatter_block */
+ case 11: /* MPI_Ireduce_scatter */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
- }
- for (i = 1; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /* check we didn't corrupt the rest of the recvbuf */
- my_assert(recvbuf[i*COUNT+j] == 0xdeadbeef);
- }
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 13: /* MPI_Igatherv */
- if (rank == 0) {
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
- }
- }
- else {
- for (i = 0; i < size*COUNT; ++i) {
- my_assert(recvbuf[i] == 0xdeadbeef);
- }
+ case 12: /* MPI_Ireduce_scatter_block */
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[j] == (size * rank + ((size - 1) * size) / 2));
+ }
+ for (i = 1; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /* check we didn't corrupt the rest of the recvbuf */
+ my_assert(recvbuf[i * COUNT + j] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 14: /* MPI_Ialltoall */
+ case 13: /* MPI_Igatherv */
+ if (rank == 0) {
for (i = 0; i < size; ++i) {
for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
}
- break;
-
- case 15: /* MPI_Iallgather */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ }
+ else {
+ for (i = 0; i < size * COUNT; ++i) {
+ my_assert(recvbuf[i] == 0xdeadbeef);
}
- break;
+ }
+ break;
- case 16: /* MPI_Iallgatherv */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- my_assert(recvbuf[i*COUNT+j] == i + j);
- }
+ case 14: /* MPI_Ialltoall */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (i * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
- break;
+ }
+ break;
- case 17: /* MPI_Iscan */
- for (i = 0; i < COUNT; ++i) {
- my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1))));
+ case 15: /* MPI_Iallgather */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
- break;
+ }
+ break;
- case 18: /* MPI_Iexscan */
- for (i = 0; i < COUNT; ++i) {
- if (rank == 0)
- my_assert(recvbuf[i] == 0xdeadbeef);
- else
- my_assert(recvbuf[i] == ((rank * (rank+1) / 2) + (i * (rank + 1)) - (rank + i)));
+ case 16: /* MPI_Iallgatherv */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ my_assert(recvbuf[i * COUNT + j] == i + j);
}
- break;
+ }
+ break;
- case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
- for (i = 0; i < size; ++i) {
- for (j = 0; j < COUNT; ++j) {
- /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j)));*/
- my_assert(recvbuf[i*COUNT+j] == (i + (rank * j)));
- }
+ case 17: /* MPI_Iscan */
+ for (i = 0; i < COUNT; ++i) {
+ my_assert(recvbuf[i] == ((rank * (rank + 1) / 2) + (i * (rank + 1))));
+ }
+ break;
+
+ case 18: /* MPI_Iexscan */
+ for (i = 0; i < COUNT; ++i) {
+ if (rank == 0)
+ my_assert(recvbuf[i] == 0xdeadbeef);
+ else
+ my_assert(recvbuf[i] == ((rank * (rank + 1) / 2) + (i * (rank + 1)) - (rank + i)));
+ }
+ break;
+
+ case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
+ for (i = 0; i < size; ++i) {
+ for (j = 0; j < COUNT; ++j) {
+ /*printf("recvbuf[%d*COUNT+%d]=%d, expecting %d\n", i, j, recvbuf[i*COUNT+j], (i + (rank * j))); */
+ my_assert(recvbuf[i * COUNT + j] == (i + (rank * j)));
}
- break;
+ }
+ break;
- case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
- /* even ranks send to odd ranks, but only if we have a full pair */
- if ((rank % 2 != 0) || (rank != size-1)) {
- for (j = 0; j < COUNT; ++j) {
- /* only odd procs did a recv */
- if (rank % 2 == 0) {
- my_assert(recvbuf[j] == 0xdeadbeef);
- }
- else {
- if (recvbuf[j] != j) printf("recvbuf[%d]=%d j=%d\n", j, recvbuf[j], j);
- my_assert(recvbuf[j] == j);
- }
+ case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
+ /* even ranks send to odd ranks, but only if we have a full pair */
+ if ((rank % 2 != 0) || (rank != size - 1)) {
+ for (j = 0; j < COUNT; ++j) {
+ /* only odd procs did a recv */
+ if (rank % 2 == 0) {
+ my_assert(recvbuf[j] == 0xdeadbeef);
+ }
+ else {
+ if (recvbuf[j] != j)
+ printf("recvbuf[%d]=%d j=%d\n", j, recvbuf[j], j);
+ my_assert(recvbuf[j] == j);
}
}
- break;
+ }
+ break;
- default:
- printf("invalid case_num (%d) detected\n", l->case_num);
- assert(0);
- break;
+ default:
+ printf("invalid case_num (%d) detected\n", l->case_num);
+ assert(0);
+ break;
}
}
+
#undef NUM_CASES
-static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[], int *outcount, int indices[])
+static void complete_something_somehow(unsigned int rndnum, int numreqs, MPI_Request reqs[],
+ int *outcount, int indices[])
{
int i, idx, flag;
#define COMPLETION_CASES (8)
switch (rand_range(rndnum, 0, COMPLETION_CASES)) {
- case 0:
- MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
- *outcount = numreqs;
- for (i = 0; i < numreqs; ++i) {
- indices[i] = i;
- }
- break;
-
- case 1:
- MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
- if (*outcount == MPI_UNDEFINED) {
- *outcount = 0;
- }
- break;
-
- case 2:
- MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
- if (*outcount == MPI_UNDEFINED) {
- *outcount = 0;
- }
- break;
-
- case 3:
- MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
- if (idx == MPI_UNDEFINED) {
- *outcount = 0;
- }
- else {
- *outcount = 1;
- indices[0] = idx;
- }
- break;
+ case 0:
+ MPI_Waitall(numreqs, reqs, MPI_STATUSES_IGNORE);
+ *outcount = numreqs;
+ for (i = 0; i < numreqs; ++i) {
+ indices[i] = i;
+ }
+ break;
- case 4:
- MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
- if (idx == MPI_UNDEFINED) {
- *outcount = 0;
- }
- else {
- *outcount = 1;
- indices[0] = idx;
- }
- break;
+ case 1:
+ MPI_Testsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
+ if (*outcount == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ break;
- case 5:
- MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
- if (flag) {
- *outcount = numreqs;
- for (i = 0; i < numreqs; ++i) {
- indices[i] = i;
- }
- }
- else {
- *outcount = 0;
- }
- break;
+ case 2:
+ MPI_Waitsome(numreqs, reqs, outcount, indices, MPI_STATUS_IGNORE);
+ if (*outcount == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ break;
- case 6:
- /* select a new random index and wait on it */
- rndnum = gen_prn(rndnum);
- idx = rand_range(rndnum, 0, numreqs);
- MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
+ case 3:
+ MPI_Waitany(numreqs, reqs, &idx, MPI_STATUS_IGNORE);
+ if (idx == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ else {
*outcount = 1;
indices[0] = idx;
- break;
-
- case 7:
- /* select a new random index and wait on it */
- rndnum = gen_prn(rndnum);
- idx = rand_range(rndnum, 0, numreqs);
- MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
- *outcount = (flag ? 1 : 0);
+ }
+ break;
+
+ case 4:
+ MPI_Testany(numreqs, reqs, &idx, &flag, MPI_STATUS_IGNORE);
+ if (idx == MPI_UNDEFINED) {
+ *outcount = 0;
+ }
+ else {
+ *outcount = 1;
indices[0] = idx;
- break;
+ }
+ break;
- default:
- assert(0);
- break;
+ case 5:
+ MPI_Testall(numreqs, reqs, &flag, MPI_STATUSES_IGNORE);
+ if (flag) {
+ *outcount = numreqs;
+ for (i = 0; i < numreqs; ++i) {
+ indices[i] = i;
+ }
+ }
+ else {
+ *outcount = 0;
+ }
+ break;
+
+ case 6:
+ /* select a new random index and wait on it */
+ rndnum = gen_prn(rndnum);
+ idx = rand_range(rndnum, 0, numreqs);
+ MPI_Wait(&reqs[idx], MPI_STATUS_IGNORE);
+ *outcount = 1;
+ indices[0] = idx;
+ break;
+
+ case 7:
+ /* select a new random index and wait on it */
+ rndnum = gen_prn(rndnum);
+ idx = rand_range(rndnum, 0, numreqs);
+ MPI_Test(&reqs[idx], &flag, MPI_STATUS_IGNORE);
+ *outcount = (flag ? 1 : 0);
+ indices[0] = idx;
+ break;
+
+ default:
+ assert(0);
+ break;
}
#undef COMPLETION_CASES
}
/* "randomly" and infrequently introduce some jitter into the system */
if (0 == rand_range(gen_prn(complete_seq + wrank), 0, CHANCE_OF_SLEEP)) {
- usleep(JITTER_DELAY); /* take a short nap */
+ usleep(JITTER_DELAY); /* take a short nap */
}
}
return 0;
}
-
+++ /dev/null
-/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
-/*
- * (C) 2010 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-/* This is a very weak sanity test that all nonblocking collectives specified by
- * MPI-3 are present in the library and take arguments as expected. This test
- * does not check for progress, matching issues, or sensible output buffer
- * values. */
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include "mpitest.h"
-
-#define NUM_INTS (2)
-
-#define my_assert(cond_) \
- do { \
- if (!(cond_)) { \
- fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
- MPI_Abort(MPI_COMM_WORLD, 1); \
- } \
- } while (0)
-
-int main(int argc, char **argv)
-{
- int errs = 0;
- int i;
- int rank, size;
- int *sbuf = NULL;
- int *rbuf = NULL;
- int *scounts = NULL;
- int *rcounts = NULL;
- int *sdispls = NULL;
- int *rdispls = NULL;
- int *types = NULL;
- MPI_Comm comm;
- MPI_Request req;
-
- /* intentionally not using MTest_Init/MTest_Finalize in order to make it
- * easy to take this test and use it as an NBC sanity test outside of the
- * MPICH test suite */
- MPI_Init(&argc, &argv);
-
- comm = MPI_COMM_WORLD;
-
- MPI_Comm_size(comm, &size);
- MPI_Comm_rank(comm, &rank);
-
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
-
- /* enough space for every process to contribute at least NUM_INTS ints to any
- * collective operation */
- sbuf = malloc(NUM_INTS*size*sizeof(int));
- my_assert(sbuf);
- rbuf = malloc(NUM_INTS*size*sizeof(int));
- my_assert(rbuf);
- scounts = malloc(size*sizeof(int));
- my_assert(scounts);
- rcounts = malloc(size*sizeof(int));
- my_assert(rcounts);
- sdispls = malloc(size*sizeof(int));
- my_assert(sdispls);
- rdispls = malloc(size*sizeof(int));
- my_assert(rdispls);
- types = malloc(size*sizeof(int));
- my_assert(types);
-
- for (i = 0; i < size; ++i) {
- sbuf[2*i] = i;
- sbuf[2*i+1] = i;
- rbuf[2*i] = i;
- rbuf[2*i+1] = i;
- scounts[i] = NUM_INTS;
- rcounts[i] = NUM_INTS;
- sdispls[i] = i * NUM_INTS;
- rdispls[i] = i * NUM_INTS;
- types[i] = MPI_INT;
- }
-
- if (rank == 0 && MPI_SUCCESS ==
- MPI_Igather(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
- errs++;
-
- if (rank == 0 && MPI_SUCCESS ==
- MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, sbuf, rcounts, rdispls, MPI_INT, 0, comm, &req))
- errs++;
-
- if (rank == 0 && MPI_SUCCESS ==
- MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
- errs++;
-
- if (rank == 0 && MPI_SUCCESS ==
- MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Iallgather(&sbuf[rank], 1, MPI_INT, sbuf, 1, MPI_INT, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Iallgatherv(&sbuf[rank * rcounts[rank]], rcounts[rank], MPI_INT, sbuf, rcounts, rdispls, MPI_INT, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Ialltoall(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Ialltoallv(sbuf, scounts, sdispls, MPI_INT, sbuf, scounts, sdispls, MPI_INT, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Ialltoallw(sbuf, scounts, sdispls, types, sbuf, scounts, sdispls, types, comm, &req))
- errs++;
-
- if (rank == 0 && MPI_SUCCESS ==
- MPI_Ireduce(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Iallreduce(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Ireduce_scatter(sbuf, sbuf, rcounts, MPI_INT, MPI_SUM, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Ireduce_scatter_block(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Iscan(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
- errs++;
-
- if (MPI_SUCCESS ==
- MPI_Iexscan(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
- errs++;
-
- if (sbuf) free(sbuf);
- if (rbuf) free(rbuf);
- if (scounts) free(scounts);
- if (rcounts) free(rcounts);
- if (sdispls) free(sdispls);
- if (rdispls) free(rdispls);
-
- if (rank == 0) {
- if (errs)
- fprintf(stderr, "Found %d errors\n", errs);
- else
- printf(" No errors\n");
- }
- MPI_Finalize();
- return 0;
-}
-
}
*/
-static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype * datatype)
{
int i;
- int *invec_int = (int *)invec;
- int *inoutvec_int = (int *)inoutvec;
+ int *invec_int = (int *) invec;
+ int *inoutvec_int = (int *) inoutvec;
if (*datatype != MPI_INT) {
++errs;
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
MPI_Op c_uop = MPI_OP_NULL;
MPI_Op nc_uop = MPI_OP_NULL;
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
int is_commutative = 0;
-#endif
MTest_Init(&argc, &argv);
/* make sure that user-define ops work too */
- MPI_Op_create(&user_op, 1/*commute*/, &c_uop);
- MPI_Op_create(&user_op, 0/*!commute*/, &nc_uop);
+ MPI_Op_create(&user_op, 1 /*commute */ , &c_uop);
+ MPI_Op_create(&user_op, 0 /*!commute */ , &nc_uop);
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
/* this function was added in MPI-2.2 */
MPI_Finalize();
return 0;
}
-
*/
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rc;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
long linbuf[3], loutbuf[3];
unsigned long ulinbuf[3], uloutbuf[3];
unsigned uinbuf[3], uoutbuf[3];
-
- MTest_Init( &argc, &argv );
+
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- /* Set errors return so that we can provide better information
- should a routine reject one of the operand/datatype pairs */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* Set errors return so that we can provide better information
+ * should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 0xff;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0) ? 0xff : 0xf0;
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != (char)0xff) {
- errs++;
- fprintf( stderr, "char BAND(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char BAND(0) test failed\n" );
- }
- if (coutbuf[2] != (char)0xf0 && size > 1) {
- errs++;
- fprintf( stderr, "char BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != (char) 0xff) {
+ errs++;
+ fprintf(stderr, "char BAND(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char BAND(0) test failed\n");
+ }
+ if (coutbuf[2] != (char) 0xf0 && size > 1) {
+ errs++;
+ fprintf(stderr, "char BAND(>) test failed\n");
+ }
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 0xff;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0) ? 0xff : 0xf0;
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_SIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_SIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (scoutbuf[0] != (signed char)0xff) {
- errs++;
- fprintf( stderr, "signed char BAND(1) test failed\n" );
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char BAND(0) test failed\n" );
- }
- if (scoutbuf[2] != (signed char)0xf0 && size > 1) {
- errs++;
- fprintf( stderr, "signed char BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (scoutbuf[0] != (signed char) 0xff) {
+ errs++;
+ fprintf(stderr, "signed char BAND(1) test failed\n");
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char BAND(0) test failed\n");
+ }
+ if (scoutbuf[2] != (signed char) 0xf0 && size > 1) {
+ errs++;
+ fprintf(stderr, "signed char BAND(>) test failed\n");
+ }
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 0xff;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0) ? 0xff : 0xf0;
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_UNSIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (ucoutbuf[0] != 0xff) {
- errs++;
- fprintf( stderr, "unsigned char BAND(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char BAND(0) test failed\n" );
- }
- if (ucoutbuf[2] != 0xf0 && size > 1) {
- errs++;
- fprintf( stderr, "unsigned char BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (ucoutbuf[0] != 0xff) {
+ errs++;
+ fprintf(stderr, "unsigned char BAND(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char BAND(0) test failed\n");
+ }
+ if (ucoutbuf[2] != 0xf0 && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned char BAND(>) test failed\n");
+ }
+ }
}
/* bytes */
- MTestPrintfMsg( 10, "Reduce of MPI_BYTE\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_BYTE\n");
cinbuf[0] = 0xff;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0) ? 0xff : 0xf0;
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_BYTE, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_BYTE, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_BYTE", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_BYTE", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != (char)0xff) {
- errs++;
- fprintf( stderr, "byte BAND(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "byte BAND(0) test failed\n" );
- }
- if (coutbuf[2] != (char)0xf0 && size > 1) {
- errs++;
- fprintf( stderr, "byte BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != (char) 0xff) {
+ errs++;
+ fprintf(stderr, "byte BAND(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "byte BAND(0) test failed\n");
+ }
+ if (coutbuf[2] != (char) 0xf0 && size > 1) {
+ errs++;
+ fprintf(stderr, "byte BAND(>) test failed\n");
+ }
+ }
}
/* short */
- MTestPrintfMsg( 10, "Reduce of MPI_SHORT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SHORT\n");
sinbuf[0] = 0xffff;
sinbuf[1] = 0;
sinbuf[2] = (rank > 0) ? 0xffff : 0xf0f0;
soutbuf[0] = 0;
soutbuf[1] = 1;
soutbuf[2] = 1;
- rc = MPI_Reduce( sinbuf, soutbuf, 3, MPI_SHORT, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(sinbuf, soutbuf, 3, MPI_SHORT, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_SHORT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_SHORT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (soutbuf[0] != (short)0xffff) {
- errs++;
- fprintf( stderr, "short BAND(1) test failed\n" );
- }
- if (soutbuf[1]) {
- errs++;
- fprintf( stderr, "short BAND(0) test failed\n" );
- }
- if (soutbuf[2] != (short)0xf0f0 && size > 1) {
- errs++;
- fprintf( stderr, "short BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (soutbuf[0] != (short) 0xffff) {
+ errs++;
+ fprintf(stderr, "short BAND(1) test failed\n");
+ }
+ if (soutbuf[1]) {
+ errs++;
+ fprintf(stderr, "short BAND(0) test failed\n");
+ }
+ if (soutbuf[2] != (short) 0xf0f0 && size > 1) {
+ errs++;
+ fprintf(stderr, "short BAND(>) test failed\n");
+ }
+ }
}
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_SHORT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_SHORT\n");
/* unsigned short */
usinbuf[0] = 0xffff;
usinbuf[1] = 0;
usoutbuf[0] = 0;
usoutbuf[1] = 1;
usoutbuf[2] = 1;
- rc = MPI_Reduce( usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED_SHORT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_UNSIGNED_SHORT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (usoutbuf[0] != 0xffff) {
- errs++;
- fprintf( stderr, "short BAND(1) test failed\n" );
- }
- if (usoutbuf[1]) {
- errs++;
- fprintf( stderr, "short BAND(0) test failed\n" );
- }
- if (usoutbuf[2] != 0xf0f0 && size > 1) {
- errs++;
- fprintf( stderr, "short BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (usoutbuf[0] != 0xffff) {
+ errs++;
+ fprintf(stderr, "short BAND(1) test failed\n");
+ }
+ if (usoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "short BAND(0) test failed\n");
+ }
+ if (usoutbuf[2] != 0xf0f0 && size > 1) {
+ errs++;
+ fprintf(stderr, "short BAND(>) test failed\n");
+ }
+ }
}
/* unsigned */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED\n");
uinbuf[0] = 0xffffffff;
uinbuf[1] = 0;
uinbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
uoutbuf[0] = 0;
uoutbuf[1] = 1;
uoutbuf[2] = 1;
- rc = MPI_Reduce( uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_UNSIGNED", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (uoutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "unsigned BAND(1) test failed\n" );
- }
- if (uoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned BAND(0) test failed\n" );
- }
- if (uoutbuf[2] != 0xf0f0f0f0 && size > 1) {
- errs++;
- fprintf( stderr, "unsigned BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (uoutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "unsigned BAND(1) test failed\n");
+ }
+ if (uoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned BAND(0) test failed\n");
+ }
+ if (uoutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned BAND(>) test failed\n");
+ }
+ }
}
/* long */
- MTestPrintfMsg( 10, "Reduce of MPI_LONG\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_LONG\n");
linbuf[0] = 0xffffffff;
linbuf[1] = 0;
linbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
loutbuf[0] = 0;
loutbuf[1] = 1;
loutbuf[2] = 1;
- rc = MPI_Reduce( linbuf, loutbuf, 3, MPI_LONG, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(linbuf, loutbuf, 3, MPI_LONG, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_LONG", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_LONG", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (loutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "long BAND(1) test failed\n" );
- }
- if (loutbuf[1]) {
- errs++;
- fprintf( stderr, "long BAND(0) test failed\n" );
- }
- if (loutbuf[2] != 0xf0f0f0f0 && size > 1) {
- errs++;
- fprintf( stderr, "long BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (loutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "long BAND(1) test failed\n");
+ }
+ if (loutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long BAND(0) test failed\n");
+ }
+ if (loutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf(stderr, "long BAND(>) test failed\n");
+ }
+ }
}
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_LONG\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_LONG\n");
/* unsigned long */
ulinbuf[0] = 0xffffffff;
ulinbuf[1] = 0;
uloutbuf[0] = 0;
uloutbuf[1] = 1;
uloutbuf[2] = 1;
- rc = MPI_Reduce( ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BAND, 0, comm );
+ rc = MPI_Reduce(ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_UNSIGNED_LONG", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BAND and MPI_UNSIGNED_LONG", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (uloutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "unsigned long BAND(1) test failed\n" );
- }
- if (uloutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned long BAND(0) test failed\n" );
- }
- if (uloutbuf[2] != 0xf0f0f0f0 && size > 1) {
- errs++;
- fprintf( stderr, "unsigned long BAND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (uloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "unsigned long BAND(1) test failed\n");
+ }
+ if (uloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned long BAND(0) test failed\n");
+ }
+ if (uloutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned long BAND(>) test failed\n");
+ }
+ }
}
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 0xffffffff;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 0xffffffff;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0) ? 0xffffffff : 0xf0f0f0f0;
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BAND, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_BAND and MPI_LONG_LONG", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (lloutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "long long BAND(1) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long BAND(0) test failed\n" );
- }
- if (lloutbuf[2] != 0xf0f0f0f0 && size > 1) {
- errs++;
- fprintf( stderr, "long long BAND(>) test failed\n" );
- }
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ rc = MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BAND, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_BAND and MPI_LONG_LONG", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "long long BAND(1) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long BAND(0) test failed\n");
+ }
+ if (lloutbuf[2] != 0xf0f0f0f0 && size > 1) {
+ errs++;
+ fprintf(stderr, "long long BAND(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif
- MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( errs );
+ MPI_Errhandler_set(comm, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rc;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
unsigned long ulinbuf[3], uloutbuf[3];
unsigned uinbuf[3], uoutbuf[3];
int iinbuf[3], ioutbuf[3];
-
- MTest_Init( &argc, &argv );
+
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- /* Set errors return so that we can provide better information
- should a routine reject one of the operand/datatype pairs */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* Set errors return so that we can provide better information
+ * should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 0xff;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != (char)0xff) {
- errs++;
- fprintf( stderr, "char BOR(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char BOR(0) test failed\n" );
- }
- if (coutbuf[2] != (char)0xff && size > 1) {
- errs++;
- fprintf( stderr, "char BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != (char) 0xff) {
+ errs++;
+ fprintf(stderr, "char BOR(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char BOR(0) test failed\n");
+ }
+ if (coutbuf[2] != (char) 0xff && size > 1) {
+ errs++;
+ fprintf(stderr, "char BOR(>) test failed\n");
+ }
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 0xff;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_SIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_SIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (scoutbuf[0] != (signed char)0xff) {
- errs++;
- fprintf( stderr, "signed char BOR(1) test failed\n" );
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char BOR(0) test failed\n" );
- }
- if (scoutbuf[2] != (signed char)0xff && size > 1) {
- errs++;
- fprintf( stderr, "signed char BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (scoutbuf[0] != (signed char) 0xff) {
+ errs++;
+ fprintf(stderr, "signed char BOR(1) test failed\n");
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char BOR(0) test failed\n");
+ }
+ if (scoutbuf[2] != (signed char) 0xff && size > 1) {
+ errs++;
+ fprintf(stderr, "signed char BOR(>) test failed\n");
+ }
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 0xff;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_UNSIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (ucoutbuf[0] != 0xff) {
- errs++;
- fprintf( stderr, "unsigned char BOR(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char BOR(0) test failed\n" );
- }
- if (ucoutbuf[2] != 0xff && size > 1) {
- errs++;
- fprintf( stderr, "unsigned char BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (ucoutbuf[0] != 0xff) {
+ errs++;
+ fprintf(stderr, "unsigned char BOR(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char BOR(0) test failed\n");
+ }
+ if (ucoutbuf[2] != 0xff && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned char BOR(>) test failed\n");
+ }
+ }
}
/* bytes */
- MTestPrintfMsg( 10, "Reduce of MPI_BYTE\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_BYTE\n");
cinbuf[0] = 0xff;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_BYTE, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_BYTE, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_BYTE", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_BYTE", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != (char)0xff) {
- errs++;
- fprintf( stderr, "byte BOR(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "byte BOR(0) test failed\n" );
- }
- if (coutbuf[2] != (char)0xff && size > 1) {
- errs++;
- fprintf( stderr, "byte BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != (char) 0xff) {
+ errs++;
+ fprintf(stderr, "byte BOR(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "byte BOR(0) test failed\n");
+ }
+ if (coutbuf[2] != (char) 0xff && size > 1) {
+ errs++;
+ fprintf(stderr, "byte BOR(>) test failed\n");
+ }
+ }
}
/* short */
- MTestPrintfMsg( 10, "Reduce of MPI_SHORT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SHORT\n");
sinbuf[0] = 0xffff;
sinbuf[1] = 0;
sinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
soutbuf[0] = 0;
soutbuf[1] = 1;
soutbuf[2] = 1;
- rc = MPI_Reduce( sinbuf, soutbuf, 3, MPI_SHORT, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(sinbuf, soutbuf, 3, MPI_SHORT, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_SHORT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_SHORT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (soutbuf[0] != (short)0xffff) {
- errs++;
- fprintf( stderr, "short BOR(1) test failed\n" );
- }
- if (soutbuf[1]) {
- errs++;
- fprintf( stderr, "short BOR(0) test failed\n" );
- }
- if (soutbuf[2] != (short)0xffff && size > 1) {
- errs++;
- fprintf( stderr, "short BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (soutbuf[0] != (short) 0xffff) {
+ errs++;
+ fprintf(stderr, "short BOR(1) test failed\n");
+ }
+ if (soutbuf[1]) {
+ errs++;
+ fprintf(stderr, "short BOR(0) test failed\n");
+ }
+ if (soutbuf[2] != (short) 0xffff && size > 1) {
+ errs++;
+ fprintf(stderr, "short BOR(>) test failed\n");
+ }
+ }
}
/* unsigned short */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_SHORT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_SHORT\n");
usinbuf[0] = 0xffff;
usinbuf[1] = 0;
usinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
usoutbuf[0] = 0;
usoutbuf[1] = 1;
usoutbuf[2] = 1;
- rc = MPI_Reduce( usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED_SHORT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_UNSIGNED_SHORT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (usoutbuf[0] != 0xffff) {
- errs++;
- fprintf( stderr, "short BOR(1) test failed\n" );
- }
- if (usoutbuf[1]) {
- errs++;
- fprintf( stderr, "short BOR(0) test failed\n" );
- }
- if (usoutbuf[2] != 0xffff && size > 1) {
- errs++;
- fprintf( stderr, "short BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (usoutbuf[0] != 0xffff) {
+ errs++;
+ fprintf(stderr, "short BOR(1) test failed\n");
+ }
+ if (usoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "short BOR(0) test failed\n");
+ }
+ if (usoutbuf[2] != 0xffff && size > 1) {
+ errs++;
+ fprintf(stderr, "short BOR(>) test failed\n");
+ }
+ }
}
/* unsigned */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED\n");
uinbuf[0] = 0xffffffff;
uinbuf[1] = 0;
uinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
uoutbuf[0] = 0;
uoutbuf[1] = 1;
uoutbuf[2] = 1;
- rc = MPI_Reduce( uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_UNSIGNED", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (uoutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "unsigned BOR(1) test failed\n" );
- }
- if (uoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned BOR(0) test failed\n" );
- }
- if (uoutbuf[2] != 0xffffffff && size > 1) {
- errs++;
- fprintf( stderr, "unsigned BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (uoutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "unsigned BOR(1) test failed\n");
+ }
+ if (uoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned BOR(0) test failed\n");
+ }
+ if (uoutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned BOR(>) test failed\n");
+ }
+ }
}
/* int */
- MTestPrintfMsg( 10, "Reduce of MPI_INT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_INT\n");
iinbuf[0] = 0xffffffff;
iinbuf[1] = 0;
iinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
ioutbuf[0] = 0;
ioutbuf[1] = 1;
ioutbuf[2] = 1;
- rc = MPI_Reduce( iinbuf, ioutbuf, 3, MPI_INT, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(iinbuf, ioutbuf, 3, MPI_INT, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_INT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_INT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (ioutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "int BOR(1) test failed\n" );
- }
- if (ioutbuf[1]) {
- errs++;
- fprintf( stderr, "int BOR(0) test failed\n" );
- }
- if (ioutbuf[2] != 0xffffffff && size > 1) {
- errs++;
- fprintf( stderr, "int BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (ioutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "int BOR(1) test failed\n");
+ }
+ if (ioutbuf[1]) {
+ errs++;
+ fprintf(stderr, "int BOR(0) test failed\n");
+ }
+ if (ioutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf(stderr, "int BOR(>) test failed\n");
+ }
+ }
}
/* long */
- MTestPrintfMsg( 10, "Reduce of MPI_LONG\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_LONG\n");
linbuf[0] = 0xffffffff;
linbuf[1] = 0;
linbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
loutbuf[0] = 0;
loutbuf[1] = 1;
loutbuf[2] = 1;
- rc = MPI_Reduce( linbuf, loutbuf, 3, MPI_LONG, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(linbuf, loutbuf, 3, MPI_LONG, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_LONG", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_LONG", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (loutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "long BOR(1) test failed\n" );
- }
- if (loutbuf[1]) {
- errs++;
- fprintf( stderr, "long BOR(0) test failed\n" );
- }
- if (loutbuf[2] != 0xffffffff && size > 1) {
- errs++;
- fprintf( stderr, "long BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (loutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "long BOR(1) test failed\n");
+ }
+ if (loutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long BOR(0) test failed\n");
+ }
+ if (loutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf(stderr, "long BOR(>) test failed\n");
+ }
+ }
}
/* unsigned long */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_LONG\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_LONG\n");
ulinbuf[0] = 0xffffffff;
ulinbuf[1] = 0;
ulinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
uloutbuf[0] = 0;
uloutbuf[1] = 1;
uloutbuf[2] = 1;
- rc = MPI_Reduce( ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BOR, 0, comm );
+ rc = MPI_Reduce(ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_UNSIGNED_LONG", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BOR and MPI_UNSIGNED_LONG", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (uloutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "unsigned long BOR(1) test failed\n" );
- }
- if (uloutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned long BOR(0) test failed\n" );
- }
- if (uloutbuf[2] != 0xffffffff && size > 1) {
- errs++;
- fprintf( stderr, "unsigned long BOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (uloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "unsigned long BOR(1) test failed\n");
+ }
+ if (uloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned long BOR(0) test failed\n");
+ }
+ if (uloutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned long BOR(>) test failed\n");
+ }
+ }
}
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 0xffffffff;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 0xffffffff;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BOR, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_BOR and MPI_LONG_LONG", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (lloutbuf[0] != 0xffffffff) {
- errs++;
- fprintf( stderr, "long long BOR(1) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long BOR(0) test failed\n" );
- }
- if (lloutbuf[2] != 0xffffffff && size > 1) {
- errs++;
- fprintf( stderr, "long long BOR(>) test failed\n" );
- }
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ rc = MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BOR, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_BOR and MPI_LONG_LONG", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != 0xffffffff) {
+ errs++;
+ fprintf(stderr, "long long BOR(1) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long BOR(0) test failed\n");
+ }
+ if (lloutbuf[2] != 0xffffffff && size > 1) {
+ errs++;
+ fprintf(stderr, "long long BOR(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif
- MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( errs );
+ MPI_Errhandler_set(comm, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rc;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
unsigned long ulinbuf[3], uloutbuf[3];
unsigned uinbuf[3], uoutbuf[3];
int iinbuf[3], ioutbuf[3];
-
- MTest_Init( &argc, &argv );
+
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- /* Set errors return so that we can provide better information
- should a routine reject one of the operand/datatype pairs */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* Set errors return so that we can provide better information
+ * should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 0xff;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
coutbuf[0] = 0xf;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != ((size % 2) ? (char)0xff : (char)0) ) {
- errs++;
- fprintf( stderr, "char BXOR(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char BXOR(0) test failed\n" );
- }
- if (coutbuf[2] != ((size % 2) ? (char)0xc3 : (char)0xff)) {
- errs++;
- fprintf( stderr, "char BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != ((size % 2) ? (char) 0xff : (char) 0)) {
+ errs++;
+ fprintf(stderr, "char BXOR(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char BXOR(0) test failed\n");
+ }
+ if (coutbuf[2] != ((size % 2) ? (char) 0xc3 : (char) 0xff)) {
+ errs++;
+ fprintf(stderr, "char BXOR(>) test failed\n");
+ }
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 0xff;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
scoutbuf[0] = 0xf;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_SIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_SIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (scoutbuf[0] != ((size % 2) ? (signed char)0xff : (signed char)0) ) {
- errs++;
- fprintf( stderr, "signed char BXOR(1) test failed\n" );
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char BXOR(0) test failed\n" );
- }
- if (scoutbuf[2] != ((size % 2) ? (signed char)0xc3 : (signed char)0xff)) {
- errs++;
- fprintf( stderr, "signed char BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (scoutbuf[0] != ((size % 2) ? (signed char) 0xff : (signed char) 0)) {
+ errs++;
+ fprintf(stderr, "signed char BXOR(1) test failed\n");
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char BXOR(0) test failed\n");
+ }
+ if (scoutbuf[2] != ((size % 2) ? (signed char) 0xc3 : (signed char) 0xff)) {
+ errs++;
+ fprintf(stderr, "signed char BXOR(>) test failed\n");
+ }
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 0xff;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_UNSIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (ucoutbuf[0] != ((size % 2) ? 0xff : 0)) {
- errs++;
- fprintf( stderr, "unsigned char BXOR(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char BXOR(0) test failed\n" );
- }
- if (ucoutbuf[2] != ((size % 2) ? (unsigned char)0xc3 : (unsigned char)0xff)) {
- errs++;
- fprintf( stderr, "unsigned char BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (ucoutbuf[0] != ((size % 2) ? 0xff : 0)) {
+ errs++;
+ fprintf(stderr, "unsigned char BXOR(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char BXOR(0) test failed\n");
+ }
+ if (ucoutbuf[2] != ((size % 2) ? (unsigned char) 0xc3 : (unsigned char) 0xff)) {
+ errs++;
+ fprintf(stderr, "unsigned char BXOR(>) test failed\n");
+ }
+ }
}
/* bytes */
- MTestPrintfMsg( 10, "Reduce of MPI_BYTE\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_BYTE\n");
cinbuf[0] = 0xff;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0) ? 0x3c : 0xc3;
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_BYTE, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_BYTE, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_BYTE", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_BYTE", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != ((size % 2) ? (char)0xff : 0)) {
- errs++;
- fprintf( stderr, "byte BXOR(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "byte BXOR(0) test failed\n" );
- }
- if (coutbuf[2] != ((size % 2) ? (char)0xc3 : (char)0xff)) {
- errs++;
- fprintf( stderr, "byte BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != ((size % 2) ? (char) 0xff : 0)) {
+ errs++;
+ fprintf(stderr, "byte BXOR(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "byte BXOR(0) test failed\n");
+ }
+ if (coutbuf[2] != ((size % 2) ? (char) 0xc3 : (char) 0xff)) {
+ errs++;
+ fprintf(stderr, "byte BXOR(>) test failed\n");
+ }
+ }
}
/* short */
- MTestPrintfMsg( 10, "Reduce of MPI_SHORT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SHORT\n");
sinbuf[0] = 0xffff;
sinbuf[1] = 0;
sinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
soutbuf[0] = 0;
soutbuf[1] = 1;
soutbuf[2] = 1;
- rc = MPI_Reduce( sinbuf, soutbuf, 3, MPI_SHORT, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(sinbuf, soutbuf, 3, MPI_SHORT, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_SHORT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_SHORT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (soutbuf[0] != ((size % 2) ? (short)0xffff : 0)) {
- errs++;
- fprintf( stderr, "short BXOR(1) test failed\n" );
- }
- if (soutbuf[1]) {
- errs++;
- fprintf( stderr, "short BXOR(0) test failed\n" );
- }
- if (soutbuf[2] != ((size % 2) ? (short)0xc3c3 : (short)0xffff)) {
- errs++;
- fprintf( stderr, "short BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (soutbuf[0] != ((size % 2) ? (short) 0xffff : 0)) {
+ errs++;
+ fprintf(stderr, "short BXOR(1) test failed\n");
+ }
+ if (soutbuf[1]) {
+ errs++;
+ fprintf(stderr, "short BXOR(0) test failed\n");
+ }
+ if (soutbuf[2] != ((size % 2) ? (short) 0xc3c3 : (short) 0xffff)) {
+ errs++;
+ fprintf(stderr, "short BXOR(>) test failed\n");
+ }
+ }
}
/* unsigned short */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_SHORT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_SHORT\n");
usinbuf[0] = 0xffff;
usinbuf[1] = 0;
usinbuf[2] = (rank > 0) ? 0x3c3c : 0xc3c3;
usoutbuf[0] = 0;
usoutbuf[1] = 1;
usoutbuf[2] = 1;
- rc = MPI_Reduce( usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(usinbuf, usoutbuf, 3, MPI_UNSIGNED_SHORT, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED_SHORT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_UNSIGNED_SHORT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (usoutbuf[0] != ((size % 2) ? 0xffff : 0)) {
- errs++;
- fprintf( stderr, "short BXOR(1) test failed\n" );
- }
- if (usoutbuf[1]) {
- errs++;
- fprintf( stderr, "short BXOR(0) test failed\n" );
- }
- if (usoutbuf[2] != ((size % 2) ? 0xc3c3 : 0xffff)) {
- errs++;
- fprintf( stderr, "short BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (usoutbuf[0] != ((size % 2) ? 0xffff : 0)) {
+ errs++;
+ fprintf(stderr, "short BXOR(1) test failed\n");
+ }
+ if (usoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "short BXOR(0) test failed\n");
+ }
+ if (usoutbuf[2] != ((size % 2) ? 0xc3c3 : 0xffff)) {
+ errs++;
+ fprintf(stderr, "short BXOR(>) test failed\n");
+ }
+ }
}
/* unsigned */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED\n");
uinbuf[0] = 0xffffffff;
uinbuf[1] = 0;
uinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
uoutbuf[0] = 0;
uoutbuf[1] = 1;
uoutbuf[2] = 1;
- rc = MPI_Reduce( uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(uinbuf, uoutbuf, 3, MPI_UNSIGNED, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_UNSIGNED", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (uoutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
- errs++;
- fprintf( stderr, "unsigned BXOR(1) test failed\n" );
- }
- if (uoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned BXOR(0) test failed\n" );
- }
- if (uoutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
- errs++;
- fprintf( stderr, "unsigned BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (uoutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf(stderr, "unsigned BXOR(1) test failed\n");
+ }
+ if (uoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned BXOR(0) test failed\n");
+ }
+ if (uoutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf(stderr, "unsigned BXOR(>) test failed\n");
+ }
+ }
}
/* int */
- MTestPrintfMsg( 10, "Reduce of MPI_INT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_INT\n");
iinbuf[0] = 0xffffffff;
iinbuf[1] = 0;
iinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
ioutbuf[0] = 0;
ioutbuf[1] = 1;
ioutbuf[2] = 1;
- rc = MPI_Reduce( iinbuf, ioutbuf, 3, MPI_INT, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(iinbuf, ioutbuf, 3, MPI_INT, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_INT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_INT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (ioutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
- errs++;
- fprintf( stderr, "int BXOR(1) test failed\n" );
- }
- if (ioutbuf[1]) {
- errs++;
- fprintf( stderr, "int BXOR(0) test failed\n" );
- }
- if (ioutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
- errs++;
- fprintf( stderr, "int BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (ioutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf(stderr, "int BXOR(1) test failed\n");
+ }
+ if (ioutbuf[1]) {
+ errs++;
+ fprintf(stderr, "int BXOR(0) test failed\n");
+ }
+ if (ioutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf(stderr, "int BXOR(>) test failed\n");
+ }
+ }
}
/* long */
- MTestPrintfMsg( 10, "Reduce of MPI_LONG\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_LONG\n");
linbuf[0] = 0xffffffff;
linbuf[1] = 0;
linbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
loutbuf[0] = 0;
loutbuf[1] = 1;
loutbuf[2] = 1;
- rc = MPI_Reduce( linbuf, loutbuf, 3, MPI_LONG, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(linbuf, loutbuf, 3, MPI_LONG, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_LONG", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_LONG", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (loutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
- errs++;
- fprintf( stderr, "long BXOR(1) test failed\n" );
- }
- if (loutbuf[1]) {
- errs++;
- fprintf( stderr, "long BXOR(0) test failed\n" );
- }
- if (loutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
- errs++;
- fprintf( stderr, "long BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (loutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf(stderr, "long BXOR(1) test failed\n");
+ }
+ if (loutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long BXOR(0) test failed\n");
+ }
+ if (loutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf(stderr, "long BXOR(>) test failed\n");
+ }
+ }
}
/* unsigned long */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_LONG\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_LONG\n");
ulinbuf[0] = 0xffffffff;
ulinbuf[1] = 0;
ulinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
uloutbuf[0] = 0;
uloutbuf[1] = 1;
uloutbuf[2] = 1;
- rc = MPI_Reduce( ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BXOR, 0, comm );
+ rc = MPI_Reduce(ulinbuf, uloutbuf, 3, MPI_UNSIGNED_LONG, MPI_BXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_UNSIGNED_LONG", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_BXOR and MPI_UNSIGNED_LONG", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (uloutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
- errs++;
- fprintf( stderr, "unsigned long BXOR(1) test failed\n" );
- }
- if (uloutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned long BXOR(0) test failed\n" );
- }
- if (uloutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
- errs++;
- fprintf( stderr, "unsigned long BXOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (uloutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf(stderr, "unsigned long BXOR(1) test failed\n");
+ }
+ if (uloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned long BXOR(0) test failed\n");
+ }
+ if (uloutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf(stderr, "unsigned long BXOR(>) test failed\n");
+ }
+ }
}
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 0xffffffff;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 0xffffffff;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0) ? 0x3c3c3c3c : 0xc3c3c3c3;
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BXOR, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_BXOR and MPI_LONG_LONG", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (lloutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
- errs++;
- fprintf( stderr, "long long BXOR(1) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long BXOR(0) test failed\n" );
- }
- if (lloutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
- errs++;
- fprintf( stderr, "long long BXOR(>) test failed\n" );
- }
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ rc = MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_BXOR, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_BXOR and MPI_LONG_LONG", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != ((size % 2) ? 0xffffffff : 0)) {
+ errs++;
+ fprintf(stderr, "long long BXOR(1) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long BXOR(0) test failed\n");
+ }
+ if (lloutbuf[2] != ((size % 2) ? 0xc3c3c3c3 : 0xffffffff)) {
+ errs++;
+ fprintf(stderr, "long long BXOR(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif
- MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( errs );
+ MPI_Errhandler_set(comm, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rc;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
float finbuf[3], foutbuf[3];
double dinbuf[3], doutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- /* Set errors return so that we can provide better information
- should a routine reject one of the operand/datatype pairs */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* Set errors return so that we can provide better information
+ * should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 1;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0);
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_LAND, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_LAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LAND and MPI_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (!coutbuf[0]) {
- errs++;
- fprintf( stderr, "char AND(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char AND(0) test failed\n" );
- }
- if (coutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "char AND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!coutbuf[0]) {
+ errs++;
+ fprintf(stderr, "char AND(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char AND(0) test failed\n");
+ }
+ if (coutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "char AND(>) test failed\n");
+ }
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 1;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0);
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LAND, 0, comm );
+ rc = MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_SIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LAND and MPI_SIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (!scoutbuf[0]) {
- errs++;
- fprintf( stderr, "signed char AND(1) test failed\n" );
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char AND(0) test failed\n" );
- }
- if (scoutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "signed char AND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!scoutbuf[0]) {
+ errs++;
+ fprintf(stderr, "signed char AND(1) test failed\n");
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char AND(0) test failed\n");
+ }
+ if (scoutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "signed char AND(>) test failed\n");
+ }
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 1;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0);
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LAND, 0, comm );
+ rc = MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_UNSIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LAND and MPI_UNSIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (!ucoutbuf[0]) {
- errs++;
- fprintf( stderr, "unsigned char AND(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char AND(0) test failed\n" );
- }
- if (ucoutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "unsigned char AND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!ucoutbuf[0]) {
+ errs++;
+ fprintf(stderr, "unsigned char AND(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char AND(0) test failed\n");
+ }
+ if (ucoutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned char AND(>) test failed\n");
+ }
+ }
}
#ifndef USE_STRICT_MPI
/* float */
- MTestPrintfMsg( 10, "Reduce of MPI_FLOAT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_FLOAT\n");
finbuf[0] = 1;
finbuf[1] = 0;
finbuf[2] = (rank > 0);
foutbuf[0] = 0;
foutbuf[1] = 1;
foutbuf[2] = 1;
- rc = MPI_Reduce( finbuf, foutbuf, 3, MPI_FLOAT, MPI_LAND, 0, comm );
+ rc = MPI_Reduce(finbuf, foutbuf, 3, MPI_FLOAT, MPI_LAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_FLOAT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LAND and MPI_FLOAT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (!foutbuf[0]) {
- errs++;
- fprintf( stderr, "float AND(1) test failed\n" );
- }
- if (foutbuf[1]) {
- errs++;
- fprintf( stderr, "float AND(0) test failed\n" );
- }
- if (foutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "float AND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!foutbuf[0]) {
+ errs++;
+ fprintf(stderr, "float AND(1) test failed\n");
+ }
+ if (foutbuf[1]) {
+ errs++;
+ fprintf(stderr, "float AND(0) test failed\n");
+ }
+ if (foutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "float AND(>) test failed\n");
+ }
+ }
}
- MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_DOUBLE\n");
/* double */
dinbuf[0] = 1;
dinbuf[1] = 0;
doutbuf[0] = 0;
doutbuf[1] = 1;
doutbuf[2] = 1;
- rc = MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LAND, 0, comm );
+ rc = MPI_Reduce(dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LAND, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_DOUBLE", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LAND and MPI_DOUBLE", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (!doutbuf[0]) {
- errs++;
- fprintf( stderr, "double AND(1) test failed\n" );
- }
- if (doutbuf[1]) {
- errs++;
- fprintf( stderr, "double AND(0) test failed\n" );
- }
- if (doutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "double AND(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!doutbuf[0]) {
+ errs++;
+ fprintf(stderr, "double AND(1) test failed\n");
+ }
+ if (doutbuf[1]) {
+ errs++;
+ fprintf(stderr, "double AND(0) test failed\n");
+ }
+ if (doutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "double AND(>) test failed\n");
+ }
+ }
}
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = (rank > 0);
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ MTEST_VG_MEM_INIT(ldinbuf, 3 * sizeof(ldinbuf[0]));
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
- rc = MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LAND, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_LONG_DOUBLE", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (!ldoutbuf[0]) {
- errs++;
- fprintf( stderr, "long double AND(1) test failed\n" );
- }
- if (ldoutbuf[1]) {
- errs++;
- fprintf( stderr, "long double AND(0) test failed\n" );
- }
- if (ldoutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "long double AND(>) test failed\n" );
- }
- }
- }
- }
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_DOUBLE\n");
+ rc = MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LAND, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_LAND and MPI_LONG_DOUBLE", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!ldoutbuf[0]) {
+ errs++;
+ fprintf(stderr, "long double AND(1) test failed\n");
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long double AND(0) test failed\n");
+ }
+ if (ldoutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "long double AND(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif /* HAVE_LONG_DOUBLE */
#endif /* USE_STRICT_MPI */
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 1;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0);
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LAND, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_LAND and MPI_LONG_LONG", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (!lloutbuf[0]) {
- errs++;
- fprintf( stderr, "long long AND(1) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long AND(0) test failed\n" );
- }
- if (lloutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "long long AND(>) test failed\n" );
- }
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ rc = MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LAND, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_LAND and MPI_LONG_LONG", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (!lloutbuf[0]) {
+ errs++;
+ fprintf(stderr, "long long AND(1) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long AND(0) test failed\n");
+ }
+ if (lloutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "long long AND(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif
- MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( errs );
+ MPI_Errhandler_set(comm, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-
*/
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, err;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
float finbuf[3], foutbuf[3];
double dinbuf[3], doutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
/* Some MPI implementations do not implement all of the required
- (datatype,operations) combinations, and further, they do not
- always provide clear and specific error messages. By catching
- the error, we can provide a higher quality, more specific message.
- */
- MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
+ * (datatype,operations) combinations, and further, they do not
+ * always provide clear and specific error messages. By catching
+ * the error, we can provide a higher quality, more specific message.
+ */
+ MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 1;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0);
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- err = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_LOR, 0, comm );
+ err = MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_LOR, 0, comm);
if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_CHAR", err );
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_CHAR", err);
}
else {
- if (rank == 0) {
- if (!coutbuf[0]) {
- errs++;
- fprintf( stderr, "char OR(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char OR(0) test failed\n" );
- }
- if (!coutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "char OR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!coutbuf[0]) {
+ errs++;
+ fprintf(stderr, "char OR(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char OR(0) test failed\n");
+ }
+ if (!coutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "char OR(>) test failed\n");
+ }
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 1;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0);
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- err = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LOR, 0, comm );
+ err = MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LOR, 0, comm);
if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_SIGNED_CHAR", err );
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_SIGNED_CHAR", err);
}
else {
- if (rank == 0) {
- if (!scoutbuf[0]) {
- errs++;
- fprintf( stderr, "signed char OR(1) test failed\n" );
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char OR(0) test failed\n" );
- }
- if (!scoutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "signed char OR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!scoutbuf[0]) {
+ errs++;
+ fprintf(stderr, "signed char OR(1) test failed\n");
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char OR(0) test failed\n");
+ }
+ if (!scoutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "signed char OR(>) test failed\n");
+ }
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 1;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0);
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- err = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LOR, 0, comm );
+ err = MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LOR, 0, comm);
if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_UNSIGNED_CHAR", err );
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_UNSIGNED_CHAR", err);
}
else {
- if (rank == 0) {
- if (!ucoutbuf[0]) {
- errs++;
- fprintf( stderr, "unsigned char OR(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char OR(0) test failed\n" );
- }
- if (!ucoutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "unsigned char OR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!ucoutbuf[0]) {
+ errs++;
+ fprintf(stderr, "unsigned char OR(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char OR(0) test failed\n");
+ }
+ if (!ucoutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned char OR(>) test failed\n");
+ }
+ }
}
#ifndef USE_STRICT_MPI
/* float */
- MTestPrintfMsg( 10, "Reduce of MPI_FLOAT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_FLOAT\n");
finbuf[0] = 1;
finbuf[1] = 0;
finbuf[2] = (rank > 0);
foutbuf[0] = 0;
foutbuf[1] = 1;
foutbuf[2] = 1;
- err = MPI_Reduce( finbuf, foutbuf, 3, MPI_FLOAT, MPI_LOR, 0, comm );
+ err = MPI_Reduce(finbuf, foutbuf, 3, MPI_FLOAT, MPI_LOR, 0, comm);
if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_FLOAT", err );
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_FLOAT", err);
}
else {
- if (rank == 0) {
- if (!foutbuf[0]) {
- errs++;
- fprintf( stderr, "float OR(1) test failed\n" );
- }
- if (foutbuf[1]) {
- errs++;
- fprintf( stderr, "float OR(0) test failed\n" );
- }
- if (!foutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "float OR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!foutbuf[0]) {
+ errs++;
+ fprintf(stderr, "float OR(1) test failed\n");
+ }
+ if (foutbuf[1]) {
+ errs++;
+ fprintf(stderr, "float OR(0) test failed\n");
+ }
+ if (!foutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "float OR(>) test failed\n");
+ }
+ }
}
/* double */
- MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_DOUBLE\n");
dinbuf[0] = 1;
dinbuf[1] = 0;
dinbuf[2] = (rank > 0);
doutbuf[0] = 0;
doutbuf[1] = 1;
doutbuf[2] = 1;
- err = MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LOR, 0, comm );
+ err = MPI_Reduce(dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LOR, 0, comm);
if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_DOUBLE", err );
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_DOUBLE", err);
}
else {
- if (rank == 0) {
- if (!doutbuf[0]) {
- errs++;
- fprintf( stderr, "double OR(1) test failed\n" );
- }
- if (doutbuf[1]) {
- errs++;
- fprintf( stderr, "double OR(0) test failed\n" );
- }
- if (!doutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "double OR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (!doutbuf[0]) {
+ errs++;
+ fprintf(stderr, "double OR(1) test failed\n");
+ }
+ if (doutbuf[1]) {
+ errs++;
+ fprintf(stderr, "double OR(0) test failed\n");
+ }
+ if (!doutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "double OR(>) test failed\n");
+ }
+ }
}
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = (rank > 0);
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ MTEST_VG_MEM_INIT(ldinbuf, 3 * sizeof(ldinbuf[0]));
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
- err = MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LOR, 0, comm );
- if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_LONG_DOUBLE", err );
- }
- else {
- if (rank == 0) {
- if (!ldoutbuf[0]) {
- errs++;
- fprintf( stderr, "long double OR(1) test failed\n" );
- }
- if (ldoutbuf[1]) {
- errs++;
- fprintf( stderr, "long double OR(0) test failed\n" );
- }
- if (!ldoutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "long double OR(>) test failed\n" );
- }
- }
- }
- }
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_DOUBLE\n");
+ err = MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LOR, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_LONG_DOUBLE", err);
+ }
+ else {
+ if (rank == 0) {
+ if (!ldoutbuf[0]) {
+ errs++;
+ fprintf(stderr, "long double OR(1) test failed\n");
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long double OR(0) test failed\n");
+ }
+ if (!ldoutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "long double OR(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif /* HAVE_LONG_DOUBLE */
#endif /* USE_STRICT_MPI */
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 1;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0);
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- err = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LOR, 0, comm );
- if (err) {
- errs++;
- MTestPrintErrorMsg( "MPI_LOR and MPI_LONG_LONG", err );
- }
- else {
- if (rank == 0) {
- if (!lloutbuf[0]) {
- errs++;
- fprintf( stderr, "long long OR(1) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long OR(0) test failed\n" );
- }
- if (!lloutbuf[2] && size > 1) {
- errs++;
- fprintf( stderr, "long long OR(>) test failed\n" );
- }
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ err = MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LOR, 0, comm);
+ if (err) {
+ errs++;
+ MTestPrintErrorMsg("MPI_LOR and MPI_LONG_LONG", err);
+ }
+ else {
+ if (rank == 0) {
+ if (!lloutbuf[0]) {
+ errs++;
+ fprintf(stderr, "long long OR(1) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long OR(0) test failed\n");
+ }
+ if (!lloutbuf[2] && size > 1) {
+ errs++;
+ fprintf(stderr, "long long OR(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif
- MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( errs );
+ MPI_Errhandler_set(comm, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rc;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
float finbuf[3], foutbuf[3];
double dinbuf[3], doutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- /* Set errors return so that we can provide better information
- should a routine reject one of the operand/datatype pairs */
- MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
+ /* Set errors return so that we can provide better information
+ * should a routine reject one of the operand/datatype pairs */
+ MPI_Errhandler_set(comm, MPI_ERRORS_RETURN);
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 1;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0);
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- rc = MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_LXOR, 0, comm );
+ rc = MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_LXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LXOR and MPI_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (coutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "char XOR(1) test failed\n" );
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char XOR(0) test failed\n" );
- }
- if (coutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "char XOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (coutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "char XOR(1) test failed\n");
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char XOR(0) test failed\n");
+ }
+ if (coutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "char XOR(>) test failed\n");
+ }
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 1;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0);
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- rc = MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LXOR, 0, comm );
+ rc = MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_LXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_SIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LXOR and MPI_SIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (scoutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "signed char XOR(1) test failed\n" );
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char XOR(0) test failed\n" );
- }
- if (scoutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "signed char XOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (scoutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "signed char XOR(1) test failed\n");
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char XOR(0) test failed\n");
+ }
+ if (scoutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "signed char XOR(>) test failed\n");
+ }
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 1;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0);
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- rc = MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LXOR, 0, comm );
+ rc = MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_LXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_UNSIGNED_CHAR", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LXOR and MPI_UNSIGNED_CHAR", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (ucoutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "unsigned char XOR(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char XOR(0) test failed\n" );
- }
- if (ucoutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "unsigned char XOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (ucoutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "unsigned char XOR(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char XOR(0) test failed\n");
+ }
+ if (ucoutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "unsigned char XOR(>) test failed\n");
+ }
+ }
}
#ifndef USE_STRICT_MPI
/* float */
- MTestPrintfMsg( 10, "Reduce of MPI_FLOAT\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_FLOAT\n");
finbuf[0] = 1;
finbuf[1] = 0;
finbuf[2] = (rank > 0);
foutbuf[0] = 0;
foutbuf[1] = 1;
foutbuf[2] = 1;
- rc = MPI_Reduce( finbuf, foutbuf, 3, MPI_FLOAT, MPI_LXOR, 0, comm );
+ rc = MPI_Reduce(finbuf, foutbuf, 3, MPI_FLOAT, MPI_LXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_FLOAT", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LXOR and MPI_FLOAT", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (foutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "float XOR(1) test failed\n" );
- }
- if (foutbuf[1]) {
- errs++;
- fprintf( stderr, "float XOR(0) test failed\n" );
- }
- if (foutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "float XOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (foutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "float XOR(1) test failed\n");
+ }
+ if (foutbuf[1]) {
+ errs++;
+ fprintf(stderr, "float XOR(0) test failed\n");
+ }
+ if (foutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "float XOR(>) test failed\n");
+ }
+ }
}
/* double */
- MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_DOUBLE\n");
dinbuf[0] = 1;
dinbuf[1] = 0;
dinbuf[2] = (rank > 0);
doutbuf[0] = 0;
doutbuf[1] = 1;
doutbuf[2] = 1;
- rc = MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LXOR, 0, comm );
+ rc = MPI_Reduce(dinbuf, doutbuf, 3, MPI_DOUBLE, MPI_LXOR, 0, comm);
if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_DOUBLE", rc );
- errs++;
+ MTestPrintErrorMsg("MPI_LXOR and MPI_DOUBLE", rc);
+ errs++;
}
else {
- if (rank == 0) {
- if (doutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "double XOR(1) test failed\n" );
- }
- if (doutbuf[1]) {
- errs++;
- fprintf( stderr, "double XOR(0) test failed\n" );
- }
- if (doutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "double XOR(>) test failed\n" );
- }
- }
+ if (rank == 0) {
+ if (doutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "double XOR(1) test failed\n");
+ }
+ if (doutbuf[1]) {
+ errs++;
+ fprintf(stderr, "double XOR(0) test failed\n");
+ }
+ if (doutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "double XOR(>) test failed\n");
+ }
+ }
}
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = (rank > 0);
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ MTEST_VG_MEM_INIT(ldinbuf, 3* sizeof(ldinbuf[0]));
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
- rc = MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LXOR, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_LONG_DOUBLE", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (ldoutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "long double XOR(1) test failed\n" );
- }
- if (ldoutbuf[1]) {
- errs++;
- fprintf( stderr, "long double XOR(0) test failed\n" );
- }
- if (ldoutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "long double XOR(>) test failed\n" );
- }
- }
- }
- }
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_DOUBLE\n");
+ rc = MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_LXOR, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_LXOR and MPI_LONG_DOUBLE", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (ldoutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "long double XOR(1) test failed\n");
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long double XOR(0) test failed\n");
+ }
+ if (ldoutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "long double XOR(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif /* HAVE_LONG_DOUBLE */
#endif /* USE_STRICT_MPI */
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 1;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0);
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- rc = MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LXOR, 0, comm );
- if (rc) {
- MTestPrintErrorMsg( "MPI_LXOR and MPI_LONG_LONG", rc );
- errs++;
- }
- else {
- if (rank == 0) {
- if (lloutbuf[0] != (size % 2)) {
- errs++;
- fprintf( stderr, "long long XOR(1) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long XOR(0) test failed\n" );
- }
- if (lloutbuf[2] == (size % 2) && size > 1) {
- errs++;
- fprintf( stderr, "long long XOR(>) test failed\n" );
- }
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ rc = MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_LXOR, 0, comm);
+ if (rc) {
+ MTestPrintErrorMsg("MPI_LXOR and MPI_LONG_LONG", rc);
+ errs++;
+ }
+ else {
+ if (rank == 0) {
+ if (lloutbuf[0] != (size % 2)) {
+ errs++;
+ fprintf(stderr, "long long XOR(1) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long XOR(0) test failed\n");
+ }
+ if (lloutbuf[2] == (size % 2) && size > 1) {
+ errs++;
+ fprintf(stderr, "long long XOR(>) test failed\n");
+ }
+ }
+ }
+ }
}
#endif
- MPI_Errhandler_set( comm, MPI_ERRORS_ARE_FATAL );
- MTest_Finalize( errs );
+ MPI_Errhandler_set(comm, MPI_ERRORS_ARE_FATAL);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of char and types that are not required
+ * This test looks at the handling of char and types that are not required
* integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 1;
cinbuf[1] = 0;
cinbuf[2] = rank;
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_MAX, 0, comm );
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_MAX, 0, comm);
if (rank == 0) {
- if (coutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "char MAX(1) test failed\n" );
- }
- if (coutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "char MAX(0) test failed\n" );
- }
- if (size < 128 && coutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "char MAX(>) test failed\n" );
- }
+ if (coutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "char MAX(1) test failed\n");
+ }
+ if (coutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "char MAX(0) test failed\n");
+ }
+ if (size < 128 && coutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "char MAX(>) test failed\n");
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 1;
scinbuf[1] = 0;
scinbuf[2] = rank;
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_MAX, 0, comm );
+ MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_MAX, 0, comm);
if (rank == 0) {
- if (scoutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "signed char MAX(1) test failed\n" );
- }
- if (scoutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "signed char MAX(0) test failed\n" );
- }
- if (size < 128 && scoutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "signed char MAX(>) test failed\n" );
- }
+ if (scoutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "signed char MAX(1) test failed\n");
+ }
+ if (scoutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "signed char MAX(0) test failed\n");
+ }
+ if (size < 128 && scoutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "signed char MAX(>) test failed\n");
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 1;
ucinbuf[1] = 0;
ucinbuf[2] = rank;
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_MAX, 0, comm );
+ MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_MAX, 0, comm);
if (rank == 0) {
- if (ucoutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "unsigned char MAX(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char MAX(0) test failed\n" );
- }
- if (size < 256 && ucoutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "unsigned char MAX(>) test failed\n" );
- }
+ if (ucoutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "unsigned char MAX(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char MAX(0) test failed\n");
+ }
+ if (size < 256 && ucoutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "unsigned char MAX(>) test failed\n");
+ }
}
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = rank;
-
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
- MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_MAX, 0, comm );
- if (rank == 0) {
- if (ldoutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "long double MAX(1) test failed\n" );
- }
- if (ldoutbuf[1] != 0.0) {
- errs++;
- fprintf( stderr, "long double MAX(0) test failed\n" );
- }
- if (ldoutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "long double MAX(>) test failed\n" );
- }
- }
- }
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ MTEST_VG_MEM_INIT(ldinbuf, 3* sizeof(ldinbuf[0]));
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = rank;
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_DOUBLE\n");
+ MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_MAX, 0, comm);
+ if (rank == 0) {
+ if (ldoutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "long double MAX(1) test failed\n");
+ }
+ if (ldoutbuf[1] != 0.0) {
+ errs++;
+ fprintf(stderr, "long double MAX(0) test failed\n");
+ }
+ if (ldoutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "long double MAX(>) test failed\n");
+ }
+ }
+ }
}
#endif /* HAVE_LONG_DOUBLE */
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 1;
- llinbuf[1] = 0;
- llinbuf[2] = rank;
-
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_MAX, 0, comm );
- if (rank == 0) {
- if (lloutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "long long MAX(1) test failed\n" );
- }
- if (lloutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "long long MAX(0) test failed\n" );
- }
- if (lloutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "long long MAX(>) test failed\n" );
- }
- }
- }
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = rank;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_MAX, 0, comm);
+ if (rank == 0) {
+ if (lloutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "long long MAX(1) test failed\n");
+ }
+ if (lloutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "long long MAX(0) test failed\n");
+ }
+ if (lloutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "long long MAX(>) test failed\n");
+ }
+ }
+ }
}
#endif /* HAVE_LONG_LONG */
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of char and types that are not required
+ * This test looks at the handling of char and types that are not required
* integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*
* The rule on max loc is that if there is a tie in the value, the minimum
* rank is used (see 4.9.3 in the MPI-1 standard)
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
/* 2 int */
{
- struct twoint { int val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_2INT, MPI_MAXLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "2int MAXLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0) {
- errs++;
- fprintf( stderr, "2int MAXLOC(0) test failed, value = %d, should be zero\n", coutbuf[1].val );
- }
- if (coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "2int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
- }
- if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
- errs++;
- fprintf( stderr, "2int MAXLOC(>) test failed\n" );
- }
- }
+ struct twoint {
+ int val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_2INT, MPI_MAXLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "2int MAXLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf(stderr, "2int MAXLOC(0) test failed, value = %d, should be zero\n",
+ coutbuf[1].val);
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr,
+ "2int MAXLOC(0) test failed, location of max = %d, should be zero\n",
+ coutbuf[1].loc);
+ }
+ if (coutbuf[2].val != size - 1 || coutbuf[2].loc != size - 1) {
+ errs++;
+ fprintf(stderr, "2int MAXLOC(>) test failed\n");
+ }
+ }
}
/* float int */
{
- struct floatint { float val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = (float)rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_FLOAT_INT, MPI_MAXLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "float-int MAXLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0) {
- errs++;
- fprintf( stderr, "float-int MAXLOC(0) test failed, value = %f, should be zero\n", coutbuf[1].val );
- }
- if (coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "float-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
- }
- if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
- errs++;
- fprintf( stderr, "float-int MAXLOC(>) test failed\n" );
- }
- }
+ struct floatint {
+ float val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = (float) rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_FLOAT_INT, MPI_MAXLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "float-int MAXLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf(stderr, "float-int MAXLOC(0) test failed, value = %f, should be zero\n",
+ coutbuf[1].val);
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr,
+ "float-int MAXLOC(0) test failed, location of max = %d, should be zero\n",
+ coutbuf[1].loc);
+ }
+ if (coutbuf[2].val != size - 1 || coutbuf[2].loc != size - 1) {
+ errs++;
+ fprintf(stderr, "float-int MAXLOC(>) test failed\n");
+ }
+ }
}
-
+
/* long int */
{
- struct longint { long val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_INT, MPI_MAXLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "long-int MAXLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0) {
- errs++;
- fprintf( stderr, "long-int MAXLOC(0) test failed, value = %ld, should be zero\n", coutbuf[1].val );
- }
- if (coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "long-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
- }
- if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
- errs++;
- fprintf( stderr, "long-int MAXLOC(>) test failed\n" );
- }
- }
+ struct longint {
+ long val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_LONG_INT, MPI_MAXLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "long-int MAXLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf(stderr, "long-int MAXLOC(0) test failed, value = %ld, should be zero\n",
+ coutbuf[1].val);
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr,
+ "long-int MAXLOC(0) test failed, location of max = %d, should be zero\n",
+ coutbuf[1].loc);
+ }
+ if (coutbuf[2].val != size - 1 || coutbuf[2].loc != size - 1) {
+ errs++;
+ fprintf(stderr, "long-int MAXLOC(>) test failed\n");
+ }
+ }
}
/* short int */
{
- struct shortint { short val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_SHORT_INT, MPI_MAXLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "short-int MAXLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0) {
- errs++;
- fprintf( stderr, "short-int MAXLOC(0) test failed, value = %d, should be zero\n", coutbuf[1].val );
- }
- if (coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "short-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
- }
- if (coutbuf[2].val != size-1) {
- errs++;
- fprintf( stderr, "short-int MAXLOC(>) test failed, value = %d, should be %d\n", coutbuf[2].val, size-1 );
- }
- if (coutbuf[2].loc != size -1) {
- errs++;
- fprintf( stderr, "short-int MAXLOC(>) test failed, location of max = %d, should be %d\n", coutbuf[2].loc, size-1 );
- }
- }
+ struct shortint {
+ short val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_SHORT_INT, MPI_MAXLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "short-int MAXLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf(stderr, "short-int MAXLOC(0) test failed, value = %d, should be zero\n",
+ coutbuf[1].val);
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr,
+ "short-int MAXLOC(0) test failed, location of max = %d, should be zero\n",
+ coutbuf[1].loc);
+ }
+ if (coutbuf[2].val != size - 1) {
+ errs++;
+ fprintf(stderr, "short-int MAXLOC(>) test failed, value = %d, should be %d\n",
+ coutbuf[2].val, size - 1);
+ }
+ if (coutbuf[2].loc != size - 1) {
+ errs++;
+ fprintf(stderr,
+ "short-int MAXLOC(>) test failed, location of max = %d, should be %d\n",
+ coutbuf[2].loc, size - 1);
+ }
+ }
}
-
+
/* double int */
{
- struct doubleint { double val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_DOUBLE_INT, MPI_MAXLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "double-int MAXLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0) {
- errs++;
- fprintf( stderr, "double-int MAXLOC(0) test failed, value = %f, should be zero\n", coutbuf[1].val );
- }
- if (coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "double-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
- }
- if (coutbuf[2].val != size-1 || coutbuf[2].loc != size-1) {
- errs++;
- fprintf( stderr, "double-int MAXLOC(>) test failed\n" );
- }
- }
+ struct doubleint {
+ double val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_DOUBLE_INT, MPI_MAXLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "double-int MAXLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf(stderr, "double-int MAXLOC(0) test failed, value = %lf, should be zero\n",
+ coutbuf[1].val);
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr,
+ "double-int MAXLOC(0) test failed, location of max = %d, should be zero\n",
+ coutbuf[1].loc);
+ }
+ if (coutbuf[2].val != size - 1 || coutbuf[2].loc != size - 1) {
+ errs++;
+ fprintf(stderr, "double-int MAXLOC(>) test failed\n");
+ }
+ }
}
-
+
#ifdef HAVE_LONG_DOUBLE
/* long double int */
{
- struct longdoubleint { long double val; int loc; } cinbuf[3], coutbuf[3];
+ struct longdoubleint {
+ long double val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
/* avoid valgrind warnings about padding bytes in the long double */
memset(&cinbuf[0], 0, sizeof(cinbuf));
memset(&coutbuf[0], 0, sizeof(coutbuf));
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_DOUBLE_INT, MPI_MAXLOC,
- 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "long double-int MAXLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0) {
- errs++;
- fprintf( stderr, "long double-int MAXLOC(0) test failed, value = %f, should be zero\n", (double)coutbuf[1].val );
- }
- if (coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "long double-int MAXLOC(0) test failed, location of max = %d, should be zero\n", coutbuf[1].loc );
- }
- if (coutbuf[2].val != size-1) {
- errs++;
- fprintf( stderr, "long double-int MAXLOC(>) test failed, value = %f, should be %d\n", (double)coutbuf[2].val, size-1 );
- }
- if (coutbuf[2].loc != size-1) {
- errs++;
- fprintf( stderr, "long double-int MAXLOC(>) test failed, location of max = %d, should be %d\n", coutbuf[2].loc, size-1 );
- }
- }
- }
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_LONG_DOUBLE_INT, MPI_MAXLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "long double-int MAXLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0) {
+ errs++;
+ fprintf(stderr,
+ "long double-int MAXLOC(0) test failed, value = %lf, should be zero\n",
+ (double) coutbuf[1].val);
+ }
+ if (coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr,
+ "long double-int MAXLOC(0) test failed, location of max = %d, should be zero\n",
+ coutbuf[1].loc);
+ }
+ if (coutbuf[2].val != size - 1) {
+ errs++;
+ fprintf(stderr,
+ "long double-int MAXLOC(>) test failed, value = %lf, should be %d\n",
+ (double) coutbuf[2].val, size - 1);
+ }
+ if (coutbuf[2].loc != size - 1) {
+ errs++;
+ fprintf(stderr,
+ "long double-int MAXLOC(>) test failed, location of max = %d, should be %d\n",
+ coutbuf[2].loc, size - 1);
+ }
+ }
+ }
}
#endif
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of char and types that are not required
+ * This test looks at the handling of char and types that are not required
* integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 1;
cinbuf[1] = 0;
cinbuf[2] = (rank & 0x7f);
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_MIN, 0, comm );
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_MIN, 0, comm);
if (rank == 0) {
- if (coutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "char MIN(1) test failed\n" );
- }
- if (coutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "char MIN(0) test failed\n" );
- }
- if (coutbuf[2] != 0) {
- errs++;
- fprintf( stderr, "char MIN(>) test failed\n" );
- }
+ if (coutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "char MIN(1) test failed\n");
+ }
+ if (coutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "char MIN(0) test failed\n");
+ }
+ if (coutbuf[2] != 0) {
+ errs++;
+ fprintf(stderr, "char MIN(>) test failed\n");
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 1;
scinbuf[1] = 0;
scinbuf[2] = (rank & 0x7f);
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_MIN, 0, comm );
+ MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_MIN, 0, comm);
if (rank == 0) {
- if (scoutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "signed char MIN(1) test failed\n" );
- }
- if (scoutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "signed char MIN(0) test failed\n" );
- }
- if (scoutbuf[2] != 0) {
- errs++;
- fprintf( stderr, "signed char MIN(>) test failed\n" );
- }
+ if (scoutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "signed char MIN(1) test failed\n");
+ }
+ if (scoutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "signed char MIN(0) test failed\n");
+ }
+ if (scoutbuf[2] != 0) {
+ errs++;
+ fprintf(stderr, "signed char MIN(>) test failed\n");
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 1;
ucinbuf[1] = 0;
ucinbuf[2] = (rank & 0x7f);
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_MIN, 0, comm );
+ MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_MIN, 0, comm);
if (rank == 0) {
- if (ucoutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "unsigned char MIN(1) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char MIN(0) test failed\n" );
- }
- if (ucoutbuf[2] != 0) {
- errs++;
- fprintf( stderr, "unsigned char MIN(>) test failed\n" );
- }
+ if (ucoutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "unsigned char MIN(1) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char MIN(0) test failed\n");
+ }
+ if (ucoutbuf[2] != 0) {
+ errs++;
+ fprintf(stderr, "unsigned char MIN(>) test failed\n");
+ }
}
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = rank;
-
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
- MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_MIN, 0, comm );
- if (rank == 0) {
- if (ldoutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "long double MIN(1) test failed\n" );
- }
- if (ldoutbuf[1] != 0.0) {
- errs++;
- fprintf( stderr, "long double MIN(0) test failed\n" );
- }
- if (ldoutbuf[2] != 0.0) {
- errs++;
- fprintf( stderr, "long double MIN(>) test failed\n" );
- }
- }
- }
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ /* long double */
+ MTEST_VG_MEM_INIT(ldinbuf, 3* sizeof(ldinbuf[0]));
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = rank;
+
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_DOUBLE\n");
+ MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_MIN, 0, comm);
+ if (rank == 0) {
+ if (ldoutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "long double MIN(1) test failed\n");
+ }
+ if (ldoutbuf[1] != 0.0) {
+ errs++;
+ fprintf(stderr, "long double MIN(0) test failed\n");
+ }
+ if (ldoutbuf[2] != 0.0) {
+ errs++;
+ fprintf(stderr, "long double MIN(>) test failed\n");
+ }
+ }
+ }
}
#endif /* HAVE_LONG_DOUBLE */
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 1;
- llinbuf[1] = 0;
- llinbuf[2] = rank;
-
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_MIN, 0, comm );
- if (rank == 0) {
- if (lloutbuf[0] != 1) {
- errs++;
- fprintf( stderr, "long long MIN(1) test failed\n" );
- }
- if (lloutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "long long MIN(0) test failed\n" );
- }
- if (lloutbuf[2] != 0) {
- errs++;
- fprintf( stderr, "long long MIN(>) test failed\n" );
- }
- }
- }
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = rank;
+
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_MIN, 0, comm);
+ if (rank == 0) {
+ if (lloutbuf[0] != 1) {
+ errs++;
+ fprintf(stderr, "long long MIN(1) test failed\n");
+ }
+ if (lloutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "long long MIN(0) test failed\n");
+ }
+ if (lloutbuf[2] != 0) {
+ errs++;
+ fprintf(stderr, "long long MIN(>) test failed\n");
+ }
+ }
+ }
}
#endif /* HAVE_LONG_LONG */
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
- * This test looks at the handling of char and types that are not required
+ * This test looks at the handling of char and types that are not required
* integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*
* The rule on min loc is that if there is a tie in the value, the minimum
* rank is used (see 4.9.3 in the MPI-1 standard)
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
/* 2 int */
{
- struct twoint { int val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = (rank & 0x7f);
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_2INT, MPI_MINLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 && coutbuf[0].loc != -1) {
- errs++;
- fprintf( stderr, "2int MINLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0 && coutbuf[1].loc != -1) {
- errs++;
- fprintf( stderr, "2int MINLOC(0) test failed\n" );
- }
- if (coutbuf[2].val != 0 && coutbuf[2].loc != 0) {
- errs++;
- fprintf( stderr, "2int MINLOC(>) test failed\n" );
- }
- }
+ struct twoint {
+ int val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = (rank & 0x7f);
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_2INT, MPI_MINLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 && coutbuf[0].loc != -1) {
+ errs++;
+ fprintf(stderr, "2int MINLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0 && coutbuf[1].loc != -1) {
+ errs++;
+ fprintf(stderr, "2int MINLOC(0) test failed\n");
+ }
+ if (coutbuf[2].val != 0 && coutbuf[2].loc != 0) {
+ errs++;
+ fprintf(stderr, "2int MINLOC(>) test failed\n");
+ }
+ }
}
-
+
/* float int */
{
- struct floatint { float val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = (float)rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_FLOAT_INT, MPI_MINLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 && coutbuf[0].loc != -1) {
- errs++;
- fprintf( stderr, "float-int MINLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0 && coutbuf[1].loc != -1) {
- errs++;
- fprintf( stderr, "float-int MINLOC(0) test failed\n" );
- }
- if (coutbuf[2].val != 0 && coutbuf[2].loc != 0) {
- errs++;
- fprintf( stderr, "float-int MINLOC(>) test failed\n" );
- }
- }
+ struct floatint {
+ float val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = (float) rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_FLOAT_INT, MPI_MINLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 && coutbuf[0].loc != -1) {
+ errs++;
+ fprintf(stderr, "float-int MINLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0 && coutbuf[1].loc != -1) {
+ errs++;
+ fprintf(stderr, "float-int MINLOC(0) test failed\n");
+ }
+ if (coutbuf[2].val != 0 && coutbuf[2].loc != 0) {
+ errs++;
+ fprintf(stderr, "float-int MINLOC(>) test failed\n");
+ }
+ }
}
-
+
/* long int */
{
- struct longint { long val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_INT, MPI_MINLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "long-int MINLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "long-int MINLOC(0) test failed\n" );
- }
- if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
- errs++;
- fprintf( stderr, "long-int MINLOC(>) test failed\n" );
- }
- }
+ struct longint {
+ long val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_LONG_INT, MPI_MINLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "long-int MINLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr, "long-int MINLOC(0) test failed\n");
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf(stderr, "long-int MINLOC(>) test failed\n");
+ }
+ }
}
/* short int */
{
- struct shortint { short val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_SHORT_INT, MPI_MINLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "short-int MINLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "short-int MINLOC(0) test failed\n" );
- }
- if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
- errs++;
- fprintf( stderr, "short-int MINLOC(>) test failed\n" );
- }
- }
+ struct shortint {
+ short val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_SHORT_INT, MPI_MINLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "short-int MINLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr, "short-int MINLOC(0) test failed\n");
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf(stderr, "short-int MINLOC(>) test failed\n");
+ }
+ }
}
-
+
/* double int */
{
- struct doubleint { double val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_DOUBLE_INT, MPI_MINLOC, 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "double-int MINLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "double-int MINLOC(0) test failed\n" );
- }
- if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
- errs++;
- fprintf( stderr, "double-int MINLOC(>) test failed\n" );
- }
- }
+ struct doubleint {
+ double val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_DOUBLE_INT, MPI_MINLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "double-int MINLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr, "double-int MINLOC(0) test failed\n");
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf(stderr, "double-int MINLOC(>) test failed\n");
+ }
+ }
}
-
+
#ifdef HAVE_LONG_DOUBLE
/* long double int */
{
- struct longdoubleint { long double val; int loc; } cinbuf[3], coutbuf[3];
-
- cinbuf[0].val = 1;
- cinbuf[0].loc = rank;
- cinbuf[1].val = 0;
- cinbuf[1].loc = rank;
- cinbuf[2].val = rank;
- cinbuf[2].loc = rank;
-
- coutbuf[0].val = 0;
- coutbuf[0].loc = -1;
- coutbuf[1].val = 1;
- coutbuf[1].loc = -1;
- coutbuf[2].val = 1;
- coutbuf[2].loc = -1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_LONG_DOUBLE_INT, MPI_MINLOC,
- 0, comm );
- if (rank == 0) {
- if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
- errs++;
- fprintf( stderr, "long double-int MINLOC(1) test failed\n" );
- }
- if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
- errs++;
- fprintf( stderr, "long double-int MINLOC(0) test failed\n" );
- }
- if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
- errs++;
- fprintf( stderr, "long double-int MINLOC(>) test failed\n" );
- }
- }
- }
+ struct longdoubleint {
+ long double val;
+ int loc;
+ } cinbuf[3], coutbuf[3];
+ MTEST_VG_MEM_INIT(cinbuf, 3* sizeof(cinbuf[0]));
+
+ cinbuf[0].val = 1;
+ cinbuf[0].loc = rank;
+ cinbuf[1].val = 0;
+ cinbuf[1].loc = rank;
+ cinbuf[2].val = rank;
+ cinbuf[2].loc = rank;
+
+ coutbuf[0].val = 0;
+ coutbuf[0].loc = -1;
+ coutbuf[1].val = 1;
+ coutbuf[1].loc = -1;
+ coutbuf[2].val = 1;
+ coutbuf[2].loc = -1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_LONG_DOUBLE_INT, MPI_MINLOC, 0, comm);
+ if (rank == 0) {
+ if (coutbuf[0].val != 1 || coutbuf[0].loc != 0) {
+ errs++;
+ fprintf(stderr, "long double-int MINLOC(1) test failed\n");
+ }
+ if (coutbuf[1].val != 0 || coutbuf[1].loc != 0) {
+ errs++;
+ fprintf(stderr, "long double-int MINLOC(0) test failed\n");
+ }
+ if (coutbuf[2].val != 0 || coutbuf[2].loc != 0) {
+ errs++;
+ fprintf(stderr, "long double-int MINLOC(>) test failed\n");
+ }
+ }
+ }
}
#endif
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test MPI_PROD operations on optional datatypes dupported by MPICH";
*/
-typedef struct { double r, i; } d_complex;
+typedef struct {
+ double r, i;
+} d_complex;
#ifdef HAVE_LONG_DOUBLE
-typedef struct { long double r, i; } ld_complex;
+typedef struct {
+ long double r, i;
+} ld_complex;
#endif
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, maxsize, result[6] = { 1, 1, 2, 6, 24, 120 };
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
d_complex dinbuf[3], doutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
- if (size > 5) maxsize = 5;
- else maxsize = size;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+ if (size > 5)
+ maxsize = 5;
+ else
+ maxsize = size;
- /* General forumula: If we multiple the values from 1 to n, the
- product is n!. This grows very fast, so we'll only use the first
- five (1! = 1, 2! = 2, 3! = 6, 4! = 24, 5! = 120), with n!
- stored in the array result[n] */
+ /* General forumula: If we multiple the values from 1 to n, the
+ * product is n!. This grows very fast, so we'll only use the first
+ * five (1! = 1, 2! = 2, 3! = 6, 4! = 24, 5! = 120), with n!
+ * stored in the array result[n] */
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
cinbuf[1] = 0;
cinbuf[2] = (rank > 1);
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_PROD, 0, comm );
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_PROD, 0, comm);
if (rank == 0) {
- if (coutbuf[0] != (char)result[maxsize-1]) {
- errs++;
- fprintf( stderr, "char PROD(rank) test failed (%d!=%d)\n",
- (int)coutbuf[0], (int)result[maxsize]);
- }
- if (coutbuf[1]) {
- errs++;
- fprintf( stderr, "char PROD(0) test failed\n" );
- }
- if (size > 1 && coutbuf[2]) {
- errs++;
- fprintf( stderr, "char PROD(>) test failed\n" );
- }
+ if (coutbuf[0] != (char) result[maxsize - 1]) {
+ errs++;
+ fprintf(stderr, "char PROD(rank) test failed (%d!=%d)\n",
+ (int) coutbuf[0], (int) result[maxsize]);
+ }
+ if (coutbuf[1]) {
+ errs++;
+ fprintf(stderr, "char PROD(0) test failed\n");
+ }
+ if (size > 1 && coutbuf[2]) {
+ errs++;
+ fprintf(stderr, "char PROD(>) test failed\n");
+ }
}
#endif /* USE_STRICT_MPI */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
scinbuf[1] = 0;
scinbuf[2] = (rank > 1);
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_PROD, 0, comm );
+ MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_PROD, 0, comm);
if (rank == 0) {
- if (scoutbuf[0] != (signed char)result[maxsize-1]) {
- errs++;
- fprintf( stderr, "signed char PROD(rank) test failed (%d!=%d)\n",
- (int)scoutbuf[0], (int)result[maxsize]);
- }
- if (scoutbuf[1]) {
- errs++;
- fprintf( stderr, "signed char PROD(0) test failed\n" );
- }
- if (size > 1 && scoutbuf[2]) {
- errs++;
- fprintf( stderr, "signed char PROD(>) test failed\n" );
- }
+ if (scoutbuf[0] != (signed char) result[maxsize - 1]) {
+ errs++;
+ fprintf(stderr, "signed char PROD(rank) test failed (%d!=%d)\n",
+ (int) scoutbuf[0], (int) result[maxsize]);
+ }
+ if (scoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "signed char PROD(0) test failed\n");
+ }
+ if (size > 1 && scoutbuf[2]) {
+ errs++;
+ fprintf(stderr, "signed char PROD(>) test failed\n");
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0);
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_PROD, 0, comm );
+ MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_PROD, 0, comm);
if (rank == 0) {
- if (ucoutbuf[0] != (unsigned char)result[maxsize-1]) {
- errs++;
- fprintf( stderr, "unsigned char PROD(rank) test failed\n" );
- }
- if (ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char PROD(0) test failed\n" );
- }
- if (size > 1 && ucoutbuf[2]) {
- errs++;
- fprintf( stderr, "unsigned char PROD(>) test failed\n" );
- }
+ if (ucoutbuf[0] != (unsigned char) result[maxsize - 1]) {
+ errs++;
+ fprintf(stderr, "unsigned char PROD(rank) test failed\n");
+ }
+ if (ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char PROD(0) test failed\n");
+ }
+ if (size > 1 && ucoutbuf[2]) {
+ errs++;
+ fprintf(stderr, "unsigned char PROD(>) test failed\n");
+ }
}
#ifndef USE_STRICT_MPI
/* For some reason, complex is not allowed for sum and prod */
if (MPI_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) {
- int dc;
-#ifdef HAVE_LONG_DOUBLE
- ld_complex ldinbuf[3], ldoutbuf[3];
-#endif
- /* Must determine which C type matches this Fortran type */
- MPI_Type_size( MPI_DOUBLE_COMPLEX, &dc );
- if (dc == sizeof(d_complex)) {
- /* double complex; may be null if we do not have Fortran support */
- dinbuf[0].r = (rank < maxsize && rank > 0) ? rank : 1;
- dinbuf[1].r = 0;
- dinbuf[2].r = (rank > 0);
- dinbuf[0].i = 0;
- dinbuf[1].i = 1;
- dinbuf[2].i = -(rank > 0);
-
- doutbuf[0].r = 0;
- doutbuf[1].r = 1;
- doutbuf[2].r = 1;
- doutbuf[0].i = 0;
- doutbuf[1].i = 1;
- doutbuf[2].i = 1;
- MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_PROD, 0, comm );
- if (rank == 0) {
- double imag, real;
- if (doutbuf[0].r != (double)result[maxsize-1] || doutbuf[0].i != 0) {
- errs++;
- fprintf( stderr, "double complex PROD(rank) test failed\n" );
- }
- /* Multiplying the imaginary part depends on size mod 4 */
- imag = 1.0; real = 0.0; /* Make compiler happy */
- switch (size % 4) {
- case 1: imag = 1.0; real = 0.0; break;
- case 2: imag = 0.0; real = -1.0; break;
- case 3: imag =-1.0; real = 0.0; break;
- case 0: imag = 0.0; real = 1.0; break;
- }
- if (doutbuf[1].r != real || doutbuf[1].i != imag) {
- errs++;
- fprintf( stderr, "double complex PROD(i) test failed (%f,%f)!=(%f,%f)\n",
- doutbuf[1].r,doutbuf[1].i,real,imag);
- }
- if (doutbuf[2].r != 0 || doutbuf[2].i != 0) {
- errs++;
- fprintf( stderr, "double complex PROD(>) test failed\n" );
- }
- }
- }
+ int dc;
#ifdef HAVE_LONG_DOUBLE
- else if (dc == sizeof(ld_complex)) {
- /* double complex; may be null if we do not have Fortran support */
- ldinbuf[0].r = (rank < maxsize && rank > 0) ? rank : 1;
- ldinbuf[1].r = 0;
- ldinbuf[2].r = (rank > 0);
- ldinbuf[0].i = 0;
- ldinbuf[1].i = 1;
- ldinbuf[2].i = -(rank > 0);
-
- ldoutbuf[0].r = 0;
- ldoutbuf[1].r = 1;
- ldoutbuf[2].r = 1;
- ldoutbuf[0].i = 0;
- ldoutbuf[1].i = 1;
- ldoutbuf[2].i = 1;
- MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_PROD, 0, comm );
- if (rank == 0) {
- long double imag, real;
- if (ldoutbuf[0].r != (double)result[maxsize-1] || ldoutbuf[0].i != 0) {
- errs++;
- fprintf( stderr, "double complex PROD(rank) test failed\n" );
- }
- /* Multiplying the imaginary part depends on size mod 4 */
- imag = 1.0; real = 0.0; /* Make compiler happy */
- switch (size % 4) {
- case 1: imag = 1.0; real = 0.0; break;
- case 2: imag = 0.0; real = -1.0; break;
- case 3: imag =-1.0; real = 0.0; break;
- case 0: imag = 0.0; real = 1.0; break;
- }
- if (ldoutbuf[1].r != real || ldoutbuf[1].i != imag) {
- errs++;
- fprintf( stderr, "double complex PROD(i) test failed (%Lf,%Lf)!=(%Lf,%Lf)\n",
- ldoutbuf[1].r,ldoutbuf[1].i,real,imag);
- }
- if (ldoutbuf[2].r != 0 || ldoutbuf[2].i != 0) {
- errs++;
- fprintf( stderr, "double complex PROD(>) test failed\n" );
- }
- }
- }
+ ld_complex ldinbuf[3], ldoutbuf[3];
+ MTEST_VG_MEM_INIT(ldinbuf, 3* sizeof(ldinbuf[0]));
+#endif
+ /* Must determine which C type matches this Fortran type */
+ MPI_Type_size(MPI_DOUBLE_COMPLEX, &dc);
+ if (dc == sizeof(d_complex)) {
+ /* double complex; may be null if we do not have Fortran support */
+ dinbuf[0].r = (rank < maxsize && rank > 0) ? rank : 1;
+ dinbuf[1].r = 0;
+ dinbuf[2].r = (rank > 0);
+ dinbuf[0].i = 0;
+ dinbuf[1].i = 1;
+ dinbuf[2].i = -(rank > 0);
+
+ doutbuf[0].r = 0;
+ doutbuf[1].r = 1;
+ doutbuf[2].r = 1;
+ doutbuf[0].i = 0;
+ doutbuf[1].i = 1;
+ doutbuf[2].i = 1;
+ MPI_Reduce(dinbuf, doutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_PROD, 0, comm);
+ if (rank == 0) {
+ double imag, real;
+ if (doutbuf[0].r != (double) result[maxsize - 1] || doutbuf[0].i != 0) {
+ errs++;
+ fprintf(stderr, "double complex PROD(rank) test failed\n");
+ }
+ /* Multiplying the imaginary part depends on size mod 4 */
+ imag = 1.0;
+ real = 0.0; /* Make compiler happy */
+ switch (size % 4) {
+ case 1:
+ imag = 1.0;
+ real = 0.0;
+ break;
+ case 2:
+ imag = 0.0;
+ real = -1.0;
+ break;
+ case 3:
+ imag = -1.0;
+ real = 0.0;
+ break;
+ case 0:
+ imag = 0.0;
+ real = 1.0;
+ break;
+ }
+ if (doutbuf[1].r != real || doutbuf[1].i != imag) {
+ errs++;
+ fprintf(stderr, "double complex PROD(i) test failed (%f,%f)!=(%f,%f)\n",
+ doutbuf[1].r, doutbuf[1].i, real, imag);
+ }
+ if (doutbuf[2].r != 0 || doutbuf[2].i != 0) {
+ errs++;
+ fprintf(stderr, "double complex PROD(>) test failed\n");
+ }
+ }
+ }
+#ifdef HAVE_LONG_DOUBLE
+ else if (dc == sizeof(ld_complex)) {
+ /* double complex; may be null if we do not have Fortran support */
+ ldinbuf[0].r = (rank < maxsize && rank > 0) ? rank : 1;
+ ldinbuf[1].r = 0;
+ ldinbuf[2].r = (rank > 0);
+ ldinbuf[0].i = 0;
+ ldinbuf[1].i = 1;
+ ldinbuf[2].i = -(rank > 0);
+
+ ldoutbuf[0].r = 0;
+ ldoutbuf[1].r = 1;
+ ldoutbuf[2].r = 1;
+ ldoutbuf[0].i = 0;
+ ldoutbuf[1].i = 1;
+ ldoutbuf[2].i = 1;
+ MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_PROD, 0, comm);
+ if (rank == 0) {
+ long double imag, real;
+ if (ldoutbuf[0].r != (double) result[maxsize - 1] || ldoutbuf[0].i != 0) {
+ errs++;
+ fprintf(stderr, "double complex PROD(rank) test failed\n");
+ }
+ /* Multiplying the imaginary part depends on size mod 4 */
+ imag = 1.0;
+ real = 0.0; /* Make compiler happy */
+ switch (size % 4) {
+ case 1:
+ imag = 1.0;
+ real = 0.0;
+ break;
+ case 2:
+ imag = 0.0;
+ real = -1.0;
+ break;
+ case 3:
+ imag = -1.0;
+ real = 0.0;
+ break;
+ case 0:
+ imag = 0.0;
+ real = 1.0;
+ break;
+ }
+ if (ldoutbuf[1].r != real || ldoutbuf[1].i != imag) {
+ errs++;
+ fprintf(stderr, "double complex PROD(i) test failed (%Lf,%Lf)!=(%Lf,%Lf)\n",
+ ldoutbuf[1].r, ldoutbuf[1].i, real, imag);
+ }
+ if (ldoutbuf[2].r != 0 || ldoutbuf[2].i != 0) {
+ errs++;
+ fprintf(stderr, "double complex PROD(>) test failed\n");
+ }
+ }
+ }
#endif /* HAVE_LONG_DOUBLE */
}
#endif /* USE_STRICT_MPI */
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = (rank > 0);
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ MTEST_VG_MEM_INIT(ldinbuf, 3 * sizeof(ldinbuf[0]));
+ /* long double */
+ ldinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_PROD, 0, comm );
- if (rank == 0) {
- if (ldoutbuf[0] != (long double)result[maxsize-1]) {
- errs++;
- fprintf( stderr, "long double PROD(rank) test failed\n" );
- }
- if (ldoutbuf[1]) {
- errs++;
- fprintf( stderr, "long double PROD(0) test failed\n" );
- }
- if (size > 1 && ldoutbuf[2] != 0) {
- errs++;
- fprintf( stderr, "long double PROD(>) test failed\n" );
- }
- }
- }
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_PROD, 0, comm);
+ if (rank == 0) {
+ if (ldoutbuf[0] != (long double) result[maxsize - 1]) {
+ errs++;
+ fprintf(stderr, "long double PROD(rank) test failed\n");
+ }
+ if (ldoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long double PROD(0) test failed\n");
+ }
+ if (size > 1 && ldoutbuf[2] != 0) {
+ errs++;
+ fprintf(stderr, "long double PROD(>) test failed\n");
+ }
+ }
+ }
}
#endif /* HAVE_LONG_DOUBLE */
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0);
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = (rank < maxsize && rank > 0) ? rank : 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_PROD, 0, comm );
- if (rank == 0) {
- if (lloutbuf[0] != (long long)result[maxsize-1]) {
- errs++;
- fprintf( stderr, "long long PROD(rank) test failed\n" );
- }
- if (lloutbuf[1]) {
- errs++;
- fprintf( stderr, "long long PROD(0) test failed\n" );
- }
- if (size > 1 && lloutbuf[2]) {
- errs++;
- fprintf( stderr, "long long PROD(>) test failed\n" );
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_PROD, 0, comm);
+ if (rank == 0) {
+ if (lloutbuf[0] != (long long) result[maxsize - 1]) {
+ errs++;
+ fprintf(stderr, "long long PROD(rank) test failed\n");
+ }
+ if (lloutbuf[1]) {
+ errs++;
+ fprintf(stderr, "long long PROD(0) test failed\n");
+ }
+ if (size > 1 && lloutbuf[2]) {
+ errs++;
+ fprintf(stderr, "long long PROD(>) test failed\n");
+ }
+ }
+ }
}
#endif /* HAVE_LONG_LONG */
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test MPI_SUM operations on optional datatypes dupported by MPICH";
*/
-typedef struct { double r, i; } d_complex;
+typedef struct {
+ double r, i;
+} d_complex;
#ifdef HAVE_LONG_DOUBLE
-typedef struct { long double r, i; } ld_complex;
+typedef struct {
+ long double r, i;
+} ld_complex;
#endif
/*
- * This test looks at the handling of logical and for types that are not
+ * This test looks at the handling of logical and for types that are not
* integers or are not required integers (e.g., long long). MPICH allows
* these as well. A strict MPI test should not include this test.
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size;
- MPI_Comm comm;
+ MPI_Comm comm;
char cinbuf[3], coutbuf[3];
signed char scinbuf[3], scoutbuf[3];
unsigned char ucinbuf[3], ucoutbuf[3];
d_complex dinbuf[3], doutbuf[3];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
#ifndef USE_STRICT_MPI
/* char */
- MTestPrintfMsg( 10, "Reduce of MPI_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_CHAR\n");
cinbuf[0] = 1;
cinbuf[1] = 0;
cinbuf[2] = (rank > 0);
coutbuf[0] = 0;
coutbuf[1] = 1;
coutbuf[2] = 1;
- MPI_Reduce( cinbuf, coutbuf, 3, MPI_CHAR, MPI_SUM, 0, comm );
+ MPI_Reduce(cinbuf, coutbuf, 3, MPI_CHAR, MPI_SUM, 0, comm);
if (rank == 0) {
- if (size < 128 && coutbuf[0] != size) {
- errs++;
- fprintf( stderr, "char SUM(1) test failed\n" );
- }
- if (size < 128 && coutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "char SUM(0) test failed\n" );
- }
- if (size < 128 && coutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "char SUM(>) test failed\n" );
- }
+ if (size < 128 && coutbuf[0] != size) {
+ errs++;
+ fprintf(stderr, "char SUM(1) test failed\n");
+ }
+ if (size < 128 && coutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "char SUM(0) test failed\n");
+ }
+ if (size < 128 && coutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "char SUM(>) test failed\n");
+ }
}
#endif /* USE_MPI_STRICT */
/* signed char */
- MTestPrintfMsg( 10, "Reduce of MPI_SIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_SIGNED_CHAR\n");
scinbuf[0] = 1;
scinbuf[1] = 0;
scinbuf[2] = (rank > 0);
scoutbuf[0] = 0;
scoutbuf[1] = 1;
scoutbuf[2] = 1;
- MPI_Reduce( scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_SUM, 0, comm );
+ MPI_Reduce(scinbuf, scoutbuf, 3, MPI_SIGNED_CHAR, MPI_SUM, 0, comm);
if (rank == 0) {
- if (size < 128 && scoutbuf[0] != size) {
- errs++;
- fprintf( stderr, "signed char SUM(1) test failed\n" );
- }
- if (size < 128 && scoutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "signed char SUM(0) test failed\n" );
- }
- if (size < 128 && scoutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "signed char SUM(>) test failed\n" );
- }
+ if (size < 128 && scoutbuf[0] != size) {
+ errs++;
+ fprintf(stderr, "signed char SUM(1) test failed\n");
+ }
+ if (size < 128 && scoutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "signed char SUM(0) test failed\n");
+ }
+ if (size < 128 && scoutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "signed char SUM(>) test failed\n");
+ }
}
/* unsigned char */
- MTestPrintfMsg( 10, "Reduce of MPI_UNSIGNED_CHAR\n" );
+ MTestPrintfMsg(10, "Reduce of MPI_UNSIGNED_CHAR\n");
ucinbuf[0] = 1;
ucinbuf[1] = 0;
ucinbuf[2] = (rank > 0);
ucoutbuf[0] = 0;
ucoutbuf[1] = 1;
ucoutbuf[2] = 1;
- MPI_Reduce( ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_SUM, 0, comm );
+ MPI_Reduce(ucinbuf, ucoutbuf, 3, MPI_UNSIGNED_CHAR, MPI_SUM, 0, comm);
if (rank == 0) {
- if (size < 128 && ucoutbuf[0] != size) {
- errs++;
- fprintf( stderr, "unsigned char SUM(1) test failed\n" );
- }
- if (size < 128 && ucoutbuf[1]) {
- errs++;
- fprintf( stderr, "unsigned char SUM(0) test failed\n" );
- }
- if (size < 128 && ucoutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "unsigned char SUM(>) test failed\n" );
- }
+ if (size < 128 && ucoutbuf[0] != size) {
+ errs++;
+ fprintf(stderr, "unsigned char SUM(1) test failed\n");
+ }
+ if (size < 128 && ucoutbuf[1]) {
+ errs++;
+ fprintf(stderr, "unsigned char SUM(0) test failed\n");
+ }
+ if (size < 128 && ucoutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "unsigned char SUM(>) test failed\n");
+ }
}
#ifndef USE_STRICT_MPI
/* For some reason, complex is not allowed for sum and prod */
if (MPI_DOUBLE_COMPLEX != MPI_DATATYPE_NULL) {
- int dc;
-#ifdef HAVE_LONG_DOUBLE
- ld_complex ldinbuf[3], ldoutbuf[3];
-#endif
- /* Must determine which C type matches this Fortran type */
- MPI_Type_size( MPI_DOUBLE_COMPLEX, &dc );
- if (dc == sizeof(d_complex)) {
- MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE_COMPLEX\n" );
- /* double complex; may be null if we do not have Fortran support */
- dinbuf[0].r = 1;
- dinbuf[1].r = 0;
- dinbuf[2].r = (rank > 0);
- dinbuf[0].i = -1;
- dinbuf[1].i = 0;
- dinbuf[2].i = -(rank > 0);
-
- doutbuf[0].r = 0;
- doutbuf[1].r = 1;
- doutbuf[2].r = 1;
- doutbuf[0].i = 0;
- doutbuf[1].i = 1;
- doutbuf[2].i = 1;
- MPI_Reduce( dinbuf, doutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm );
- if (rank == 0) {
- if (doutbuf[0].r != size || doutbuf[0].i != -size) {
- errs++;
- fprintf( stderr, "double complex SUM(1) test failed\n" );
- }
- if (doutbuf[1].r != 0 || doutbuf[1].i != 0) {
- errs++;
- fprintf( stderr, "double complex SUM(0) test failed\n" );
- }
- if (doutbuf[2].r != size - 1 || doutbuf[2].i != 1 - size) {
- errs++;
- fprintf( stderr, "double complex SUM(>) test failed\n" );
- }
- }
- }
+ int dc;
#ifdef HAVE_LONG_DOUBLE
- else if (dc == sizeof(ld_complex)) {
- MTestPrintfMsg( 10, "Reduce of MPI_DOUBLE_COMPLEX\n" );
- /* double complex; may be null if we do not have Fortran support */
- ldinbuf[0].r = 1;
- ldinbuf[1].r = 0;
- ldinbuf[2].r = (rank > 0);
- ldinbuf[0].i = -1;
- ldinbuf[1].i = 0;
- ldinbuf[2].i = -(rank > 0);
-
- ldoutbuf[0].r = 0;
- ldoutbuf[1].r = 1;
- ldoutbuf[2].r = 1;
- ldoutbuf[0].i = 0;
- ldoutbuf[1].i = 1;
- ldoutbuf[2].i = 1;
- MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_DOUBLE_COMPLEX,
- MPI_SUM, 0, comm );
- if (rank == 0) {
- if (ldoutbuf[0].r != size || ldoutbuf[0].i != -size) {
- errs++;
- fprintf( stderr, "double complex SUM(1) test failed\n" );
- }
- if (ldoutbuf[1].r != 0 || ldoutbuf[1].i != 0) {
- errs++;
- fprintf( stderr, "double complex SUM(0) test failed\n" );
- }
- if (ldoutbuf[2].r != size - 1 || ldoutbuf[2].i != 1 - size) {
- errs++;
- fprintf( stderr, "double complex SUM(>) test failed\n" );
- }
- }
- }
+ ld_complex ldinbuf[3], ldoutbuf[3];
+ MTEST_VG_MEM_INIT(ldinbuf, 3 * sizeof(ldinbuf[0]));
#endif
- /* Implicitly ignore if there is no matching C type */
+ /* Must determine which C type matches this Fortran type */
+ MPI_Type_size(MPI_DOUBLE_COMPLEX, &dc);
+ if (dc == sizeof(d_complex)) {
+ MTestPrintfMsg(10, "Reduce of MPI_DOUBLE_COMPLEX\n");
+ /* double complex; may be null if we do not have Fortran support */
+ dinbuf[0].r = 1;
+ dinbuf[1].r = 0;
+ dinbuf[2].r = (rank > 0);
+ dinbuf[0].i = -1;
+ dinbuf[1].i = 0;
+ dinbuf[2].i = -(rank > 0);
+
+ doutbuf[0].r = 0;
+ doutbuf[1].r = 1;
+ doutbuf[2].r = 1;
+ doutbuf[0].i = 0;
+ doutbuf[1].i = 1;
+ doutbuf[2].i = 1;
+ MPI_Reduce(dinbuf, doutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm);
+ if (rank == 0) {
+ if (doutbuf[0].r != size || doutbuf[0].i != -size) {
+ errs++;
+ fprintf(stderr, "double complex SUM(1) test failed\n");
+ }
+ if (doutbuf[1].r != 0 || doutbuf[1].i != 0) {
+ errs++;
+ fprintf(stderr, "double complex SUM(0) test failed\n");
+ }
+ if (doutbuf[2].r != size - 1 || doutbuf[2].i != 1 - size) {
+ errs++;
+ fprintf(stderr, "double complex SUM(>) test failed\n");
+ }
+ }
+ }
+#ifdef HAVE_LONG_DOUBLE
+ else if (dc == sizeof(ld_complex)) {
+ MTestPrintfMsg(10, "Reduce of MPI_DOUBLE_COMPLEX\n");
+ /* double complex; may be null if we do not have Fortran support */
+ ldinbuf[0].r = 1;
+ ldinbuf[1].r = 0;
+ ldinbuf[2].r = (rank > 0);
+ ldinbuf[0].i = -1;
+ ldinbuf[1].i = 0;
+ ldinbuf[2].i = -(rank > 0);
+
+ ldoutbuf[0].r = 0;
+ ldoutbuf[1].r = 1;
+ ldoutbuf[2].r = 1;
+ ldoutbuf[0].i = 0;
+ ldoutbuf[1].i = 1;
+ ldoutbuf[2].i = 1;
+ MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_DOUBLE_COMPLEX, MPI_SUM, 0, comm);
+ if (rank == 0) {
+ if (ldoutbuf[0].r != size || ldoutbuf[0].i != -size) {
+ errs++;
+ fprintf(stderr, "double complex SUM(1) test failed\n");
+ }
+ if (ldoutbuf[1].r != 0 || ldoutbuf[1].i != 0) {
+ errs++;
+ fprintf(stderr, "double complex SUM(0) test failed\n");
+ }
+ if (ldoutbuf[2].r != size - 1 || ldoutbuf[2].i != 1 - size) {
+ errs++;
+ fprintf(stderr, "double complex SUM(>) test failed\n");
+ }
+ }
+ }
+#endif
+ /* Implicitly ignore if there is no matching C type */
}
#endif /* USE_STRICT_MPI */
#ifdef HAVE_LONG_DOUBLE
- { long double ldinbuf[3], ldoutbuf[3];
- /* long double */
- ldinbuf[0] = 1;
- ldinbuf[1] = 0;
- ldinbuf[2] = (rank > 0);
+ {
+ long double ldinbuf[3], ldoutbuf[3];
+ MTEST_VG_MEM_INIT(ldinbuf, 3 * sizeof(ldinbuf[0]));
+ /* long double */
+ ldinbuf[0] = 1;
+ ldinbuf[1] = 0;
+ ldinbuf[2] = (rank > 0);
- ldoutbuf[0] = 0;
- ldoutbuf[1] = 1;
- ldoutbuf[2] = 1;
- if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_DOUBLE\n" );
- MPI_Reduce( ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_SUM, 0, comm );
- if (rank == 0) {
- if (ldoutbuf[0] != size) {
- errs++;
- fprintf( stderr, "long double SUM(1) test failed\n" );
- }
- if (ldoutbuf[1] != 0.0) {
- errs++;
- fprintf( stderr, "long double SUM(0) test failed\n" );
- }
- if (ldoutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "long double SUM(>) test failed\n" );
- }
- }
- }
+ ldoutbuf[0] = 0;
+ ldoutbuf[1] = 1;
+ ldoutbuf[2] = 1;
+ if (MPI_LONG_DOUBLE != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_DOUBLE\n");
+ MPI_Reduce(ldinbuf, ldoutbuf, 3, MPI_LONG_DOUBLE, MPI_SUM, 0, comm);
+ if (rank == 0) {
+ if (ldoutbuf[0] != size) {
+ errs++;
+ fprintf(stderr, "long double SUM(1) test failed\n");
+ }
+ if (ldoutbuf[1] != 0.0) {
+ errs++;
+ fprintf(stderr, "long double SUM(0) test failed\n");
+ }
+ if (ldoutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "long double SUM(>) test failed\n");
+ }
+ }
+ }
}
#endif
#ifdef HAVE_LONG_LONG
{
- long long llinbuf[3], lloutbuf[3];
- /* long long */
- llinbuf[0] = 1;
- llinbuf[1] = 0;
- llinbuf[2] = (rank > 0);
+ long long llinbuf[3], lloutbuf[3];
+ /* long long */
+ llinbuf[0] = 1;
+ llinbuf[1] = 0;
+ llinbuf[2] = (rank > 0);
- lloutbuf[0] = 0;
- lloutbuf[1] = 1;
- lloutbuf[2] = 1;
- if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
- MTestPrintfMsg( 10, "Reduce of MPI_LONG_LONG\n" );
- MPI_Reduce( llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_SUM, 0, comm );
- if (rank == 0) {
- if (lloutbuf[0] != size) {
- errs++;
- fprintf( stderr, "long long SUM(1) test failed\n" );
- }
- if (lloutbuf[1] != 0) {
- errs++;
- fprintf( stderr, "long long SUM(0) test failed\n" );
- }
- if (lloutbuf[2] != size - 1) {
- errs++;
- fprintf( stderr, "long long SUM(>) test failed\n" );
- }
- }
- }
+ lloutbuf[0] = 0;
+ lloutbuf[1] = 1;
+ lloutbuf[2] = 1;
+ if (MPI_LONG_LONG != MPI_DATATYPE_NULL) {
+ MTestPrintfMsg(10, "Reduce of MPI_LONG_LONG\n");
+ MPI_Reduce(llinbuf, lloutbuf, 3, MPI_LONG_LONG, MPI_SUM, 0, comm);
+ if (rank == 0) {
+ if (lloutbuf[0] != size) {
+ errs++;
+ fprintf(stderr, "long long SUM(1) test failed\n");
+ }
+ if (lloutbuf[1] != 0) {
+ errs++;
+ fprintf(stderr, "long long SUM(0) test failed\n");
+ }
+ if (lloutbuf[2] != size - 1) {
+ errs++;
+ fprintf(stderr, "long long SUM(>) test failed\n");
+ }
+ }
+ }
}
#endif
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
*/
/*
* This tests that the reduce operation respects the noncommutative flag.
- * See red4.c for a version that can distinguish between P_{root} P_{root+1}
+ * See red4.c for a version that can distinguish between P_{root} P_{root+1}
* ... P_{root-1} and P_0 ... P_{size-1} . The MPI standard clearly
- * specifies that the result is P_0 ... P_{size-1}, independent of the root
+ * specifies that the result is P_0 ... P_{size-1}, independent of the root
* (see 4.9.4 in MPI-1)
*/
c(i,j) is cin[j+i*matSize]
*/
#define MAXCOL 256
-static int matSize = 0; /* Must be < MAXCOL */
-void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
-void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+static int matSize = 0; /* Must be < MAXCOL */
+void uop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype);
+void uop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
- const int *cin = (const int *)cinPtr;
- int *cout = (int *)coutPtr;
+ const int *cin = (const int *) cinPtr;
+ int *cout = (int *) coutPtr;
int i, j, k, nmat;
int tempCol[MAXCOL];
for (nmat = 0; nmat < *count; nmat++) {
- for (j=0; j<matSize; j++) {
- for (i=0; i<matSize; i++) {
- tempCol[i] = 0;
- for (k=0; k<matSize; k++) {
- /* col[i] += cin(i,k) * cout(k,j) */
- tempCol[i] += cin[k+i*matSize] * cout[j+k*matSize];
- }
- }
- for (i=0; i<matSize; i++) {
- cout[j+i*matSize] = tempCol[i];
- }
- }
+ for (j = 0; j < matSize; j++) {
+ for (i = 0; i < matSize; i++) {
+ tempCol[i] = 0;
+ for (k = 0; k < matSize; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ tempCol[i] += cin[k + i * matSize] * cout[j + k * matSize];
+ }
+ }
+ for (i = 0; i < matSize; i++) {
+ cout[j + i * matSize] = tempCol[i];
+ }
+ }
}
}
/* Initialize the integer matrix as a permutation of rank with rank+1.
If we call this matrix P_r, we know that product of P_0 P_1 ... P_{size-2}
is a left shift by 1.
-*/
+*/
-static void initMat( MPI_Comm comm, int mat[] )
+static void initMat(MPI_Comm comm, int mat[])
{
int i, size, rank;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
- for (i=0; i<size*size; i++) mat[i] = 0;
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (i = 0; i < size * size; i++)
+ mat[i] = 0;
/* For each row */
- for (i=0; i<size; i++) {
- if (rank != size - 1) {
- if (i == rank) mat[((i+1)%size) + i * size] = 1;
- else if (i == ((rank + 1)%size)) mat[((i+size-1)%size) + i * size] = 1;
- else mat[i+i*size] = 1;
- }
- else {
- mat[i+i*size] = 1;
- }
+ for (i = 0; i < size; i++) {
+ if (rank != size - 1) {
+ if (i == rank)
+ mat[((i + 1) % size) + i * size] = 1;
+ else if (i == ((rank + 1) % size))
+ mat[((i + size - 1) % size) + i * size] = 1;
+ else
+ mat[i + i * size] = 1;
+ }
+ else {
+ mat[i + i * size] = 1;
+ }
}
}
#ifdef FOO
/* Compare a matrix with the identity matrix */
-static int isIdentity( MPI_Comm comm, int mat[] )
+static int isIdentity(MPI_Comm comm, int mat[])
{
int i, j, size, rank, errs = 0;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (i=0; i<size; i++) {
- for (j=0; j<size; j++) {
- if (i == j) {
- if (mat[j+i*size] != 1) {
- errs++;
- }
- }
- else {
- if (mat[j+i*size] != 0) {
- errs++;
- }
- }
- }
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ if (i == j) {
+ if (mat[j + i * size] != 1) {
+ errs++;
+ }
+ }
+ else {
+ if (mat[j + i * size] != 0) {
+ errs++;
+ }
+ }
+ }
}
return errs;
}
#endif
/* Compare a matrix with the identity matrix */
-static int isShiftLeft( MPI_Comm comm, int mat[] )
+static int isShiftLeft(MPI_Comm comm, int mat[])
{
int i, j, size, rank, errs = 0;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (i=0; i<size; i++) {
- for (j=0; j<size; j++) {
- if (i == ((j + 1) % size)) {
- if (mat[j+i*size] != 1) {
- errs++;
- }
- }
- else {
- if (mat[j+i*size] != 0) {
- errs++;
- }
- }
- }
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ if (i == ((j + 1) % size)) {
+ if (mat[j + i * size] != 1) {
+ errs++;
+ }
+ }
+ else {
+ if (mat[j + i * size] != 0) {
+ errs++;
+ }
+ }
+ }
}
return errs;
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, root;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
int *buf, *bufout;
MPI_Op op;
MPI_Datatype mattype;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Op_create( uop, 0, &op );
-
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
+ MPI_Op_create(uop, 0, &op);
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
- matSize = size; /* used by the user-defined operation */
- /* Only one matrix for now */
- count = 1;
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
- /* A single matrix, the size of the communicator */
- MPI_Type_contiguous( size*size, MPI_INT, &mattype );
- MPI_Type_commit( &mattype );
-
- buf = (int *)malloc( count * size * size * sizeof(int) );
- if (!buf) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- bufout = (int *)malloc( count * size * size * sizeof(int) );
- if (!bufout) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
+ matSize = size; /* used by the user-defined operation */
+ /* Only one matrix for now */
+ count = 1;
- for (root = 0; root < size; root ++) {
- initMat( comm, buf );
- MPI_Reduce( buf, bufout, count, mattype, op, root, comm );
- if (rank == root) {
- errs += isShiftLeft( comm, bufout );
- }
-
- /* Try the same test, but using MPI_IN_PLACE */
- initMat( comm, bufout );
- if (rank == root) {
- MPI_Reduce( MPI_IN_PLACE, bufout, count, mattype, op, root, comm );
- }
- else {
- MPI_Reduce( bufout, NULL, count, mattype, op, root, comm );
- }
- if (rank == root) {
- errs += isShiftLeft( comm, bufout );
- }
-
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- /* Try one more time without IN_PLACE to make sure we check
- * aliasing correctly */
+ /* A single matrix, the size of the communicator */
+ MPI_Type_contiguous(size * size, MPI_INT, &mattype);
+ MPI_Type_commit(&mattype);
+
+ buf = (int *) malloc(count * size * size * sizeof(int));
+ if (!buf)
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ bufout = (int *) malloc(count * size * size * sizeof(int));
+ if (!bufout)
+ MPI_Abort(MPI_COMM_WORLD, 1);
+
+ for (root = 0; root < size; root++) {
+ initMat(comm, buf);
+ MPI_Reduce(buf, bufout, count, mattype, op, root, comm);
if (rank == root) {
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Reduce( bufout, bufout, count, mattype, op, root, comm ))
- errs++;
+ errs += isShiftLeft(comm, bufout);
}
-#endif
- }
- free( buf );
- free( bufout );
-
- MPI_Type_free( &mattype );
+ /* Try the same test, but using MPI_IN_PLACE */
+ initMat(comm, bufout);
+ if (rank == root) {
+ MPI_Reduce(MPI_IN_PLACE, bufout, count, mattype, op, root, comm);
+ }
+ else {
+ MPI_Reduce(bufout, NULL, count, mattype, op, root, comm);
+ }
+ if (rank == root) {
+ errs += isShiftLeft(comm, bufout);
+ }
+ }
+
+ free(buf);
+ free(bufout);
+
+ MPI_Type_free(&mattype);
- MTestFreeComm( &comm );
+ MTestFreeComm(&comm);
}
- MPI_Op_free( &op );
+ MPI_Op_free(&op);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
/*
* This tests that the reduce operation respects the noncommutative flag.
- * and that can distinguish between P_{root} P_{root+1}
+ * and that can distinguish between P_{root} P_{root+1}
* ... P_{root-1} and P_0 ... P_{size-1} . The MPI standard clearly
- * specifies that the result is P_0 ... P_{size-1}, independent of the root
+ * specifies that the result is P_0 ... P_{size-1}, independent of the root
* (see 4.9.4 in MPI-1)
*/
c(i,j) is cin[j+i*matSize]
*/
#define MAXCOL 256
-static int matSize = 0; /* Must be < MAXCOL */
+static int matSize = 0; /* Must be < MAXCOL */
-void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
-void uop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+void uop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype);
+void uop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
const int *cin;
- int *cout;
- int i, j, k, nmat;
- int tempCol[MAXCOL];
+ int *cout;
+ int i, j, k, nmat;
+ int tempCol[MAXCOL];
- if (*count != 1) printf( "Panic!\n" );
+ if (*count != 1)
+ printf("Panic!\n");
for (nmat = 0; nmat < *count; nmat++) {
- cin = (const int *)cinPtr;
- cout = (int *)coutPtr;
- for (j=0; j<matSize; j++) {
- for (i=0; i<matSize; i++) {
- tempCol[i] = 0;
- for (k=0; k<matSize; k++) {
- /* col[i] += cin(i,k) * cout(k,j) */
- tempCol[i] += cin[k+i*matSize] * cout[j+k*matSize];
- }
- }
- for (i=0; i<matSize; i++) {
- cout[j+i*matSize] = tempCol[i];
- }
- }
- cinPtr = (int *)cinPtr + matSize*matSize;
- coutPtr = (int *)coutPtr + matSize*matSize;
+ cin = (const int *) cinPtr;
+ cout = (int *) coutPtr;
+ for (j = 0; j < matSize; j++) {
+ for (i = 0; i < matSize; i++) {
+ tempCol[i] = 0;
+ for (k = 0; k < matSize; k++) {
+ /* col[i] += cin(i,k) * cout(k,j) */
+ tempCol[i] += cin[k + i * matSize] * cout[j + k * matSize];
+ }
+ }
+ for (i = 0; i < matSize; i++) {
+ cout[j + i * matSize] = tempCol[i];
+ }
+ }
+ cinPtr = (int *) cinPtr + matSize * matSize;
+ coutPtr = (int *) coutPtr + matSize * matSize;
}
}
If we call this matrix P_r, we know that product of P_0 P_1 ... P_{size-1}
is the matrix with rows ordered as
1,size,2,3,4,...,size-1
- (The matrix is basically a circular shift right,
+ (The matrix is basically a circular shift right,
shifting right n-1 steps for an n x n dimensional matrix, with the last
step swapping rows 1 and size)
-*/
+*/
-static void initMat( MPI_Comm comm, int mat[] )
+static void initMat(MPI_Comm comm, int mat[])
{
int i, size, rank;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
/* Remember the matrix size */
matSize = size;
- for (i=0; i<matSize*matSize; i++) mat[i] = 0;
+ for (i = 0; i < matSize * matSize; i++)
+ mat[i] = 0;
- for (i=0; i<matSize; i++) {
- if (i == rank)
- mat[((i+1)%matSize) + i * matSize] = 1;
- else if (i == ((rank + 1)%matSize))
- mat[((i+matSize-1)%matSize) + i * matSize] = 1;
- else
- mat[i+i*matSize] = 1;
+ for (i = 0; i < matSize; i++) {
+ if (i == rank)
+ mat[((i + 1) % matSize) + i * matSize] = 1;
+ else if (i == ((rank + 1) % matSize))
+ mat[((i + matSize - 1) % matSize) + i * matSize] = 1;
+ else
+ mat[i + i * matSize] = 1;
}
}
/* Compare a matrix with the identity matrix */
/*
-static int isIdentity( MPI_Comm comm, int mat[] )
+static int isIdentity(MPI_Comm comm, int mat[])
{
int i, j, size, rank, errs = 0;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
for (i=0; i<size; i++) {
for (j=0; j<size; j++) {
if (j == i) {
if (mat[j+i*size] != 1) {
- printf( "mat(%d,%d) = %d, should = 1\n",
- i, j, mat[j+i*size] );
+ printf("mat(%d,%d) = %d, should = 1\n",
+ i, j, mat[j+i*size]);
errs++;
}
}
else {
if (mat[j+i*size] != 0) {
- printf( "mat(%d,%d) = %d, should = 0\n",
- i, j, mat[j+i*size] );
+ printf("mat(%d,%d) = %d, should = 0\n",
+ i, j, mat[j+i*size]);
errs++;
}
}
/* Compare a matrix with the identity matrix with rows permuted to as rows
1,size,2,3,4,5,...,size-1 */
-static int isPermutedIdentity( MPI_Comm comm, int mat[] )
+static int isPermutedIdentity(MPI_Comm comm, int mat[])
{
int i, j, size, rank, errs = 0;
-
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
/* Check the first two last rows */
i = 0;
- for (j=0; j<size; j++) {
- if (j==0) {
- if (mat[j] != 1) {
- printf( "mat(%d,%d) = %d, should = 1\n",
- i, j, mat[j] );
- errs++;
- }
- }
- else {
- if (mat[j] != 0) {
- printf( "mat(%d,%d) = %d, should = 0\n",
- i, j, mat[j] );
- errs++;
- }
- }
+ for (j = 0; j < size; j++) {
+ if (j == 0) {
+ if (mat[j] != 1) {
+ printf("mat(%d,%d) = %d, should = 1\n", i, j, mat[j]);
+ errs++;
+ }
+ }
+ else {
+ if (mat[j] != 0) {
+ printf("mat(%d,%d) = %d, should = 0\n", i, j, mat[j]);
+ errs++;
+ }
+ }
}
i = 1;
- for (j=0; j<size; j++) {
- if (j==size-1) {
- if (mat[j+i*size] != 1) {
- printf( "mat(%d,%d) = %d, should = 1\n",
- i, j, mat[j+i*size] );
- errs++;
- }
- }
- else {
- if (mat[j+i*size] != 0) {
- printf( "mat(%d,%d) = %d, should = 0\n",
- i, j, mat[j+i*size] );
- errs++;
- }
- }
+ for (j = 0; j < size; j++) {
+ if (j == size - 1) {
+ if (mat[j + i * size] != 1) {
+ printf("mat(%d,%d) = %d, should = 1\n", i, j, mat[j + i * size]);
+ errs++;
+ }
+ }
+ else {
+ if (mat[j + i * size] != 0) {
+ printf("mat(%d,%d) = %d, should = 0\n", i, j, mat[j + i * size]);
+ errs++;
+ }
+ }
}
/* The remaint rows are shifted down by one */
- for (i=2; i<size; i++) {
- for (j=0; j<size; j++) {
- if (j == i-1) {
- if (mat[j+i*size] != 1) {
- printf( "mat(%d,%d) = %d, should = 1\n",
- i, j, mat[j+i*size] );
- errs++;
- }
- }
- else {
- if (mat[j+i*size] != 0) {
- printf( "mat(%d,%d) = %d, should = 0\n",
- i, j, mat[j+i*size] );
- errs++;
- }
- }
- }
+ for (i = 2; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ if (j == i - 1) {
+ if (mat[j + i * size] != 1) {
+ printf("mat(%d,%d) = %d, should = 1\n", i, j, mat[j + i * size]);
+ errs++;
+ }
+ }
+ else {
+ if (mat[j + i * size] != 0) {
+ printf("mat(%d,%d) = %d, should = 0\n", i, j, mat[j + i * size]);
+ errs++;
+ }
+ }
+ }
}
return errs;
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, root;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
int *buf, *bufout;
MPI_Op op;
MPI_Datatype mattype;
- MTest_Init( &argc, &argv );
-
- MPI_Op_create( uop, 0, &op );
-
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
+ MTest_Init(&argc, &argv);
- if (size > MAXCOL) {
- /* Skip because there are too many processes */
- MTestFreeComm( &comm );
- continue;
- }
+ MPI_Op_create(uop, 0, &op);
- /* Only one matrix for now */
- count = 1;
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
- /* A single matrix, the size of the communicator */
- MPI_Type_contiguous( size*size, MPI_INT, &mattype );
- MPI_Type_commit( &mattype );
-
- buf = (int *)malloc( count * size * size * sizeof(int) );
- if (!buf) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- bufout = (int *)malloc( count * size * size * sizeof(int) );
- if (!bufout) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ if (size > MAXCOL) {
+ /* Skip because there are too many processes */
+ MTestFreeComm(&comm);
+ continue;
}
- for (root = 0; root < size; root ++) {
- initMat( comm, buf );
- MPI_Reduce( buf, bufout, count, mattype, op, root, comm );
- if (rank == root) {
- errs += isPermutedIdentity( comm, bufout );
- }
+ /* Only one matrix for now */
+ count = 1;
- /* Try the same test, but using MPI_IN_PLACE */
- initMat( comm, bufout );
- if (rank == root) {
- MPI_Reduce( MPI_IN_PLACE, bufout, count, mattype, op, root, comm );
- }
- else {
- MPI_Reduce( bufout, NULL, count, mattype, op, root, comm );
- }
- if (rank == root) {
- errs += isPermutedIdentity( comm, bufout );
- }
- }
- MPI_Type_free( &mattype );
+ /* A single matrix, the size of the communicator */
+ MPI_Type_contiguous(size * size, MPI_INT, &mattype);
+ MPI_Type_commit(&mattype);
+
+ buf = (int *) malloc(count * size * size * sizeof(int));
+ if (!buf)
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ bufout = (int *) malloc(count * size * size * sizeof(int));
+ if (!bufout)
+ MPI_Abort(MPI_COMM_WORLD, 1);
+
+ for (root = 0; root < size; root++) {
+ initMat(comm, buf);
+ MPI_Reduce(buf, bufout, count, mattype, op, root, comm);
+ if (rank == root) {
+ errs += isPermutedIdentity(comm, bufout);
+ }
+
+ /* Try the same test, but using MPI_IN_PLACE */
+ initMat(comm, bufout);
+ if (rank == root) {
+ MPI_Reduce(MPI_IN_PLACE, bufout, count, mattype, op, root, comm);
+ }
+ else {
+ MPI_Reduce(bufout, NULL, count, mattype, op, root, comm);
+ }
+ if (rank == root) {
+ errs += isPermutedIdentity(comm, bufout);
+ }
+ }
+ MPI_Type_free(&mattype);
- free( buf );
- free( bufout );
+ free(buf);
+ free(bufout);
- MTestFreeComm( &comm );
+ MTestFreeComm(&comm);
}
- MPI_Op_free( &op );
+ MPI_Op_free(&op);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
int main(int argc, char **argv)
{
int err = 0;
- int toterr, size, rank;
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- int i, sumval;
+ int toterr, size, rank, i, sumval;
int *sendbuf;
int *recvbuf;
-#endif
MPI_Comm comm;
MPI_Init(&argc, &argv);
err++;
fprintf(stderr, "unable to allocate send/recv buffers, aborting");
MPI_Abort(MPI_COMM_WORLD, 1);
- exit(1);
}
- for (i=0; i<size; i++)
+ for (i = 0; i < size; i++)
sendbuf[i] = rank + i;
MPI_Reduce_scatter_block(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
if (recvbuf[0] != sumval) {
err++;
fprintf(stdout, "Did not get expected value for reduce scatter block\n");
free(sendbuf);
/* let's try it again with MPI_IN_PLACE this time */
- for (i=0; i<size; i++)
+ for (i = 0; i < size; i++)
recvbuf[i] = rank + i;
MPI_Reduce_scatter_block(MPI_IN_PLACE, recvbuf, 1, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
if (recvbuf[0] != sumval) {
err++;
fprintf(stdout, "Did not get expected value for reduce scatter block\n");
fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval);
}
-
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Reduce_scatter_block(recvbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm))
- err++;
-
free(recvbuf);
#endif
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
- * (C) 2010 by Argonne National Laboratory.
+ * (C) 2009 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce_scatter_block.
*
* Checks that non-commutative operations are not commuted and that
int err = 0;
/* left(x,y) ==> x */
-void left(void *a, void *b, int *count, MPI_Datatype *type);
-void left(void *a, void *b, int *count, MPI_Datatype *type)
+void left(void *a, void *b, int *count, MPI_Datatype * type);
+void left(void *a, void *b, int *count, MPI_Datatype * type)
{
int *in = a;
int *inout = b;
int i;
- for (i = 0; i < *count; ++i)
- {
+ for (i = 0; i < *count; ++i) {
if (in[i] > inout[i])
++err;
inout[i] = in[i];
}
/* right(x,y) ==> y */
-void right(void *a, void *b, int *count, MPI_Datatype *type);
-void right(void *a, void *b, int *count, MPI_Datatype *type)
+void right(void *a, void *b, int *count, MPI_Datatype * type);
+void right(void *a, void *b, int *count, MPI_Datatype * type)
{
int *in = a;
int *inout = b;
int i;
- for (i = 0; i < *count; ++i)
- {
+ for (i = 0; i < *count; ++i) {
if (in[i] > inout[i])
++err;
inout[i] = inout[i];
/* Just performs a simple sum but can be marked as non-commutative to
potentially tigger different logic in the implementation. */
-void nc_sum(void *a, void *b, int *count, MPI_Datatype *type);
-void nc_sum(void *a, void *b, int *count, MPI_Datatype *type)
+void nc_sum(void *a, void *b, int *count, MPI_Datatype * type);
+void nc_sum(void *a, void *b, int *count, MPI_Datatype * type)
{
int *in = a;
int *inout = b;
int i;
- for (i = 0; i < *count; ++i)
- {
+ for (i = 0; i < *count; ++i) {
inout[i] = in[i] + inout[i];
}
}
#define MAX_BLOCK_SIZE 256
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- int *sendbuf;
- int block_size;
- int *recvbuf;
- int i;
- MPI_Op left_op, right_op, nc_sum_op;
-#endif
- int size, rank;
+ int *sendbuf;
+ int block_size;
+ int *recvbuf;
+ int size, rank, i;
MPI_Comm comm;
+ MPI_Op left_op, right_op, nc_sum_op;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
/* MPI_Reduce_scatter block was added in MPI-2.2 */
- MPI_Op_create(&left, 0/*non-commutative*/, &left_op);
- MPI_Op_create(&right, 0/*non-commutative*/, &right_op);
- MPI_Op_create(&nc_sum, 0/*non-commutative*/, &nc_sum_op);
+ MPI_Op_create(&left, 0 /*non-commutative */ , &left_op);
+ MPI_Op_create(&right, 0 /*non-commutative */ , &right_op);
+ MPI_Op_create(&nc_sum, 0 /*non-commutative */ , &nc_sum_op);
for (block_size = 1; block_size < MAX_BLOCK_SIZE; block_size *= 2) {
- sendbuf = (int *) malloc( block_size * size * sizeof(int) );
- recvbuf = malloc( block_size * sizeof(int) );
+ sendbuf = (int *) malloc(block_size * size * sizeof(int));
+ recvbuf = malloc(block_size * sizeof(int));
- for (i=0; i<(size*block_size); i++)
+ for (i = 0; i < (size * block_size); i++)
sendbuf[i] = rank + i;
- for (i=0; i<block_size; i++)
+ for (i = 0; i < block_size; i++)
recvbuf[i] = 0xdeadbeef;
- MPI_Reduce_scatter_block( sendbuf, recvbuf, block_size, MPI_INT, left_op, comm );
+ MPI_Reduce_scatter_block(sendbuf, recvbuf, block_size, MPI_INT, left_op, comm);
for (i = 0; i < block_size; ++i)
- if (recvbuf[i] != (rank * block_size + i)) ++err;
+ if (recvbuf[i] != (rank * block_size + i))
+ ++err;
- MPI_Reduce_scatter_block( sendbuf, recvbuf, block_size, MPI_INT, right_op, comm );
+ MPI_Reduce_scatter_block(sendbuf, recvbuf, block_size, MPI_INT, right_op, comm);
for (i = 0; i < block_size; ++i)
- if (recvbuf[i] != ((size - 1) + (rank * block_size) + i)) ++err;
+ if (recvbuf[i] != ((size - 1) + (rank * block_size) + i))
+ ++err;
- MPI_Reduce_scatter_block( sendbuf, recvbuf, block_size, MPI_INT, nc_sum_op, comm );
+ MPI_Reduce_scatter_block(sendbuf, recvbuf, block_size, MPI_INT, nc_sum_op, comm);
for (i = 0; i < block_size; ++i) {
int x = rank * block_size + i;
- if (recvbuf[i] != (size*x + (size-1)*size/2)) ++err;
+ if (recvbuf[i] != (size * x + (size - 1) * size / 2))
+ ++err;
}
free(recvbuf);
MPI_Op_free(&left_op);
MPI_Op_free(&right_op);
MPI_Op_free(&nc_sum_op);
-#endif
+#endif
- MTest_Finalize( err );
- MPI_Finalize( );
+ MTest_Finalize(err);
+ MPI_Finalize();
return err;
}
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce scatter.
*
- * Each processor contributes its rank + the index to the reduction,
+ * Each processor contributes its rank + the index to the reduction,
* then receives the ith sum
*
* Can be called with any number of processors.
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
+#include "mpicolltest.h"
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int err = 0, toterr;
- int *sendbuf, recvbuf, *recvcounts;
- int size, rank, i, sumval;
+ int err = 0, toterr;
+ int *sendbuf, recvbuf, *recvcounts;
+ int size, rank, i, sumval;
MPI_Comm comm;
- MPI_Init( &argc, &argv );
+ MPI_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
- sendbuf = (int *) malloc( size * sizeof(int) );
- for (i=0; i<size; i++)
- sendbuf[i] = rank + i;
- recvcounts = (int *)malloc( size * sizeof(int) );
- for (i=0; i<size; i++)
- recvcounts[i] = 1;
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+ sendbuf = (int *) malloc(size * sizeof(int));
+ for (i = 0; i < size; i++)
+ sendbuf[i] = rank + i;
+ recvcounts = (int *) malloc(size * sizeof(int));
+ for (i = 0; i < size; i++)
+ recvcounts[i] = 1;
- MPI_Reduce_scatter( sendbuf, &recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
+ MTest_Reduce_scatter(sendbuf, &recvbuf, recvcounts, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
/* recvbuf should be size * (rank + i) */
if (recvbuf != sumval) {
- err++;
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf, sumval );
+ err++;
+ fprintf(stdout, "Did not get expected value for reduce scatter\n");
+ fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf, sumval);
}
- MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ MPI_Allreduce(&err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (rank == 0 && toterr == 0) {
- printf( " No Errors\n" );
+ printf(" No Errors\n");
}
- MPI_Finalize( );
+
+ free(sendbuf);
+ free(recvcounts);
+ MPI_Finalize();
return toterr;
}
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce scatter.
*
* Checks that non-commutative operations are not commuted and that
int err = 0;
/* left(x,y) ==> x */
-void left(void *a, void *b, int *count, MPI_Datatype *type);
-void left(void *a, void *b, int *count, MPI_Datatype *type)
+void left(void *a, void *b, int *count, MPI_Datatype * type);
+void left(void *a, void *b, int *count, MPI_Datatype * type)
{
int *in = a;
int *inout = b;
int i;
- for (i = 0; i < *count; ++i)
- {
+ for (i = 0; i < *count; ++i) {
if (in[i] > inout[i])
++err;
inout[i] = in[i];
}
/* right(x,y) ==> y */
-void right(void *a, void *b, int *count, MPI_Datatype *type);
-void right(void *a, void *b, int *count, MPI_Datatype *type)
+void right(void *a, void *b, int *count, MPI_Datatype * type);
+void right(void *a, void *b, int *count, MPI_Datatype * type)
{
int *in = a;
int *inout = b;
int i;
- for (i = 0; i < *count; ++i)
- {
+ for (i = 0; i < *count; ++i) {
if (in[i] > inout[i])
++err;
inout[i] = inout[i];
/* Just performs a simple sum but can be marked as non-commutative to
   potentially trigger different logic in the implementation. */
-void nc_sum(void *a, void *b, int *count, MPI_Datatype *type);
-void nc_sum(void *a, void *b, int *count, MPI_Datatype *type)
+void nc_sum(void *a, void *b, int *count, MPI_Datatype * type);
+void nc_sum(void *a, void *b, int *count, MPI_Datatype * type)
{
int *in = a;
int *inout = b;
int i;
- for (i = 0; i < *count; ++i)
- {
+ for (i = 0; i < *count; ++i) {
inout[i] = in[i] + inout[i];
}
}
#define MAX_BLOCK_SIZE 256
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int *sendbuf, *recvcounts;
- int block_size;
- int *recvbuf;
- int size, rank, i;
+ int *sendbuf, *recvcounts;
+ int block_size;
+ int *recvbuf;
+ int size, rank, i;
MPI_Comm comm;
MPI_Op left_op, right_op, nc_sum_op;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
- MPI_Op_create(&left, 0/*non-commutative*/, &left_op);
- MPI_Op_create(&right, 0/*non-commutative*/, &right_op);
- MPI_Op_create(&nc_sum, 0/*non-commutative*/, &nc_sum_op);
+ MPI_Op_create(&left, 0 /*non-commutative */ , &left_op);
+ MPI_Op_create(&right, 0 /*non-commutative */ , &right_op);
+ MPI_Op_create(&nc_sum, 0 /*non-commutative */ , &nc_sum_op);
for (block_size = 1; block_size < MAX_BLOCK_SIZE; block_size *= 2) {
- sendbuf = (int *) malloc( block_size * size * sizeof(int) );
- recvbuf = malloc( block_size * sizeof(int) );
+ sendbuf = (int *) malloc(block_size * size * sizeof(int));
+ recvbuf = malloc(block_size * sizeof(int));
- for (i=0; i<(size*block_size); i++)
+ for (i = 0; i < (size * block_size); i++)
sendbuf[i] = rank + i;
- for (i=0; i<block_size; i++)
+ for (i = 0; i < block_size; i++)
recvbuf[i] = 0xdeadbeef;
- recvcounts = (int *)malloc( size * sizeof(int) );
- for (i=0; i<size; i++)
+ recvcounts = (int *) malloc(size * sizeof(int));
+ for (i = 0; i < size; i++)
recvcounts[i] = block_size;
- MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, left_op, comm );
+ MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_INT, left_op, comm);
for (i = 0; i < block_size; ++i)
- if (recvbuf[i] != (rank * block_size + i)) ++err;
+ if (recvbuf[i] != (rank * block_size + i))
+ ++err;
- MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, right_op, comm );
+ MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_INT, right_op, comm);
for (i = 0; i < block_size; ++i)
- if (recvbuf[i] != ((size - 1) + (rank * block_size) + i)) ++err;
+ if (recvbuf[i] != ((size - 1) + (rank * block_size) + i))
+ ++err;
- MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, nc_sum_op, comm );
+ MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_INT, nc_sum_op, comm);
for (i = 0; i < block_size; ++i) {
int x = rank * block_size + i;
- if (recvbuf[i] != (size*x + (size-1)*size/2)) ++err;
+ if (recvbuf[i] != (size * x + (size - 1) * size / 2))
+ ++err;
}
free(recvbuf);
free(sendbuf);
+ free(recvcounts);
}
MPI_Op_free(&left_op);
MPI_Op_free(&right_op);
MPI_Op_free(&nc_sum_op);
- MTest_Finalize( err );
- MPI_Finalize( );
+ MTest_Finalize(err);
+ MPI_Finalize();
return err;
}
* (C) 2010 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce scatter with large data (needed in MPICH to trigger the
* long-data algorithm)
*
- * Each processor contributes its rank + the index to the reduction,
+ * Each processor contributes its rank + the index to the reduction,
* then receives the ith sum
*
* Can be called with any number of processors.
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
/* Limit the number of error reports */
#define MAX_ERRORS 10
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int err = 0;
- int *sendbuf, *recvbuf, *recvcounts;
- int size, rank, i, j, idx, mycount, sumval;
+ int err = 0;
+ int *sendbuf, *recvbuf, *recvcounts;
+ int size, rank, i, j, idx, mycount, sumval;
MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
- recvcounts = (int *)malloc( size * sizeof(int) );
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+ recvcounts = (int *) malloc(size * sizeof(int));
if (!recvcounts) {
- fprintf( stderr, "Could not allocate %d ints for recvcounts\n",
- size );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "Could not allocate %d ints for recvcounts\n", size);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
mycount = (1024 * 1024) / size;
- for (i=0; i<size; i++)
- recvcounts[i] = mycount;
- sendbuf = (int *) malloc( mycount * size * sizeof(int) );
+ for (i = 0; i < size; i++)
+ recvcounts[i] = mycount;
+ sendbuf = (int *) malloc(mycount * size * sizeof(int));
if (!sendbuf) {
- fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
- mycount * size );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "Could not allocate %d ints for sendbuf\n", mycount * size);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
idx = 0;
- for (i=0; i<size; i++) {
- for (j=0; j<mycount; j++) {
- sendbuf[idx++] = rank + i;
- }
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < mycount; j++) {
+ sendbuf[idx++] = rank + i;
+ }
}
- recvbuf = (int *)malloc( mycount * sizeof(int) );
+ recvbuf = (int *) malloc(mycount * sizeof(int));
if (!recvbuf) {
- fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
- mycount );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "Could not allocate %d ints for recvbuf\n", mycount);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- for (i=0; i<mycount; i++) {
- recvbuf[i] = -1;
+ for (i = 0; i < mycount; i++) {
+ recvbuf[i] = -1;
}
- MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
+ MTest_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
/* recvbuf should be size * (rank + i) */
- for (i=0; i<mycount; i++) {
- if (recvbuf[i] != sumval) {
- err++;
- if (err < MAX_ERRORS) {
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] Got recvbuf[%d] = %d expected %d\n",
- rank, i, recvbuf[i], sumval );
- }
- }
+ for (i = 0; i < mycount; i++) {
+ if (recvbuf[i] != sumval) {
+ err++;
+ if (err < MAX_ERRORS) {
+ fprintf(stdout, "Did not get expected value for reduce scatter\n");
+ fprintf(stdout, "[%d] Got recvbuf[%d] = %d expected %d\n",
+ rank, i, recvbuf[i], sumval);
+ }
+ }
}
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- MPI_Reduce_scatter( MPI_IN_PLACE, sendbuf, recvcounts, MPI_INT, MPI_SUM,
- comm );
+ MTest_Reduce_scatter(MPI_IN_PLACE, sendbuf, recvcounts, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
/* recv'ed values for my process should be size * (rank + i) */
- for (i=0; i<mycount; i++) {
+ for (i = 0; i < mycount; i++) {
if (sendbuf[i] != sumval) {
err++;
if (err < MAX_ERRORS) {
- fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
- fprintf( stdout, "[%d] Got buf[%d] = %d expected %d\n",
- rank, i, sendbuf[i], sumval );
+ fprintf(stdout, "Did not get expected value for reduce scatter (in place)\n");
+ fprintf(stdout, "[%d] Got buf[%d] = %d expected %d\n", rank, i, sendbuf[i], sumval);
}
}
}
-
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Reduce_scatter(sendbuf, sendbuf, recvcounts, MPI_INT, MPI_SUM, comm))
- err++;
#endif
free(sendbuf);
free(recvbuf);
free(recvcounts);
-
- MTest_Finalize( err );
- MPI_Finalize( );
+ MTest_Finalize(err);
+
+ MPI_Finalize();
return 0;
}
* (C) 2011 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce scatter block with large data on an intercommunicator
* (needed in MPICH to trigger the long-data algorithm)
*
- * Each processor contributes its rank + the index to the reduction,
+ * Each processor contributes its rank + the index to the reduction,
* then receives the ith sum
*
* Can be called with any number of processors.
#include <stdlib.h>
#include "mpitest.h"
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int err = 0;
- int size, rsize, rank, i;
- int recvcount, /* Each process receives this much data */
- sendcount, /* Each process contributes this much data */
- basecount; /* Unit of elements - basecount *rsize is recvcount,
- etc. */
- int isLeftGroup;
+ int err = 0;
+ int size, rsize, rank, i;
+ int recvcount, /* Each process receives this much data */
+ sendcount, /* Each process contributes this much data */
+ basecount; /* Unit of elements - basecount *rsize is recvcount,
+ * etc. */
+ int isLeftGroup;
long long *sendbuf, *recvbuf;
long long sumval;
MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
basecount = 1024;
- while (MTestGetIntercomm( &comm, &isLeftGroup, 2 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
-
- if (0) {
- printf( "[%d] %s (%d,%d) remote %d\n", rank,
- isLeftGroup ? "L" : "R",
- rank, size, rsize );
- }
-
- recvcount = basecount * rsize;
- sendcount = basecount * rsize * size;
-
- sendbuf = (long long *) malloc( sendcount * sizeof(long long) );
- if (!sendbuf) {
- fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
- sendcount );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
-
- for (i=0; i<sendcount; i++) {
- sendbuf[i] = (long long)(rank*sendcount + i);
- }
- recvbuf = (long long *)malloc( recvcount * sizeof(long long) );
- if (!recvbuf) {
- fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
- recvcount );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- for (i=0; i<recvcount; i++) {
- recvbuf[i] = (long long)(-i);
- }
-
- MPI_Reduce_scatter_block( sendbuf, recvbuf, recvcount, MPI_LONG_LONG,
- MPI_SUM, comm );
-
- /* Check received data */
- for (i=0; i<recvcount; i++) {
- sumval = (long long)(sendcount) * (long long)((rsize * (rsize-1))/2) +
- (long long)(i + rank * rsize * basecount) * (long long)rsize;
- if (recvbuf[i] != sumval) {
- err++;
- if (err < 4) {
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
- rank,
- isLeftGroup ? "L" : "R",
- i, recvbuf[i], sumval );
- }
- }
- }
-
- free(sendbuf);
- free(recvbuf);
-
- MTestFreeComm( &comm );
+ while (MTestGetIntercomm(&comm, &isLeftGroup, 2)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+ if (0) {
+ printf("[%d] %s (%d,%d) remote %d\n", rank, isLeftGroup ? "L" : "R", rank, size, rsize);
+ }
+
+ recvcount = basecount * rsize;
+ sendcount = basecount * rsize * size;
+
+ sendbuf = (long long *) malloc(sendcount * sizeof(long long));
+ if (!sendbuf) {
+ fprintf(stderr, "Could not allocate %d ints for sendbuf\n", sendcount);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ for (i = 0; i < sendcount; i++) {
+ sendbuf[i] = (long long) (rank * sendcount + i);
+ }
+ recvbuf = (long long *) malloc(recvcount * sizeof(long long));
+ if (!recvbuf) {
+ fprintf(stderr, "Could not allocate %d ints for recvbuf\n", recvcount);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i = 0; i < recvcount; i++) {
+ recvbuf[i] = (long long) (-i);
+ }
+
+ MPI_Reduce_scatter_block(sendbuf, recvbuf, recvcount, MPI_LONG_LONG, MPI_SUM, comm);
+
+ /* Check received data */
+ for (i = 0; i < recvcount; i++) {
+ sumval = (long long) (sendcount) * (long long) ((rsize * (rsize - 1)) / 2) +
+ (long long) (i + rank * rsize * basecount) * (long long) rsize;
+ if (recvbuf[i] != sumval) {
+ err++;
+ if (err < 4) {
+ fprintf(stdout, "Did not get expected value for reduce scatter\n");
+ fprintf(stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
+ rank, isLeftGroup ? "L" : "R", i, recvbuf[i], sumval);
+ }
+ }
+ }
+
+ free(sendbuf);
+ free(recvbuf);
+
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
- MPI_Finalize( );
+ MPI_Finalize();
return 0;
}
* (C) 2010 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce scatter with large data (needed in MPICH to trigger the
* long-data algorithm)
*
- * Each processor contributes its rank + the index to the reduction,
+ * Each processor contributes its rank + the index to the reduction,
* then receives the ith sum
*
* Can be called with any number of processors.
#include <stdlib.h>
#include "mpitest.h"
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int err = 0;
- int *sendbuf, *recvbuf;
- int size, rank, i, j, idx, mycount, sumval;
+ int err = 0;
+ int *sendbuf, *recvbuf;
+ int size, rank, i, j, idx, mycount, sumval;
MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
mycount = (1024 * 1024) / size;
- sendbuf = (int *) malloc( mycount * size * sizeof(int) );
+ sendbuf = (int *) malloc(mycount * size * sizeof(int));
if (!sendbuf) {
- fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
- mycount * size );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "Could not allocate %d ints for sendbuf\n", mycount * size);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
idx = 0;
- for (i=0; i<size; i++) {
- for (j=0; j<mycount; j++) {
- sendbuf[idx++] = rank + i;
- }
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < mycount; j++) {
+ sendbuf[idx++] = rank + i;
+ }
}
- recvbuf = (int *)malloc( mycount * sizeof(int) );
+ recvbuf = (int *) malloc(mycount * sizeof(int));
if (!recvbuf) {
- fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
- mycount );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "Could not allocate %d ints for recvbuf\n", mycount);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- MPI_Reduce_scatter_block( sendbuf, recvbuf, mycount, MPI_INT, MPI_SUM,
- comm );
+ MPI_Reduce_scatter_block(sendbuf, recvbuf, mycount, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
/* recvbuf should be size * (rank + i) */
- for (i=0; i<mycount; i++) {
- if (recvbuf[i] != sumval) {
- err++;
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf[i], sumval );
- }
+ for (i = 0; i < mycount; i++) {
+ if (recvbuf[i] != sumval) {
+ err++;
+ fprintf(stdout, "Did not get expected value for reduce scatter\n");
+ fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[i], sumval);
+ }
}
- MPI_Reduce_scatter_block( MPI_IN_PLACE, sendbuf, mycount, MPI_INT, MPI_SUM,
- comm );
+ MPI_Reduce_scatter_block(MPI_IN_PLACE, sendbuf, mycount, MPI_INT, MPI_SUM, comm);
- sumval = size * rank + ((size - 1) * size)/2;
+ sumval = size * rank + ((size - 1) * size) / 2;
/* recv'ed values for my process should be size * (rank + i) */
- for (i=0; i<mycount; i++) {
+ for (i = 0; i < mycount; i++) {
if (sendbuf[i] != sumval) {
err++;
- fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
- fprintf( stdout, "[%d] Got %d expected %d\n", rank, sendbuf[i], sumval );
+ fprintf(stdout, "Did not get expected value for reduce scatter (in place)\n");
+ fprintf(stdout, "[%d] Got %d expected %d\n", rank, sendbuf[i], sumval);
}
}
free(sendbuf);
free(recvbuf);
-
- MTest_Finalize( err );
- MPI_Finalize( );
+ MTest_Finalize(err);
+
+ MPI_Finalize();
return 0;
}
* (C) 2011 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
-/*
+/*
* Test of reduce scatter with large data on an intercommunicator
* (needed in MPICH to trigger the long-data algorithm)
*
- * Each processor contributes its rank + the index to the reduction,
+ * Each processor contributes its rank + the index to the reduction,
* then receives the ith sum
*
* Can be called with any number of processors.
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"
+#include "mpicolltest.h"
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int err = 0;
- int *recvcounts;
- int size, rsize, rank, i;
- int recvcount, /* Each process receives this much data */
- sendcount, /* Each process contributes this much data */
- basecount; /* Unit of elements - basecount *rsize is recvcount,
- etc. */
- int isLeftGroup;
+ int err = 0;
+ int *recvcounts;
+ int size, rsize, rank, i;
+ int recvcount, /* Each process receives this much data */
+ sendcount, /* Each process contributes this much data */
+ basecount; /* Unit of elements - basecount *rsize is recvcount,
+ * etc. */
+ int isLeftGroup;
long long *sendbuf, *recvbuf;
long long sumval;
MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
basecount = 1024;
- while (MTestGetIntercomm( &comm, &isLeftGroup, 2 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
-
- if (0) {
- printf( "[%d] %s (%d,%d) remote %d\n", rank,
- isLeftGroup ? "L" : "R",
- rank, size, rsize );
- }
-
- recvcount = basecount * rsize;
- sendcount = basecount * rsize * size;
-
- recvcounts = (int *)malloc( size * sizeof(int) );
- if (!recvcounts) {
- fprintf( stderr, "Could not allocate %d int for recvcounts\n",
- size );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- for (i=0; i<size; i++)
- recvcounts[i] = recvcount;
-
- sendbuf = (long long *) malloc( sendcount * sizeof(long long) );
- if (!sendbuf) {
- fprintf( stderr, "Could not allocate %d ints for sendbuf\n",
- sendcount );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
-
- for (i=0; i<sendcount; i++) {
- sendbuf[i] = (long long)(rank*sendcount + i);
- }
- recvbuf = (long long *)malloc( recvcount * sizeof(long long) );
- if (!recvbuf) {
- fprintf( stderr, "Could not allocate %d ints for recvbuf\n",
- recvcount );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- for (i=0; i<recvcount; i++) {
- recvbuf[i] = (long long)(-i);
- }
-
- MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_LONG_LONG, MPI_SUM,
- comm );
-
- /* Check received data */
- for (i=0; i<recvcount; i++) {
- sumval = (long long)(sendcount) * (long long)((rsize * (rsize-1))/2) +
- (long long)(i + rank * rsize * basecount) * (long long)rsize;
- if (recvbuf[i] != sumval) {
- err++;
- if (err < 4) {
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
- rank,
- isLeftGroup ? "L" : "R",
- i, recvbuf[i], sumval );
- }
- }
- }
-
- free(sendbuf);
- free(recvbuf);
- free(recvcounts);
-
- MTestFreeComm( &comm );
+ while (MTestGetIntercomm(&comm, &isLeftGroup, 2)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_rank(comm, &rank);
+
+ if (0) {
+ printf("[%d] %s (%d,%d) remote %d\n", rank, isLeftGroup ? "L" : "R", rank, size, rsize);
+ }
+
+ recvcount = basecount * rsize;
+ sendcount = basecount * rsize * size;
+
+ recvcounts = (int *) malloc(size * sizeof(int));
+ if (!recvcounts) {
+ fprintf(stderr, "Could not allocate %d int for recvcounts\n", size);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i = 0; i < size; i++)
+ recvcounts[i] = recvcount;
+
+ sendbuf = (long long *) malloc(sendcount * sizeof(long long));
+ if (!sendbuf) {
+ fprintf(stderr, "Could not allocate %d ints for sendbuf\n", sendcount);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ for (i = 0; i < sendcount; i++) {
+ sendbuf[i] = (long long) (rank * sendcount + i);
+ }
+ recvbuf = (long long *) malloc(recvcount * sizeof(long long));
+ if (!recvbuf) {
+ fprintf(stderr, "Could not allocate %d ints for recvbuf\n", recvcount);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i = 0; i < recvcount; i++) {
+ recvbuf[i] = (long long) (-i);
+ }
+
+ MTest_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_LONG_LONG, MPI_SUM, comm);
+
+ /* Check received data */
+ for (i = 0; i < recvcount; i++) {
+ sumval = (long long) (sendcount) * (long long) ((rsize * (rsize - 1)) / 2) +
+ (long long) (i + rank * rsize * basecount) * (long long) rsize;
+ if (recvbuf[i] != sumval) {
+ err++;
+ if (err < 4) {
+ fprintf(stdout, "Did not get expected value for reduce scatter\n");
+ fprintf(stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
+ rank, isLeftGroup ? "L" : "R", i, recvbuf[i], sumval);
+ }
+ }
+ }
+
+ free(sendbuf);
+ free(recvbuf);
+ free(recvcounts);
+
+ MTestFreeComm(&comm);
}
- MTest_Finalize( err );
+ MTest_Finalize(err);
- MPI_Finalize( );
+ MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "A simple test of Reduce with all choices of root process";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, root;
int *sendbuf, *recvbuf, i;
- int minsize = 2, count;
- MPI_Comm comm;
+ int minsize = 2, count;
+ MPI_Comm comm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
-
- for (count = 1; count < 130000; count = count * 2) {
- sendbuf = (int *)malloc( count * sizeof(int) );
- recvbuf = (int *)malloc( count * sizeof(int) );
- for (root = 0; root < size; root ++) {
- for (i=0; i<count; i++) sendbuf[i] = i;
- for (i=0; i<count; i++) recvbuf[i] = -1;
- MPI_Reduce( sendbuf, recvbuf, count, MPI_INT, MPI_SUM,
- root, comm );
- if (rank == root) {
- for (i=0; i<count; i++) {
- if (recvbuf[i] != i * size) {
- errs++;
- }
- }
- }
- }
- free( sendbuf );
- free( recvbuf );
- }
+ while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- if (0 == rank) {
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Reduce(&rank, &rank, 1, MPI_INT, MPI_SUM, 0, comm))
- errs++;
+ for (count = 1; count < 130000; count = count * 2) {
+ sendbuf = (int *) malloc(count * sizeof(int));
+ recvbuf = (int *) malloc(count * sizeof(int));
+ for (root = 0; root < size; root++) {
+ for (i = 0; i < count; i++)
+ sendbuf[i] = i;
+ for (i = 0; i < count; i++)
+ recvbuf[i] = -1;
+ MPI_Reduce(sendbuf, recvbuf, count, MPI_INT, MPI_SUM, root, comm);
+ if (rank == root) {
+ for (i = 0; i < count; i++) {
+ if (recvbuf[i] != i * size) {
+ errs++;
+ }
+ }
+ }
+ }
+ free(sendbuf);
+ free(recvbuf);
}
-#endif
-
- MTestFreeComm( &comm );
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static int uop_errs = 0;
/* prototype to keep the compiler happy */
-static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype);
+static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype * datatype);
-static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
+static void user_op(void *invec, void *inoutvec, int *len, MPI_Datatype * datatype)
{
int i;
- int *invec_int = (int *)invec;
- int *inoutvec_int = (int *)inoutvec;
+ int *invec_int = (int *) invec;
+ int *inoutvec_int = (int *) inoutvec;
if (*datatype != MPI_INT) {
++uop_errs;
}
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
int i;
int *inbuf = NULL;
int *inoutbuf = NULL;
int count = -1;
MPI_Op uop = MPI_OP_NULL;
-#endif
MTest_Init(&argc, &argv);
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
inbuf = malloc(sizeof(int) * MAX_BUF_ELEMENTS);
inoutbuf = malloc(sizeof(int) * MAX_BUF_ELEMENTS);
- for (count = 0; count < MAX_BUF_ELEMENTS; count > 0 ? count*=2 : count++) {
+ for (count = 0; count < MAX_BUF_ELEMENTS; count > 0 ? count *= 2 : count++) {
for (i = 0; i < count; ++i) {
inbuf[i] = i;
inoutbuf[i] = i;
for (i = 0; i < count; ++i)
if (inbuf[i] != i) {
++errs;
- if (inoutbuf[i] != (2*i))
- ++errs;
- }
+ if (inoutbuf[i] != (2 * i))
+ ++errs;
+ }
}
/* make sure that user-define ops work too */
- MPI_Op_create(&user_op, 0/*!commute*/, &uop);
- for (count = 0; count < MAX_BUF_ELEMENTS; count > 0 ? count*=2 : count++) {
+ MPI_Op_create(&user_op, 0 /*!commute */ , &uop);
+ for (count = 0; count < MAX_BUF_ELEMENTS; count > 0 ? count *= 2 : count++) {
for (i = 0; i < count; ++i) {
inbuf[i] = i;
inoutbuf[i] = i;
for (i = 0; i < count; ++i)
if (inbuf[i] != i) {
++errs;
- if (inoutbuf[i] != (3*i))
- ++errs;
- }
+ if (inoutbuf[i] != (3 * i))
+ ++errs;
+ }
}
MPI_Op_free(&uop);
MPI_Finalize();
return 0;
}
-
#include <stdio.h>
#include "mpitest.h"
-void addem ( int *, int *, int *, MPI_Datatype * );
-void assoc ( int *, int *, int *, MPI_Datatype * );
+void addem(int *, int *, int *, MPI_Datatype *);
+void assoc(int *, int *, int *, MPI_Datatype *);
-void addem( int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+void addem(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
- int i;
- for ( i=0; i<*len; i++ )
- inoutvec[i] += invec[i];
+ int i;
+ for (i = 0; i < *len; i++)
+ inoutvec[i] += invec[i];
}
#define BAD_ANSWER 100000
/*
- The operation is inoutvec[i] = invec[i] op inoutvec[i]
+ The operation is inoutvec[i] = invec[i] op inoutvec[i]
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
order, independant of the root.
*/
-void assoc( int *invec, int *inoutvec, int *len, MPI_Datatype *dtype)
+void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
- int i;
- for ( i=0; i<*len; i++ ) {
- if (inoutvec[i] <= invec[i] ) {
- int rank;
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n",
- rank, inoutvec[0], invec[0] );
- inoutvec[i] = BAD_ANSWER;
- }
- else
- inoutvec[i] = invec[i];
- }
+ int i;
+ for (i = 0; i < *len; i++) {
+ if (inoutvec[i] <= invec[i]) {
+ int rank;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ fprintf(stderr, "[%d] inout[0] = %d, in[0] = %d\n", rank, inoutvec[0], invec[0]);
+ inoutvec[i] = BAD_ANSWER;
+ }
+ else
+ inoutvec[i] = invec[i];
+ }
}
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
- int rank, size, i;
- int data;
- int errors=0;
- int result = -100;
- int correct_result;
- MPI_Op op_assoc, op_addem;
- MPI_Comm comm=MPI_COMM_WORLD;
-
- MPI_Init( &argc, &argv );
- MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc );
- MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem );
+ int rank, size, i;
+ int data;
+ int errors = 0;
+ int result = -100;
+ int correct_result;
+ MPI_Op op_assoc, op_addem;
+ MPI_Comm comm = MPI_COMM_WORLD;
+
+ MPI_Init(&argc, &argv);
+ MPI_Op_create((MPI_User_function *) assoc, 0, &op_assoc);
+ MPI_Op_create((MPI_User_function *) addem, 1, &op_addem);
/* Run this for a variety of communicator sizes */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
data = rank;
-
+
correct_result = 0;
- for (i=0;i<=rank;i++)
+ for (i = 0; i <= rank; i++)
correct_result += i;
-
- MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm );
+
+ MPI_Scan(&data, &result, 1, MPI_INT, MPI_SUM, comm);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error suming ints with scan\n", rank );
+ fprintf(stderr, "[%d] Error suming ints with scan\n", rank);
errors++;
}
- MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm );
+ MPI_Scan(&data, &result, 1, MPI_INT, MPI_SUM, comm);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank );
+ fprintf(stderr, "[%d] Error summing ints with scan (2)\n", rank);
errors++;
}
-
+
data = rank;
result = -100;
- MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm );
+ MPI_Scan(&data, &result, 1, MPI_INT, op_addem, comm);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error summing ints with scan (userop)\n",
- rank );
+ fprintf(stderr, "[%d] Error summing ints with scan (userop)\n", rank);
errors++;
}
-
- MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm );
+
+ MPI_Scan(&data, &result, 1, MPI_INT, op_addem, comm);
if (result != correct_result) {
- fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n",
- rank );
+ fprintf(stderr, "[%d] Error summing ints with scan (userop2)\n", rank);
errors++;
}
result = -100;
data = rank;
- MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, comm );
+ MPI_Scan(&data, &result, 1, MPI_INT, op_assoc, comm);
if (result == BAD_ANSWER) {
- fprintf( stderr, "[%d] Error scanning with non-commutative op\n",
- rank );
+ fprintf(stderr, "[%d] Error scanning with non-commutative op\n", rank);
errors++;
}
-#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (MPI_SUCCESS == MPI_Scan( &data, &data, 1, MPI_INT, op_assoc, comm))
- errors++;
-#endif
+ MPI_Op_free(&op_assoc);
+ MPI_Op_free(&op_addem);
- MPI_Op_free( &op_assoc );
- MPI_Op_free( &op_addem );
-
MPI_Finalize();
if (errors)
- printf( "[%d] done with ERRORS(%d)!\n", rank, errors );
+ printf("[%d] done with ERRORS(%d)!\n", rank, errors);
else {
- if (rank == 0)
- printf(" No Errors\n");
+ if (rank == 0)
+ printf(" No Errors\n");
}
return errors;
/* This example sends a vector and receives individual elements, but the
root process does not receive any data */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Datatype vec;
double *vecin, *vecout, ivalue;
- int root, i, n, stride, err = 0;
- int rank, size;
+ int root, i, n, stride, err = 0;
+ int rank, size;
MPI_Aint vextent;
- MTest_Init( &argc, &argv );
-
- MPI_Comm_size( MPI_COMM_WORLD, &size );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MTest_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
n = 12;
stride = 10;
- vecin = (double *)malloc( n * stride * size * sizeof(double) );
- vecout = (double *)malloc( n * sizeof(double) );
-
- MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
- MPI_Type_commit( &vec );
- MPI_Type_extent( vec, &vextent );
- if (vextent != ((n-1)*(MPI_Aint)stride + 1) * sizeof(double) ) {
- err++;
- printf( "Vector extent is %ld, should be %ld\n",
- (long) vextent, (long)(((n-1)*stride+1)*sizeof(double)) );
+ vecin = (double *) malloc(n * stride * size * sizeof(double));
+ vecout = (double *) malloc(n * sizeof(double));
+
+ MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
+ MPI_Type_commit(&vec);
+ MPI_Type_extent(vec, &vextent);
+ if (vextent != ((n - 1) * (MPI_Aint) stride + 1) * sizeof(double)) {
+ err++;
+ printf("Vector extent is %ld, should be %ld\n",
+ (long) vextent, (long) (((n - 1) * stride + 1) * sizeof(double)));
}
/* Note that the exted of type vector is from the first to the
- last element, not n*stride.
- E.g., with n=1, the extent is a single double */
+ * last element, not n*stride.
+ * E.g., with n=1, the extent is a single double */
- for (i=0; i<n*stride*size; i++) vecin[i] = (double)i;
- for (root=0; root<size; root++) {
- for (i=0; i<n; i++) vecout[i] = -1.0;
- if (rank == root) {
- MPI_Scatter( vecin, 1, vec, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
- root, MPI_COMM_WORLD );
- }
- else {
- MPI_Scatter( NULL, -1, MPI_DATATYPE_NULL, vecout, n, MPI_DOUBLE,
- root, MPI_COMM_WORLD );
- ivalue = rank * ((n-1) * stride + 1);
- for (i=0; i<n; i++) {
- if (vecout[i] != ivalue) {
- printf( "[%d] Expected %f but found %f for vecout[%d]\n",
- rank, ivalue, vecout[i], i );
- err++;
- }
- ivalue += stride;
- }
- }
+ for (i = 0; i < n * stride * size; i++)
+ vecin[i] = (double) i;
+ for (root = 0; root < size; root++) {
+ for (i = 0; i < n; i++)
+ vecout[i] = -1.0;
+ if (rank == root) {
+ MPI_Scatter(vecin, 1, vec, MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, root, MPI_COMM_WORLD);
+ }
+ else {
+ MPI_Scatter(NULL, -1, MPI_DATATYPE_NULL, vecout, n, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ ivalue = rank * ((n - 1) * stride + 1);
+ for (i = 0; i < n; i++) {
+ if (vecout[i] != ivalue) {
+ printf("[%d] Expected %f but found %f for vecout[%d]\n",
+ rank, ivalue, vecout[i], i);
+ err++;
+ }
+ ivalue += stride;
+ }
+ }
}
- MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
- if (rank == 0 && MPI_SUCCESS ==
- MPI_Scatter(vecin, 1, MPI_DOUBLE, vecin, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD))
- err++;
-
free(vecin);
free(vecout);
- MTest_Finalize( err );
- MPI_Type_free( &vec );
+ MTest_Finalize(err);
+ MPI_Type_free(&vec);
MPI_Finalize();
return 0;
}
-
/* This example sends contiguous data and receives a vector on some nodes
and contiguous data on others. There is some evidence that some
MPI implementations do not check recvcount on the root process; this
- test checks for that case
+ test checks for that case
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Datatype vec;
double *vecin, *vecout, ivalue;
- int root, i, n, stride, errs = 0;
- int rank, size;
+ int root, i, n, stride, errs = 0;
+ int rank, size;
MPI_Aint vextent;
- MTest_Init( &argc, &argv );
-
- MPI_Comm_size( MPI_COMM_WORLD, &size );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MTest_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
n = 12;
stride = 10;
/* Note that vecout really needs to be only (n-1)*stride+1 doubles, but
- this is easier and allows a little extra room if there is a bug */
- vecout = (double *)malloc( n * stride * sizeof(double) );
- vecin = (double *)malloc( n * size * sizeof(double) );
-
- MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
- MPI_Type_commit( &vec );
- MPI_Type_extent( vec, &vextent );
- if (vextent != ((n-1)*(MPI_Aint)stride + 1) * sizeof(double) ) {
- errs++;
- printf( "Vector extent is %ld, should be %ld\n",
- (long) vextent, (long)(((n-1)*stride+1)*sizeof(double)) );
+ * this is easier and allows a little extra room if there is a bug */
+ vecout = (double *) malloc(n * stride * sizeof(double));
+ vecin = (double *) malloc(n * size * sizeof(double));
+
+ MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
+ MPI_Type_commit(&vec);
+ MPI_Type_extent(vec, &vextent);
+ if (vextent != ((n - 1) * (MPI_Aint) stride + 1) * sizeof(double)) {
+ errs++;
+ printf("Vector extent is %ld, should be %ld\n",
+ (long) vextent, (long) (((n - 1) * stride + 1) * sizeof(double)));
}
/* Note that the exted of type vector is from the first to the
- last element, not n*stride.
- E.g., with n=1, the extent is a single double */
+ * last element, not n*stride.
+ * E.g., with n=1, the extent is a single double */
- for (i=0; i<n*size; i++) vecin[i] = (double)i;
- for (root=0; root<size; root++) {
- for (i=0; i<n*stride; i++) vecout[i] = -1.0;
- if (rank == root) {
- /* Receive into a vector */
- MPI_Scatter( vecin, n, MPI_DOUBLE, vecout, 1, vec,
- root, MPI_COMM_WORLD );
- for (i=0; i<n; i++) {
- ivalue = n*root + i;
- if (vecout[i*stride] != ivalue) {
- errs++;
- printf( "[%d] Expected %f but found %f for vecout[%d] on root\n",
- rank, ivalue, vecout[i*stride], i *stride );
- }
- }
- }
- else {
- /* Receive into contiguous data */
- MPI_Scatter( NULL, -1, MPI_DATATYPE_NULL, vecout, n, MPI_DOUBLE,
- root, MPI_COMM_WORLD );
- for (i=0; i<n; i++) {
- ivalue = rank * n + i;
- if (vecout[i] != ivalue) {
- printf( "[%d] Expected %f but found %f for vecout[%d]\n",
- rank, ivalue, vecout[i], i );
- errs++;
- }
- }
- }
+ for (i = 0; i < n * size; i++)
+ vecin[i] = (double) i;
+ for (root = 0; root < size; root++) {
+ for (i = 0; i < n * stride; i++)
+ vecout[i] = -1.0;
+ if (rank == root) {
+ /* Receive into a vector */
+ MPI_Scatter(vecin, n, MPI_DOUBLE, vecout, 1, vec, root, MPI_COMM_WORLD);
+ for (i = 0; i < n; i++) {
+ ivalue = n * root + i;
+ if (vecout[i * stride] != ivalue) {
+ errs++;
+ printf("[%d] Expected %f but found %f for vecout[%d] on root\n",
+ rank, ivalue, vecout[i * stride], i * stride);
+ }
+ }
+ }
+ else {
+ /* Receive into contiguous data */
+ MPI_Scatter(NULL, -1, MPI_DATATYPE_NULL, vecout, n, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ for (i = 0; i < n; i++) {
+ ivalue = rank * n + i;
+ if (vecout[i] != ivalue) {
+ printf("[%d] Expected %f but found %f for vecout[%d]\n",
+ rank, ivalue, vecout[i], i);
+ errs++;
+ }
+ }
+ }
}
-
- MTest_Finalize( errs );
- MPI_Type_free( &vec );
+
+ free(vecin);
+ free(vecout);
+ MTest_Finalize(errs);
+ MPI_Type_free(&vec);
MPI_Finalize();
return 0;
}
-
/* This example sends a vector and receives individual elements */
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
MPI_Datatype vec;
double *vecin, *vecout, ivalue;
- int root, i, n, stride, err = 0;
- int rank, size;
+ int root, i, n, stride, err = 0;
+ int rank, size;
- MPI_Init( &argc, &argv );
-
- MPI_Comm_size( MPI_COMM_WORLD, &size );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
n = 12;
stride = 10;
- vecin = (double *)malloc( n * stride * size * sizeof(double) );
- vecout = (double *)malloc( n * sizeof(double) );
-
- MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec );
- MPI_Type_commit( &vec );
-
- for (i=0; i<n*stride*size; i++) vecin[i] = (double)i;
- for (root=0; root<size; root++) {
- for (i=0; i<n; i++) vecout[i] = -1.0;
- MPI_Scatter( vecin, 1, vec, vecout, n, MPI_DOUBLE, root,
- MPI_COMM_WORLD );
- ivalue = rank * ((n-1) * stride + 1);
- for (i=0; i<n; i++) {
- if (vecout[i] != ivalue) {
- printf( "Expected %f but found %f\n",
- ivalue, vecout[i] );
- err++;
- }
- ivalue += stride;
- }
+ vecin = (double *) malloc(n * stride * size * sizeof(double));
+ vecout = (double *) malloc(n * sizeof(double));
+
+ MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
+ MPI_Type_commit(&vec);
+
+ for (i = 0; i < n * stride * size; i++)
+ vecin[i] = (double) i;
+ for (root = 0; root < size; root++) {
+ for (i = 0; i < n; i++)
+ vecout[i] = -1.0;
+ MPI_Scatter(vecin, 1, vec, vecout, n, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ ivalue = rank * ((n - 1) * stride + 1);
+ for (i = 0; i < n; i++) {
+ if (vecout[i] != ivalue) {
+ printf("Expected %f but found %f\n", ivalue, vecout[i]);
+ err++;
+ }
+ ivalue += stride;
+ }
}
i = err;
- MPI_Allreduce( &i, &err, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ MPI_Allreduce(&i, &err, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (rank == 0) {
- if (err > 0) printf( "Found %d errors!\n", err );
- else printf( " No Errors\n" );
+ if (err > 0)
+ printf("Found %d errors!\n", err);
+ else
+ printf(" No Errors\n");
}
- MPI_Type_free( &vec );
+ free(vecin);
+ free(vecout);
+ MPI_Type_free(&vec);
MPI_Finalize();
return 0;
-
-}
+}
#include <stdio.h>
/* Prototypes for picky compilers */
-void SetData ( double *, double *, int, int, int, int, int, int );
-int CheckData ( double *, int, int, int, int, int, int );
+void SetData(double *, double *, int, int, int, int, int, int);
+int CheckData(double *, int, int, int, int, int, int);
/*
This is an example of using scatterv to send a matrix from one
process to all others, with the matrix stored in Fortran order.
*/
-void SetData( double *sendbuf, double *recvbuf, int nx, int ny,
- int myrow, int mycol, int nrow, int ncol )
+void SetData(double *sendbuf, double *recvbuf, int nx, int ny,
+ int myrow, int mycol, int nrow, int ncol)
{
int coldim, i, j, m, k;
double *p;
if (myrow == 0 && mycol == 0) {
coldim = nx * nrow;
- for (j=0; j<ncol; j++) {
- for (i=0; i<nrow; i++) {
+ for (j = 0; j < ncol; j++) {
+ for (i = 0; i < nrow; i++) {
p = sendbuf + i * nx + j * (ny * coldim);
- for (m=0; m<ny; m++) {
- for (k=0; k<nx; k++) {
+ for (m = 0; m < ny; m++) {
+ for (k = 0; k < nx; k++) {
p[k] = 1000 * j + 100 * i + m * nx + k;
}
p += coldim;
}
}
}
- for (i=0; i<nx*ny; i++)
+ for (i = 0; i < nx * ny; i++)
recvbuf[i] = -1.0;
}
-int CheckData( double *recvbuf,
- int nx, int ny, int myrow, int mycol, int nrow,
- int expect_no_value )
+int CheckData(double *recvbuf, int nx, int ny, int myrow, int mycol, int nrow, int expect_no_value)
{
int coldim, m, k;
double *p, val;
int errs = 0;
coldim = nx;
- p = recvbuf;
- for (m=0; m<ny; m++) {
- for (k=0; k<nx; k++) {
+ p = recvbuf;
+ for (m = 0; m < ny; m++) {
+ for (k = 0; k < nx; k++) {
/* If expect_no_value is true then we assume that the pre-scatterv
* value should remain in the recvbuf for our portion of the array.
* This is the case for the root process when using MPI_IN_PLACE. */
errs++;
if (errs < 10) {
printf("Error in (%d,%d) [%d,%d] location, got %f expected %f\n",
- m, k, myrow, mycol, p[k], val );
+ m, k, myrow, mycol, p[k], val);
}
else if (errs == 10) {
- printf( "Too many errors; suppressing printing\n" );
+ printf("Too many errors; suppressing printing\n");
}
}
}
return errs;
}
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
int rank, size, myrow, mycol, nx, ny, stride, cnt, i, j, errs, errs_in_place, tot_errs;
- double *sendbuf, *recvbuf;
+ double *sendbuf, *recvbuf;
MPI_Datatype vec, block, types[2];
MPI_Aint displs[2];
- int *scdispls;
- int blens[2];
+ int *scdispls;
+ int blens[2];
MPI_Comm comm2d;
int dims[2], periods[2], coords[2], lcoords[2];
int *sendcounts;
- MPI_Init( &argc, &argv );
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
/* Get a 2-d decomposition of the processes */
- dims[0] = 0; dims[1] = 0;
- MPI_Dims_create( size, 2, dims );
- periods[0] = 0; periods[1] = 0;
- MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &comm2d );
- MPI_Cart_get( comm2d, 2, dims, periods, coords );
+ dims[0] = 0;
+ dims[1] = 0;
+ MPI_Dims_create(size, 2, dims);
+ periods[0] = 0;
+ periods[1] = 0;
+ MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &comm2d);
+ MPI_Cart_get(comm2d, 2, dims, periods, coords);
myrow = coords[0];
mycol = coords[1];
/*
if (rank == 0)
- printf( "Decomposition is [%d x %d]\n", dims[0], dims[1] );
+ printf("Decomposition is [%d x %d]\n", dims[0], dims[1]);
*/
/* Get the size of the matrix */
ny = 8;
stride = nx * dims[0];
- recvbuf = (double *)malloc( nx * ny * sizeof(double) );
+ recvbuf = (double *) malloc(nx * ny * sizeof(double));
if (!recvbuf) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
sendbuf = 0;
if (myrow == 0 && mycol == 0) {
- sendbuf = (double *)malloc( nx * ny * size * sizeof(double) );
+ sendbuf = (double *) malloc(nx * ny * size * sizeof(double));
if (!sendbuf) {
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
}
- sendcounts = (int *) malloc( size * sizeof(int) );
- scdispls = (int *)malloc( size * sizeof(int) );
+ sendcounts = (int *) malloc(size * sizeof(int));
+ scdispls = (int *) malloc(size * sizeof(int));
- MPI_Type_vector( ny, nx, stride, MPI_DOUBLE, &vec );
- blens[0] = 1; blens[1] = 1;
- types[0] = vec; types[1] = MPI_UB;
- displs[0] = 0; displs[1] = nx * sizeof(double);
+ MPI_Type_vector(ny, nx, stride, MPI_DOUBLE, &vec);
+ blens[0] = 1;
+ blens[1] = 1;
+ types[0] = vec;
+ types[1] = MPI_UB;
+ displs[0] = 0;
+ displs[1] = nx * sizeof(double);
- MPI_Type_struct( 2, blens, displs, types, &block );
- MPI_Type_free( &vec );
- MPI_Type_commit( &block );
+ MPI_Type_struct(2, blens, displs, types, &block);
+ MPI_Type_free(&vec);
+ MPI_Type_commit(&block);
/* Set up the transfer */
- cnt = 0;
- for (i=0; i<dims[1]; i++) {
- for (j=0; j<dims[0]; j++) {
+ cnt = 0;
+ for (i = 0; i < dims[1]; i++) {
+ for (j = 0; j < dims[0]; j++) {
sendcounts[cnt] = 1;
/* Using Cart_coords makes sure that ranks (used by
- sendrecv) matches the cartesian coordinates (used to
- set data in the matrix) */
- MPI_Cart_coords( comm2d, cnt, 2, lcoords );
+ * sendrecv) matches the cartesian coordinates (used to
+ * set data in the matrix) */
+ MPI_Cart_coords(comm2d, cnt, 2, lcoords);
scdispls[cnt++] = lcoords[0] + lcoords[1] * (dims[0] * ny);
}
}
- SetData( sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1] );
- MPI_Scatterv( sendbuf, sendcounts, scdispls, block,
- recvbuf, nx * ny, MPI_DOUBLE, 0, comm2d );
- if((errs = CheckData( recvbuf, nx, ny, myrow, mycol, dims[0], 0 ))) {
- fprintf( stdout, "Failed to transfer data\n" );
+ SetData(sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1]);
+ MPI_Scatterv(sendbuf, sendcounts, scdispls, block, recvbuf, nx * ny, MPI_DOUBLE, 0, comm2d);
+ if ((errs = CheckData(recvbuf, nx, ny, myrow, mycol, dims[0], 0))) {
+ fprintf(stdout, "Failed to transfer data\n");
}
/* once more, but this time passing MPI_IN_PLACE for the root */
- SetData( sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1] );
- MPI_Scatterv( sendbuf, sendcounts, scdispls, block,
- (rank == 0 ? MPI_IN_PLACE : recvbuf), nx * ny, MPI_DOUBLE, 0, comm2d );
- errs_in_place = CheckData( recvbuf, nx, ny, myrow, mycol, dims[0], (rank == 0) );
- if(errs_in_place) {
- fprintf( stdout, "Failed to transfer data (MPI_IN_PLACE)\n" );
+ SetData(sendbuf, recvbuf, nx, ny, myrow, mycol, dims[0], dims[1]);
+ MPI_Scatterv(sendbuf, sendcounts, scdispls, block,
+ (rank == 0 ? MPI_IN_PLACE : recvbuf), nx * ny, MPI_DOUBLE, 0, comm2d);
+ errs_in_place = CheckData(recvbuf, nx, ny, myrow, mycol, dims[0], (rank == 0));
+ if (errs_in_place) {
+ fprintf(stdout, "Failed to transfer data (MPI_IN_PLACE)\n");
}
errs += errs_in_place;
- MPI_Allreduce( &errs, &tot_errs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ MPI_Allreduce(&errs, &tot_errs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (rank == 0) {
if (tot_errs == 0)
- printf( " No Errors\n" );
+ printf(" No Errors\n");
else
- printf( "%d errors in use of MPI_SCATTERV\n", tot_errs );
+ printf("%d errors in use of MPI_SCATTERV\n", tot_errs);
}
- if (sendbuf) free( sendbuf );
- free( recvbuf );
- free( sendcounts );
- free( scdispls );
- MPI_Type_free( &block );
- MPI_Comm_free( &comm2d );
+ if (sendbuf)
+ free(sendbuf);
+ free(recvbuf);
+ free(sendcounts);
+ free(scdispls);
+ MPI_Type_free(&block);
+ MPI_Comm_free(&comm2d);
MPI_Finalize();
return errs;
}
-
-
allgatherv2 10
allgatherv3 10
allgatherv4 4 timeLimit=600
+allgather_struct 10
bcasttest 4
bcasttest 8
bcasttest 10
-bcast2 4
+bcast_full 4
# More that 8 processes are required to get bcast to switch to the long
# msg algorithm (see coll definitions in mpiimpl.h)
-bcast2 10 timeLimit=420
-bcast3 10 timeLimit=420
+#bcast_min_datatypes 10 timeLimit=420
+#bcast_comm_world 10 timeLimit=420
bcastzerotype 1
bcastzerotype 4
bcastzerotype 5
nonblocking3 4 mpiversion=3.0
nonblocking3 5 mpiversion=3.0
nonblocking3 10 timeLimit=600 mpiversion=3.0
-nonblocking4 4 mpiversion=3.0
iallred 2 mpiversion=3.0
# ibarrier will hang forever if it fails, but will complete quickly if it
# succeeds
#include <stdio.h>
#include <stdlib.h>
-/*
- * Test user-defined operations with a large number of elements.
+/*
+ * Test user-defined operations with a large number of elements.
* Added because a talk at EuroMPI'12 claimed that these failed with
* more than 64k elements
*/
#define MAX_ERRS 10
#define MAX_COUNT 1200000
-void myop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype );
+void myop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype);
-/*
+/*
* myop takes a datatype that is a triple of doubles, and computes
* the sum, max, min of the respective elements of the triple.
*/
-void myop( void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype )
+void myop(void *cinPtr, void *coutPtr, int *count, MPI_Datatype * dtype)
{
int i, n = *count;
- double const *cin = (double *)cinPtr;
- double *cout = (double *)coutPtr;
-
- for (i=0; i<n; i++) {
- cout[0] += cin[0];
- cout[1] = (cout[1] > cin[1]) ? cout[1] : cin[1];
- cout[2] = (cout[2] < cin[2]) ? cout[2] : cin[2];
- cin += 3;
- cout += 3;
+ double const *cin = (double *) cinPtr;
+ double *cout = (double *) coutPtr;
+
+ for (i = 0; i < n; i++) {
+ cout[0] += cin[0];
+ cout[1] = (cout[1] > cin[1]) ? cout[1] : cin[1];
+ cout[2] = (cout[2] < cin[2]) ? cout[2] : cin[2];
+ cin += 3;
+ cout += 3;
}
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int wsize, wrank, i, count;
MPI_Datatype tripleType;
double *inVal, *outVal;
- double maxval, sumval;
+ double maxval, sumval, minval;
MPI_Op op;
- MTest_Init( &argc, &argv );
- MPI_Op_create( myop, 0, &op );
- MPI_Type_contiguous( 3, MPI_DOUBLE, &tripleType );
- MPI_Type_commit( &tripleType );
+ MTest_Init(&argc, &argv);
+ MPI_Op_create(myop, 0, &op);
+ MPI_Type_contiguous(3, MPI_DOUBLE, &tripleType);
+ MPI_Type_commit(&tripleType);
- MPI_Comm_size( MPI_COMM_WORLD, &wsize );
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
- for (count=1; count<MAX_COUNT; count += count) {
- if (wrank == 0)
- MTestPrintfMsg( 1, "Count = %d\n", count );
- inVal = (double *)malloc( 3 * count * sizeof(double) );
- outVal = (double *)malloc( 3 * count * sizeof(double) );
- if (!inVal || !outVal) {
- fprintf( stderr, "Unable to allocated %d words for data\n",
- 3 * count );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
- }
- for (i=0; i<count*3; i++) {
- outVal[i] = -1;
- inVal[i] = 1 + (i & 0x3);
- }
- MPI_Reduce( inVal, outVal, count, tripleType, op, 0, MPI_COMM_WORLD );
- /* Check Result values */
- if (wrank == 0) {
- for (i=0; i<3*count; i+=3) {
- sumval = wsize * (1 + (i & 0x3));
- maxval = 1 + ((i+1) & 0x3);
- if (outVal[i] != sumval) {
- if (errs < MAX_ERRS)
- fprintf( stderr, "%d: outval[%d] = %f, expected %f (sum)\n",
- count, i, outVal[i], sumval );
- errs++;
- }
- if (outVal[i+1] != maxval) {
- if (errs < MAX_ERRS)
- fprintf( stderr, "%d: outval[%d] = %f, expected %f (max)\n",
- count, i+1, outVal[i+1], maxval );
- errs++;
- }
- if (outVal[i+2] != 1 + ((i+2)&0x3)) {
- if (errs < MAX_ERRS)
- fprintf( stderr, "%d: outval[%d] = %f, expected %f (min)\n",
- count, i+2, outVal[i+2], (double)(1 + ((i+2)^0x3)) );
- errs++;
- }
- }
- }
+ for (count = 1; count < MAX_COUNT; count += count) {
+ if (wrank == 0)
+ MTestPrintfMsg(1, "Count = %d\n", count);
+ inVal = (double *) malloc(3 * count * sizeof(double));
+ outVal = (double *) malloc(3 * count * sizeof(double));
+ if (!inVal || !outVal) {
+ fprintf(stderr, "Unable to allocate %d words for data\n", 3 * count);
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+ for (i = 0; i < count * 3; i++) {
+ outVal[i] = -1;
+ inVal[i] = 1 + (i & 0x3);
+ }
+ MPI_Reduce(inVal, outVal, count, tripleType, op, 0, MPI_COMM_WORLD);
+ /* Check Result values */
+ if (wrank == 0) {
+ for (i = 0; i < 3 * count; i += 3) {
+ sumval = wsize * (1 + (i & 0x3));
+ maxval = 1 + ((i + 1) & 0x3);
+ minval = 1 + ((i + 2) & 0x3);
+ if (outVal[i] != sumval) {
+ if (errs < MAX_ERRS)
+ fprintf(stderr, "%d: outval[%d] = %f, expected %f (sum)\n",
+ count, i, outVal[i], sumval);
+ errs++;
+ }
+ if (outVal[i + 1] != maxval) {
+ if (errs < MAX_ERRS)
+ fprintf(stderr, "%d: outval[%d] = %f, expected %f (max)\n",
+ count, i + 1, outVal[i + 1], maxval);
+ errs++;
+ }
+ if (outVal[i + 2] != minval) {
+ if (errs < MAX_ERRS)
+ fprintf(stderr, "%d: outval[%d] = %f, expected %f (min)\n",
+ count, i + 2, outVal[i + 2], minval);
+ errs++;
+ }
+ }
+ }
- free( inVal );
- free( outVal );
+ free(inVal);
+ free(outVal);
}
-
- MPI_Op_free( &op );
- MPI_Type_free( &tripleType );
- MTest_Finalize( errs );
- MPI_Finalize( );
+
+ MPI_Op_free(&op);
+ MPI_Type_free(&tripleType);
+ MTest_Finalize(errs);
+ MPI_Finalize();
return 0;
}