add_executable(gather2 gather2.c)
# add_executable(gather2_save gather2_save.c)
add_executable(gather gather.c)
- add_executable(iallred iallred.c)
- add_executable(ibarrier ibarrier.c)
+# add_executable(iallred iallred.c)
+# add_executable(ibarrier ibarrier.c)
# add_executable(icallgather icallgather.c)
# add_executable(icallgatherv icallgatherv.c)
# add_executable(icallreduce icallreduce.c)
# add_executable(icscatter icscatter.c)
# add_executable(icscatterv icscatterv.c)
add_executable(longuser longuser.c)
- add_executable(nonblocking2 nonblocking2.c)
- add_executable(nonblocking3 nonblocking3.c)
- add_executable(nonblocking nonblocking.c)
+# add_executable(nonblocking2 nonblocking2.c)
+# add_executable(nonblocking3 nonblocking3.c)
+# add_executable(nonblocking4 nonblocking4.c)
+# add_executable(nonblocking nonblocking.c)
# add_executable(opband opband.c)
# add_executable(opbor opbor.c)
# add_executable(opbxor opbxor.c)
target_link_libraries(gather2 simgrid mtest_c)
# target_link_libraries(gather2_save simgrid mtest_c)
target_link_libraries(gather simgrid mtest_c)
- target_link_libraries(iallred simgrid mtest_c)
- target_link_libraries(ibarrier simgrid mtest_c)
+# target_link_libraries(iallred simgrid mtest_c)
+# target_link_libraries(ibarrier simgrid mtest_c)
# target_link_libraries(icallgather simgrid mtest_c)
# target_link_libraries(icallgatherv simgrid mtest_c)
# target_link_libraries(icallreduce simgrid mtest_c)
# target_link_libraries(icscatter simgrid mtest_c)
# target_link_libraries(icscatterv simgrid mtest_c)
target_link_libraries(longuser simgrid mtest_c)
- target_link_libraries(nonblocking2 simgrid mtest_c)
- target_link_libraries(nonblocking3 simgrid mtest_c)
- target_link_libraries(nonblocking simgrid mtest_c)
+# target_link_libraries(nonblocking2 simgrid mtest_c)
+# target_link_libraries(nonblocking3 simgrid mtest_c)
+# target_link_libraries(nonblocking4 simgrid mtest_c)
+# target_link_libraries(nonblocking simgrid mtest_c)
# target_link_libraries(opband simgrid mtest_c)
# target_link_libraries(opbor simgrid mtest_c)
# target_link_libraries(opbxor simgrid mtest_c)
MTestFreeComm( &comm );
}
-
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ vecout = (double *) malloc(size * sizeof(double));
+ if (MPI_SUCCESS == MPI_Allgather(&vecout[rank], 1, MPI_DOUBLE,
+ vecout, 1, MPI_DOUBLE, MPI_COMM_WORLD))
+ errs++;
+ free(vecout);
+#endif
+
MTest_Finalize( errs );
MPI_Finalize();
return 0;
}
free( vecout );
}
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ vecout = (double *) malloc(size * sizeof(double));
+ if (MPI_SUCCESS == MPI_Allgatherv(&vecout[rank * recvcounts[rank]], recvcounts[rank], MPI_DOUBLE,
+ vecout, recvcounts, displs, MPI_DOUBLE, comm))
+ errs++;
+ free(vecout);
+#endif
+
free( displs );
free( recvcounts );
MTestFreeComm( &comm );
#define MAX_BUF (32 * 1024 * 1024)
#define LOOPS 10
-SMPI_VARINIT_GLOBAL(sbuf, char*);
-SMPI_VARINIT_GLOBAL(rbuf, char*);
-SMPI_VARINIT_GLOBAL(recvcounts, int*);
-SMPI_VARINIT_GLOBAL(displs, int*);
-SMPI_VARINIT_GLOBAL_AND_SET(errs, int, 0);
+char * sbuf, * rbuf;
+int * recvcounts, * displs;
+int errs = 0;
/* #define dprintf printf */
#define dprintf(...)
if (LARGE_BUF * comm_size > MAX_BUF)
goto fn_exit;
- SMPI_VARGET_GLOBAL(sbuf) = (void *) calloc(MAX_BUF, 1);
- SMPI_VARGET_GLOBAL(rbuf) = (void *) calloc(MAX_BUF, 1);
+ sbuf = (void *) calloc(MAX_BUF, 1);
+ rbuf = (void *) calloc(MAX_BUF, 1);
srand(time(NULL));
- SMPI_VARGET_GLOBAL(recvcounts) = (void *) malloc(comm_size * sizeof(int));
- SMPI_VARGET_GLOBAL(displs) = (void *) malloc(comm_size * sizeof(int));
- if (!SMPI_VARGET_GLOBAL(recvcounts) || !SMPI_VARGET_GLOBAL(displs) || !SMPI_VARGET_GLOBAL(sbuf) || !SMPI_VARGET_GLOBAL(rbuf)) {
+ recvcounts = (void *) malloc(comm_size * sizeof(int));
+ displs = (void *) malloc(comm_size * sizeof(int));
+ if (!recvcounts || !displs || !sbuf || !rbuf) {
fprintf(stderr, "Unable to allocate memory:\n");
- if (!SMPI_VARGET_GLOBAL(sbuf)) fprintf(stderr,"\tsbuf of %d bytes\n", MAX_BUF );
- if (!SMPI_VARGET_GLOBAL(rbuf)) fprintf(stderr,"\trbuf of %d bytes\n", MAX_BUF );
- if (!SMPI_VARGET_GLOBAL(recvcounts)) fprintf(stderr,"\trecvcounts of %zd bytes\n", comm_size * sizeof(int) );
- if (!SMPI_VARGET_GLOBAL(displs)) fprintf(stderr,"\tdispls of %zd bytes\n", comm_size * sizeof(int) );
+ if (!sbuf) fprintf(stderr,"\tsbuf of %d bytes\n", MAX_BUF );
+ if (!rbuf) fprintf(stderr,"\trbuf of %d bytes\n", MAX_BUF );
+ if (!recvcounts) fprintf(stderr,"\trecvcounts of %zd bytes\n", comm_size * sizeof(int) );
+ if (!displs) fprintf(stderr,"\tdispls of %zd bytes\n", comm_size * sizeof(int) );
fflush(stderr);
MPI_Abort(MPI_COMM_WORLD, -1);
exit(-1);
comm_tests(comm);
MPI_Comm_free(&comm);
- //free(SMPI_VARGET_GLOBAL(sbuf));
- //free(SMPI_VARGET_GLOBAL(rbuf));
- free(SMPI_VARGET_GLOBAL(recvcounts));
- free(SMPI_VARGET_GLOBAL(displs));
+ free(sbuf);
+ free(rbuf);
+ free(recvcounts);
+ free(displs);
fn_exit:
- MTest_Finalize(SMPI_VARGET_GLOBAL(errs));
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
MPI_Comm_size(comm, &comm_size);
MPI_Comm_rank(comm, &comm_rank);
- SMPI_VARGET_GLOBAL(displs)[0] = 0;
+ displs[0] = 0;
for (i = 0; i < comm_size; i++) {
if (test_type == REGULAR)
- SMPI_VARGET_GLOBAL(recvcounts)[i] = msg_size;
+ recvcounts[i] = msg_size;
else if (test_type == BCAST)
- SMPI_VARGET_GLOBAL(recvcounts)[i] = (!i) ? msg_size : 0;
+ recvcounts[i] = (!i) ? msg_size : 0;
else if (test_type == SPIKE)
- SMPI_VARGET_GLOBAL(recvcounts)[i] = (!i) ? (msg_size / 2) : (msg_size / (2 * (comm_size - 1)));
+ recvcounts[i] = (!i) ? (msg_size / 2) : (msg_size / (2 * (comm_size - 1)));
else if (test_type == HALF_FULL)
- SMPI_VARGET_GLOBAL(recvcounts)[i] = (i < (comm_size / 2)) ? (2 * msg_size) : 0;
+ recvcounts[i] = (i < (comm_size / 2)) ? (2 * msg_size) : 0;
else if (test_type == LINEAR_DECREASE) {
tmp = 2 * msg_size * (comm_size - 1 - i) / (comm_size - 1);
if (tmp != (int)tmp) {
MPI_Abort( MPI_COMM_WORLD, 1 );
exit(1);
}
- SMPI_VARGET_GLOBAL(recvcounts)[i] = (int) tmp;
+ recvcounts[i] = (int) tmp;
/* If the maximum message size is too large, don't run */
if (tmp > MAX_BUF) return 0;
for (j = 0; j < i; j++) {
if (i - 1 + j >= comm_size) continue;
tmp = msg_size * comm_size / (log(comm_size) * i);
- SMPI_VARGET_GLOBAL(recvcounts)[i - 1 + j] = (int) tmp;
- SMPI_VARGET_GLOBAL(displs)[i - 1 + j] = 0;
+ recvcounts[i - 1 + j] = (int) tmp;
+ displs[i - 1 + j] = 0;
/* If the maximum message size is too large, don't run */
if (tmp > MAX_BUF) return 0;
}
if (i < comm_size - 1)
- SMPI_VARGET_GLOBAL(displs)[i+1] = SMPI_VARGET_GLOBAL(displs)[i] + SMPI_VARGET_GLOBAL(recvcounts)[i];
+ displs[i+1] = displs[i] + recvcounts[i];
}
/* Test that:
MPI_Barrier(comm);
start = MPI_Wtime();
for (i = 0; i < LOOPS; i++) {
- MPI_Allgatherv(SMPI_VARGET_GLOBAL(sbuf), SMPI_VARGET_GLOBAL(recvcounts)[comm_rank], MPI_CHAR,
- SMPI_VARGET_GLOBAL(rbuf), SMPI_VARGET_GLOBAL(recvcounts), SMPI_VARGET_GLOBAL(displs), MPI_CHAR, comm);
+ MPI_Allgatherv(sbuf, recvcounts[comm_rank], MPI_CHAR,
+ rbuf, recvcounts, displs, MPI_CHAR, comm);
}
end = MPI_Wtime();
MPI_Barrier(comm);
free( buf );
free( bufout );
- //MPI_Type_free( &mattype );
+ MPI_Type_free( &mattype );
MTestFreeComm( &comm );
}
- // MPI_Op_free( &op );
+ MPI_Op_free( &op );
MTest_Finalize( errs );
MPI_Finalize();
}
MPI_Op_free( &op );
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* Check to make sure that aliasing is disallowed correctly */
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (MPI_SUCCESS == MPI_Allreduce(&rank, &rank, 1, MPI_INT, MPI_OP_NULL, MPI_COMM_WORLD))
+ errs++;
+#endif
+
MTest_Finalize( errs );
MPI_Finalize();
return 0;
MTestFreeComm( &comm );
}
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* Check to make sure that aliasing is disallowed correctly */
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (MPI_SUCCESS == MPI_Alltoall(&rank, 1, MPI_INT, &rank, 1, MPI_INT, MPI_COMM_WORLD))
+ errs++;
+#endif
+
MTest_Finalize( errs );
MPI_Finalize();
return 0;
}
}
}
+
+ /* Check to make sure that aliasing is disallowed correctly */
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Alltoallv(rbuf, recvcounts, rdispls, MPI_INT,
+ rbuf, recvcounts, rdispls, MPI_INT, comm))
+ err++;
#endif
free( rdispls );
}
}
}
+
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Alltoallw(rbuf, recvcounts, rdispls, recvtypes,
+ rbuf, recvcounts, rdispls, recvtypes, comm))
+ err++;
#endif
free(recvtypes);
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
{
int i, type_size;
MPI_Datatype type = MPI_DATATYPE_NULL;
- int *buf = NULL;
+ char *buf = NULL;
int wrank, wsize;
MPI_Init(&argc, &argv);
/* a random non-zero sized buffer */
#define NELEM (10)
buf = malloc(NELEM*sizeof(int));
- assert(buf!=NULL);
+ assert(buf);
for (i = 0; i < NELEM; i++) {
buf[i] = wrank * NELEM + i;
table[i][j] = rank + 10;
/* Everybody gets the gathered data */
- MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
- &table[0][0], recv_counts, displs,
- MPI_INT, test_comm);
+ if ((char *) &table[begin_row][0] != (char *) table + displs[rank]*sizeof(int))
+ MPI_Allgatherv(&table[begin_row][0], send_count, MPI_INT,
+ &table[0][0], recv_counts, displs,
+ MPI_INT, test_comm);
+ else
+ MPI_Allgatherv(MPI_IN_PLACE, send_count, MPI_INT,
+ &table[0][0], recv_counts, displs,
+ MPI_INT, test_comm);
/* Everybody should have the same table now.
}
}
}
+
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ /* Make sure that we check for buffer aliasing properly */
+ if (MPI_SUCCESS == MPI_Exscan( recvbuf, recvbuf, count, MPI_INT, MPI_SUM, comm ))
+ errs++;
#endif
free( sendbuf );
/* do a zero length gather */
MPI_Gather( NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD );
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* Check to make sure that aliasing is disallowed correctly */
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ if (0 == rank)
+ if (MPI_SUCCESS == MPI_Gather(&rank, 1, MPI_INT,
+ &rank, 1, MPI_INT, 0, MPI_COMM_WORLD))
+ errs++;
+#endif
+
MTest_Finalize( errs );
MPI_Finalize();
return 0;
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
#include <stdio.h>
#include <assert.h>
#include "mpi.h"
#include "mpitest.h"
-/* Since MPICH is currently the only NBC implementation in existence, just use
- * this quick-and-dirty #ifdef to decide whether to test the nonblocking
- * collectives. Eventually we can add a configure option or configure test, or
- * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
- * check */
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_NBC_ROUTINES 1
-#endif
-
int main(int argc, char *argv[])
{
- int size, rank;
-#if defined(TEST_NBC_ROUTINES)
MPI_Request request;
+ int size, rank;
int one = 1, two = 2, isum, sum;
-#endif
+ int errs = 0;
+
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
assert(size == 2);
-#if defined(TEST_NBC_ROUTINES)
MPI_Iallreduce(&one,&isum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD,&request);
MPI_Allreduce(&two,&sum,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
MPI_Wait(&request,MPI_STATUS_IGNORE);
assert(isum == 2);
assert(sum == 4);
- if (rank == 0)
+
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Iallreduce(&one, &one, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &request))
+ errs++;
+
+ if (rank == 0 && errs == 0)
printf(" No errors\n");
-#endif
MPI_Finalize();
return 0;
#include <stdio.h>
#include <unistd.h>
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_NBC_ROUTINES 1
-#endif
-
int main(int argc, char *argv[])
{
-#if defined(TEST_NBC_ROUTINES)
MPI_Request barrier;
- int i,done;
-#endif
- int rank;
+ int rank,i,done;
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-#if defined(TEST_NBC_ROUTINES)
MPI_Ibarrier(MPI_COMM_WORLD,&barrier);
for (i=0,done=0; !done; i++) {
usleep(1000);
/*printf("[%d] MPI_Test: %d\n",rank,i);*/
MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);
}
-#endif
+
if (rank == 0)
printf(" No Errors\n");
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
-#include "mpitest.h"
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
-#include "mpitestconf.h"
-
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_NBC_ROUTINES 1
-#endif
#define NUM_INTS (2)
int main(int argc, char **argv)
{
int errs = 0;
+ int i;
int rank, size;
int *sbuf = NULL;
int *rbuf = NULL;
int *rcounts = NULL;
int *sdispls = NULL;
int *rdispls = NULL;
- MPI_Comm comm;
-#if defined(TEST_NBC_ROUTINES)
- int i;
int *types = NULL;
+ MPI_Comm comm;
MPI_Request req;
-#endif
/* intentionally not using MTest_Init/MTest_Finalize in order to make it
* easy to take this test and use it as an NBC sanity test outside of the
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);
-#if defined(TEST_NBC_ROUTINES)
/* enough space for every process to contribute at least NUM_INTS ints to any
* collective operation */
sbuf = malloc(NUM_INTS*size*sizeof(int));
MPI_Iexscan(sbuf, rbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
-#endif
-
if (sbuf) free(sbuf);
if (rbuf) free(rbuf);
if (scounts) free(scounts);
*/
/* A basic test of all 17 nonblocking collective operations specified by the
- * draft MPI-3 standard. It only exercises the intracommunicator functionality,
+ * MPI-3 standard. It only exercises the intracommunicator functionality,
* does not use MPI_IN_PLACE, and only transmits/receives simple integer types
* with relatively small counts. It does check a few fancier issues, such as
* ensuring that "premature user releases" of MPI_Op and MPI_Datatype objects
#include "mpi.h"
#include <stdlib.h>
#include <stdio.h>
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
-#include "mpitestconf.h"
#define COUNT (10)
#define PRIME (17)
} \
} while (0)
-/* Since MPICH is currently the only NBC implementation in existence, just use
- * this quick-and-dirty #ifdef to decide whether to test the nonblocking
- * collectives. Eventually we can add a configure option or configure test, or
- * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
- * check */
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_NBC_ROUTINES 1
-#endif
-
static void sum_fn(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
{
int i;
int main(int argc, char **argv)
{
+ int i, j;
int rank, size;
int *buf = NULL;
int *recvbuf = NULL;
int *rdispls = NULL;
int *sendtypes = NULL;
int *recvtypes = NULL;
-#if defined(TEST_NBC_ROUTINES)
- int i, j;
- char *buf_alias = NULL;
+ signed char *buf_alias = NULL;
MPI_Request req;
-#endif
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
-#if defined(TEST_NBC_ROUTINES)
buf = malloc(COUNT*size*sizeof(int));
recvbuf = malloc(COUNT*size*sizeof(int));
}
/* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
- buf_alias = (char *)buf;
+ buf_alias = (signed char *)buf;
my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
for (i = 0; i < PRIME; ++i) {
if (rank == 0)
for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
buf_alias[i] = 0xbf;
}
- MPI_Ibcast(buf, PRIME, MPI_SIGNED_CHAR, 0, MPI_COMM_WORLD, &req);
+ MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, MPI_COMM_WORLD, &req);
MPI_Wait(&req, MPI_STATUS_IGNORE);
for (i = 0; i < PRIME; ++i) {
if (buf_alias[i] != i)
}
}
-#endif /* defined(TEST_NBC_ROUTINES) */
-
if (rank == 0)
printf(" No Errors\n");
#include <stdio.h>
#include <string.h>
#include <assert.h>
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
-#include "mpitestconf.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
} \
} while (0)
-/* Since MPICH is currently the only NBC implementation in existence, just use
- * this quick-and-dirty #ifdef to decide whether to test the nonblocking
- * collectives. Eventually we can add a configure option or configure test, or
- * the MPI-3 standard will be released and these can be gated on a MPI_VERSION
- * check */
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_NBC_ROUTINES 1
-#endif
-
-#if defined(TEST_NBC_ROUTINES)
/* Intended to act like "rand_r", but we can be sure that it will exist and be
* consistent across all of comm world. Returns a number in the range
* [0,GEN_PRN_MAX] */
int *rdispls = NULL;
int *sendtypes = NULL;
int *recvtypes = NULL;
- char *buf_alias = NULL;
+ signed char *buf_alias = NULL;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
/* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
- buf_alias = (char *)buf;
+ buf_alias = (signed char *)buf;
my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
for (i = 0; i < PRIME; ++i) {
if (rank == 0)
for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
buf_alias[i] = 0xbf;
}
- MPI_Ibcast(buf, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
+ MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
break;
case 2: /* MPI_Ibarrier */
}
#undef COMPLETION_CASES
}
-#endif /* defined(TEST_NBC_ROUTINES) */
int main(int argc, char **argv)
{
- int wrank, wsize;
-#if defined(TEST_NBC_ROUTINES)
int i, num_posted, num_completed;
+ int wrank, wsize;
unsigned int seed = 0x10bc;
unsigned int post_seq, complete_seq;
struct laundry larr[WINDOW];
int indices[WINDOW];
MPI_Comm comms[NUM_COMMS];
MPI_Comm comm;
-#endif
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
MPI_Comm_size(MPI_COMM_WORLD, &wsize);
-#if defined(TEST_NBC_ROUTINES)
-
/* it is critical that all processes in the communicator start with a
* consistent value for "post_seq" */
post_seq = complete_seq = gen_prn(seed);
MPI_Comm_free(&comms[i]);
}
-#endif /* defined(TEST_NBC_ROUTINES) */
-
if (wrank == 0) {
if (errs)
printf("found %d errors\n", errs);
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2010 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This is a very weak sanity test that all nonblocking collectives specified by
+ * MPI-3 are present in the library and take arguments as expected. This test
+ * does not check for progress, matching issues, or sensible output buffer
+ * values. */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+#define NUM_INTS (2)
+
+#define my_assert(cond_) \
+ do { \
+ if (!(cond_)) { \
+ fprintf(stderr, "assertion (%s) failed, aborting\n", #cond_); \
+ MPI_Abort(MPI_COMM_WORLD, 1); \
+ } \
+ } while (0)
+
+/* Every nonblocking collective call below deliberately aliases the send
+ * and receive buffers.  MPI 2.2 and later forbid such aliasing, so each
+ * call is expected to FAIL: a return of MPI_SUCCESS counts as an error. */
+int main(int argc, char **argv)
+{
+    int errs = 0;
+    int i;
+    int rank, size;
+    int *sbuf = NULL;
+    int *rbuf = NULL;
+    int *scounts = NULL;
+    int *rcounts = NULL;
+    int *sdispls = NULL;
+    int *rdispls = NULL;
+    /* MPI_Ialltoallw takes an MPI_Datatype array; MPI_Datatype is not
+     * guaranteed to be an int (it is a pointer in SMPI), so declare the
+     * array with the proper type rather than int. */
+    MPI_Datatype *types = NULL;
+    MPI_Comm comm;
+    MPI_Request req;
+
+    /* intentionally not using MTest_Init/MTest_Finalize in order to make it
+     * easy to take this test and use it as an NBC sanity test outside of the
+     * MPICH test suite */
+    MPI_Init(&argc, &argv);
+
+    comm = MPI_COMM_WORLD;
+
+    MPI_Comm_size(comm, &size);
+    MPI_Comm_rank(comm, &rank);
+
+    /* the aliasing checks rely on errors being returned, not aborting */
+    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+
+    /* enough space for every process to contribute at least NUM_INTS ints to any
+     * collective operation */
+    sbuf = malloc(NUM_INTS*size*sizeof(int));
+    my_assert(sbuf);
+    rbuf = malloc(NUM_INTS*size*sizeof(int));
+    my_assert(rbuf);
+    scounts = malloc(size*sizeof(int));
+    my_assert(scounts);
+    rcounts = malloc(size*sizeof(int));
+    my_assert(rcounts);
+    sdispls = malloc(size*sizeof(int));
+    my_assert(sdispls);
+    rdispls = malloc(size*sizeof(int));
+    my_assert(rdispls);
+    types = malloc(size*sizeof(MPI_Datatype));
+    my_assert(types);
+
+    /* NOTE: the 2*i indexing assumes NUM_INTS == 2 */
+    for (i = 0; i < size; ++i) {
+        sbuf[2*i] = i;
+        sbuf[2*i+1] = i;
+        rbuf[2*i] = i;
+        rbuf[2*i+1] = i;
+        scounts[i] = NUM_INTS;
+        rcounts[i] = NUM_INTS;
+        sdispls[i] = i * NUM_INTS;
+        rdispls[i] = i * NUM_INTS;
+        types[i] = MPI_INT;
+    }
+
+    /* rooted collectives: only the root's buffers may not alias */
+    if (rank == 0 && MPI_SUCCESS ==
+            MPI_Igather(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
+        errs++;
+
+    if (rank == 0 && MPI_SUCCESS ==
+            MPI_Igatherv(sbuf, NUM_INTS, MPI_INT, sbuf, rcounts, rdispls, MPI_INT, 0, comm, &req))
+        errs++;
+
+    if (rank == 0 && MPI_SUCCESS ==
+            MPI_Iscatter(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
+        errs++;
+
+    if (rank == 0 && MPI_SUCCESS ==
+            MPI_Iscatterv(sbuf, scounts, sdispls, MPI_INT, sbuf, NUM_INTS, MPI_INT, 0, comm, &req))
+        errs++;
+
+    /* non-rooted collectives: aliasing is erroneous on every rank */
+    if (MPI_SUCCESS ==
+            MPI_Iallgather(&sbuf[rank], 1, MPI_INT, sbuf, 1, MPI_INT, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Iallgatherv(&sbuf[rank * rcounts[rank]], rcounts[rank], MPI_INT, sbuf, rcounts, rdispls, MPI_INT, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Ialltoall(sbuf, NUM_INTS, MPI_INT, sbuf, NUM_INTS, MPI_INT, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Ialltoallv(sbuf, scounts, sdispls, MPI_INT, sbuf, scounts, sdispls, MPI_INT, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Ialltoallw(sbuf, scounts, sdispls, types, sbuf, scounts, sdispls, types, comm, &req))
+        errs++;
+
+    if (rank == 0 && MPI_SUCCESS ==
+            MPI_Ireduce(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, 0, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Iallreduce(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Ireduce_scatter(sbuf, sbuf, rcounts, MPI_INT, MPI_SUM, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Ireduce_scatter_block(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Iscan(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
+        errs++;
+
+    if (MPI_SUCCESS ==
+            MPI_Iexscan(sbuf, sbuf, NUM_INTS, MPI_INT, MPI_SUM, comm, &req))
+        errs++;
+
+    if (sbuf) free(sbuf);
+    if (rbuf) free(rbuf);
+    if (scounts) free(scounts);
+    if (rcounts) free(rcounts);
+    if (sdispls) free(sdispls);
+    if (rdispls) free(rdispls);
+    if (types) free(types);    /* was leaked in the original */
+
+    if (rank == 0) {
+        if (errs)
+            fprintf(stderr, "Found %d errors\n", errs);
+        else
+            printf(" No errors\n");
+    }
+    MPI_Finalize();
+    return 0;
+}
+
if (rank == root) {
errs += isShiftLeft( comm, bufout );
}
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ /* Try one more time without IN_PLACE to make sure we check
+ * aliasing correctly */
+ if (rank == root) {
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Reduce( bufout, bufout, count, mattype, op, root, comm ))
+ errs++;
+ }
+#endif
}
free( buf );
fprintf(stdout, "Did not get expected value for reduce scatter block\n");
fprintf(stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval);
}
+
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Reduce_scatter_block(recvbuf, recvbuf, 1, MPI_INT, MPI_SUM, comm))
+ err++;
+
free(recvbuf);
#endif
}
}
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
MPI_Reduce_scatter( MPI_IN_PLACE, sendbuf, recvcounts, MPI_INT, MPI_SUM,
comm );
sumval = size * rank + ((size - 1) * size)/2;
/* recv'ed values for my process should be size * (rank + i) */
for (i=0; i<mycount; i++) {
- if (sendbuf[i] != sumval) {
- err++;
- if (err < MAX_ERRORS) {
- fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
- fprintf( stdout, "[%d] Got buf[%d] = %d expected %d\n",
- rank, i, sendbuf[rank*mycount+i], sumval );
- }
- }
+ if (sendbuf[i] != sumval) {
+ err++;
+ if (err < MAX_ERRORS) {
+ fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
+ fprintf( stdout, "[%d] Got buf[%d] = %d expected %d\n",
+ rank, i, sendbuf[i], sumval );
+ }
+ }
}
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Reduce_scatter(sendbuf, sendbuf, recvcounts, MPI_INT, MPI_SUM, comm))
+ err++;
+#endif
+
free(sendbuf);
free(recvbuf);
free(recvcounts);
sumval = size * rank + ((size - 1) * size)/2;
/* recv'ed values for my process should be size * (rank + i) */
for (i=0; i<mycount; i++) {
- if (sendbuf[rank*mycount+i] != sumval) {
- err++;
- fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
- fprintf( stdout, "[%d] Got %d expected %d\n", rank, sendbuf[rank*mycount+i], sumval );
- }
+ if (sendbuf[i] != sumval) {
+ err++;
+ fprintf( stdout, "Did not get expected value for reduce scatter (in place)\n" );
+ fprintf( stdout, "[%d] Got %d expected %d\n", rank, sendbuf[i], sumval );
+ }
}
free(sendbuf);
free( sendbuf );
free( recvbuf );
}
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ if (0 == rank) {
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Reduce(&rank, &rank, 1, MPI_INT, MPI_SUM, 0, comm))
+ errs++;
+ }
+#endif
+
MTestFreeComm( &comm );
}
*/
#include "mpi.h"
#include <stdio.h>
+#include "mpitest.h"
void addem ( int *, int *, int *, MPI_Datatype * );
void assoc ( int *, int *, int *, MPI_Datatype * );
errors++;
}
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (MPI_SUCCESS == MPI_Scan( &data, &data, 1, MPI_INT, op_assoc, comm))
+ errors++;
+#endif
+
MPI_Op_free( &op_assoc );
MPI_Op_free( &op_addem );
}
}
}
+
+ MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
+ if (rank == 0 && MPI_SUCCESS ==
+ MPI_Scatter(vecin, 1, MPI_DOUBLE, vecin, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD))
+ err++;
+ free(vecin);
+ free(vecout);
MTest_Finalize( err );
MPI_Type_free( &vec );
MPI_Finalize();
nonblocking3 4 mpiversion=3.0
nonblocking3 5 mpiversion=3.0
nonblocking3 10 timeLimit=600 mpiversion=3.0
+nonblocking4 4 mpiversion=3.0
iallred 2 mpiversion=3.0
# ibarrier will hang forever if it fails, but will complete quickly if it
# succeeds