foreach(file cmfree cmsplit2 cmsplit cmsplit_type commcreate1 comm_create_group comm_group_half comm_group_rand
comm_idup comm_idup_overlap comm_idup_mul comm_info commname ctxalloc ctxsplit dup dupic dup_with_info ic1 ic2
-         iccreate icgroup icm icsplit probe-intercomm)
+         # NOTE: list entries WITHOUT the ".c" suffix -- the loop body below appends
+         # ".c" itself, so "comm_idup.c" would expand to "comm_idup.c.c". Entries
+         # already present above (comm_idup, comm_idup_overlap, comm_idup_mul) are
+         # not repeated here.
+         iccreate icgroup icm icsplit probe-intercomm comm_create_group_idup comm_idup_comm comm_idup_comm2 comm_idup_iallreduce comm_idup_nb comm_idup_isend
+)
set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${file}.c)
endforeach()
#define NELM 128
#define NCOMM 1020
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, source, dest, i;
- MPI_Comm comm;
- MPI_Comm tmpComm[NCOMM];
- MPI_Status status;
- MPI_Request req;
- int *buf=0;
+ MPI_Comm comm;
+ MPI_Comm tmpComm[NCOMM];
+ MPI_Status status;
+ MPI_Request req;
+ int *buf = 0;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+ MPI_Comm_dup(MPI_COMM_WORLD, &comm);
/* This is similar to the datatype test, except that we post
- an irecv on a simple data buffer but use a rank-reordered communicator.
- In this case, an error in handling the reference count will most
- likely cause the program to hang, so this should be run only
- if (a) you are confident that the code is correct or (b)
- a timeout is set for mpiexec
- */
+ * an irecv on a simple data buffer but use a rank-reordered communicator.
+ * In this case, an error in handling the reference count will most
+ * likely cause the program to hang, so this should be run only
+ * if (a) you are confident that the code is correct or (b)
+ * a timeout is set for mpiexec
+ */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
if (size < 2) {
- fprintf( stderr, "This test requires at least two processes." );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "This test requires at least two processes.");
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- source = 0;
- dest = size - 1;
+ source = 0;
+ dest = size - 1;
if (rank == dest) {
- buf = (int *)malloc( NELM * sizeof(int) );
- for (i=0; i<NELM; i++) buf[i] = -i;
- MPI_Irecv( buf, NELM, MPI_INT, source, 0, comm, &req );
- MPI_Comm_free( &comm );
-
- if (comm != MPI_COMM_NULL) {
- errs++;
- printf( "Freed comm was not set to COMM_NULL\n" );
- }
-
- for (i=0; i<NCOMM; i++) {
- MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
- }
-
- MPI_Sendrecv( NULL, 0, MPI_INT, source, 1,
- NULL, 0, MPI_INT, source, 1, MPI_COMM_WORLD, &status );
-
- MPI_Wait( &req, &status );
- for (i=0; i<NELM; i++) {
- if (buf[i] != i) {
- errs++;
- if (errs < 10) {
- printf( "buf[%d] = %d, expected %d\n", i, buf[i], i );
- }
- }
- }
- for (i=0; i<NCOMM; i++) {
- MPI_Comm_free( &tmpComm[i] );
- }
- free( buf );
+ buf = (int *) malloc(NELM * sizeof(int));
+ for (i = 0; i < NELM; i++)
+ buf[i] = -i;
+ MPI_Irecv(buf, NELM, MPI_INT, source, 0, comm, &req);
+ MPI_Comm_free(&comm);
+
+ if (comm != MPI_COMM_NULL) {
+ errs++;
+ printf("Freed comm was not set to COMM_NULL\n");
+ }
+
+ for (i = 0; i < NCOMM; i++) {
+ MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, &tmpComm[i]);
+ }
+
+ MPI_Sendrecv(NULL, 0, MPI_INT, source, 1,
+ NULL, 0, MPI_INT, source, 1, MPI_COMM_WORLD, &status);
+
+ MPI_Wait(&req, &status);
+ for (i = 0; i < NELM; i++) {
+ if (buf[i] != i) {
+ errs++;
+ if (errs < 10) {
+ printf("buf[%d] = %d, expected %d\n", i, buf[i], i);
+ }
+ }
+ }
+ for (i = 0; i < NCOMM; i++) {
+ MPI_Comm_free(&tmpComm[i]);
+ }
+ free(buf);
}
else if (rank == source) {
- buf = (int *)malloc( NELM * sizeof(int) );
- for (i=0; i<NELM; i++) buf[i] = i;
-
- for (i=0; i<NCOMM; i++) {
- MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
- }
- /* Synchronize with the receiver */
- MPI_Sendrecv( NULL, 0, MPI_INT, dest, 1,
- NULL, 0, MPI_INT, dest, 1, MPI_COMM_WORLD, &status );
- MPI_Send( buf, NELM, MPI_INT, dest, 0, comm );
- free( buf );
+ buf = (int *) malloc(NELM * sizeof(int));
+ for (i = 0; i < NELM; i++)
+ buf[i] = i;
+
+ for (i = 0; i < NCOMM; i++) {
+ MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, &tmpComm[i]);
+ }
+ /* Synchronize with the receiver */
+ MPI_Sendrecv(NULL, 0, MPI_INT, dest, 1, NULL, 0, MPI_INT, dest, 1, MPI_COMM_WORLD, &status);
+ MPI_Send(buf, NELM, MPI_INT, dest, 0, comm);
+ free(buf);
}
else {
- for (i=0; i<NCOMM; i++) {
- MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &tmpComm[i] );
- }
+ for (i = 0; i < NCOMM; i++) {
+ MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, &tmpComm[i]);
+ }
}
- MPI_Barrier( MPI_COMM_WORLD );
+ MPI_Barrier(MPI_COMM_WORLD);
if (rank != dest) {
- /* Clean up the communicators */
- for (i=0; i<NCOMM; i++) {
- MPI_Comm_free( &tmpComm[i] );
- }
+ /* Clean up the communicators */
+ for (i = 0; i < NCOMM; i++) {
+ MPI_Comm_free(&tmpComm[i]);
+ }
}
if (comm != MPI_COMM_NULL) {
- MPI_Comm_free( &comm );
+ MPI_Comm_free(&comm);
}
-
- MTest_Finalize( errs );
+
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test comm split";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, color, srank;
- MPI_Comm comm, scomm;
+ MPI_Comm comm, scomm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_dup( MPI_COMM_WORLD, &comm );
+ MPI_Comm_dup(MPI_COMM_WORLD, &comm);
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
if (size < 4) {
- fprintf( stderr, "This test requires at least four processes." );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "This test requires at least four processes.");
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
color = MPI_UNDEFINED;
- if (rank < 2) color = 1;
- MPI_Comm_split( comm, color, size - rank, &scomm );
-
+ if (rank < 2)
+ color = 1;
+ MPI_Comm_split(comm, color, size - rank, &scomm);
+
if (rank < 2) {
- /* Check that the ranks are ordered correctly */
- MPI_Comm_rank( scomm, &srank );
- if (srank != 1 - rank) {
- errs++;
- }
- MPI_Comm_free( &scomm );
+ /* Check that the ranks are ordered correctly */
+ MPI_Comm_rank(scomm, &srank);
+ if (srank != 1 - rank) {
+ errs++;
+ }
+ MPI_Comm_free(&scomm);
}
else {
- if (scomm != MPI_COMM_NULL) {
- errs++;
- }
+ if (scomm != MPI_COMM_NULL) {
+ errs++;
+ }
}
- MPI_Comm_free( &comm );
- MTest_Finalize( errs );
+ MPI_Comm_free(&comm);
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
/* there's probably a better way to write these loop bounds and
* indices, but this is the first (correct) way that occurred to me */
for (j = 0; j < (size / modulus + (i < size % modulus ? 1 : 0)); ++j) {
- if (errs < ERRLIMIT && oldranks[pos] != i+modulus*j) {
- printf("size=%d i=%d j=%d modulus=%d pos=%d i+modulus*j=%d oldranks[pos]=%d\n",
- size, i, j, modulus, pos, i+modulus*j, oldranks[pos]);
+ if (errs < ERRLIMIT && oldranks[pos] != i + modulus * j) {
+ printf
+ ("size=%d i=%d j=%d modulus=%d pos=%d i+modulus*j=%d oldranks[pos]=%d\n",
+ size, i, j, modulus, pos, i + modulus * j, oldranks[pos]);
}
- my_assert(oldranks[pos] == i+modulus*j);
+ my_assert(oldranks[pos] == i + modulus * j);
++pos;
}
}
MPI_Finalize();
return 0;
}
-
int main(int argc, char *argv[])
{
- int rank, size, verbose=0;
+ int rank, size, verbose = 0, errs=0, tot_errs=0;
int wrank;
MPI_Comm comm;
+ MPI_Info info;
MPI_Init(&argc, &argv);
if (getenv("MPITEST_VERBOSE"))
verbose = 1;
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
/* Check to see if MPI_COMM_TYPE_SHARED works correctly */
MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm);
- if (comm == MPI_COMM_NULL)
+ if (comm == MPI_COMM_NULL) {
printf("Expected a non-null communicator, but got MPI_COMM_NULL\n");
+ errs++;
+ }
+ else {
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+ if (rank == 0 && verbose)
+ printf("Created shared subcommunicator of size %d\n", size);
+ MPI_Comm_free(&comm);
+ }
+
+#ifdef MPIX_COMM_TYPE_NEIGHBORHOOD
+    /* the MPICH-specific MPIX_COMM_TYPE_NEIGHBORHOOD */
+ /* test #1: expected behavior -- user provided a directory, and we
+ * determine which processes share access to it */
+ MPI_Info_create(&info);
+ if (argc == 2)
+ MPI_Info_set(info, "nbhd_common_dirname", argv[1]);
+ else
+ MPI_Info_set(info, "nbhd_common_dirname", ".");
+ MPI_Comm_split_type(MPI_COMM_WORLD, MPIX_COMM_TYPE_NEIGHBORHOOD, 0,
+ info, &comm);
+ if (comm == MPI_COMM_NULL) {
+ printf("Expected a non-null communicator, but got MPI_COMM_NULL\n");
+ errs++;
+ }
else {
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
if (rank == 0 && verbose)
- printf("Created subcommunicator of size %d\n", size);
+ printf("Correctly created common-file subcommunicator of size %d\n", size);
MPI_Comm_free(&comm);
}
+ /* test #2: a hint we don't know about */
+ MPI_Info_delete(info, "nbhd_common_dirname");
+ MPI_Info_set(info, "mpix_tooth_fairy", "enable");
+ MPI_Comm_split_type(MPI_COMM_WORLD, MPIX_COMM_TYPE_NEIGHBORHOOD, 0,
+ info, &comm);
+ if (comm != MPI_COMM_NULL) {
+ printf("Expected a NULL communicator, but got something else\n");
+ errs++;
+ MPI_Comm_free(&comm);
+ }
+ else {
+ if (rank == 0 && verbose)
+ printf("Unknown hint correctly resulted in NULL communicator\n");
+ }
+
+
+ MPI_Info_free(&info);
+#endif
+
/* Check to see if MPI_UNDEFINED is respected */
MPI_Comm_split_type(MPI_COMM_WORLD, (wrank % 2 == 0) ? MPI_COMM_TYPE_SHARED : MPI_UNDEFINED,
0, MPI_INFO_NULL, &comm);
- if ((wrank % 2) && (comm != MPI_COMM_NULL))
+ if ((wrank % 2) && (comm != MPI_COMM_NULL)) {
printf("Expected MPI_COMM_NULL, but did not get one\n");
+ errs++;
+ }
if (wrank % 2 == 0) {
- if (comm == MPI_COMM_NULL)
+ if (comm == MPI_COMM_NULL) {
printf("Expected a non-null communicator, but got MPI_COMM_NULL\n");
+ errs++;
+ }
else {
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size);
if (rank == 0 && verbose)
- printf("Created subcommunicator of size %d\n", size);
+ printf("Created shared subcommunicator of size %d\n", size);
MPI_Comm_free(&comm);
}
}
+ MPI_Reduce(&errs, &tot_errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
/* Use wrank because Comm_split_type may return more than one communicator
- across the job, and if so, each will have a rank 0 entry. Test
- output rules are for a single process to write the successful
- test (No Errors) output. */
- if (wrank == 0)
+ * across the job, and if so, each will have a rank 0 entry. Test
+ * output rules are for a single process to write the successful
+ * test (No Errors) output. */
+ if (wrank == 0 && errs == 0)
printf(" No errors\n");
MPI_Finalize();
* See COPYRIGHT in top-level directory.
*/
#include "mpi.h"
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
#include "mpitestconf.h"
#include <stdio.h>
#include <string.h>
{
int size, rank, i, *excl;
MPI_Group world_group, even_group;
- MPI_Comm __attribute__((unused)) even_comm;
+ MPI_Comm even_comm;
MPI_Init(&argc, &argv);
if (size % 2) {
fprintf(stderr, "this program requires a multiple of 2 number of processes\n");
MPI_Abort(MPI_COMM_WORLD, 1);
- exit(1);
}
excl = malloc((size / 2) * sizeof(int));
MPI_Group_excl(world_group, size / 2, excl, &even_group);
MPI_Group_free(&world_group);
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
if (rank % 2 == 0) {
/* Even processes create a group for themselves */
MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &even_comm);
MPI_Barrier(even_comm);
MPI_Comm_free(&even_comm);
}
-#endif /* USE_STRICT_MPI */
MPI_Group_free(&even_group);
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0)
printf(" No errors\n");
+ free(excl);
MPI_Finalize();
return 0;
}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include "mpitestconf.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+
+/* Overlap a nonblocking duplication (MPI_Comm_idup) with a blocking
+ * MPI_Comm_create_group, issuing them in opposite orders on even and odd
+ * ranks to stress concurrent communicator creation. */
+int main(int argc, char *argv[])
+{
+    int size, rank;
+    MPI_Group world_group;
+    MPI_Comm group_comm, idup_comm;
+    MPI_Request req;
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+    if (size % 2) {
+        fprintf(stderr, "this program requires even number of processes\n");
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    /* Create some groups */
+    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
+
+    if (rank % 2 == 0) {
+        MPI_Comm_create_group(MPI_COMM_WORLD, world_group, 0, &group_comm);
+        MPI_Comm_idup(MPI_COMM_WORLD, &idup_comm, &req);
+    }
+    else {
+        MPI_Comm_idup(MPI_COMM_WORLD, &idup_comm, &req);
+        MPI_Comm_create_group(MPI_COMM_WORLD, world_group, 0, &group_comm);
+    }
+
+    /* MPI_Wait takes a single status argument, so MPI_STATUS_IGNORE (not
+     * MPI_STATUSES_IGNORE, which is reserved for the array variants such as
+     * MPI_Waitall) must be used here. */
+    MPI_Wait(&req, MPI_STATUS_IGNORE);
+    /* Test the new communicators with a barrier */
+    MPI_Barrier(idup_comm);
+    MPI_Barrier(group_comm);
+
+    MPI_Group_free(&world_group);
+    MPI_Comm_free(&idup_comm);
+    MPI_Comm_free(&group_comm);
+    if (rank == 0)
+        printf(" No errors\n");
+
+    MPI_Finalize();
+    return 0;
+}
#include <stdio.h>
#include <mpi.h>
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
#include "mpitestconf.h"
int main(int argc, char **argv)
{
- int rank, size;
+ int rank, size, i;
MPI_Group full_group, half_group;
int range[1][3];
- MPI_Comm __attribute__((unused)) comm;
+ MPI_Comm comm;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
range[0][2] = 1;
MPI_Group_range_incl(full_group, 1, range, &half_group);
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
if (rank <= size / 2) {
MPI_Comm_create_group(MPI_COMM_WORLD, half_group, 0, &comm);
MPI_Barrier(comm);
MPI_Comm_free(&comm);
}
-#endif /* USE_STRICT_MPI */
MPI_Group_free(&half_group);
MPI_Group_free(&full_group);
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
-/* USE_STRICT_MPI may be defined in mpitestconf.h */
#include "mpitestconf.h"
#define LOOPS 100
int rank, size, i, j, count;
MPI_Group full_group, sub_group;
int *included, *ranks;
- MPI_Comm __attribute__((unused)) comm;
+ MPI_Comm comm;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_group(MPI_COMM_WORLD, &full_group);
for (j = 0; j < LOOPS; j++) {
- srand(j); /* Deterministic seed */
+ srand(j); /* Deterministic seed */
count = 0;
for (i = 0; i < size; i++) {
- if (rand() % 2) { /* randomly include a rank */
+ if (rand() % 2) { /* randomly include a rank */
included[i] = 1;
ranks[count++] = i;
}
MPI_Group_incl(full_group, count, ranks, &sub_group);
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
if (included[rank]) {
MPI_Comm_create_group(MPI_COMM_WORLD, sub_group, 0, &comm);
MPI_Barrier(comm);
MPI_Comm_free(&comm);
}
-#endif /* USE_STRICT_MPI */
MPI_Group_free(&sub_group);
}
if (rank == 0)
printf(" No Errors\n");
+ free(ranks);
+ free(included);
MPI_Finalize();
return 0;
#include "mpi.h"
#include "mpitest.h"
-/* This is a temporary #ifdef to control whether we test this functionality. A
- * configure-test or similar would be better. Eventually the MPI-3 standard
- * will be released and this can be gated on a MPI_VERSION check */
-#if !defined(USE_STRICT_MPI) && defined(MPICH)
-#define TEST_IDUP 1
-#endif
-
/* assert-like macro that bumps the err count and emits a message */
#define check(x_) \
do { \
if (size < 2) {
printf("this test requires at least 2 processes\n");
MPI_Abort(MPI_COMM_WORLD, 1);
- exit(1);
}
-#ifdef TEST_IDUP
-
/* test plan: make rank 0 wait in a blocking recv until all other processes
* have posted their MPI_Comm_idup ops, then post last. Should ensure that
* idup doesn't block on the non-zero ranks, otherwise we'll get a deadlock.
buf[0] = rank;
buf[1] = 0xfeedface;
MPI_Allreduce(&buf[0], &buf[1], 1, MPI_INT, MPI_SUM, newcomm);
- check(buf[1] == (size * (size-1) / 2));
+ check(buf[1] == (size * (size - 1) / 2));
MPI_Comm_free(&newcomm);
buf[0] = lrank;
buf[1] = 0xfeedface;
MPI_Allreduce(&buf[0], &buf[1], 1, MPI_INT, MPI_SUM, newcomm);
- check(buf[1] == (rsize * (rsize-1) / 2));
+ check(buf[1] == (rsize * (rsize - 1) / 2));
/* free this down here, not before idup, otherwise it will undo our
* stagger_comm work */
MPI_Comm_free(&newcomm);
MPI_Comm_free(&ic);
-#endif /* TEST_IDUP */
-
MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &errs), &errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
if (errs) {
return 0;
}
-
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This test overlaps pending Comm_idup operations with other communicator
+ * generation calls (dup, split, create, create_group, intercomm create/merge). */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define NUM_IDUPS 5
+
+int main(int argc, char **argv)
+{
+    int errs = 0;
+    int i;
+    int rank, size;
+    int *excl;
+    int ranges[1][3];
+    int isLeft, rleader;
+    MPI_Group world_group, high_group, even_group;
+    MPI_Comm local_comm, inter_comm, outcomm;
+    MPI_Comm idupcomms[NUM_IDUPS];
+    MPI_Request reqs[NUM_IDUPS];
+
+    MTest_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
+
+    if (size < 2) {
+        printf("this test requires at least 2 processes\n");
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    /* Idup MPI_COMM_WORLD multiple times */
+    for (i = 0; i < NUM_IDUPS; i++) {
+        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
+    }
+
+    /* Overlap pending idups with various comm generation functions */
+
+    /* Comm_dup */
+    MPI_Comm_dup(MPI_COMM_WORLD, &outcomm);
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Comm_split */
+    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, size - rank, &outcomm);
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Comm_create, high half of MPI_COMM_WORLD */
+    ranges[0][0] = size / 2;
+    ranges[0][1] = size - 1;
+    ranges[0][2] = 1;
+    MPI_Group_range_incl(world_group, 1, ranges, &high_group);
+    MPI_Comm_create(MPI_COMM_WORLD, high_group, &outcomm);
+    MPI_Group_free(&high_group);
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Comm_create_group, even ranks of MPI_COMM_WORLD */
+    /* exclude the odd ranks */
+    excl = malloc((size / 2) * sizeof(int));
+    for (i = 0; i < size / 2; i++)
+        excl[i] = (2 * i) + 1;
+
+    MPI_Group_excl(world_group, size / 2, excl, &even_group);
+    free(excl);
+
+    if (rank % 2 == 0) {
+        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &outcomm);
+    }
+    else {
+        outcomm = MPI_COMM_NULL;
+    }
+    MPI_Group_free(&even_group);
+
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Intercomm_create & Intercomm_merge */
+    MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);
+
+    /* Ranks 0 and size/2 are the local leaders of the two halves */
+    if (rank == 0) {
+        rleader = size / 2;
+    }
+    else if (rank == size / 2) {
+        rleader = 0;
+    }
+    else {
+        rleader = -1;
+    }
+    isLeft = rank < size / 2;
+
+    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
+    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
+    MPI_Comm_free(&local_comm);
+
+    errs += MTestTestComm(inter_comm);
+    MTestFreeComm(&inter_comm);
+
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Complete the outstanding idups and verify the resulting comms */
+    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
+    for (i = 0; i < NUM_IDUPS; i++) {
+        errs += MTestTestComm(idupcomms[i]);
+        MPI_Comm_free(&idupcomms[i]);
+    }
+
+    MPI_Group_free(&world_group);
+
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* This test overlaps pending Comm_idup operations with other communicator
+ * generation calls, all derived from a duplicate of MPI_COMM_WORLD. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define NUM_IDUPS 5
+
+int main(int argc, char **argv)
+{
+    int errs = 0;
+    int i;
+    int rank, size;
+    int *excl;
+    int ranges[1][3];
+    int isLeft, rleader;
+    MPI_Group dup_group, high_group, even_group;
+    MPI_Comm local_comm, inter_comm, outcomm, dupcomm;
+    MPI_Comm idupcomms[NUM_IDUPS];
+    MPI_Request reqs[NUM_IDUPS];
+
+    MTest_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    MPI_Comm_dup(MPI_COMM_WORLD, &dupcomm);
+    MPI_Comm_group(dupcomm, &dup_group);
+
+    if (size < 2) {
+        printf("this test requires at least 2 processes\n");
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    /* Idup MPI_COMM_WORLD multiple times */
+    for (i = 0; i < NUM_IDUPS; i++) {
+        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
+    }
+
+    /* Overlap pending idups with various comm generation functions */
+
+    /* Comm_dup */
+    MPI_Comm_dup(dupcomm, &outcomm);
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Comm_split */
+    MPI_Comm_split(dupcomm, rank % 2, size - rank, &outcomm);
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Comm_create, high half of dupcomm */
+    ranges[0][0] = size / 2;
+    ranges[0][1] = size - 1;
+    ranges[0][2] = 1;
+    MPI_Group_range_incl(dup_group, 1, ranges, &high_group);
+    MPI_Comm_create(dupcomm, high_group, &outcomm);
+    MPI_Group_free(&high_group);
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Comm_create_group, even ranks of dupcomm */
+    /* exclude the odd ranks */
+    excl = malloc((size / 2) * sizeof(int));
+    for (i = 0; i < size / 2; i++)
+        excl[i] = (2 * i) + 1;
+
+    MPI_Group_excl(dup_group, size / 2, excl, &even_group);
+    free(excl);
+
+    if (rank % 2 == 0) {
+        MPI_Comm_create_group(dupcomm, even_group, 0, &outcomm);
+    }
+    else {
+        outcomm = MPI_COMM_NULL;
+    }
+    MPI_Group_free(&even_group);
+
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Intercomm_create & Intercomm_merge */
+    MPI_Comm_split(dupcomm, (rank < size / 2), rank, &local_comm);
+
+    /* Ranks 0 and size/2 are the local leaders of the two halves */
+    if (rank == 0) {
+        rleader = size / 2;
+    }
+    else if (rank == size / 2) {
+        rleader = 0;
+    }
+    else {
+        rleader = -1;
+    }
+    isLeft = rank < size / 2;
+
+    MPI_Intercomm_create(local_comm, 0, dupcomm, rleader, 99, &inter_comm);
+    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
+    MPI_Comm_free(&local_comm);
+
+    errs += MTestTestComm(inter_comm);
+    MTestFreeComm(&inter_comm);
+
+    errs += MTestTestComm(outcomm);
+    MTestFreeComm(&outcomm);
+
+    /* Complete the outstanding idups and verify the resulting comms */
+    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
+    for (i = 0; i < NUM_IDUPS; i++) {
+        errs += MTestTestComm(idupcomms[i]);
+        MPI_Comm_free(&idupcomms[i]);
+    }
+
+    MPI_Group_free(&dup_group);
+    MPI_Comm_free(&dupcomm);
+
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define ITERS 10
+
+/* This test uses several scenarios to overlap iallreduce and comm_idup
+ * 1.) Use comm_idup to duplicate COMM_WORLD and do iallreduce
+ *     on COMM_WORLD
+ * 2.) Do the above test in a loop
+ * 3.) Duplicate COMM_WORLD, overlap iallreduce on one
+ *     communicator with comm_idup on the other communicator
+ * 4.) Split MPI_COMM_WORLD, communicate on the split communicator
+ *     while duplicating COMM_WORLD
+ * 5.) Duplicate the split communicators with comm_idup
+ *     while communicating on COMM_WORLD
+ * 6.) Create an inter-communicator and duplicate it with comm_idup while
+ *     communicating on the inter-communicator
+ * 7.) Duplicate the inter-communicator while communicating on COMM_WORLD
+ * 8.) Merge the inter-communicator to an intra-communicator and idup it,
+ *     overlapping with communication on MPI_COMM_WORLD
+ * 9.) Communicate on the merged communicator, while duplicating COMM_WORLD
+ */
+
+int main(int argc, char **argv)
+{
+    int errs = 0;
+    int i;
+    int rank, size, lrank, lsize, rsize, isize;
+    int in, out, sol;
+
+    MPI_Comm newcomm, newcomm_v[ITERS], dup_comm, split, ic, merge;
+    MPI_Request sreq[ITERS * 2];
+
+    MTest_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    if (size < 2) {
+        printf("this test requires at least 2 processes\n");
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    /* set input buffer and compare buffer */
+    in = 1;
+    sol = size;
+    MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &sreq[0]);
+    MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[1]);
+
+    /* MPI_Waitall takes an ARRAY of statuses; MPI_STATUSES_IGNORE (not
+     * MPI_STATUS_IGNORE) is the correct constant for ignoring them. */
+    MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    /* Test results of overlapping allreduce */
+    if (sol != out)
+        errs++;
+    /* Test new communicator */
+    errs += MTestTestComm(newcomm);
+    MPI_Comm_free(&newcomm);
+
+    for (i = 0; i < ITERS; i++) {
+        MPI_Comm_idup(MPI_COMM_WORLD, &newcomm_v[i], &sreq[i]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[i + ITERS]);
+    }
+    MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+    for (i = 0; i < ITERS; i++) {
+        errs += MTestTestComm(newcomm_v[i]);
+        MPI_Comm_free(&newcomm_v[i]);
+    }
+
+    MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm);
+
+    if (rank == 0) {
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[0]);
+        MPI_Comm_idup(dup_comm, &newcomm, &sreq[1]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    else {
+        MPI_Comm_idup(dup_comm, &newcomm, &sreq[1]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[0]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    /* Test Iallreduce */
+    if (sol != out)
+        errs++;
+
+    /* Test new communicator */
+    errs += MTestTestComm(newcomm);
+
+    MPI_Comm_free(&newcomm);
+    MPI_Comm_free(&dup_comm);
+
+    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &split);
+    MPI_Comm_rank(split, &lrank);
+    MPI_Comm_size(split, &lsize);
+
+    sol = lsize;
+    if (lrank == 0) {
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, split, &sreq[0]);
+        MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &sreq[1]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    else {
+        MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &sreq[1]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, split, &sreq[0]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    /* Test Iallreduce */
+    if (sol != out)
+        errs++;
+
+    /* Test new communicator */
+    errs += MTestTestComm(newcomm);
+    MPI_Comm_free(&newcomm);
+    sol = size;
+
+    if (lrank == 0) {
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[0]);
+        MPI_Comm_idup(split, &newcomm, &sreq[1]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    else {
+        MPI_Comm_idup(split, &newcomm, &sreq[1]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[0]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    /* Test Iallreduce */
+    if (sol != out)
+        errs++;
+
+    /* Test new communicator */
+    errs += MTestTestComm(newcomm);
+    MPI_Comm_free(&newcomm);
+
+    MPI_Intercomm_create(split, 0, MPI_COMM_WORLD, (rank == 0 ? 1 : 0), 1234, &ic);
+    MPI_Comm_remote_size(ic, &rsize);
+
+    /* On an inter-communicator the allreduce sums over the REMOTE group */
+    sol = rsize;
+
+    MPI_Comm_idup(ic, &newcomm, &sreq[1]);
+    MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, ic, &sreq[0]);
+    MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+
+    if (sol != out)
+        errs++;
+    /* Test new inter communicator */
+    errs += MTestTestComm(newcomm);
+    MPI_Comm_free(&newcomm);
+
+    sol = lsize;
+    if (lrank == 0) {
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, split, &sreq[0]);
+        MPI_Comm_idup(ic, &newcomm, &sreq[1]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    else {
+        MPI_Comm_idup(ic, &newcomm, &sreq[1]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, split, &sreq[0]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    /* Test Iallreduce results for split-communicator */
+    if (sol != out)
+        errs++;
+    /* Test new inter-communicator */
+    errs += MTestTestComm(newcomm);
+    MPI_Comm_free(&newcomm);
+
+    MPI_Intercomm_merge(ic, rank % 2, &merge);
+    MPI_Comm_size(merge, &isize);
+
+    sol = size;
+    if (rank == 0) {
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[0]);
+        MPI_Comm_idup(merge, &newcomm, &sreq[1]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    else {
+        MPI_Comm_idup(merge, &newcomm, &sreq[1]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &sreq[0]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+
+    if (sol != out)
+        errs++;
+    /* Test new communicator */
+    errs += MTestTestComm(newcomm);
+    MPI_Comm_free(&newcomm);
+    sol = isize;
+
+    if (rank == 0) {
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, merge, &sreq[0]);
+        MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &sreq[1]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+    else {
+        MPI_Comm_idup(MPI_COMM_WORLD, &newcomm, &sreq[1]);
+        MPI_Iallreduce(&in, &out, 1, MPI_INT, MPI_SUM, merge, &sreq[0]);
+        MPI_Waitall(2, sreq, MPI_STATUSES_IGNORE);
+    }
+
+    MPI_Comm_free(&merge);
+    MPI_Comm_free(&newcomm);
+    MPI_Comm_free(&split);
+    MPI_Comm_free(&ic);
+
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+/* Overlap MPI_Comm_idup with point-to-point traffic (Isend/Irecv) on both
+ * intra- and inter-communicators produced by the MTest generators. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define ITERS 4
+
+int main(int argc, char **argv)
+{
+    int errs = 0;
+    int i, j;
+    int rank, size, rsize;
+    int in[ITERS], out[ITERS], sol[ITERS], cnt;
+    int isLeft;
+    MPI_Comm newcomm[ITERS], testcomm;
+    MPI_Request *sreq;
+
+    MTest_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    if (size < 2) {
+        printf("this test requires at least 2 processes\n");
+        MPI_Abort(MPI_COMM_WORLD, 1);
+    }
+
+    /* worst case: rank 0 posts (size-1) isends plus one idup per iteration */
+    sreq = (MPI_Request *) malloc(sizeof(MPI_Request) * (size + 1) * ITERS);
+
+    while (MTestGetIntracommGeneral(&testcomm, 1, 1)) {
+        if (testcomm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(testcomm, &rank);
+        MPI_Comm_size(testcomm, &size);
+        cnt = 0;
+        for (j = 0; j < ITERS; j++) {
+            if (rank == 0) {
+                out[j] = 815;
+                in[j] = 815;
+                sol[j] = 815;
+                for (i = 1; i < size; i++)
+                    MPI_Isend(&out[j], 1, MPI_INT, i, 0, testcomm, &sreq[cnt++]);
+                MPI_Comm_idup(testcomm, &newcomm[j], &sreq[cnt++]);
+            }
+            else {
+                out[j] = 0;
+                in[j] = 0;
+                sol[j] = 815;
+                MPI_Comm_idup(testcomm, &newcomm[j], &sreq[cnt++]);
+                MPI_Irecv(&in[j], 1, MPI_INT, 0, 0, testcomm, &sreq[cnt++]);
+            }
+        }
+        /* MPI_Waitall takes an ARRAY of statuses -- MPI_STATUSES_IGNORE,
+         * not MPI_STATUS_IGNORE, is required here. */
+        MPI_Waitall(cnt, sreq, MPI_STATUSES_IGNORE);
+
+        for (j = 0; j < ITERS; j++) {
+            if (sol[j] != in[j])
+                errs++;
+            errs += MTestTestComm(newcomm[j]);
+            MPI_Comm_free(&newcomm[j]);
+        }
+        MTestFreeComm(&testcomm);
+    }
+    while (MTestGetIntercomm(&testcomm, &isLeft, 1)) {
+        if (testcomm == MPI_COMM_NULL)
+            continue;
+
+        MPI_Comm_rank(testcomm, &rank);
+        MPI_Comm_size(testcomm, &size);
+        MPI_Comm_remote_size(testcomm, &rsize);
+        cnt = 0;
+        for (j = 0; j < ITERS; j++) {
+            if (rank == 0) {
+                out[j] = 815;
+                in[j] = 815;
+                sol[j] = 815;
+                for (i = 1; i < rsize; i++)
+                    MPI_Isend(&out[j], 1, MPI_INT, i, 0, testcomm, &sreq[cnt++]);
+                MPI_Comm_idup(testcomm, &newcomm[j], &sreq[cnt++]);
+            }
+            else {
+                out[j] = 0;
+                in[j] = 0;
+                sol[j] = 815;
+                MPI_Comm_idup(testcomm, &newcomm[j], &sreq[cnt++]);
+                MPI_Irecv(&in[j], 1, MPI_INT, 0, 0, testcomm, &sreq[cnt++]);
+            }
+        }
+        MPI_Waitall(cnt, sreq, MPI_STATUSES_IGNORE);
+
+        for (j = 0; j < ITERS; j++) {
+            if (sol[j] != in[j])
+                errs++;
+            errs += MTestTestComm(newcomm[j]);
+            MPI_Comm_free(&newcomm[j]);
+        }
+        MTestFreeComm(&testcomm);
+    }
+
+    free(sreq);
+    MTest_Finalize(errs);
+    MPI_Finalize();
+    return 0;
+}
#include <stdio.h>
#include <mpi.h>
-#define NUM_ITER 2
+#define NUM_ITER 10
int main(int argc, char **argv)
{
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2015 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define ITERS 5
+
+int main(int argc, char **argv)
+{
+ int errs = 0;
+ int i, isleft;
+ MPI_Comm test_comm, new_comm[ITERS];
+ int in[ITERS], out[ITERS], sol;
+ int rank, size, rsize;
+ MPI_Request sreq[ITERS * 2];
+ int root;
+
+ MTest_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ if (size < 2) {
+ printf("this test requires at least 2 processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ while (MTestGetIntracommGeneral(&test_comm, 1, 1)) {
+ if (test_comm == MPI_COMM_NULL)
+ continue;
+ MPI_Comm_size(test_comm, &size);
+ MPI_Comm_rank(test_comm, &rank);
+
+ /* Ibarrier */
+ for (i = 0; i < ITERS; i++) {
+ MPI_Ibarrier(test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < ITERS; i++) {
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ /* Ibcast */
+ for (i = 0; i < ITERS; i++) {
+ if (rank == 0)
+ in[i] = 815;
+ else
+ in[i] = 10;
+ MPI_Ibcast(&in[i], 1, MPI_INT, 0, test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ sol = 815;
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < ITERS; i++) {
+ if (in[i] != sol)
+ errs++;
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ /* Iallreduce */
+ for (i = 0; i < ITERS; i++) {
+ in[i] = 1;
+ MPI_Iallreduce(&in[i], &out[i], 1, MPI_INT, MPI_SUM, test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ sol = size;
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < ITERS; i++) {
+ if (out[i] != sol)
+ errs++;
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ /* Iscan */
+ for (i = 0; i < ITERS; i++) {
+ MPI_Iscan(&rank, &out[i], 1, MPI_INT, MPI_SUM, test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ sol = rank * (rank + 1) / 2;
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < ITERS; i++) {
+ if (out[i] != sol)
+ errs++;
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ /* Ibcast */
+ for (i = 0; i < ITERS; i++) {
+ if (rank == 0)
+ in[i] = 815;
+ else
+ in[i] = 10;
+ MPI_Ibcast(&in[i], 1, MPI_INT, 0, test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+ sol = 815;
+ for (i = 0; i < ITERS; i++) {
+ if (in[i] != sol)
+ errs++;
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ MTestFreeComm(&test_comm);
+ }
+/* Now the test for inter-communicators */
+ while (MTestGetIntercomm(&test_comm, &isleft, 1)) {
+ if (test_comm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_size(test_comm, &size);
+ MPI_Comm_rank(test_comm, &rank);
+
+ MPI_Comm_remote_size(test_comm, &rsize);
+ /* Ibarrier */
+ for (i = 0; i < ITERS; i++) {
+ MPI_Ibarrier(test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+ for (i = 0; i < ITERS; i++) {
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ /* Ibcast */
+ for (i = 0; i < ITERS; i++) {
+ if (isleft) {
+ if (rank == 0) {
+ root = MPI_ROOT;
+ in[i] = 815;
+ }
+ else {
+ root = MPI_PROC_NULL;
+ in[i] = 815; /* not needed, just to make correctness checking easier */
+ }
+ }
+ else {
+ root = 0;
+ in[i] = 213; /* garbage value */
+ }
+ MPI_Ibcast(&in[i], 1, MPI_INT, root, test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ sol = 815;
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < ITERS; i++) {
+ if (in[i] != sol)
+ errs++;
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ /* Iallreduce */
+ for (i = 0; i < ITERS; i++) {
+ in[i] = 1;
+ MPI_Iallreduce(&in[i], &out[i], 1, MPI_INT, MPI_SUM, test_comm, &sreq[i]);
+ MPI_Comm_idup(test_comm, &new_comm[i], &sreq[i + ITERS]);
+ }
+ sol = rsize;
+ MPI_Waitall(ITERS * 2, sreq, MPI_STATUSES_IGNORE);
+
+ for (i = 0; i < ITERS; i++) {
+ if (out[i] != sol)
+ errs++;
+ errs += MTestTestComm(new_comm[i]);
+ MPI_Comm_free(&new_comm[i]);
+ }
+ MTestFreeComm(&test_comm);
+ }
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
int main(int argc, char **argv)
{
- int rank;
+ int i, j, rank;
MPI_Info info_in, info_out;
int errors = 0, all_errors = 0;
MPI_Comm comm;
- char __attribute__((unused)) invalid_key[] = "invalid_test_key";
+ void *base;
+ char invalid_key[] = "invalid_test_key";
char buf[MPI_MAX_INFO_VAL];
int flag;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Info_create(&info_in);
- MPI_Info_set(info_in, invalid_key, (char *) "true");
+ MPI_Info_set(info_in, invalid_key, "true");
MPI_Comm_dup(MPI_COMM_WORLD, &comm);
processes in the communicator.
*/
-void abortMsg( const char *, int );
-int BuildComm( MPI_Comm, MPI_Group, const char [] );
+void abortMsg(const char *, int);
+int BuildComm(MPI_Comm, MPI_Group, const char[]);
-void abortMsg( const char *str, int code )
+void abortMsg(const char *str, int code)
{
char msg[MPI_MAX_ERROR_STRING];
int class, resultLen;
- MPI_Error_class( code, &class );
- MPI_Error_string( code, msg, &resultLen );
- fprintf( stderr, "%s: errcode = %d, class = %d, msg = %s\n",
- str, code, class, msg );
- MPI_Abort( MPI_COMM_WORLD, code );
- exit(code);
+ MPI_Error_class(code, &class);
+ MPI_Error_string(code, msg, &resultLen);
+ fprintf(stderr, "%s: errcode = %d, class = %d, msg = %s\n", str, code, class, msg);
+ MPI_Abort(MPI_COMM_WORLD, code);
}
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
- MPI_Comm dupWorld;
- int wrank, wsize, gsize, err, errs = 0;
- int ranges[1][3];
+ MPI_Comm dupWorld;
+ int wrank, wsize, gsize, err, errs = 0;
+ int ranges[1][3];
MPI_Group wGroup, godd, ghigh, geven;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_size( MPI_COMM_WORLD, &wsize );
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
/* Create some groups */
- MPI_Comm_group( MPI_COMM_WORLD, &wGroup );
+ MPI_Comm_group(MPI_COMM_WORLD, &wGroup);
- MTestPrintfMsg( 2, "Creating groups\n" );
- ranges[0][0] = 2*(wsize/2)-1;
+ MTestPrintfMsg(2, "Creating groups\n");
+ ranges[0][0] = 2 * (wsize / 2) - 1;
ranges[0][1] = 1;
ranges[0][2] = -2;
- err = MPI_Group_range_incl( wGroup, 1, ranges, &godd );
- if (err) abortMsg( "Failed to create odd group: ", err );
- err = MPI_Group_size( godd, &gsize );
- if (err) abortMsg( "Failed to get size of odd group: ", err );
- if (gsize != wsize/2) {
- fprintf( stderr, "Group godd size is %d should be %d\n", gsize,
- wsize/2 );
- errs++;
+ err = MPI_Group_range_incl(wGroup, 1, ranges, &godd);
+ if (err)
+ abortMsg("Failed to create odd group: ", err);
+ err = MPI_Group_size(godd, &gsize);
+ if (err)
+ abortMsg("Failed to get size of odd group: ", err);
+ if (gsize != wsize / 2) {
+ fprintf(stderr, "Group godd size is %d should be %d\n", gsize, wsize / 2);
+ errs++;
}
- ranges[0][0] = wsize/2+1;
- ranges[0][1] = wsize-1;
+ ranges[0][0] = wsize / 2 + 1;
+ ranges[0][1] = wsize - 1;
ranges[0][2] = 1;
- err = MPI_Group_range_incl( wGroup, 1, ranges, &ghigh );
- if (err) abortMsg( "Failed to create high group\n", err );
+ err = MPI_Group_range_incl(wGroup, 1, ranges, &ghigh);
+ if (err)
+ abortMsg("Failed to create high group\n", err);
ranges[0][0] = 0;
- ranges[0][1] = wsize-1;
+ ranges[0][1] = wsize - 1;
ranges[0][2] = 2;
- err = MPI_Group_range_incl( wGroup, 1, ranges, &geven );
- if (err) abortMsg( "Failed to create even group:", err );
+ err = MPI_Group_range_incl(wGroup, 1, ranges, &geven);
+ if (err)
+ abortMsg("Failed to create even group:", err);
- MPI_Comm_dup( MPI_COMM_WORLD, &dupWorld );
- MPI_Comm_set_name( dupWorld, (char*)"Dup of world" );
+ MPI_Comm_dup(MPI_COMM_WORLD, &dupWorld);
+ MPI_Comm_set_name(dupWorld, (char *) "Dup of world");
/* First, use the groups to create communicators from world and a dup
- of world */
- errs += BuildComm( MPI_COMM_WORLD, ghigh, "ghigh" );
- errs += BuildComm( MPI_COMM_WORLD, godd, "godd" );
- errs += BuildComm( MPI_COMM_WORLD, geven, "geven" );
- errs += BuildComm( dupWorld, ghigh, "ghigh" );
- errs += BuildComm( dupWorld, godd, "godd" );
- errs += BuildComm( dupWorld, geven, "geven" );
+ * of world */
+ errs += BuildComm(MPI_COMM_WORLD, ghigh, "ghigh");
+ errs += BuildComm(MPI_COMM_WORLD, godd, "godd");
+ errs += BuildComm(MPI_COMM_WORLD, geven, "geven");
+ errs += BuildComm(dupWorld, ghigh, "ghigh");
+ errs += BuildComm(dupWorld, godd, "godd");
+ errs += BuildComm(dupWorld, geven, "geven");
#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
/* check that we can create multiple communicators from a single collective
* call to MPI_Comm_create as long as the groups are all disjoint */
- errs += BuildComm( MPI_COMM_WORLD, (wrank % 2 ? godd : geven), "godd+geven" );
- errs += BuildComm( dupWorld, (wrank % 2 ? godd : geven), "godd+geven" );
- errs += BuildComm( MPI_COMM_WORLD, MPI_GROUP_EMPTY, "MPI_GROUP_EMPTY" );
- errs += BuildComm( dupWorld, MPI_GROUP_EMPTY, "MPI_GROUP_EMPTY" );
+ errs += BuildComm(MPI_COMM_WORLD, (wrank % 2 ? godd : geven), "godd+geven");
+ errs += BuildComm(dupWorld, (wrank % 2 ? godd : geven), "godd+geven");
+ errs += BuildComm(MPI_COMM_WORLD, MPI_GROUP_EMPTY, "MPI_GROUP_EMPTY");
+ errs += BuildComm(dupWorld, MPI_GROUP_EMPTY, "MPI_GROUP_EMPTY");
#endif
- MPI_Comm_free( &dupWorld );
- MPI_Group_free( &ghigh );
- MPI_Group_free( &godd );
- MPI_Group_free( &geven );
- MPI_Group_free( &wGroup );
+ MPI_Comm_free(&dupWorld);
+ MPI_Group_free(&ghigh);
+ MPI_Group_free(&godd);
+ MPI_Group_free(&geven);
+ MPI_Group_free(&wGroup);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
-int BuildComm( MPI_Comm oldcomm, MPI_Group group, const char gname[] )
+int BuildComm(MPI_Comm oldcomm, MPI_Group group, const char gname[])
{
MPI_Comm newcomm;
int grank, gsize, rank, size, errs = 0;
- char cname[MPI_MAX_OBJECT_NAME+1];
- int cnamelen;
-
- MPI_Group_rank( group, &grank );
- MPI_Group_size( group, &gsize );
- MPI_Comm_get_name( oldcomm, cname, &cnamelen );
- MTestPrintfMsg( 2, "Testing comm %s from %s\n", cname, gname );
- MPI_Comm_create( oldcomm, group, &newcomm );
+ char cname[MPI_MAX_OBJECT_NAME + 1];
+ int cnamelen;
+
+ MPI_Group_rank(group, &grank);
+ MPI_Group_size(group, &gsize);
+ MPI_Comm_get_name(oldcomm, cname, &cnamelen);
+ MTestPrintfMsg(2, "Testing comm %s from %s\n", cname, gname);
+ MPI_Comm_create(oldcomm, group, &newcomm);
if (newcomm == MPI_COMM_NULL && grank != MPI_UNDEFINED) {
- errs ++;
- fprintf( stderr, "newcomm is null but process is in group\n" );
+ errs++;
+ fprintf(stderr, "newcomm is null but process is in group\n");
}
if (newcomm != MPI_COMM_NULL && grank == MPI_UNDEFINED) {
- errs ++;
- fprintf( stderr, "newcomm is not null but process is not in group\n" );
+ errs++;
+ fprintf(stderr, "newcomm is not null but process is not in group\n");
}
if (newcomm != MPI_COMM_NULL && grank != MPI_UNDEFINED) {
- MPI_Comm_rank( newcomm, &rank );
- if (rank != grank) {
- errs ++;
- fprintf( stderr, "Rank is %d should be %d in comm from %s\n",
- rank, grank, gname );
- }
- MPI_Comm_size( newcomm, &size );
- if (size != gsize) {
- errs++;
- fprintf( stderr, "Size is %d should be %d in comm from %s\n",
- size, gsize, gname );
- }
- MPI_Comm_free( &newcomm );
- MTestPrintfMsg( 2, "Done testing comm %s from %s\n", cname, gname );
+ MPI_Comm_rank(newcomm, &rank);
+ if (rank != grank) {
+ errs++;
+ fprintf(stderr, "Rank is %d should be %d in comm from %s\n", rank, grank, gname);
+ }
+ MPI_Comm_size(newcomm, &size);
+ if (size != gsize) {
+ errs++;
+ fprintf(stderr, "Size is %d should be %d in comm from %s\n", size, gsize, gname);
+ }
+ MPI_Comm_free(&newcomm);
+ MTestPrintfMsg(2, "Done testing comm %s from %s\n", cname, gname);
}
return errs;
}
#include <string.h>
#endif
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
MPI_Comm comm;
int cnt, rlen;
char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
/* Check world and self firt */
nameout[0] = 0;
- MPI_Comm_get_name( MPI_COMM_WORLD, nameout, &rlen );
- if (strcmp(nameout,"MPI_COMM_WORLD")) {
- errs++;
- printf( "Name of comm world is %s, should be MPI_COMM_WORLD\n",
- nameout );
+ MPI_Comm_get_name(MPI_COMM_WORLD, nameout, &rlen);
+ if (strcmp(nameout, "MPI_COMM_WORLD")) {
+ errs++;
+ printf("Name of comm world is %s, should be MPI_COMM_WORLD\n", nameout);
}
nameout[0] = 0;
- MPI_Comm_get_name( MPI_COMM_SELF, nameout, &rlen );
- if (strcmp(nameout,"MPI_COMM_SELF")) {
- errs++;
- printf( "Name of comm self is %s, should be MPI_COMM_SELF\n",
- nameout );
+ MPI_Comm_get_name(MPI_COMM_SELF, nameout, &rlen);
+ if (strcmp(nameout, "MPI_COMM_SELF")) {
+ errs++;
+ printf("Name of comm self is %s, should be MPI_COMM_SELF\n", nameout);
}
/* Now, handle other communicators, including world/self */
cnt = 0;
- while (MTestGetComm( &comm, 1 )) {
- if (comm == MPI_COMM_NULL) continue;
-
- sprintf( name, "comm-%d", cnt );
- cnt++;
- MPI_Comm_set_name( comm, name );
- nameout[0] = 0;
- MPI_Comm_get_name( comm, nameout, &rlen );
- if (strcmp( name, nameout )) {
- errs++;
- printf( "Unexpected name, was %s but should be %s\n",
- nameout, name );
- }
-
- MTestFreeComm( &comm );
+ while (MTestGetComm(&comm, 1)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+
+ sprintf(name, "comm-%d", cnt);
+ cnt++;
+ MPI_Comm_set_name(comm, name);
+ nameout[0] = 0;
+ MPI_Comm_get_name(comm, nameout, &rlen);
+ if (strcmp(name, nameout)) {
+ errs++;
+ printf("Unexpected name, was %s but should be %s\n", nameout, name);
+ }
+
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
/*
* This program tests the allocation (and deallocation) of contexts.
- *
+ *
*/
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
int errs = 0;
int i, j, err;
MPI_Comm newcomm1, newcomm2[200];
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
/* Get a separate communicator to duplicate */
- MPI_Comm_dup( MPI_COMM_WORLD, &newcomm1 );
+ MPI_Comm_dup(MPI_COMM_WORLD, &newcomm1);
- MPI_Errhandler_set( newcomm1, MPI_ERRORS_RETURN );
+ MPI_Errhandler_set(newcomm1, MPI_ERRORS_RETURN);
/* Allocate many communicators in batches, then free them */
- for (i=0; i<1000; i++) {
- for (j=0; j<200; j++) {
- err = MPI_Comm_dup( newcomm1, &newcomm2[j] );
- if (err) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "Failed to duplicate communicator for (%d,%d)\n", i, j );
- MTestPrintError( err );
- }
- }
- }
- for (j=0; j<200; j++) {
- err = MPI_Comm_free( &newcomm2[j] );
- if (err) {
- errs++;
- if (errs < 10) {
- fprintf( stderr, "Failed to free %d,%d\n", i, j );
- MTestPrintError( err );
- }
- }
- }
+ for (i = 0; i < 1000; i++) {
+ for (j = 0; j < 200; j++) {
+ err = MPI_Comm_dup(newcomm1, &newcomm2[j]);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "Failed to duplicate communicator for (%d,%d)\n", i, j);
+ MTestPrintError(err);
+ }
+ }
+ }
+ for (j = 0; j < 200; j++) {
+ err = MPI_Comm_free(&newcomm2[j]);
+ if (err) {
+ errs++;
+ if (errs < 10) {
+ fprintf(stderr, "Failed to free %d,%d\n", i, j);
+ MTestPrintError(err);
+ }
+ }
+ }
}
- err = MPI_Comm_free( &newcomm1 );
+ err = MPI_Comm_free(&newcomm1);
if (err) {
- errs++;
- fprintf( stderr, "Failed to free newcomm1\n" );
- MTestPrintError( err );
+ errs++;
+ fprintf(stderr, "Failed to free newcomm1\n");
+ MTestPrintError(err);
}
-
- MTest_Finalize( errs );
+
+ MTest_Finalize(errs);
MPI_Finalize();
#include "mpitest.h"
/*
- * This check is intended to fail if there is a leak of context ids.
+ * This check is intended to fail if there is a leak of context ids.
* Because this is trying to exhaust the number of context ids, it needs
- * to run for a longer time than many tests. The for loop uses 100,000
+ * to run for a longer time than many tests. The for loop uses 100,000
* iterations, which is adequate for MPICH (with only about 1k context ids
* available).
*/
-int main(int argc, char** argv) {
+int main(int argc, char **argv)
+{
- int i=0;
- int randval;
- int rank;
- int errs = 0;
- MPI_Comm newcomm;
- double startTime;
- int nLoop = 100000;
-
- MTest_Init(&argc,&argv);
+ int i = 0;
+ int randval;
+ int rank;
+ int errs = 0;
+ MPI_Comm newcomm;
+ double startTime;
+ int nLoop = 100000;
- for (i=1; i<argc; i++) {
- if (strcmp( argv[i], "--loopcount" ) == 0) {
- i++;
- nLoop = atoi( argv[i] );
- }
- else {
- fprintf( stderr, "Unrecognized argument %s\n", argv[i] );
- }
- }
+ MTest_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "--loopcount") == 0) {
+ i++;
+ nLoop = atoi(argv[i]);
+ }
+ else {
+ fprintf(stderr, "Unrecognized argument %s\n", argv[i]);
+ }
+ }
- startTime = MPI_Wtime();
- for (i=0; i<nLoop; i++) {
-
- if ( rank == 0 && (i%100 == 0) ) {
- double rate = MPI_Wtime() - startTime;
- if (rate > 0) {
- rate = i / rate;
- MTestPrintfMsg( 10, "After %d (%f)\n", i, rate );
- }
- else {
- MTestPrintfMsg( 10, "After %d\n", i );
- }
- }
-
- /* FIXME: Explain the rationale behind rand in this test */
- randval=rand();
-
- if (randval%(rank+2) == 0) {
- MPI_Comm_split(MPI_COMM_WORLD,1,rank,&newcomm);
- MPI_Comm_free( &newcomm );
- }
- else {
- MPI_Comm_split(MPI_COMM_WORLD,MPI_UNDEFINED,rank,&newcomm);
- if (newcomm != MPI_COMM_NULL) {
- errs++;
- printf( "Created a non-null communicator with MPI_UNDEFINED\n" );
- }
- }
-
- }
-
- MTest_Finalize( errs );
- MPI_Finalize();
-
- return 0;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ startTime = MPI_Wtime();
+ for (i = 0; i < nLoop; i++) {
+
+ if (rank == 0 && (i % 100 == 0)) {
+ double rate = MPI_Wtime() - startTime;
+ if (rate > 0) {
+ rate = i / rate;
+ MTestPrintfMsg(10, "After %d (%f)\n", i, rate);
+ }
+ else {
+ MTestPrintfMsg(10, "After %d\n", i);
+ }
+ }
+
+ /* FIXME: Explain the rationale behind rand in this test */
+ randval = rand();
+
+ if (randval % (rank + 2) == 0) {
+ MPI_Comm_split(MPI_COMM_WORLD, 1, rank, &newcomm);
+ MPI_Comm_free(&newcomm);
+ }
+ else {
+ MPI_Comm_split(MPI_COMM_WORLD, MPI_UNDEFINED, rank, &newcomm);
+ if (newcomm != MPI_COMM_NULL) {
+ errs++;
+ printf("Created a non-null communicator with MPI_UNDEFINED\n");
+ }
+ }
+
+ }
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+
+ return 0;
}
#include <stdio.h>
#include "mpitest.h"
-int main( int argc, char **argv )
+int main(int argc, char **argv)
{
int errs = 0;
int rank, size, wrank, wsize, dest, a, b;
MPI_Comm newcomm;
MPI_Status status;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
/* Can we run comm dup at all? */
- MPI_Comm_dup( MPI_COMM_WORLD, &newcomm );
+ MPI_Comm_dup(MPI_COMM_WORLD, &newcomm);
/* Check basic properties */
- MPI_Comm_size( MPI_COMM_WORLD, &wsize );
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
- MPI_Comm_size( newcomm, &size );
- MPI_Comm_rank( newcomm, &rank );
-
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(newcomm, &size);
+ MPI_Comm_rank(newcomm, &rank);
+
if (size != wsize || rank != wrank) {
- errs++;
- fprintf( stderr, "Size (%d) or rank (%d) wrong\n", size, rank );
- fflush( stderr );
+ errs++;
+ fprintf(stderr, "Size (%d) or rank (%d) wrong\n", size, rank);
+ fflush(stderr);
}
/* Can we communicate with this new communicator? */
dest = MPI_PROC_NULL;
if (rank == 0) {
- dest = size - 1;
- a = rank;
- b = -1;
- MPI_Sendrecv( &a, 1, MPI_INT, dest, 0,
- &b, 1, MPI_INT, dest, 0, newcomm, &status );
- if (b != dest) {
- errs++;
- fprintf( stderr, "Received %d expected %d on %d\n", b, dest, rank );
- fflush( stderr );
- }
- if (status.MPI_SOURCE != dest) {
- errs++;
- fprintf( stderr, "Source not set correctly in status on %d\n",
- rank );
- fflush( stderr );
- }
+ dest = size - 1;
+ a = rank;
+ b = -1;
+ MPI_Sendrecv(&a, 1, MPI_INT, dest, 0, &b, 1, MPI_INT, dest, 0, newcomm, &status);
+ if (b != dest) {
+ errs++;
+ fprintf(stderr, "Received %d expected %d on %d\n", b, dest, rank);
+ fflush(stderr);
+ }
+ if (status.MPI_SOURCE != dest) {
+ errs++;
+ fprintf(stderr, "Source not set correctly in status on %d\n", rank);
+ fflush(stderr);
+ }
}
- else if (rank == size-1) {
- dest = 0;
- a = rank;
- b = -1;
- MPI_Sendrecv( &a, 1, MPI_INT, dest, 0,
- &b, 1, MPI_INT, dest, 0, newcomm, &status );
- if (b != dest) {
- errs++;
- fprintf( stderr, "Received %d expected %d on %d\n", b, dest, rank );
- fflush( stderr );
- }
- if (status.MPI_SOURCE != dest) {
- errs++;
- fprintf( stderr, "Source not set correctly in status on %d\n",
- rank );
- fflush( stderr );
- }
+ else if (rank == size - 1) {
+ dest = 0;
+ a = rank;
+ b = -1;
+ MPI_Sendrecv(&a, 1, MPI_INT, dest, 0, &b, 1, MPI_INT, dest, 0, newcomm, &status);
+ if (b != dest) {
+ errs++;
+ fprintf(stderr, "Received %d expected %d on %d\n", b, dest, rank);
+ fflush(stderr);
+ }
+ if (status.MPI_SOURCE != dest) {
+ errs++;
+ fprintf(stderr, "Source not set correctly in status on %d\n", rank);
+ fflush(stderr);
+ }
}
- MPI_Comm_free( &newcomm );
+ MPI_Comm_free(&newcomm);
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
#include "mpi.h"
#include <stdio.h>
#include "mpitest.h"
-int run_tests(MPI_Comm comm);
+
int run_tests(MPI_Comm comm)
{
int rank, size, wrank, wsize, dest, a, b, errs = 0;
#include <stdio.h>
#include "mpitest.h"
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
MPI_Comm comm, dupcomm, dupcomm2;
int r1buf, r2buf, s1buf, s2buf;
int rank, isLeft;
- MTest_Init( &argc, &argv );
-
- while (MTestGetIntercomm( &comm, &isLeft, 2 )) {
- if (comm == MPI_COMM_NULL) continue;
+ MTest_Init(&argc, &argv);
- MPI_Comm_dup( comm, &dupcomm );
-
- /* Check that there are separate contexts. We do this by setting
- up nonblocking received on both communicators, and then
- sending to them. If the contexts are different, tests on the
- unsatisfied communicator should indicate no available message */
- MPI_Comm_rank( comm, &rank );
- if (rank == 0) {
- s1buf = 456;
- s2buf = 17;
- r1buf = r2buf = -1;
- /* These are send/receives to the process with rank zero
- in the other group (these are intercommunicators) */
- MPI_Irecv( &r1buf, 1, MPI_INT, 0, 0, dupcomm, &rreq[0] );
- MPI_Irecv( &r2buf, 1, MPI_INT, 0, 0, comm, &rreq[1] );
- MPI_Send( &s2buf, 1, MPI_INT, 0, 0, comm );
- MPI_Waitsome(2, rreq, &count, indicies, MPI_STATUSES_IGNORE);
- if (count != 1 || indicies[0] != 1) {
- /* The only valid return is that exactly one message
- has been received */
- errs++;
- if (count == 1 && indicies[0] != 1) {
- printf( "Error in context values for intercomm\n" );
- }
- else if (count == 2) {
- printf( "Error: two messages received!\n" );
- }
- else {
- int i;
- printf( "Error: count = %d", count );
- for (i=0; i<count; i++) {
- printf( " indicies[%d] = %d", i, indicies[i] );
- }
- printf( "\n" );
- }
- }
-
- /* Make sure that we do not send the next message until
- the other process (rank zero in the other group)
- has also completed the first step */
- MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 0, 37,
- MPI_BOTTOM, 0, MPI_BYTE, 0, 37, comm,
- MPI_STATUS_IGNORE );
+ while (MTestGetIntercomm(&comm, &isLeft, 2)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
- /* Complete the receive on dupcomm */
- MPI_Send( &s1buf, 1, MPI_INT, 0, 0, dupcomm );
- MPI_Wait( &rreq[0], MPI_STATUS_IGNORE );
- if (r1buf != s1buf) {
- errs++;
- printf( "Wrong value in communication on dupcomm %d != %d\n",
- r1buf, s1buf );
- }
- if (r2buf != s2buf) {
- errs++;
- printf( "Wrong value in communication on comm %d != %d\n",
- r2buf, s2buf );
- }
- }
- /* Try to duplicate a duplicated intercomm. (This caused problems
- with some MPIs) */
- MPI_Comm_dup( dupcomm, &dupcomm2 );
- MPI_Comm_free( &dupcomm2 );
- MPI_Comm_free( &dupcomm );
- MTestFreeComm( &comm );
+ MPI_Comm_dup(comm, &dupcomm);
+
+ /* Check that there are separate contexts. We do this by setting
+ * up nonblocking received on both communicators, and then
+ * sending to them. If the contexts are different, tests on the
+ * unsatisfied communicator should indicate no available message */
+ MPI_Comm_rank(comm, &rank);
+ if (rank == 0) {
+ s1buf = 456;
+ s2buf = 17;
+ r1buf = r2buf = -1;
+ /* These are send/receives to the process with rank zero
+ * in the other group (these are intercommunicators) */
+ MPI_Irecv(&r1buf, 1, MPI_INT, 0, 0, dupcomm, &rreq[0]);
+ MPI_Irecv(&r2buf, 1, MPI_INT, 0, 0, comm, &rreq[1]);
+ MPI_Send(&s2buf, 1, MPI_INT, 0, 0, comm);
+ MPI_Waitsome(2, rreq, &count, indicies, MPI_STATUSES_IGNORE);
+ if (count != 1 || indicies[0] != 1) {
+ /* The only valid return is that exactly one message
+ * has been received */
+ errs++;
+ if (count == 1 && indicies[0] != 1) {
+ printf("Error in context values for intercomm\n");
+ }
+ else if (count == 2) {
+ printf("Error: two messages received!\n");
+ }
+ else {
+ int i;
+ printf("Error: count = %d", count);
+ for (i = 0; i < count; i++) {
+ printf(" indicies[%d] = %d", i, indicies[i]);
+ }
+ printf("\n");
+ }
+ }
+
+ /* Make sure that we do not send the next message until
+ * the other process (rank zero in the other group)
+ * has also completed the first step */
+ MPI_Sendrecv(MPI_BOTTOM, 0, MPI_BYTE, 0, 37,
+ MPI_BOTTOM, 0, MPI_BYTE, 0, 37, comm, MPI_STATUS_IGNORE);
+
+ /* Complete the receive on dupcomm */
+ MPI_Send(&s1buf, 1, MPI_INT, 0, 0, dupcomm);
+ MPI_Wait(&rreq[0], MPI_STATUS_IGNORE);
+ if (r1buf != s1buf) {
+ errs++;
+ printf("Wrong value in communication on dupcomm %d != %d\n", r1buf, s1buf);
+ }
+ if (r2buf != s2buf) {
+ errs++;
+ printf("Wrong value in communication on comm %d != %d\n", r2buf, s2buf);
+ }
+ }
+ /* Try to duplicate a duplicated intercomm. (This caused problems
+ * with some MPIs) */
+ MPI_Comm_dup(dupcomm, &dupcomm2);
+ MPI_Comm_free(&dupcomm2);
+ MPI_Comm_free(&dupcomm);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
-
+
}
#include <stdio.h>
#include "mpitest.h"
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
MPI_Comm intercomm;
- int remote_rank, rank, size, errs = 0;
+ int remote_rank, rank, size, errs = 0;
+ volatile int trigger;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
+ trigger = 1;
+/* while (trigger) ; */
-
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 2) {
- printf( "Size must be at least 2\n" );
- MPI_Abort( MPI_COMM_WORLD, 0 );
- exit(0);
+ printf("Size must be at least 2\n");
+ MPI_Abort(MPI_COMM_WORLD, 0);
}
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* Make an intercomm of the first two elements of comm_world */
if (rank < 2) {
- int lrank = rank, rrank = -1;
- MPI_Status status;
+ int lrank = rank, rrank = -1;
+ MPI_Status status;
- remote_rank = 1 - rank;
- MPI_Intercomm_create( MPI_COMM_SELF, 0,
- MPI_COMM_WORLD, remote_rank, 27,
- &intercomm );
+ remote_rank = 1 - rank;
+ MPI_Intercomm_create(MPI_COMM_SELF, 0, MPI_COMM_WORLD, remote_rank, 27, &intercomm);
- /* Now, communicate between them */
- MPI_Sendrecv( &lrank, 1, MPI_INT, 0, 13,
- &rrank, 1, MPI_INT, 0, 13, intercomm, &status );
+ /* Now, communicate between them */
+ MPI_Sendrecv(&lrank, 1, MPI_INT, 0, 13, &rrank, 1, MPI_INT, 0, 13, intercomm, &status);
- if (rrank != remote_rank) {
- errs++;
- printf( "%d Expected %d but received %d\n",
- rank, remote_rank, rrank );
- }
+ if (rrank != remote_rank) {
+ errs++;
+ printf("%d Expected %d but received %d\n", rank, remote_rank, rrank);
+ }
- MPI_Comm_free( &intercomm );
+ MPI_Comm_free(&intercomm);
}
-
+
/* The next test should create an intercomm with groups of different
- sizes FIXME */
+ * sizes FIXME */
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
-
+
return 0;
}
if (size < 33) {
printf("ERROR: this test requires at least 33 processes\n");
MPI_Abort(MPI_COMM_WORLD, 1);
- exit(1);
+ return 1;
}
/* group of c0
MPI_Comm_free(&ic);
- MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &errs), &errs,
- 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ MPI_Reduce((rank == 0 ? MPI_IN_PLACE : &errs), &errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
if (errs) {
printf("found %d errors\n", errs);
return 0;
}
-
* This program tests that MPI_Comm_create applies to intercommunicators;
* this is an extension added in MPI-2
*/
-
-int TestIntercomm( MPI_Comm );
-
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int size, isLeft, wrank;
MPI_Comm intercomm, newcomm;
MPI_Group oldgroup, newgroup;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 4) {
- printf( "This test requires at least 4 processes\n" );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ printf("This test requires at least 4 processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
-
- while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
- int ranks[10], nranks, result;
-
- if (intercomm == MPI_COMM_NULL) continue;
-
- MPI_Comm_group( intercomm, &oldgroup );
- ranks[0] = 0;
- nranks = 1;
- MTestPrintfMsg( 1, "Creating a new intercomm 0-0\n" );
- MPI_Group_incl( oldgroup, nranks, ranks, &newgroup );
- MPI_Comm_create( intercomm, newgroup, &newcomm );
-
- /* Make sure that the new communicator has the appropriate pieces */
- if (newcomm != MPI_COMM_NULL) {
- int new_rsize, new_size, flag, commok = 1;
-
- MPI_Comm_set_name( newcomm, (char*)"Single rank in each group" );
- MPI_Comm_test_inter( intercomm, &flag );
- if (!flag) {
- errs++;
- printf( "[%d] Output communicator is not an intercomm\n",
- wrank );
- commok = 0;
- }
-
- MPI_Comm_remote_size( newcomm, &new_rsize );
- MPI_Comm_size( newcomm, &new_size );
- /* The new communicator has 1 process in each group */
- if (new_rsize != 1) {
- errs++;
- printf( "[%d] Remote size is %d, should be one\n",
- wrank, new_rsize );
- commok = 0;
- }
- if (new_size != 1) {
- errs++;
- printf( "[%d] Local size is %d, should be one\n",
- wrank, new_size );
- commok = 0;
- }
- /* ... more to do */
- if (commok) {
- errs += TestIntercomm( newcomm );
- }
- }
- MPI_Group_free( &newgroup );
- if (newcomm != MPI_COMM_NULL) {
- MPI_Comm_free( &newcomm );
- }
-
- /* Now, do a sort of dup, using the original group */
- MTestPrintfMsg( 1, "Creating a new intercomm (manual dup)\n" );
- MPI_Comm_create( intercomm, oldgroup, &newcomm );
- MPI_Comm_set_name( newcomm, (char*)"Dup of original" );
- MTestPrintfMsg( 1, "Creating a new intercomm (manual dup (done))\n" );
-
- MPI_Comm_compare( intercomm, newcomm, &result );
- MTestPrintfMsg( 1, "Result of comm/intercomm compare is %d\n", result );
- if (result != MPI_CONGRUENT) {
- const char *rname=0;
- errs++;
- switch (result) {
- case MPI_IDENT: rname = "IDENT"; break;
- case MPI_CONGRUENT: rname = "CONGRUENT"; break;
- case MPI_SIMILAR: rname = "SIMILAR"; break;
- case MPI_UNEQUAL: rname = "UNEQUAL"; break;
- printf( "[%d] Expected MPI_CONGRUENT but saw %d (%s)",
- wrank, result, rname ); fflush(stdout);
- }
- }
- else {
- /* Try to communication between each member of intercomm */
- errs += TestIntercomm( newcomm );
- }
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+
+ while (MTestGetIntercomm(&intercomm, &isLeft, 2)) {
+ int ranks[10], nranks, result;
+
+ if (intercomm == MPI_COMM_NULL)
+ continue;
+
+ MPI_Comm_group(intercomm, &oldgroup);
+ ranks[0] = 0;
+ nranks = 1;
+ MTestPrintfMsg(1, "Creating a new intercomm 0-0\n");
+ MPI_Group_incl(oldgroup, nranks, ranks, &newgroup);
+ MPI_Comm_create(intercomm, newgroup, &newcomm);
+
+ /* Make sure that the new communicator has the appropriate pieces */
+ if (newcomm != MPI_COMM_NULL) {
+ int new_rsize, new_size, flag, commok = 1;
+
+ MPI_Comm_set_name(newcomm, (char *) "Single rank in each group");
+ MPI_Comm_test_inter(intercomm, &flag);
+ if (!flag) {
+ errs++;
+ printf("[%d] Output communicator is not an intercomm\n", wrank);
+ commok = 0;
+ }
+
+ MPI_Comm_remote_size(newcomm, &new_rsize);
+ MPI_Comm_size(newcomm, &new_size);
+ /* The new communicator has 1 process in each group */
+ if (new_rsize != 1) {
+ errs++;
+ printf("[%d] Remote size is %d, should be one\n", wrank, new_rsize);
+ commok = 0;
+ }
+ if (new_size != 1) {
+ errs++;
+ printf("[%d] Local size is %d, should be one\n", wrank, new_size);
+ commok = 0;
+ }
+ /* ... more to do */
+ if (commok) {
+ errs += MTestTestComm(newcomm);
+ }
+ }
+ MPI_Group_free(&newgroup);
+ if (newcomm != MPI_COMM_NULL) {
+ MPI_Comm_free(&newcomm);
+ }
+
+ /* Now, do a sort of dup, using the original group */
+ MTestPrintfMsg(1, "Creating a new intercomm (manual dup)\n");
+ MPI_Comm_create(intercomm, oldgroup, &newcomm);
+ MPI_Comm_set_name(newcomm, (char *) "Dup of original");
+ MTestPrintfMsg(1, "Creating a new intercomm (manual dup (done))\n");
+
+ MPI_Comm_compare(intercomm, newcomm, &result);
+ MTestPrintfMsg(1, "Result of comm/intercomm compare is %d\n", result);
+ if (result != MPI_CONGRUENT) {
+ const char *rname = 0;
+ errs++;
+ switch (result) {
+ case MPI_IDENT:
+ rname = "IDENT";
+ break;
+ case MPI_CONGRUENT:
+ rname = "CONGRUENT";
+ break;
+ case MPI_SIMILAR:
+ rname = "SIMILAR";
+ break;
+ case MPI_UNEQUAL:
+ rname = "UNEQUAL";
+ break;
+ }
+ printf("[%d] Expected MPI_CONGRUENT but saw %d (%s)", wrank, result, rname);
+ fflush(stdout);
+ }
+ else {
+ /* Try to communication between each member of intercomm */
+ errs += MTestTestComm(newcomm);
+ }
if (newcomm != MPI_COMM_NULL) {
MPI_Comm_free(&newcomm);
MPI_Comm_create(intercomm, MPI_GROUP_EMPTY, &newcomm);
}
if (newcomm != MPI_COMM_NULL) {
- printf("[%d] expected MPI_COMM_NULL, but got a different communicator\n", wrank); fflush(stdout);
+ printf("[%d] expected MPI_COMM_NULL, but got a different communicator\n", wrank);
+ fflush(stdout);
errs++;
}
if (newcomm != MPI_COMM_NULL) {
MPI_Comm_free(&newcomm);
}
- MPI_Group_free( &oldgroup );
- MPI_Comm_free( &intercomm );
+ MPI_Group_free(&oldgroup);
+ MPI_Comm_free(&intercomm);
}
MTest_Finalize(errs);
return 0;
}
-
-int TestIntercomm( MPI_Comm comm )
-{
- int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
- int errs = 0, wrank, nsize;
- char commname[MPI_MAX_OBJECT_NAME+1];
- MPI_Request *reqs;
-
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
- MPI_Comm_size( comm, &local_size );
- MPI_Comm_remote_size( comm, &remote_size );
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_get_name( comm, commname, &nsize );
-
- MTestPrintfMsg( 1, "Testing communication on intercomm '%s', remote_size=%d\n",
- commname, remote_size );
-
- reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
- if (!reqs) {
- printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufs = (int **) malloc( remote_size * sizeof(int *) );
- if (!bufs) {
- printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
- if (!bufmem) {
- printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
- wrank, 2*remote_size, commname );
- errs++;
- return errs;
- }
-
- /* Each process sends a message containing its own rank and the
- rank of the destination with a nonblocking send. Because we're using
- nonblocking sends, we need to use different buffers for each isend */
- /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although
- it doesn't really hurt to keep separate buffers for our purposes */
- for (j=0; j<remote_size; j++) {
- bufs[j] = &bufmem[2*j];
- bufs[j][0] = rank;
- bufs[j][1] = j;
- MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
- }
- MTestPrintfMsg( 2, "isends posted, about to recv\n" );
-
- for (j=0; j<remote_size; j++) {
- MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
- if (rbuf[0] != j) {
- printf( "[%d] Expected rank %d but saw %d in %s\n",
- wrank, j, rbuf[0], commname );
- errs++;
- }
- if (rbuf[1] != rank) {
- printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
- wrank, rank, rbuf[1], j, commname );
- errs++;
- }
- }
- if (errs)
- fflush(stdout);
- MTestPrintfMsg( 2, "my recvs completed, about to waitall\n" );
- MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );
-
- free( reqs );
- free( bufs );
- free( bufmem );
-
- return errs;
-}
static char MTEST_Descrip[] = "Get the group of an intercommunicator";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, grank, gsize;
- int minsize = 2, isleft;
- MPI_Comm comm;
- MPI_Group group;
+ int minsize = 2, isleft;
+ MPI_Comm comm;
+ MPI_Group group;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- /* The following illustrates the use of the routines to
- run through a selection of communicators and datatypes.
- Use subsets of these for tests that do not involve combinations
- of communicators, datatypes, and counts of datatypes */
- while (MTestGetIntercomm( &comm, &isleft, minsize )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_size( comm, &size );
- MPI_Comm_group( comm, &group );
- MPI_Group_rank( group, &grank );
- MPI_Group_size( group, &gsize );
- if (rank != grank) {
- errs++;
- fprintf( stderr, "Ranks of groups do not match %d != %d\n",
- rank, grank );
- }
- if (size != gsize) {
- errs++;
- fprintf( stderr, "Sizes of groups do not match %d != %d\n",
- size, gsize );
- }
- MPI_Group_free( &group );
- MTestFreeComm( &comm );
+ /* The following illustrates the use of the routines to
+ * run through a selection of communicators and datatypes.
+ * Use subsets of these for tests that do not involve combinations
+ * of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntercomm(&comm, &isleft, minsize)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_size(comm, &size);
+ MPI_Comm_group(comm, &group);
+ MPI_Group_rank(group, &grank);
+ MPI_Group_size(group, &gsize);
+ if (rank != grank) {
+ errs++;
+ fprintf(stderr, "Ranks of groups do not match %d != %d\n", rank, grank);
+ }
+ if (size != gsize) {
+ errs++;
+ fprintf(stderr, "Sizes of groups do not match %d != %d\n", size, gsize);
+ }
+ MPI_Group_free(&group);
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
static char MTEST_Descrip[] = "Test intercomm merge, including the choice of the high value";
*/
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int rank, size, rsize;
int nsize, nrank;
int minsize = 2;
int isLeft;
- MPI_Comm comm, comm1, comm2, comm3, comm4;
+ MPI_Comm comm, comm1, comm2, comm3, comm4;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- /* The following illustrates the use of the routines to
- run through a selection of communicators and datatypes.
- Use subsets of these for tests that do not involve combinations
- of communicators, datatypes, and counts of datatypes */
- while (MTestGetIntercomm( &comm, &isLeft, minsize )) {
- if (comm == MPI_COMM_NULL) continue;
- /* Determine the sender and receiver */
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_remote_size( comm, &rsize );
- MPI_Comm_size( comm, &size );
+ /* The following illustrates the use of the routines to
+ * run through a selection of communicators and datatypes.
+ * Use subsets of these for tests that do not involve combinations
+ * of communicators, datatypes, and counts of datatypes */
+ while (MTestGetIntercomm(&comm, &isLeft, minsize)) {
+ if (comm == MPI_COMM_NULL)
+ continue;
+ /* Determine the sender and receiver */
+ MPI_Comm_rank(comm, &rank);
+ MPI_Comm_remote_size(comm, &rsize);
+ MPI_Comm_size(comm, &size);
- /* Try building intercomms */
- MPI_Intercomm_merge( comm, isLeft, &comm1 );
- /* Check the size and ranks */
- MPI_Comm_size( comm1, &nsize );
- MPI_Comm_rank( comm1, &nrank );
- if (nsize != size + rsize) {
- errs++;
- printf( "(1) Comm size is %d but should be %d\n", nsize,
- size + rsize );
- if (isLeft) {
- /* The left processes should be high */
- if (nrank != rsize + rank) {
- errs++;
- printf( "(1) rank for high process is %d should be %d\n",
- nrank, rsize + rank );
- }
- }
- else {
- /* The right processes should be low */
- if (nrank != rank) {
- errs++;
- printf( "(1) rank for low process is %d should be %d\n",
- nrank, rank );
- }
- }
- }
-
- MPI_Intercomm_merge( comm, !isLeft, &comm2 );
- /* Check the size and ranks */
- MPI_Comm_size( comm1, &nsize );
- MPI_Comm_rank( comm1, &nrank );
- if (nsize != size + rsize) {
- errs++;
- printf( "(2) Comm size is %d but should be %d\n", nsize,
- size + rsize );
- if (!isLeft) {
- /* The right processes should be high */
- if (nrank != rsize + rank) {
- errs++;
- printf( "(2) rank for high process is %d should be %d\n",
- nrank, rsize + rank );
- }
- }
- else {
- /* The left processes should be low */
- if (nrank != rank) {
- errs++;
- printf( "(2) rank for low process is %d should be %d\n",
- nrank, rank );
- }
- }
- }
-
+ /* Try building intercomms */
+ MPI_Intercomm_merge(comm, isLeft, &comm1);
+ /* Check the size and ranks */
+ MPI_Comm_size(comm1, &nsize);
+ MPI_Comm_rank(comm1, &nrank);
+ if (nsize != size + rsize) {
+ errs++;
+ printf("(1) Comm size is %d but should be %d\n", nsize, size + rsize);
+ if (isLeft) {
+ /* The left processes should be high */
+ if (nrank != rsize + rank) {
+ errs++;
+ printf("(1) rank for high process is %d should be %d\n", nrank, rsize + rank);
+ }
+ }
+ else {
+ /* The right processes should be low */
+ if (nrank != rank) {
+ errs++;
+ printf("(1) rank for low process is %d should be %d\n", nrank, rank);
+ }
+ }
+ }
- MPI_Intercomm_merge( comm, 0, &comm3 );
+ MPI_Intercomm_merge(comm, !isLeft, &comm2);
+ /* Check the size and ranks */
+ MPI_Comm_size(comm2, &nsize);
+ MPI_Comm_rank(comm2, &nrank);
+ if (nsize != size + rsize) {
+ errs++;
+ printf("(2) Comm size is %d but should be %d\n", nsize, size + rsize);
+ if (!isLeft) {
+ /* The right processes should be high */
+ if (nrank != rsize + rank) {
+ errs++;
+ printf("(2) rank for high process is %d should be %d\n", nrank, rsize + rank);
+ }
+ }
+ else {
+ /* The left processes should be low */
+ if (nrank != rank) {
+ errs++;
+ printf("(2) rank for low process is %d should be %d\n", nrank, rank);
+ }
+ }
+ }
- MPI_Intercomm_merge( comm, 1, &comm4 );
-
- MPI_Comm_free( &comm1 );
- MPI_Comm_free( &comm2 );
- MPI_Comm_free( &comm3 );
- MPI_Comm_free( &comm4 );
-
- MTestFreeComm( &comm );
+
+ MPI_Intercomm_merge(comm, 0, &comm3);
+
+ MPI_Intercomm_merge(comm, 1, &comm4);
+
+ MPI_Comm_free(&comm1);
+ MPI_Comm_free(&comm2);
+ MPI_Comm_free(&comm3);
+ MPI_Comm_free(&comm4);
+
+ MTestFreeComm(&comm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}
* This program tests that MPI_Comm_split applies to intercommunicators;
* this is an extension added in MPI-2
*/
-
-int TestIntercomm( MPI_Comm );
-
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0;
int size, isLeft;
MPI_Comm intercomm, newcomm;
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 4) {
- printf( "This test requires at least 4 processes\n" );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ printf("This test requires at least 4 processes\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
- int key, color;
-
- if (intercomm == MPI_COMM_NULL) continue;
-
- /* Split this intercomm. The new intercomms contain the
- processes that had odd (resp even) rank in their local group
- in the original intercomm */
- MTestPrintfMsg( 1, "Created intercomm %s\n", MTestGetIntercommName() );
- MPI_Comm_rank( intercomm, &key );
- color = (key % 2);
- MPI_Comm_split( intercomm, color, key, &newcomm );
- /* Make sure that the new communicator has the appropriate pieces */
- if (newcomm != MPI_COMM_NULL) {
- int orig_rsize, orig_size, new_rsize, new_size;
- int predicted_size, flag, commok=1;
-
- MPI_Comm_test_inter( intercomm, &flag );
- if (!flag) {
- errs++;
- printf( "Output communicator is not an intercomm\n" );
- commok = 0;
- }
-
- MPI_Comm_remote_size( intercomm, &orig_rsize );
- MPI_Comm_remote_size( newcomm, &new_rsize );
- MPI_Comm_size( intercomm, &orig_size );
- MPI_Comm_size( newcomm, &new_size );
- /* The local size is 1/2 the original size, +1 if the
- size was odd and the color was even. More precisely,
- let n be the orig_size. Then
- color 0 color 1
- orig size even n/2 n/2
- orig size odd (n+1)/2 n/2
-
- However, since these are integer valued, if n is even,
- then (n+1)/2 = n/2, so this table is much simpler:
- color 0 color 1
- orig size even (n+1)/2 n/2
- orig size odd (n+1)/2 n/2
-
- */
- predicted_size = (orig_size + !color) / 2;
- if (predicted_size != new_size) {
- errs++;
- printf( "Predicted size = %d but found %d for %s (%d,%d)\n",
- predicted_size, new_size, MTestGetIntercommName(),
- orig_size, orig_rsize );
- commok = 0;
- }
- predicted_size = (orig_rsize + !color) / 2;
- if (predicted_size != new_rsize) {
- errs++;
- printf( "Predicted remote size = %d but found %d for %s (%d,%d)\n",
- predicted_size, new_rsize, MTestGetIntercommName(),
- orig_size, orig_rsize );
- commok = 0;
- }
- /* ... more to do */
- if (commok) {
- errs += TestIntercomm( newcomm );
- }
- }
- else {
- int orig_rsize;
- /* If the newcomm is null, then this means that remote group
- for this color is of size zero (since all processes in this
- test have been given colors other than MPI_UNDEFINED).
- Confirm that here */
- /* FIXME: ToDo */
- MPI_Comm_remote_size( intercomm, &orig_rsize );
- if (orig_rsize == 1) {
- if (color == 0) {
- errs++;
- printf( "Returned null intercomm when non-null expected\n" );
- }
- }
- }
- if (newcomm != MPI_COMM_NULL)
- MPI_Comm_free( &newcomm );
- MPI_Comm_free( &intercomm );
+ while (MTestGetIntercomm(&intercomm, &isLeft, 2)) {
+ int key, color;
+
+ if (intercomm == MPI_COMM_NULL)
+ continue;
+
+ /* Split this intercomm. The new intercomms contain the
+ * processes that had odd (resp even) rank in their local group
+ * in the original intercomm */
+ MTestPrintfMsg(1, "Created intercomm %s\n", MTestGetIntercommName());
+ MPI_Comm_rank(intercomm, &key);
+ color = (key % 2);
+ MPI_Comm_split(intercomm, color, key, &newcomm);
+ /* Make sure that the new communicator has the appropriate pieces */
+ if (newcomm != MPI_COMM_NULL) {
+ int orig_rsize, orig_size, new_rsize, new_size;
+ int predicted_size, flag, commok = 1;
+
+ MPI_Comm_test_inter(intercomm, &flag);
+ if (!flag) {
+ errs++;
+ printf("Output communicator is not an intercomm\n");
+ commok = 0;
+ }
+
+ MPI_Comm_remote_size(intercomm, &orig_rsize);
+ MPI_Comm_remote_size(newcomm, &new_rsize);
+ MPI_Comm_size(intercomm, &orig_size);
+ MPI_Comm_size(newcomm, &new_size);
+ /* The local size is 1/2 the original size, +1 if the
+ * size was odd and the color was even. More precisely,
+ * let n be the orig_size. Then
+ * color 0 color 1
+ * orig size even n/2 n/2
+ * orig size odd (n+1)/2 n/2
+ *
+ * However, since these are integer valued, if n is even,
+ * then (n+1)/2 = n/2, so this table is much simpler:
+ * color 0 color 1
+ * orig size even (n+1)/2 n/2
+ * orig size odd (n+1)/2 n/2
+ *
+ */
+ predicted_size = (orig_size + !color) / 2;
+ if (predicted_size != new_size) {
+ errs++;
+ printf("Predicted size = %d but found %d for %s (%d,%d)\n",
+ predicted_size, new_size, MTestGetIntercommName(), orig_size, orig_rsize);
+ commok = 0;
+ }
+ predicted_size = (orig_rsize + !color) / 2;
+ if (predicted_size != new_rsize) {
+ errs++;
+ printf("Predicted remote size = %d but found %d for %s (%d,%d)\n",
+ predicted_size, new_rsize, MTestGetIntercommName(), orig_size, orig_rsize);
+ commok = 0;
+ }
+ /* ... more to do */
+ if (commok) {
+ errs += MTestTestComm(newcomm);
+ }
+ }
+ else {
+ int orig_rsize;
+ /* If the newcomm is null, then this means that remote group
+ * for this color is of size zero (since all processes in this
+ * test have been given colors other than MPI_UNDEFINED).
+ * Confirm that here */
+ /* FIXME: ToDo */
+ MPI_Comm_remote_size(intercomm, &orig_rsize);
+ if (orig_rsize == 1) {
+ if (color == 0) {
+ errs++;
+ printf("Returned null intercomm when non-null expected\n");
+ }
+ }
+ }
+ if (newcomm != MPI_COMM_NULL)
+ MPI_Comm_free(&newcomm);
+ MPI_Comm_free(&intercomm);
}
MTest_Finalize(errs);
return 0;
}
-
-/* FIXME: This is copied from iccreate. It should be in one place */
-int TestIntercomm( MPI_Comm comm )
-{
- int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
- int errs = 0, wrank, nsize;
- char commname[MPI_MAX_OBJECT_NAME+1];
- MPI_Request *reqs;
-
- MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
- MPI_Comm_size( comm, &local_size );
- MPI_Comm_remote_size( comm, &remote_size );
- MPI_Comm_rank( comm, &rank );
- MPI_Comm_get_name( comm, commname, &nsize );
-
- MTestPrintfMsg( 1, "Testing communication on intercomm %s\n", commname );
-
- reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
- if (!reqs) {
- printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufs = (int **) malloc( remote_size * sizeof(int *) );
- if (!bufs) {
- printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
- wrank, remote_size, commname );
- errs++;
- return errs;
- }
- bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
- if (!bufmem) {
- printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
- wrank, 2*remote_size, commname );
- errs++;
- return errs;
- }
-
- /* Each process sends a message containing its own rank and the
- rank of the destination with a nonblocking send. Because we're using
- nonblocking sends, we need to use different buffers for each isend */
- for (j=0; j<remote_size; j++) {
- bufs[j] = &bufmem[2*j];
- bufs[j][0] = rank;
- bufs[j][1] = j;
- MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
- }
-
- for (j=0; j<remote_size; j++) {
- MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
- if (rbuf[0] != j) {
- printf( "[%d] Expected rank %d but saw %d in %s\n",
- wrank, j, rbuf[0], commname );
- errs++;
- }
- if (rbuf[1] != rank) {
- printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
- wrank, rank, rbuf[1], j, commname );
- errs++;
- }
- }
- if (errs)
- fflush(stdout);
- MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );
-
- free( reqs );
- free( bufs );
- free( bufmem );
-
- return errs;
-}
*/
#define MAX_DATA_LEN 100
-int main( int argc, char *argv[] )
+int main(int argc, char *argv[])
{
int errs = 0, recvlen, isLeft;
MPI_Status status;
int rank, size;
- MPI_Comm intercomm;
+ MPI_Comm intercomm;
char buf[MAX_DATA_LEN];
const char *test_str = "test";
- MTest_Init( &argc, &argv );
+ MTest_Init(&argc, &argv);
- MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 2) {
- fprintf( stderr, "This test requires at least two processes." );
- MPI_Abort( MPI_COMM_WORLD, 1 );
- exit(1);
+ fprintf(stderr, "This test requires at least two processes.");
+ MPI_Abort(MPI_COMM_WORLD, 1);
}
- while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
- if (intercomm == MPI_COMM_NULL) continue;
+ while (MTestGetIntercomm(&intercomm, &isLeft, 2)) {
+ if (intercomm == MPI_COMM_NULL)
+ continue;
MPI_Comm_rank(intercomm, &rank);
/* 0 ranks on each side communicate, everyone else does nothing */
- if(rank == 0) {
+ if (rank == 0) {
if (isLeft) {
recvlen = -1;
MPI_Probe(0, 0, intercomm, &status);
MPI_Get_count(&status, MPI_CHAR, &recvlen);
if (recvlen != (strlen(test_str) + 1)) {
- printf(" Error: recvlen (%d) != strlen(\"%s\")+1 (%d)\n", recvlen, test_str, (int)strlen(test_str) + 1);
+ printf(" Error: recvlen (%d) != strlen(\"%s\")+1 (%d)\n", recvlen, test_str,
+ (int) strlen(test_str) + 1);
++errs;
}
buf[0] = '\0';
MPI_Recv(buf, recvlen, MPI_CHAR, 0, 0, intercomm, &status);
- if (strcmp(test_str,buf)) {
+ if (strcmp(test_str, buf)) {
printf(" Error: strcmp(test_str,buf)!=0\n");
++errs;
}
}
else {
strncpy(buf, test_str, 5);
- MPI_Send(buf, strlen(buf)+1, MPI_CHAR, 0, 0, intercomm);
+ MPI_Send(buf, strlen(buf) + 1, MPI_CHAR, 0, 0, intercomm);
}
}
MTestFreeComm(&intercomm);
}
- MTest_Finalize( errs );
+ MTest_Finalize(errs);
MPI_Finalize();
return 0;
}