/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *  (C) 2007 by Argonne National Laboratory.
 *      See COPYRIGHT in top-level directory.
 */
/*
 * This program tests that MPI_Comm_create applies to intercommunicators;
 * this is an extension added in MPI-2.
 */
17 int TestIntercomm( MPI_Comm );
19 int main( int argc, char *argv[] )
/* Test driver: for each intercommunicator supplied by the MTest harness,
 * exercises MPI_Comm_create three ways: (1) a one-rank subgroup on each
 * side, (2) a "manual dup" using the full local group, and (3) an empty
 * group on one side, which must yield MPI_COMM_NULL everywhere.
 * NOTE(review): this excerpt has gaps (the embedded numbering skips lines)
 * -- several 'if' headers, braces, and errs++ updates are not visible;
 * comments below describe only what the visible code shows. */
22 int size, isLeft, wrank;
23 MPI_Comm intercomm, newcomm;
24 MPI_Group oldgroup, newgroup;
/* Initialize the MTest harness (wraps MPI_Init and error bookkeeping). */
26 MTest_Init( &argc, &argv );
28 MPI_Comm_size( MPI_COMM_WORLD, &size );
/* Abort small runs; presumably guarded by 'if (size < 4)' on a line not
 * visible in this excerpt -- TODO confirm against the full file. */
30 printf( "This test requires at least 4 processes\n" );
31 MPI_Abort( MPI_COMM_WORLD, 1 );
33 MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
/* Iterate over the harness's catalogue of intercommunicators; isLeft
 * reports which side of each intercomm this rank belongs to. */
35 while (MTestGetIntercomm( &intercomm, &isLeft, 2 )) {
36 int ranks[10], nranks, result;
/* The harness may return MPI_COMM_NULL for shapes it cannot build here. */
38 if (intercomm == MPI_COMM_NULL) continue;
/* Local group of the intercomm; also reused for the manual-dup case. */
40 MPI_Comm_group( intercomm, &oldgroup );
/* Case 1: one-rank subgroup per side.  ranks/nranks are set on lines not
 * shown -- presumably ranks[0] = 0, nranks = 1; TODO confirm. */
43 MTestPrintfMsg( 1, "Creating a new intercomm 0-0\n" );
44 MPI_Group_incl( oldgroup, nranks, ranks, &newgroup );
45 MPI_Comm_create( intercomm, newgroup, &newcomm );
47 /* Make sure that the new communicator has the appropriate pieces */
48 if (newcomm != MPI_COMM_NULL) {
49 int new_rsize, new_size, flag, commok = 1;
51 MPI_Comm_set_name( newcomm, (char*)"Single rank in each group" );
/* Creating from an intercomm must yield an intercomm (flag true). */
52 MPI_Comm_test_inter( intercomm, &flag );
55 printf( "[%d] Output communicator is not an intercomm\n",
60 MPI_Comm_remote_size( newcomm, &new_rsize );
61 MPI_Comm_size( newcomm, &new_size );
62 /* The new communicator has 1 process in each group */
65 printf( "[%d] Remote size is %d, should be one\n",
71 printf( "[%d] Local size is %d, should be one\n",
/* Exercise point-to-point traffic across the 1x1 intercomm. */
77 errs += TestIntercomm( newcomm );
80 MPI_Group_free( &newgroup );
81 if (newcomm != MPI_COMM_NULL) {
82 MPI_Comm_free( &newcomm );
85 /* Now, do a sort of dup, using the original group */
/* Case 2: create with the full local group -- the result must be
 * MPI_CONGRUENT with the original (same groups, distinct context). */
86 MTestPrintfMsg( 1, "Creating a new intercomm (manual dup)\n" );
87 MPI_Comm_create( intercomm, oldgroup, &newcomm );
88 MPI_Comm_set_name( newcomm, (char*)"Dup of original" );
89 MTestPrintfMsg( 1, "Creating a new intercomm (manual dup (done))\n" );
91 MPI_Comm_compare( intercomm, newcomm, &result );
92 MTestPrintfMsg( 1, "Result of comm/intercomm compare is %d\n", result );
93 if (result != MPI_CONGRUENT) {
/* Map the compare code to a printable name for the error report. */
97 case MPI_IDENT: rname = "IDENT"; break;
98 case MPI_CONGRUENT: rname = "CONGRUENT"; break;
99 case MPI_SIMILAR: rname = "SIMILAR"; break;
100 case MPI_UNEQUAL: rname = "UNEQUAL"; break;
101 printf( "[%d] Expected MPI_CONGRUENT but saw %d (%s)",
102 wrank, result, rname ); fflush(stdout);
106 /* Try to communication between each member of intercomm */
107 errs += TestIntercomm( newcomm );
110 if (newcomm != MPI_COMM_NULL) {
111 MPI_Comm_free(&newcomm);
113 /* test that an empty group in either side of the intercomm results in
114 * MPI_COMM_NULL for all members of the comm */
116 /* left side reuses oldgroup, our local group in intercomm */
/* The two MPI_Comm_create calls below are presumably selected by an
 * 'if (isLeft) ... else ...' on lines not visible here -- TODO confirm. */
117 MPI_Comm_create(intercomm, oldgroup, &newcomm);
120 /* right side passes MPI_GROUP_EMPTY */
121 MPI_Comm_create(intercomm, MPI_GROUP_EMPTY, &newcomm);
/* Case 3: every rank must get MPI_COMM_NULL when one side is empty. */
123 if (newcomm != MPI_COMM_NULL) {
124 printf("[%d] expected MPI_COMM_NULL, but got a different communicator\n", wrank); fflush(stdout);
128 if (newcomm != MPI_COMM_NULL) {
129 MPI_Comm_free(&newcomm);
/* Release per-iteration resources before fetching the next intercomm. */
131 MPI_Group_free( &oldgroup );
132 MPI_Comm_free( &intercomm );
/* Report the aggregated error count and finalize MPI via the harness. */
135 MTest_Finalize(errs);
142 int TestIntercomm( MPI_Comm comm )
/* Smoke-tests point-to-point traffic on an intercommunicator: every local
 * rank isends the pair {own rank, destination rank} to every remote rank,
 * then receives the matching pairs and validates both fields.  Returns the
 * number of failures observed (accumulated in errs).
 * NOTE(review): this excerpt ends at the MPI_Waitall -- the frees of
 * reqs/bufs/bufmem and the 'return errs' presumably follow on lines not
 * visible here.  The declaration of 'reqs' (MPI_Request *) is also on a
 * line not shown. */
144 int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
145 int errs = 0, wrank, nsize;
146 char commname[MPI_MAX_OBJECT_NAME+1];
/* World rank is used only to tag diagnostic output. */
149 MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
150 MPI_Comm_size( comm, &local_size );
151 MPI_Comm_remote_size( comm, &remote_size );
152 MPI_Comm_rank( comm, &rank );
153 MPI_Comm_get_name( comm, commname, &nsize );
155 MTestPrintfMsg( 1, "Testing communication on intercomm '%s', remote_size=%d\n",
156 commname, remote_size );
/* One pending isend per remote rank.  The allocation-failure printfs
 * below are presumably guarded by 'if (!ptr)' headers on lines not
 * visible in this excerpt -- TODO confirm. */
158 reqs = (MPI_Request *)malloc( remote_size * sizeof(MPI_Request) );
160 printf( "[%d] Unable to allocated %d requests for testing intercomm %s\n",
161 wrank, remote_size, commname );
165 bufs = (int **) malloc( remote_size * sizeof(int *) );
167 printf( "[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
168 wrank, remote_size, commname );
/* Single backing slab; bufs[j] will point at its j-th 2-int slice. */
172 bufmem = (int *) malloc( remote_size * 2 * sizeof(int) );
174 printf( "[%d] Unable to allocated %d int data for testing intercomm %s\n",
175 wrank, 2*remote_size, commname );
180 /* Each process sends a message containing its own rank and the
181 rank of the destination with a nonblocking send. Because we're using
182 nonblocking sends, we need to use different buffers for each isend */
183 /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although
184 it doesn't really hurt to keep separate buffers for our purposes */
185 for (j=0; j<remote_size; j++) {
186 bufs[j] = &bufmem[2*j];
/* bufs[j][0]=rank and bufs[j][1]=j are presumably filled on lines not
 * shown before the send -- the receive side checks exactly that pair. */
189 MPI_Isend( bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j] );
191 MTestPrintfMsg( 2, "isends posted, about to recv\n" );
/* Receive one pair from each remote rank, in rank order, on tag 0. */
193 for (j=0; j<remote_size; j++) {
194 MPI_Recv( rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE );
/* rbuf[0] must be the sender's rank j in the remote group... */
196 printf( "[%d] Expected rank %d but saw %d in %s\n",
197 wrank, j, rbuf[0], commname );
/* ...and rbuf[1] must be our own rank (the sender's target). */
200 if (rbuf[1] != rank) {
201 printf( "[%d] Expected target rank %d but saw %d from %d in %s\n",
202 wrank, rank, rbuf[1], j, commname );
208 MTestPrintfMsg( 2, "my recvs completed, about to waitall\n" );
/* Complete all outstanding isends; the send buffers must stay valid
 * until this returns. */
209 MPI_Waitall( remote_size, reqs, MPI_STATUSES_IGNORE );