/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Oct 17, 2002 */

/* partial-recv-exhaustive.c -- do lots of pt-2-pt ops w/oversized recv bufs */

#ifndef lint
static char *rcsid =
  "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/partial-recv-exhaustive.c,v 1.1 2002/10/24 17:04:56 bronis Exp $";
#endif
#include <stdio.h>
#include <strings.h>		/* bzero */
#include <assert.h>
#include "mpi.h"

/* NOTE: the original BUF_SIZE/SLOP definitions were lost from this copy;
   the values below are assumed, not authoritative */
#define BUF_SIZE 10
#define SLOP 128

#define NUM_SEND_TYPES 8
#define NUM_PERSISTENT_SEND_TYPES 4
#define NUM_BSEND_TYPES 2
#define NUM_COMPLETION_MECHANISMS 8
#define NUM_RECV_TYPES 2
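/* Overview: rank 0 drives two messages through each of the eight send
   modes (Isend/Ibsend/Irsend/Issend plus the four persistent *_init
   forms) while rank 1 posts matching receives whose counts exceed the
   message length by SLOP on every odd tag, exercising partial receives
   against every pairing of request-start and request-completion
   mechanism */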
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES];
  void *bbuf_addr;		/* receives the buffer address at detach time */
  int buf[BUF_SIZE * 2 * NUM_SEND_TYPES + SLOP];
  int i, j, k, l, m, at_size, send_t_number, index, outcount, total, flag;
  int num_errors, error_count, indices[2 * NUM_SEND_TYPES];
  MPI_Request aReq[2 * NUM_SEND_TYPES];
  MPI_Status aStatus[2 * NUM_SEND_TYPES];
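  /* two requests and statuses per send type: slot 2*t carries the
     even-tagged message of type t, slot 2*t + 1 the odd-tagged one
     whose matching receive is oversized by SLOP */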
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  MPI_Buffer_attach (bbuf, sizeof(int) *
		     (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);
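  /* the attach size leaves room for both messages of both buffered send
     types at once; scaling MPI_BSEND_OVERHEAD by sizeof(int) as well
     over-provisions the per-message overhead, which is harmless */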
  if (rank == 0) {
    /* set up persistent sends... */
    send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES;

    MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		   1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		   BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		   comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		    comm, &aReq[send_t_number * 2 + 1]);
  }
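  /* the persistent send requests now occupy the upper half of aReq
     (indices 8..15 with the default constants); they are restarted on
     every inner iteration below and freed only at the end */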
  for (m = 0; m < NUM_RECV_TYPES; m++) {
    if ((m == 1) && (rank == 1)) {
      /* set up the persistent receives... */
      for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	MPI_Recv_init (&buf[j * BUF_SIZE],
		       BUF_SIZE + (j % 2) * SLOP,
		       MPI_INT, 0, j, comm, &aReq[j]);
      }
    }
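    /* every odd-tagged receive asks for SLOP more ints than the sender
       will deliver; MPI permits this (the count is only an upper bound)
       and the actual length must be recovered from the status with
       MPI_Get_count */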
    for (l = 0; l < (NUM_COMPLETION_MECHANISMS * 2); l++) {
      for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) {
	if (rank == 0) {
	  /* initialize all of the send buffers */
	  for (j = 0; j < NUM_SEND_TYPES; j++) {
	    for (i = 0; i < BUF_SIZE; i++) {
	      buf[2 * j * BUF_SIZE + i] = i;
	      buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i;
	    }
	  }
	}
	else if (rank == 1) {
	  /* zero out all of the receive buffers */
	  bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES);
	}

	MPI_Barrier (MPI_COMM_WORLD);
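	/* this barrier separates buffer (re)initialization from the
	   communication phase of each iteration */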
	if (rank == 0) {
	  /* set up transient sends... */
	  send_t_number = 0;

	  MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		     1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
	  MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		     BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		     comm, &aReq[send_t_number * 2 + 1]);

	  send_t_number++;

	  MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		      1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
	  MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		      BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		      comm, &aReq[send_t_number * 2 + 1]);

	  send_t_number++;

	  /* Barrier to ensure receives are posted for rsends... */
	  MPI_Barrier (MPI_COMM_WORLD);

	  MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		      1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
	  MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		      BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		      comm, &aReq[send_t_number * 2 + 1]);

	  send_t_number++;

	  MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
		      1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
	  MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
		      BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
		      comm, &aReq[send_t_number * 2 + 1]);

	  send_t_number++;

	  /* just to be paranoid */
	  assert (send_t_number ==
		  NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);
	  /* start the persistent sends... */
	  if (k % 2) {
	    MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2,
			  &aReq[2 * send_t_number]);
	  }
	  else {
	    for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) {
	      MPI_Start (&aReq[2 * send_t_number + j]);
	    }
	  }
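	  /* the low bit of k selects MPI_Startall vs. one MPI_Start per
	     request; k/2 selects the completion mechanism below, so the
	     k loop covers all NUM_COMPLETION_MECHANISMS * 2 combinations */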
	  /* complete the sends */
	  switch (k / 2) {
	  case 0:
	    /* use MPI_Wait */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      MPI_Wait (&aReq[j], &aStatus[j]);
	    }
	    break;
	  case 1:
	    /* use MPI_Waitall */
	    MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
	    break;
	  case 2:
	    /* use MPI_Waitany */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
	    }
	    break;
	  case 3:
	    /* use MPI_Waitsome */
	    total = 0;
	    while (total < NUM_SEND_TYPES * 2) {
	      MPI_Waitsome (NUM_SEND_TYPES * 2, aReq,
			    &outcount, indices, aStatus);
	      total += outcount;
	    }
	    break;
	  case 4:
	    /* use MPI_Test, spinning until each request completes */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      flag = 0;
	      while (!flag) {
		MPI_Test (&aReq[j], &flag, &aStatus[j]);
	      }
	    }
	    break;
	  case 5:
	    /* use MPI_Testall */
	    flag = 0;
	    while (!flag) {
	      MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
	    }
	    break;
	  case 6:
	    /* use MPI_Testany */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      flag = 0;
	      while (!flag) {
		MPI_Testany (NUM_SEND_TYPES * 2, aReq,
			     &index, &flag, aStatus);
	      }
	    }
	    break;
	  case 7:
	    /* use MPI_Testsome */
	    total = 0;
	    while (total < NUM_SEND_TYPES * 2) {
	      outcount = 0;
	      while (!outcount) {
		MPI_Testsome (NUM_SEND_TYPES * 2, aReq,
			      &outcount, indices, aStatus);
	      }
	      total += outcount;
	    }
	    break;
	  default:
	    assert (0);
	    break;
	  }
	}
	else if (rank == 1) {
	  /* start receives for all of the sends */
	  if (m == 0) {
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      MPI_Irecv (&buf[j * BUF_SIZE],
			 BUF_SIZE + (j % 2) * SLOP,
			 MPI_INT, 0, j, comm, &aReq[j]);
	    }
	  }
	  else {
	    /* start the persistent receives... */
	    if (l % 2) {
	      MPI_Startall (NUM_SEND_TYPES * 2, aReq);
	    }
	    else {
	      for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
		MPI_Start (&aReq[j]);
	      }
	    }
	  }
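	  /* on the m == 0 pass the receives are freshly posted Irecvs;
	     on the m == 1 pass the persistent requests created before
	     the l/k loops are simply restarted */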
	  /* Barrier to ensure receives are posted for rsends... */
	  MPI_Barrier (MPI_COMM_WORLD);

	  /* complete all of the receives... */
	  switch (l / 2) {
	  case 0:
	    /* use MPI_Wait */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      MPI_Wait (&aReq[j], &aStatus[j]);
	    }
	    break;
	  case 1:
	    /* use MPI_Waitall */
	    MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
	    break;
	  case 2:
	    /* use MPI_Waitany */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
	    }
	    break;
	  case 3:
	    /* use MPI_Waitsome */
	    total = 0;
	    while (total < NUM_SEND_TYPES * 2) {
	      MPI_Waitsome (NUM_SEND_TYPES * 2, aReq,
			    &outcount, indices, aStatus);
	      total += outcount;
	    }
	    break;
	  case 4:
	    /* use MPI_Test, spinning until each request completes */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      flag = 0;
	      while (!flag) {
		MPI_Test (&aReq[j], &flag, &aStatus[j]);
	      }
	    }
	    break;
	  case 5:
	    /* use MPI_Testall */
	    flag = 0;
	    while (!flag) {
	      MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
	    }
	    break;
	  case 6:
	    /* use MPI_Testany */
	    for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
	      flag = 0;
	      while (!flag) {
		MPI_Testany (NUM_SEND_TYPES * 2, aReq,
			     &index, &flag, aStatus);
	      }
	    }
	    break;
	  case 7:
	    /* use MPI_Testsome */
	    total = 0;
	    while (total < NUM_SEND_TYPES * 2) {
	      outcount = 0;
	      while (!outcount) {
		MPI_Testsome (NUM_SEND_TYPES * 2, aReq,
			      &outcount, indices, aStatus);
	      }
	      total += outcount;
	    }
	    break;
	  default:
	    assert (0);
	    break;
	  }
	}
	else {
	  /* Barrier to ensure receives are posted for rsends... */
	  MPI_Barrier (MPI_COMM_WORLD);
	}
      }
    }
  }

  MPI_Barrier (MPI_COMM_WORLD);
  if (rank == 0) {
    /* free the persistent send requests */
    for (i = 2 * (NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);
	 i < 2 * NUM_SEND_TYPES; i++) {
      MPI_Request_free (&aReq[i]);
    }
  }
  else if (rank == 1) {
    /* free the persistent receive requests */
    for (i = 0; i < 2 * NUM_SEND_TYPES; i++) {
      MPI_Request_free (&aReq[i]);
    }
  }

  /* MPI_Buffer_detach expects the ADDRESS of a pointer that receives
     the buffer address; passing bbuf itself would let MPI overwrite
     the start of the buffer with a pointer value */
  MPI_Buffer_detach (&bbuf_addr, &at_size);
  assert (at_size ==
	  sizeof(int) * (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);

  return 0;
}
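/* Needs at least two MPI processes (ranks 0 and 1 do all of the
   communication; any additional ranks only participate in the
   barriers), e.g.:

     mpicc partial-recv-exhaustive.c -o partial-recv-exhaustive
     mpirun -np 2 ./partial-recv-exhaustive
*/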