/* Creator: Bronis R. de Supinski (bronis@llnl.gov) Thu Oct 17, 2002 */
/* pt2pt-byte-int-mismatch.c -- do lots of pt-2-pt ops with type mismatches */

static char *rcsid =
  "$Header: /usr/gapps/asde/cvs-vault/umpire/tests/pt2pt-byte-int-mismatch.c,v 1.1 2002/10/24 17:04:56 bronis Exp $";

#include <stdio.h>
#include <strings.h>   /* bzero */
#include <assert.h>
#include "mpi.h"
#define BUF_SIZE 128   /* assumed value; the original definition was elided */

#define NUM_SEND_TYPES 8
#define NUM_PERSISTENT_SEND_TYPES 4
#define NUM_BSEND_TYPES 2
#define NUM_COMPLETION_MECHANISMS 8
#define NUM_RECV_TYPES 2
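/*
 * Rank 0 exercises eight send types: four transient (MPI_Isend, MPI_Ibsend,
 * MPI_Irsend, MPI_Issend) and four persistent (MPI_Send_init, MPI_Bsend_init,
 * MPI_Rsend_init, MPI_Ssend_init). Each type sends two buffers to rank 1:
 * the even tag is received as MPI_INT (types match), the odd tag as MPI_BYTE
 * (types mismatch). All eight completion mechanisms are tried on both sides,
 * with both transient and persistent receives.
 */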
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  int namelen = 128;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int bbuf[(BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES];
  int buf[BUF_SIZE * 2 * NUM_SEND_TYPES];
  int i, j, k, l, m, at_size, send_t_number, index, outcount, total, flag;
  int num_errors, error_count, indices[2 * NUM_SEND_TYPES];
  MPI_Request aReq[2 * NUM_SEND_TYPES];
  MPI_Status aStatus[2 * NUM_SEND_TYPES];
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  MPI_Buffer_attach (bbuf, sizeof(int) *
                     (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);
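  /*
   * Note that MPI_BSEND_OVERHEAD is a byte count, so multiplying it by
   * sizeof(int) over-provisions the attached buffer; that is safe, since the
   * buffer only needs to cover the two buffered-send flavors (MPI_Ibsend and
   * MPI_Bsend_init), each with two outstanding messages of BUF_SIZE ints.
   */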
  if (rank == 0) {
    /* set up persistent sends... */
    send_t_number = NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES;

    MPI_Send_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                   1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Send_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                   BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                   comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Bsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Bsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Rsend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Rsend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                    comm, &aReq[send_t_number * 2 + 1]);

    send_t_number++;

    MPI_Ssend_init (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                    1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
    MPI_Ssend_init (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                    BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                    comm, &aReq[send_t_number * 2 + 1]);
  }
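  /*
   * The persistent send requests above occupy aReq[8..15], two per send type.
   * They are created once, activated with MPI_Start or MPI_Startall on every
   * iteration below, and released by MPI_Request_free at the end of the run.
   */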
  for (m = 0; m < NUM_RECV_TYPES; m++) {
    if ((m == 1) && (rank == 1)) {
      /* set up the persistent receives... */
      for (j = 0; j < NUM_SEND_TYPES * 2; j += 2) {
        /* even tags: receive BUF_SIZE ints as MPI_INT (types match) */
        MPI_Recv_init (&buf[j * BUF_SIZE],
                       BUF_SIZE, MPI_INT, 0, j, comm, &aReq[j]);
        /* odd tags: receive the same data as BUF_SIZE * sizeof(int)
           MPI_BYTEs -- the byte count matches but the type does not */
        MPI_Recv_init (&buf[(j + 1) * BUF_SIZE],
                       BUF_SIZE * sizeof(int),
                       MPI_BYTE, 0, j + 1, comm, &aReq[j + 1]);
      }
    }
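    /*
     * The loop nest below varies the completion mechanism independently on
     * each side: l selects rank 1's mechanism and k selects rank 0's. Both
     * run to NUM_COMPLETION_MECHANISMS * 2 so that the low-order bit can
     * also alternate between MPI_Start and MPI_Startall for the persistent
     * requests.
     */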
    for (l = 0; l < (NUM_COMPLETION_MECHANISMS * 2); l++) {
      for (k = 0; k < (NUM_COMPLETION_MECHANISMS * 2); k++) {
        if (rank == 0) {
          /* initialize all of the send buffers */
          for (j = 0; j < NUM_SEND_TYPES; j++) {
            for (i = 0; i < BUF_SIZE; i++) {
              buf[2 * j * BUF_SIZE + i] = i;
              buf[((2 * j + 1) * BUF_SIZE) + i] = BUF_SIZE - 1 - i;
            }
          }
        }
        else if (rank == 1) {
          /* zero out all of the receive buffers */
          bzero (buf, sizeof(int) * BUF_SIZE * 2 * NUM_SEND_TYPES);
        }

        MPI_Barrier (MPI_COMM_WORLD);
        if (rank == 0) {
          /* set up transient sends... */
          send_t_number = 0;

          MPI_Isend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                     1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
          MPI_Isend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                     BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                     comm, &aReq[send_t_number * 2 + 1]);

          send_t_number++;

          MPI_Ibsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                      1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
          MPI_Ibsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                      BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                      comm, &aReq[send_t_number * 2 + 1]);

          send_t_number++;

          /* Barrier to ensure receives are posted for rsends... */
          MPI_Barrier (MPI_COMM_WORLD);

          MPI_Irsend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                      1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
          MPI_Irsend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                      BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                      comm, &aReq[send_t_number * 2 + 1]);

          send_t_number++;

          MPI_Issend (&buf[send_t_number * 2 * BUF_SIZE], BUF_SIZE, MPI_INT,
                      1, send_t_number * 2, comm, &aReq[send_t_number * 2]);
          MPI_Issend (&buf[(send_t_number * 2 + 1) * BUF_SIZE],
                      BUF_SIZE, MPI_INT, 1, send_t_number * 2 + 1,
                      comm, &aReq[send_t_number * 2 + 1]);

          send_t_number++;

          /* just to be paranoid */
          assert (send_t_number ==
                  NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);
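          /*
           * The transient sends above filled aReq[0..7]; once the
           * persistent sends are started, all 2 * NUM_SEND_TYPES
           * requests are active and must be completed below.
           */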
          /* start the persistent sends... */
          if (k % 2) {
            MPI_Startall (NUM_PERSISTENT_SEND_TYPES * 2,
                          &aReq[2 * send_t_number]);
          }
          else {
            for (j = 0; j < NUM_PERSISTENT_SEND_TYPES * 2; j++) {
              MPI_Start (&aReq[2 * send_t_number + j]);
            }
          }
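          /*
           * Whichever mechanism k/2 selects must complete all sixteen
           * requests before the iteration ends; a request left pending
           * here would be reused incorrectly on the next iteration.
           */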
          /* complete the sends */
          switch (k / 2) {
          case 0:
            /* use MPI_Wait */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              MPI_Wait (&aReq[j], &aStatus[j]);
            }
            break;
          case 1:
            /* use MPI_Waitall */
            MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
            break;
          case 2:
            /* use MPI_Waitany */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
            }
            break;
          case 3:
            /* use MPI_Waitsome */
            total = 0;
            while (total < NUM_SEND_TYPES * 2) {
              MPI_Waitsome (NUM_SEND_TYPES * 2, aReq,
                            &outcount, indices, aStatus);
              total += outcount;
            }
            break;
          case 4:
            /* use MPI_Test */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              flag = 0;
              while (!flag)
                MPI_Test (&aReq[j], &flag, &aStatus[j]);
            }
            break;
          case 5:
            /* use MPI_Testall */
            flag = 0;
            while (!flag)
              MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
            break;
          case 6:
            /* use MPI_Testany */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              flag = 0;
              while (!flag)
                MPI_Testany (NUM_SEND_TYPES * 2, aReq,
                             &index, &flag, aStatus);
            }
            break;
          case 7:
            /* use MPI_Testsome */
            total = 0;
            while (total < NUM_SEND_TYPES * 2) {
              outcount = 0;
              while (!outcount)
                MPI_Testsome (NUM_SEND_TYPES * 2, aReq,
                              &outcount, indices, aStatus);
              total += outcount;
            }
            break;
          }
        }
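        /*
         * Rank 1 mirrors the same eight completion mechanisms on the
         * receive side, driven by l rather than k, so every combination
         * of send-side and receive-side completion is exercised.
         */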
        else if (rank == 1) {
          /* start receives for all of the sends */
          if (m == 0) {
            /* transient receives, with the same int/byte pairing as the
               persistent ones set up above */
            for (j = 0; j < NUM_SEND_TYPES * 2; j += 2) {
              MPI_Irecv (&buf[j * BUF_SIZE],
                         BUF_SIZE, MPI_INT, 0, j, comm, &aReq[j]);
              MPI_Irecv (&buf[(j + 1) * BUF_SIZE],
                         BUF_SIZE * sizeof(int),
                         MPI_BYTE, 0, j + 1, comm, &aReq[j + 1]);
            }
          }
          else {
            /* start the persistent receives... */
            if (l % 2) {
              MPI_Startall (NUM_SEND_TYPES * 2, aReq);
            }
            else {
              for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
                MPI_Start (&aReq[j]);
              }
            }
          }

          /* Barrier to ensure receives are posted for rsends... */
          MPI_Barrier (MPI_COMM_WORLD);
          /* complete all of the receives... */
          switch (l / 2) {
          case 0:
            /* use MPI_Wait */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              MPI_Wait (&aReq[j], &aStatus[j]);
            }
            break;
          case 1:
            /* use MPI_Waitall */
            MPI_Waitall (NUM_SEND_TYPES * 2, aReq, aStatus);
            break;
          case 2:
            /* use MPI_Waitany */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              MPI_Waitany (NUM_SEND_TYPES * 2, aReq, &index, aStatus);
            }
            break;
          case 3:
            /* use MPI_Waitsome */
            total = 0;
            while (total < NUM_SEND_TYPES * 2) {
              MPI_Waitsome (NUM_SEND_TYPES * 2, aReq,
                            &outcount, indices, aStatus);
              total += outcount;
            }
            break;
          case 4:
            /* use MPI_Test */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              flag = 0;
              while (!flag)
                MPI_Test (&aReq[j], &flag, &aStatus[j]);
            }
            break;
          case 5:
            /* use MPI_Testall */
            flag = 0;
            while (!flag)
              MPI_Testall (NUM_SEND_TYPES * 2, aReq, &flag, aStatus);
            break;
          case 6:
            /* use MPI_Testany */
            for (j = 0; j < NUM_SEND_TYPES * 2; j++) {
              flag = 0;
              while (!flag)
                MPI_Testany (NUM_SEND_TYPES * 2, aReq,
                             &index, &flag, aStatus);
            }
            break;
          case 7:
            /* use MPI_Testsome */
            total = 0;
            while (total < NUM_SEND_TYPES * 2) {
              outcount = 0;
              while (!outcount)
                MPI_Testsome (NUM_SEND_TYPES * 2, aReq,
                              &outcount, indices, aStatus);
              total += outcount;
            }
            break;
          }
        }
        else {
          /* Barrier to ensure receives are posted for rsends... */
          MPI_Barrier (MPI_COMM_WORLD);
        }

        MPI_Barrier (MPI_COMM_WORLD);
      }
    }
  }
  if (rank == 0) {
    /* free the persistent send requests */
    for (i = 2 * (NUM_SEND_TYPES - NUM_PERSISTENT_SEND_TYPES);
         i < 2 * NUM_SEND_TYPES; i++) {
      MPI_Request_free (&aReq[i]);
    }
  }
  else if (rank == 1) {
    /* free the persistent receive requests */
    for (i = 0; i < 2 * NUM_SEND_TYPES; i++) {
      MPI_Request_free (&aReq[i]);
    }
  }
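  /*
   * Completion does not free persistent requests; it merely deactivates
   * them, so they must be released explicitly with MPI_Request_free.
   */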
  MPI_Buffer_detach (bbuf, &at_size);
  assert (at_size ==
          sizeof(int) * (BUF_SIZE + MPI_BSEND_OVERHEAD) * 2 * NUM_BSEND_TYPES);

  MPI_Finalize ();
  printf ("(%d) Finished normally\n", rank);

  return 0;
}
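/*
 * Usage note: run with at least two tasks, e.g. "mpirun -np 2
 * pt2pt-byte-int-mismatch". The odd-tag transfers send MPI_INTs but
 * receive MPI_BYTEs; the byte counts agree, so many MPIs run to
 * completion, but the type signatures do not match, which is the
 * error a checker such as Umpire should flag.
 */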