1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * (C) 2001 by Argonne National Laboratory.
 * See COPYRIGHT in top-level directory.
 */
/* One-Sided MPI 2-D Strided Accumulate Test
 *
 * Author: James Dinan <dinan@mcs.anl.gov>
 * Date  : December, 2010
 *
 * This code performs N accumulates into a 2d patch of a shared array. The
 * array has dimensions [X, Y] and the subarray has dimensions [SUB_X, SUB_Y]
 * and begins at index [0, 0]. The input and output buffers are specified
 * using an MPI indexed type.
 */
31 int main(int argc, char **argv)
/* Driver: each rank repeatedly accumulates a strided 2-D patch into its
 * right-hand neighbor's window, then validates its own window contents.
 *
 * NOTE(review): this view of the file is incomplete — closing braces,
 * preprocessor guards, and the definitions of XDIM/YDIM/SUB_XDIM/SUB_YDIM/
 * ITERATIONS/SQUELCH and the `MPI_Win buf_win` declaration are not visible.
 * Comments below flag the spots that look damaged by extraction. */
/* NOTE(review): `errors` is declared but never visibly initialized or
 * incremented here — presumably the validation-failure branches (stripped
 * from this view) do `errors++`; confirm against the upstream test. */
33     int itr, i, j, rank, nranks, peer, bufsize, errors;
34     double *win_buf, *src_buf;
    /* MTest_Init wraps MPI_Init for the MPICH test harness. */
37     MTest_Init(&argc, &argv);
39     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
40     MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    /* Window and source buffers both cover the full XDIM x YDIM array. */
42     bufsize = XDIM * YDIM * sizeof(double);
43     MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
44     MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
    /* Window starts at -1.0 everywhere; each rank's source data is
     * (1.0 + rank) so the origin of an accumulate is identifiable. */
46     for (i = 0; i < XDIM * YDIM; i++) {
47         *(win_buf + i) = -1.0;
48         *(src_buf + i) = 1.0 + rank;
    /* Expose win_buf with displacement unit 1 (byte offsets).
     * NOTE(review): `buf_win` is not declared in the visible lines. */
51     MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
    /* Ring pattern: every rank targets its right-hand neighbor. */
53     peer = (rank + 1) % nranks;
55     /* Perform ITERATIONS strided accumulate operations */
57     for (itr = 0; itr < ITERATIONS; itr++) {
58         MPI_Aint idx_loc[SUB_YDIM];
59         int idx_rem[SUB_YDIM];
60         int blk_len[SUB_YDIM];
61         MPI_Datatype src_type, dst_type;
        /* One block per row of the patch: SUB_XDIM contiguous doubles,
         * rows XDIM elements apart. idx_loc holds absolute addresses
         * (for MPI_BOTTOM-based sends), idx_rem element displacements. */
63         for (i = 0; i < SUB_YDIM; i++) {
64             MPI_Get_address(&src_buf[i * XDIM], &idx_loc[i]);
65             idx_rem[i] = i * XDIM;
66             blk_len[i] = SUB_XDIM;
        /* NOTE(review): the next two calls BOTH construct `src_type` —
         * in the upstream test these are alternative branches under
         * `#ifdef ABSOLUTE` (hindexed over absolute addresses) vs.
         * `#else` (indexed over displacements); the preprocessor lines
         * appear to have been stripped. As written, the first result is
         * leaked and overwritten — restore the guards. Note also that
         * MPI_Type_hindexed is the deprecated MPI-1 name; the MPI-2+
         * spelling is MPI_Type_create_hindexed. */
70         MPI_Type_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_DOUBLE, &src_type);
72         MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
74         MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
76         MPI_Type_commit(&src_type);
77         MPI_Type_commit(&dst_type);
        /* Passive-target epoch on the peer's window. */
79         MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
        /* NOTE(review): likewise, these two accumulates are the
         * ABSOLUTE (MPI_BOTTOM + absolute-address type) and relative
         * (src_buf origin) variants of the same operation; only one
         * should execute per build configuration. */
82         MPI_Accumulate(MPI_BOTTOM, 1, src_type, peer, 0, 1, dst_type, MPI_SUM, buf_win);
84         MPI_Accumulate(src_buf, 1, src_type, peer, 0, 1, dst_type, MPI_SUM, buf_win);
87         MPI_Win_unlock(peer, buf_win);
89         MPI_Type_free(&src_type);
90         MPI_Type_free(&dst_type);
    /* All accumulates must land before anyone validates. */
93     MPI_Barrier(MPI_COMM_WORLD);
95     /* Verify that the results are correct */
    /* Lock own window: local loads from a window should be done inside
     * an access epoch. */
97     MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
    /* Inside the patch: initial -1.0 plus ITERATIONS deposits of
     * (1.0 + left-neighbor rank). */
99     for (i = 0; i < SUB_XDIM; i++) {
100         for (j = 0; j < SUB_YDIM; j++) {
101             const double actual = *(win_buf + i + j * XDIM);
102             const double expected = -1.0 + (1.0 + ((rank + nranks - 1) % nranks)) * (ITERATIONS);
103             if (fabs(actual - expected) > 1.0e-10) {
104                 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
105                                rank, j, i, expected, actual););
    /* Right of the patch (columns SUB_XDIM..XDIM-1): must be untouched. */
111     for (i = SUB_XDIM; i < XDIM; i++) {
112         for (j = 0; j < SUB_YDIM; j++) {
113             const double actual = *(win_buf + i + j * XDIM);
114             const double expected = -1.0;
115             if (fabs(actual - expected) > 1.0e-10) {
116                 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
117                                rank, j, i, expected, actual););
    /* Below the patch (rows SUB_YDIM..YDIM-1): must be untouched. */
123     for (i = 0; i < XDIM; i++) {
124         for (j = SUB_YDIM; j < YDIM; j++) {
125             const double actual = *(win_buf + i + j * XDIM);
126             const double expected = -1.0;
127             if (fabs(actual - expected) > 1.0e-10) {
128                 SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
129                                rank, j, i, expected, actual););
135     MPI_Win_unlock(rank, buf_win);
    /* Release the window before freeing the memory it exposes. */
137     MPI_Win_free(&buf_win);
138     MPI_Free_mem(win_buf);
139     MPI_Free_mem(src_buf);
    /* MTest_Finalize reduces `errors` across ranks and calls MPI_Finalize. */
141     MTest_Finalize(errors);
143     return MTestReturnValue(errors);