1 /* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
 * (C) 2001 by Argonne National Laboratory.
 * See COPYRIGHT in top-level directory.
 */
/* One-Sided MPI 2-D Strided Accumulate Test
 *
 * Author: James Dinan <dinan@mcs.anl.gov>
 * Date  : December, 2010
 *
 * This code performs N strided put operations followed by get operations into
 * a 2d patch of a shared array. The array has dimensions [X, Y] and the
 * subarray has dimensions [SUB_X, SUB_Y] and begins at index [0, 0]. The
 * input and output buffers are specified using an MPI indexed type.
 */
31 int main(int argc, char **argv) {
32 int i, j, rank, nranks, peer, bufsize, errors;
33 double *win_buf, *src_buf, *dst_buf;
36 MTest_Init(&argc, &argv);
38 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
39 MPI_Comm_size(MPI_COMM_WORLD, &nranks);
41 bufsize = XDIM * YDIM * sizeof(double);
42 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
43 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
44 MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &dst_buf);
46 for (i = 0; i < XDIM*YDIM; i++) {
47 *(win_buf + i) = -1.0;
48 *(src_buf + i) = 1.0 + rank;
51 MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
53 peer = (rank+1) % nranks;
55 /* Perform ITERATIONS strided accumulate operations */
57 for (i = 0; i < ITERATIONS; i++) {
58 int idx_rem[SUB_YDIM];
59 int blk_len[SUB_YDIM];
60 MPI_Datatype src_type, dst_type;
62 for (j = 0; j < SUB_YDIM; j++) {
64 blk_len[j] = SUB_XDIM;
67 MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
68 MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
70 MPI_Type_commit(&src_type);
71 MPI_Type_commit(&dst_type);
74 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
75 MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
76 1, dst_type, MPI_REPLACE, buf_win);
77 MPI_Win_unlock(peer, buf_win);
80 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
81 MPI_Get_accumulate(src_buf, 1, src_type, dst_buf, 1, src_type, peer, 0,
82 1, dst_type, MPI_NO_OP, buf_win);
83 MPI_Win_unlock(peer, buf_win);
85 MPI_Type_free(&src_type);
86 MPI_Type_free(&dst_type);
89 MPI_Barrier(MPI_COMM_WORLD);
91 /* Verify that the results are correct */
93 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
95 for (i = 0; i < SUB_XDIM; i++) {
96 for (j = 0; j < SUB_YDIM; j++) {
97 const double actual = *(win_buf + i + j*XDIM);
98 const double expected = (1.0 + ((rank+nranks-1)%nranks));
99 if (fabs(actual - expected) > 1.0e-10) {
100 SQUELCH( printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
101 rank, j, i, expected, actual); );
107 for (i = SUB_XDIM; i < XDIM; i++) {
108 for (j = 0; j < SUB_YDIM; j++) {
109 const double actual = *(win_buf + i + j*XDIM);
110 const double expected = -1.0;
111 if (fabs(actual - expected) > 1.0e-10) {
112 SQUELCH( printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
113 rank, j, i, expected, actual); );
119 for (i = 0; i < XDIM; i++) {
120 for (j = SUB_YDIM; j < YDIM; j++) {
121 const double actual = *(win_buf + i + j*XDIM);
122 const double expected = -1.0;
123 if (fabs(actual - expected) > 1.0e-10) {
124 SQUELCH( printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
125 rank, j, i, expected, actual); );
131 MPI_Win_unlock(rank, buf_win);
133 MPI_Win_free(&buf_win);
134 MPI_Free_mem(win_buf);
135 MPI_Free_mem(src_buf);
136 MPI_Free_mem(dst_buf);
138 MTest_Finalize( errors );
140 return MTestReturnValue( errors );