1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
4 * (C) 2014 by Argonne National Laboratory.
5 * See COPYRIGHT in top-level directory.
/* Origin-side source data (local_buf) and the readback buffer (check_buf)
 * used to verify that flushed puts are visible through a second process's
 * overlapping shared window. BUF_CNT is defined elsewhere in the original
 * file (not visible in this chunk). */
15 double local_buf[BUF_CNT], check_buf[BUF_CNT];
/* Set non-zero to enable the diagnostic printf blocks below. */
17 const int verbose = 0;
19 int main(int argc, char *argv[])
/* NOTE(review): this chunk is a fragmented extraction of the original test.
 * Many original lines (the nproc!=4 guard's `if`, several closing braces,
 * MPI_Finalize, the final return) fall outside this view. Only comments have
 * been added here; every code line is untouched. */
21 int rank, nproc, i, x;
22 int errors = 0, all_errors = 0;
23 MPI_Win win = MPI_WIN_NULL;
25 MPI_Comm shm_comm = MPI_COMM_NULL;
26 int shm_nproc, shm_rank;
/* shm_bases: per-rank base pointers of the shared segment (debug only);
 * my_base: this rank's own segment returned by MPI_Win_allocate_shared. */
27 double **shm_bases = NULL, *my_base;
28 MPI_Win shm_win = MPI_WIN_NULL;
29 MPI_Group shm_group = MPI_GROUP_NULL, world_group = MPI_GROUP_NULL;
/* Rank lists used to map shm_comm ranks back to MPI_COMM_WORLD ranks. */
30 int *shm_ranks = NULL, *shm_ranks_in_world = NULL;
31 MPI_Aint get_target_base_offsets = 0;
/* Window geometry: win_size bytes per rank, one double per displacement unit. */
33 int win_size = sizeof(double) * BUF_CNT;
34 int new_win_size = win_size;
35 int win_unit = sizeof(double);
36 int shm_root_rank_in_world;
/* origin issues the RMA ops; put_target/get_target live on the other node. */
37 int origin = -1, put_target, get_target;
39 MPI_Init(&argc, &argv);
40 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
41 MPI_Comm_size(MPI_COMM_WORLD, &nproc);
42 MPI_Comm_group(MPI_COMM_WORLD, &world_group);
/* (enclosing `if` testing the process count is not visible in this chunk) */
46 printf("Error: must be run with four processes\n");
47 MPI_Abort(MPI_COMM_WORLD, 1);
/* Split COMM_WORLD into per-node shared-memory communicators. */
50 MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL, &shm_comm);
51 MPI_Comm_rank(shm_comm, &shm_rank);
52 MPI_Comm_size(shm_comm, &shm_nproc);
53 MPI_Comm_group(shm_comm, &shm_group);
55 /* Platform does not support shared memory or wrong host file, just return. */
60 shm_bases = (double **) calloc(shm_nproc, sizeof(double *));
61 shm_ranks = (int *) calloc(shm_nproc, sizeof(int));
62 shm_ranks_in_world = (int *) calloc(shm_nproc, sizeof(int));
/* Every rank learns the world rank of its node's local root (shm rank 0). */
65 shm_root_rank_in_world = rank;
66 MPI_Bcast(&shm_root_rank_in_world, 1, MPI_INT, 0, shm_comm);
68 /* Identify ranks of target processes which are located on node 0 */
/* (loop body filling shm_ranks[0..shm_nproc-1] is not visible in this chunk) */
70 for (i = 0; i < shm_nproc; i++) {
73 MPI_Group_translate_ranks(shm_group, shm_nproc, shm_ranks, world_group, shm_ranks_in_world);
/* World rank 0 (on node 0) publishes its node's world-rank list to everyone. */
75 MPI_Bcast(shm_ranks_in_world, shm_nproc, MPI_INT, 0, MPI_COMM_WORLD);
/* Targets are the last and first local ranks of node 0, respectively. */
77 put_target = shm_ranks_in_world[shm_nproc - 1];
78 get_target = shm_ranks_in_world[0];
80 /* Identify the rank of origin process which are located on node 1 */
/* (assignment `origin = rank;` presumably follows here -- not visible) */
81 if (shm_root_rank_in_world == 1 && shm_rank == 0) {
84 printf("---- I am origin = %d, get_target = %d, put_target = %d\n",
85 origin, get_target, put_target);
89 /* Allocate shared memory among local processes */
90 MPI_Win_allocate_shared(win_size, win_unit, MPI_INFO_NULL, shm_comm, &my_base, &shm_win);
/* Debug: node 0 queries and prints every local rank's segment base/size. */
92 if (shm_root_rank_in_world == 0 && verbose) {
95 for (i = 0; i < shm_nproc; i++) {
96 MPI_Win_shared_query(shm_win, i, &size, &disp_unit, &shm_bases[i]);
97 printf("%d -- shared query: base[%d]=%p, size %zd, "
98 "unit %d\n", rank, i, shm_bases[i], size, disp_unit);
102 /* Get offset of put target(1) on get target(0) */
/* Displacement (in doubles) of put_target's segment, relative to
 * get_target's base -- assumes segments are contiguous and ordered by
 * local rank (the allocate_shared default). */
103 get_target_base_offsets = (shm_nproc - 1) * win_size / win_unit;
105 if (origin == rank && verbose)
106 printf("%d -- base_offset of put_target %d on get_target %d: %zd\n",
107 rank, put_target, get_target, get_target_base_offsets);
109 /* Create using MPI_Win_create(). Note that new window size of get_target(0)
110 * is equal to the total size of shm segments on this node, thus get_target
111 * process can read the byte located on put_target process.*/
112 for (i = 0; i < BUF_CNT; i++) {
113 local_buf[i] = (i + 1) * 1.0;
/* get_target exposes the whole node's shared region through its window. */
117 if (get_target == rank)
118 new_win_size = win_size * shm_nproc;
120 MPI_Win_create(my_base, new_win_size, win_unit, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
123 printf("%d -- new window my_base %p, size %d\n", rank, my_base, new_win_size);
125 MPI_Barrier(MPI_COMM_WORLD);
127 /* Check if flush guarantees the completion of put operations on target side.
129 * P exclusively locks 2 processes whose windows are shared with each other.
130 * P first put and flush to a process, then get the updated data from another process.
131 * If flush returns before operations are done on the target side, the data may be
133 for (x = 0; x < ITER; x++) {
/* (per-iteration reinitialization of local_buf is not visible in this chunk) */
134 for (i = 0; i < BUF_CNT; i++) {
139 if (rank == origin) {
/* Exclusive-lock both node-0 processes so no one else touches the data. */
140 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, put_target, 0, win);
141 MPI_Win_lock(MPI_LOCK_EXCLUSIVE, get_target, 0, win);
/* Put element-by-element into put_target's window ... */
143 for (i = 0; i < BUF_CNT; i++) {
144 MPI_Put(&local_buf[i], 1, MPI_DOUBLE, put_target, i, 1, MPI_DOUBLE, win);
/* ... flush must complete the puts at the TARGET before returning ... */
146 MPI_Win_flush(put_target, win);
/* ... then read the same memory back through get_target's enlarged window,
 * at the displacement of put_target's shared segment. */
148 MPI_Get(check_buf, BUF_CNT, MPI_DOUBLE, get_target,
149 get_target_base_offsets, BUF_CNT, MPI_DOUBLE, win);
150 MPI_Win_flush(get_target, win);
/* Stale data here would mean the first flush returned too early. */
152 for (i = 0; i < BUF_CNT; i++) {
153 if (check_buf[i] != local_buf[i]) {
154 printf("%d(iter %d) - Got check_buf[%d] = %.1lf, expected %.1lf\n",
155 rank, x, i, check_buf[i], local_buf[i]);
160 MPI_Win_unlock(put_target, win);
161 MPI_Win_unlock(get_target, win);
165 MPI_Barrier(MPI_COMM_WORLD);
/* Aggregate error counts on rank 0 for the pass/fail report. */
167 MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
171 if (rank == 0 && all_errors == 0)
172 printf(" No Errors\n");
/* Cleanup. NOTE(review): shm_bases and shm_ranks are freed in lines not
 * visible here (or leak); the `if (ptr) free(ptr)` guard is redundant --
 * free(NULL) is a no-op -- but left untouched in this doc-only pass. */
178 if (shm_ranks_in_world)
179 free(shm_ranks_in_world);
181 if (shm_win != MPI_WIN_NULL)
182 MPI_Win_free(&shm_win);
/* (the MPI_Win_free(&win) body line is not visible in this chunk) */
184 if (win != MPI_WIN_NULL)
187 if (shm_comm != MPI_COMM_NULL)
188 MPI_Comm_free(&shm_comm);
190 if (shm_group != MPI_GROUP_NULL)
191 MPI_Group_free(&shm_group);
193 if (world_group != MPI_GROUP_NULL)
194 MPI_Group_free(&world_group);