//#include <star-reduction.c>
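// star_reduction() comes from star-reduction.c; inferring from the call
// sites below, it applies `op` to the first buffer and accumulates the
// result into the second: star_reduction(op, src, dst, &count, &dtype).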
int smpi_coll_tuned_allreduce_rab_rsag(void *sbuff, void *rbuff, int count,
                                       MPI_Datatype dtype, MPI_Op op,
                                       MPI_Comm comm)
{
  int nprocs, rank, type_size, tag = 543;
  int mask, dst, pof2, newrank, rem, newdst, i,
      send_idx, recv_idx, last_idx, send_cnt, recv_cnt, *cnts, *disps;
  MPI_Aint extent;
  MPI_Status status;
  void *tmp_buf;
  MPI_Comm_size(comm, &nprocs);
  MPI_Comm_rank(comm, &rank);

  MPI_Type_extent(dtype, &extent);
  tmp_buf = (void *) malloc(count * extent);
  if (!tmp_buf) {
    printf("Could not allocate memory for tmp_buf\n");
    return 1;
  }
  MPI_Sendrecv(sbuff, count, dtype, rank, tag, rbuff, count, dtype, rank, tag,
               comm, &status);
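  // The self-addressed Sendrecv above simply copies the local input (sbuff)
  // into rbuff, so all later reduction steps can accumulate in place.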
  MPI_Type_size(dtype, &type_size);
  // find nearest power-of-two less than or equal to comm_size
  pof2 = 1;
  while (pof2 <= nprocs)
    pof2 <<= 1;
  pof2 >>= 1;

  rem = nprocs - pof2;
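  // Example: nprocs = 6 gives pof2 = 4 and rem = 2, so ranks 0..3 pair up in
  // the folding step below while ranks 4 and 5 enter the power-of-two phase
  // directly.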
  // In the non-power-of-two case, all even-numbered
  // processes of rank < 2*rem send their data to
  // (rank+1). These even-numbered processes no longer
  // participate in the algorithm until the very end. The
  // remaining processes form a nice power-of-two.
  if (rank < 2 * rem) {
    if (rank % 2 == 0) {        // even
      MPI_Send(rbuff, count, dtype, rank + 1, tag, comm);
      // temporarily set the rank to -1 so that this
      // process does not participate in recursive
      // doubling
      newrank = -1;
    } else {                    // odd
      MPI_Recv(tmp_buf, count, dtype, rank - 1, tag, comm, &status);
      // do the reduction on received data. since the
      // ordering is right, it doesn't matter whether
      // the operation is commutative or not.
      star_reduction(op, tmp_buf, rbuff, &count, &dtype);

      // change the rank
      newrank = rank / 2;
    }
  }
  else                          // rank >= 2 * rem
    newrank = rank - rem;

  // If op is user-defined or count is less than pof2, use
  // recursive doubling algorithm. Otherwise do a reduce-scatter
  // followed by allgather. (If op is user-defined,
  // derived datatypes are allowed and the user could pass basic
  // datatypes on one process and derived on another as long as
  // the type maps are the same. Breaking up derived
  // datatypes to do the reduce-scatter is tricky, therefore
  // using recursive doubling in that case.)
  if (newrank != -1) {
    // do a reduce-scatter followed by allgather. for the
    // reduce-scatter, calculate the count that each process receives
    // and the displacement within the buffer
    cnts = (int *) malloc(pof2 * sizeof(int));
    disps = (int *) malloc(pof2 * sizeof(int));

    for (i = 0; i < (pof2 - 1); i++)
      cnts[i] = count / pof2;
    cnts[pof2 - 1] = count - (count / pof2) * (pof2 - 1);
    disps[0] = 0;
    for (i = 1; i < pof2; i++)
      disps[i] = disps[i - 1] + cnts[i - 1];
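    // Example: count = 10, pof2 = 4 gives cnts = {2, 2, 2, 4} and
    // disps = {0, 2, 4, 6}; the last chunk absorbs the remainder.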
    mask = 0x1;
    send_idx = recv_idx = 0;
    last_idx = pof2;
    while (mask < pof2) {
      newdst = newrank ^ mask;
      // find real rank of dest
      dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;
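      // (newdst < rem: the partner is one of the odd ranks that absorbed its
      // left neighbor in the folding step, so its real rank is newdst*2 + 1;
      // otherwise the partner lies past the folded region.)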
      send_cnt = recv_cnt = 0;
      if (newrank < newdst) {
        send_idx = recv_idx + pof2 / (mask * 2);
        for (i = send_idx; i < last_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < send_idx; i++)
          recv_cnt += cnts[i];
      } else {
        recv_idx = send_idx + pof2 / (mask * 2);
        for (i = send_idx; i < recv_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < last_idx; i++)
          recv_cnt += cnts[i];
      }
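      // Each iteration halves the active window: one half of it is sent away
      // and the other half is received and reduced, so after log2(pof2) steps
      // every process owns exactly one fully reduced chunk (whose index is
      // the bit-reversal of newrank).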
      // Send data from recvbuf. Recv into tmp_buf
      MPI_Sendrecv((char *) rbuff + disps[send_idx] * extent, send_cnt,
                   dtype, dst, tag,
                   (char *) tmp_buf + disps[recv_idx] * extent, recv_cnt,
                   dtype, dst, tag, comm, &status);
      // tmp_buf contains data received in this step.
      // recvbuf contains data accumulated so far

      // This algorithm is used only for predefined ops
      // and predefined ops are always commutative.
      star_reduction(op, (char *) tmp_buf + disps[recv_idx] * extent,
                     (char *) rbuff + disps[recv_idx] * extent,
                     &recv_cnt, &dtype);
      // update send_idx for next iteration
      send_idx = recv_idx;
      mask <<= 1;
      // update last_idx, but not in last iteration because the value
      // is needed in the allgather step below.
      if (mask < pof2)
        last_idx = recv_idx + pof2 / mask;
    }
    // now do the allgather

    mask >>= 1;
    while (mask > 0) {
      newdst = newrank ^ mask;
      // find real rank of dest
      dst = (newdst < rem) ? newdst * 2 + 1 : newdst + rem;

      send_cnt = recv_cnt = 0;
      if (newrank < newdst) {
        // update last_idx except on first iteration
        if (mask != pof2 / 2)
          last_idx = last_idx + pof2 / (mask * 2);

        recv_idx = send_idx + pof2 / (mask * 2);
        for (i = send_idx; i < recv_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < last_idx; i++)
          recv_cnt += cnts[i];
      } else {
        recv_idx = send_idx - pof2 / (mask * 2);
        for (i = send_idx; i < last_idx; i++)
          send_cnt += cnts[i];
        for (i = recv_idx; i < send_idx; i++)
          recv_cnt += cnts[i];
      }
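      // The allgather retraces the reduce-scatter exchanges in reverse order,
      // doubling the contiguous region of final results each process holds
      // until all of rbuff is filled.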
      MPI_Sendrecv((char *) rbuff + disps[send_idx] * extent, send_cnt,
                   dtype, dst, tag,
                   (char *) rbuff + disps[recv_idx] * extent, recv_cnt,
                   dtype, dst, tag, comm, &status);
      if (newrank > newdst)
        send_idx = recv_idx;

      mask >>= 1;
    }

    free(cnts);
    free(disps);
  }
  // In the non-power-of-two case, all odd-numbered processes of
  // rank < 2 * rem send the result to (rank-1), the ranks who didn't
  // participate above.
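  // Example: with nprocs = 6 (rem = 2), ranks 1 and 3 hold complete results
  // here and copy them back to ranks 0 and 2.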
  if (rank < 2 * rem) {
    if (rank % 2)               // odd
      MPI_Send(rbuff, count, dtype, rank - 1, tag, comm);
    else                        // even
      MPI_Recv(rbuff, count, dtype, rank + 1, tag, comm, &status);
  }

  free(tmp_buf);
  return MPI_SUCCESS;
}