/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2009      University of Houston. All rights reserved.
 *
 * Additional copyrights may follow
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer listed
 *   in this license in the documentation and/or other materials
 *   provided with the distribution.
 *
 * - Neither the name of the copyright holders nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * The copyright holders provide no reassurances that the source code
 * provided does not infringe any patent, copyright, or any other
 * intellectual property rights of third parties. The copyright holders
 * disclaim any liability to any recipient for claims brought against
 * recipient by any third party for infringement of that parties
 * intellectual property rights.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   ompi_coll_tuned_allreduce_intra_ring_segmented
 *
 *   Function:       Pipelined ring algorithm for allreduce operation
 *   Accepts:        Same as MPI_Allreduce(), segment size
 *   Returns:        MPI_SUCCESS or error code
 *
 *   Description:    Implements pipelined ring algorithm for allreduce:
 *                   user supplies suggested segment size for the pipelining of
 *                   reduce operation.
 *                   The segment size determines the number of phases, np, for
 *                   the algorithm execution.
 *                   The message is automatically divided into blocks of
 *                   approximately (count / (np * segcount)) elements.
 *                   At the end of the reduction phase, an allgather-like step
 *                   is executed.
 *                   Algorithm requires (np + 1)*(N - 1) steps.
 *
 *   Limitations:    The algorithm DOES NOT preserve order of operations, so it
 *                   can be used only for commutative operations.
 *                   In addition, the algorithm cannot work if the total count
 *                   is less than size * segment count.
 *
 *         Example on 3 nodes with 2 phases
 *         Initial state
 *   #      0              1             2
 *        [00a]          [10a]         [20a]
 *        [00b]          [10b]         [20b]
 *        [01a]          [11a]         [21a]
 *        [01b]          [11b]         [21b]
 *        [02a]          [12a]         [22a]
 *        [02b]          [12b]         [22b]
 *
 *        COMPUTATION PHASE 0 (a)
 *         Step 0: rank r sends block ra to rank (r+1) and receives block (r-1)a
 *                 from rank (r-1) [with wraparound].
 *   #      0              1             2
 *        [00a]        [00a+10a]       [20a]
 *        [00b]          [10b]         [20b]
 *        [01a]          [11a]       [11a+21a]
 *        [01b]          [11b]         [21b]
 *      [22a+02a]        [12a]         [22a]
 *        [02b]          [12b]         [22b]
 *
 *         Step 1: rank r sends block (r-1)a to rank (r+1) and receives block
 *                 (r-2)a from rank (r-1) [with wraparound].
 *   #      0              1             2
 *        [00a]        [00a+10a]    [00a+10a+20a]
 *        [00b]          [10b]         [20b]
 *    [11a+21a+01a]      [11a]       [11a+21a]
 *        [01b]          [11b]         [21b]
 *      [22a+02a]    [22a+02a+12a]     [22a]
 *        [02b]          [12b]         [22b]
 *
 *        COMPUTATION PHASE 1 (b)
 *         Step 0: rank r sends block rb to rank (r+1) and receives block (r-1)b
 *                 from rank (r-1) [with wraparound].
 *   #      0              1             2
 *        [00a]        [00a+10a]    [00a+10a+20a]
 *        [00b]        [00b+10b]       [20b]
 *    [11a+21a+01a]      [11a]       [11a+21a]
 *        [01b]          [11b]       [11b+21b]
 *      [22a+02a]    [22a+02a+12a]     [22a]
 *      [22b+02b]        [12b]         [22b]
 *
 *         Step 1: rank r sends block (r-1)b to rank (r+1) and receives block
 *                 (r-2)b from rank (r-1) [with wraparound].
 *   #      0              1             2
 *        [00a]        [00a+10a]    [00a+10a+20a]
 *        [00b]        [00b+10b]    [00b+10b+20b]
 *    [11a+21a+01a]      [11a]       [11a+21a]
 *    [11b+21b+01b]      [11b]       [11b+21b]
 *      [22a+02a]    [22a+02a+12a]     [22a]
 *      [22b+02b]    [22b+02b+12b]     [22b]
 *
 *        DISTRIBUTION PHASE: ring ALLGATHER with ranks shifted by 1 (same as
 *        in the regular ring algorithm).
 */
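
/* Step-count check for the example above (illustrative): with N = 3 nodes and
   np = 2 phases, the algorithm takes (np + 1)*(N - 1) = 6 steps:
   np*(N - 1) = 4 reduction steps (two per phase) plus N - 1 = 2 allgather
   steps. */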

#define COLL_TUNED_COMPUTED_SEGCOUNT(SEGSIZE, TYPELNG, SEGCOUNT)        \
    if( ((SEGSIZE) >= (TYPELNG)) &&                                     \
        ((SEGSIZE) < ((TYPELNG) * (SEGCOUNT))) ) {                      \
        size_t residual;                                                \
        (SEGCOUNT) = (int)((SEGSIZE) / (TYPELNG));                      \
        residual = (SEGSIZE) - (SEGCOUNT) * (TYPELNG);                  \
        if( residual > ((TYPELNG) >> 1) )                               \
            (SEGCOUNT)++;                                               \
    }
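
/* Worked example (illustrative): with SEGSIZE = 1 MB and TYPELNG = 8 (an
   8-byte type), SEGCOUNT becomes (1 << 20) / 8 = 131072 elements per segment.
   When SEGSIZE is not a multiple of TYPELNG, the residual test rounds
   SEGCOUNT up whenever the leftover bytes exceed half an element. */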

#define COLL_TUNED_COMPUTE_BLOCKCOUNT( COUNT, NUM_BLOCKS, SPLIT_INDEX,       \
                                       EARLY_BLOCK_COUNT, LATE_BLOCK_COUNT ) \
    EARLY_BLOCK_COUNT = LATE_BLOCK_COUNT = COUNT / NUM_BLOCKS;               \
    SPLIT_INDEX = COUNT % NUM_BLOCKS;                                        \
    if (0 != SPLIT_INDEX) {                                                  \
        EARLY_BLOCK_COUNT = EARLY_BLOCK_COUNT + 1;                           \
    }
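
/* Worked example (illustrative): COUNT = 10 over NUM_BLOCKS = 3 gives
   SPLIT_INDEX = 1, EARLY_BLOCK_COUNT = 4 and LATE_BLOCK_COUNT = 3, i.e.
   block 0 takes 4 elements and blocks 1 and 2 take 3 each. */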

#include "colls_private.h"

int
smpi_coll_tuned_allreduce_ompi_ring_segmented(void *sbuf, void *rbuf, int count,
                                              MPI_Datatype dtype,
                                              MPI_Op op,
                                              MPI_Comm comm)
{
   int ret = MPI_SUCCESS;
   int line;
   int rank, size, k, recv_from, send_to;
   int early_blockcount, late_blockcount, split_rank;
   int segcount, max_segcount;
   int num_phases, phase;
   int block_count, inbi;
   size_t typelng;
   char *tmpsend = NULL, *tmprecv = NULL;
   char *inbuf[2] = {NULL, NULL};
   ptrdiff_t true_extent, extent;
   ptrdiff_t block_offset, max_real_segsize;
   MPI_Request reqs[2] = {NULL, NULL};
   const size_t segsize = 1 << 20; /* 1 MB */
   size = smpi_comm_size(comm);
   rank = smpi_comm_rank(comm);

   XBT_DEBUG("coll:tuned:allreduce_intra_ring_segmented rank %d, count %d", rank, count);

   /* Special case for size == 1 */
   if (1 == size) {
      if (MPI_IN_PLACE != sbuf) {
         ret = smpi_datatype_copy(sbuf, count, dtype, rbuf, count, dtype);
         if (ret < 0) { line = __LINE__; goto error_hndl; }
      }
      return MPI_SUCCESS;
   }

   /* Determine segment count based on the suggested segment size */
   extent = smpi_datatype_get_extent(dtype);
   if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
   true_extent = smpi_datatype_get_extent(dtype);
   if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
   typelng = smpi_datatype_size(dtype);
   if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }
   segcount = count;
   COLL_TUNED_COMPUTED_SEGCOUNT(segsize, typelng, segcount)

   /* Special case for count less than size * segcount - use regular ring */
   if (count < size * segcount) {
      XBT_DEBUG("coll:tuned:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count);
      return (smpi_coll_tuned_allreduce_lr(sbuf, rbuf, count, dtype, op,
                                           comm));
   }

   /* Determine the number of phases of the algorithm */
   num_phases = count / (size * segcount);
   if ((count % (size * segcount) >= size) &&
       (count % (size * segcount) > ((size * segcount) / 2))) {
      num_phases++;
   }
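   /* Illustrative example: count = 100, size = 3, segcount = 10 gives
      num_phases = 100 / 30 = 3; the remaining 10 elements do not exceed half
      a full phase (15 elements), so no extra phase is added. */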

   /* Determine the number of elements per block and corresponding
      block sizes.
      The blocks are divided into "early" and "late" ones:
      blocks 0 .. (split_rank - 1) are "early" and
      blocks (split_rank) .. (size - 1) are "late".
      Early blocks are at most 1 element larger than the late ones.
      Note, these blocks will be split into num_phases segments,
      out of which the largest one will have max_segcount elements.
    */
   COLL_TUNED_COMPUTE_BLOCKCOUNT( count, size, split_rank,
                                  early_blockcount, late_blockcount )
   COLL_TUNED_COMPUTE_BLOCKCOUNT( early_blockcount, num_phases, inbi,
                                  max_segcount, k)
   max_real_segsize = true_extent + (max_segcount - 1) * extent;
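   /* Buffer sizing rule: max_segcount elements of the datatype span
      true_extent + (max_segcount - 1) * extent bytes in memory. */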

   /* Allocate and initialize temporary buffers */
   inbuf[0] = (char*)smpi_get_tmp_sendbuffer(max_real_segsize);
   if (NULL == inbuf[0]) { ret = -1; line = __LINE__; goto error_hndl; }
   inbuf[1] = (char*)smpi_get_tmp_recvbuffer(max_real_segsize);
   if (NULL == inbuf[1]) { ret = -1; line = __LINE__; goto error_hndl; }

   /* Handle MPI_IN_PLACE */
   if (MPI_IN_PLACE != sbuf) {
      ret = smpi_datatype_copy(sbuf, count, dtype, rbuf, count, dtype);
      if (ret < 0) { line = __LINE__; goto error_hndl; }
   }

   /* Computation loop: for each phase, repeat ring allreduce computation loop */
   for (phase = 0; phase < num_phases; phase++) {
      ptrdiff_t phase_offset;
      int early_phase_segcount, late_phase_segcount, split_phase, phase_count;

      /*
         For each of the remote nodes:
         - post irecv for block (r-1)
         - send block (r)
           To do this, first compute block offset and count, and use block offset
           to compute phase offset.
         - in loop for every step k = 2 .. n
           - post irecv for block (r + n - k) % n
           - wait on block (r + n - k + 1) % n to arrive
           - compute on block (r + n - k + 1) % n
           - send block (r + n - k + 1) % n
         - wait on block (r + 1)
         - compute on block (r + 1)
         - send block (r + 1) to rank (r + 1)
         Note that we must be careful when computing the beginning of buffers,
         and for send operations and computation we must compute the exact
         block size.
      */
      send_to = (rank + 1) % size;
      recv_from = (rank + size - 1) % size;

      inbi = 0;
      /* Initialize first receive from the neighbor on the left */
      reqs[inbi] = smpi_mpi_irecv(inbuf[inbi], max_segcount, dtype, recv_from,
                                  666, comm);
      /* Send first block (my block) to the neighbor on the right:
         - compute my block and phase offset
         - send data */
      block_offset = ((rank < split_rank)?
                      (rank * early_blockcount) :
                      (rank * late_blockcount + split_rank));
      block_count = ((rank < split_rank)? early_blockcount : late_blockcount);
      COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
                                    early_phase_segcount, late_phase_segcount)
      phase_count = ((phase < split_phase)?
                     (early_phase_segcount) : (late_phase_segcount));
      phase_offset = ((phase < split_phase)?
                      (phase * early_phase_segcount) :
                      (phase * late_phase_segcount + split_phase));
      tmpsend = ((char*)rbuf) + (block_offset + phase_offset) * extent;
      smpi_mpi_send(tmpsend, phase_count, dtype, send_to,
                    666, comm);
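
      /* The two inbuf slots form a double buffer: while the block in
         inbuf[inbi ^ 0x1] is being reduced and forwarded, the next block is
         already being received into inbuf[inbi]. */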
      for (k = 2; k < size; k++) {
         const int prevblock = (rank + size - k + 1) % size;

         inbi = inbi ^ 0x1;

         /* Post irecv for the current block */
         reqs[inbi] = smpi_mpi_irecv(inbuf[inbi], max_segcount, dtype, recv_from,
                                     666, comm);
         if (MPI_SUCCESS != ret) { line = __LINE__; goto error_hndl; }

         /* Wait on previous block to arrive */
         smpi_mpi_wait(&reqs[inbi ^ 0x1], MPI_STATUS_IGNORE);

         /* Apply operation on previous block: result goes to rbuf
            rbuf[prevblock] = inbuf[inbi ^ 0x1] (op) rbuf[prevblock]
         */
         block_offset = ((prevblock < split_rank)?
                         (prevblock * early_blockcount) :
                         (prevblock * late_blockcount + split_rank));
         block_count = ((prevblock < split_rank)?
                        early_blockcount : late_blockcount);
         COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
                                       early_phase_segcount, late_phase_segcount)
         phase_count = ((phase < split_phase)?
                        (early_phase_segcount) : (late_phase_segcount));
         phase_offset = ((phase < split_phase)?
                         (phase * early_phase_segcount) :
                         (phase * late_phase_segcount + split_phase));
         tmprecv = ((char*)rbuf) + (block_offset + phase_offset) * extent;
         smpi_op_apply(op, inbuf[inbi ^ 0x1], tmprecv, &phase_count, &dtype);

         /* send previous block to send_to */
         smpi_mpi_send(tmprecv, phase_count, dtype, send_to,
                       666, comm);
      }

      /* Wait on the last block to arrive */
      smpi_mpi_wait(&reqs[inbi], MPI_STATUS_IGNORE);

      /* Apply operation on the last block (from neighbor (rank + 1)
         rbuf[rank+1] = inbuf[inbi] (op) rbuf[rank + 1] */
      recv_from = (rank + 1) % size;
      block_offset = ((recv_from < split_rank)?
                      (recv_from * early_blockcount) :
                      (recv_from * late_blockcount + split_rank));
      block_count = ((recv_from < split_rank)?
                     early_blockcount : late_blockcount);
      COLL_TUNED_COMPUTE_BLOCKCOUNT(block_count, num_phases, split_phase,
                                    early_phase_segcount, late_phase_segcount)
      phase_count = ((phase < split_phase)?
                     (early_phase_segcount) : (late_phase_segcount));
      phase_offset = ((phase < split_phase)?
                      (phase * early_phase_segcount) :
                      (phase * late_phase_segcount + split_phase));
      tmprecv = ((char*)rbuf) + (block_offset + phase_offset) * extent;
      smpi_op_apply(op, inbuf[inbi], tmprecv, &phase_count, &dtype);
   }
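
   /* At this point each rank holds the fully reduced result for block
      (rank + 1) % size, which is why the allgather below starts with the
      ranks shifted by one. */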

   /* Distribution loop - variation of ring allgather */
   send_to = (rank + 1) % size;
   recv_from = (rank + size - 1) % size;
   for (k = 0; k < size - 1; k++) {
      const int recv_data_from = (rank + size - k) % size;
      const int send_data_from = (rank + 1 + size - k) % size;
      const int send_block_offset =
         ((send_data_from < split_rank)?
          (send_data_from * early_blockcount) :
          (send_data_from * late_blockcount + split_rank));
      const int recv_block_offset =
         ((recv_data_from < split_rank)?
          (recv_data_from * early_blockcount) :
          (recv_data_from * late_blockcount + split_rank));
      block_count = ((send_data_from < split_rank)?
                     early_blockcount : late_blockcount);

      tmprecv = (char*)rbuf + recv_block_offset * extent;
      tmpsend = (char*)rbuf + send_block_offset * extent;
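
      /* The receive below is posted with early_blockcount, an upper bound on
         the actual block size; MPI allows the receive count to exceed the
         amount of data actually sent, so this is safe for late blocks too. */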
      smpi_mpi_sendrecv(tmpsend, block_count, dtype, send_to,
                        666,
                        tmprecv, early_blockcount, dtype, recv_from,
                        666,
                        comm, MPI_STATUS_IGNORE);
   }

   if (NULL != inbuf[0]) smpi_free_tmp_buffer(inbuf[0]);
   if (NULL != inbuf[1]) smpi_free_tmp_buffer(inbuf[1]);
   return MPI_SUCCESS;

 error_hndl:
   XBT_DEBUG("%s:%4d\tRank %d Error occurred %d\n",
             __FILE__, line, rank, ret);
   if (NULL != inbuf[0]) smpi_free_tmp_buffer(inbuf[0]);
   if (NULL != inbuf[1]) smpi_free_tmp_buffer(inbuf[1]);
   return ret;
}
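
/* Usage sketch (illustrative, not part of the original file): this routine is
   normally reached through SimGrid's collective-selection machinery rather
   than called directly, but a direct call for a commutative operation would
   look like
       smpi_coll_tuned_allreduce_ompi_ring_segmented(sbuf, rbuf, count,
                                                     MPI_DOUBLE, MPI_SUM, comm);
 */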