/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "colls_private.h"
/*****************************************************************************

Copyright (c) 2006, Ahmad Faraj & Xin Yuan,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the name of the Florida State University nor the names of its
    contributors may be used to endorse or promote products derived from this
    software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

  *************************************************************************
  *     Any results obtained from executing this software require the     *
  *     acknowledgment and citation of the software and its owners.       *
  *     The full citation is given below:                                 *
  *                                                                       *
  *     A. Faraj and X. Yuan. "Automatic Generation and Tuning of MPI     *
  *     Collective Communication Routines." The 19th ACM International    *
  *     Conference on Supercomputing (ICS), Cambridge, Massachusetts,     *
  *     June 20-22, 2005.                                                 *
  *************************************************************************

*****************************************************************************/
/*****************************************************************************

 * Function: bcast_scatter_rdb_allgather

 * Return: int

 * Inputs:
    buff: send input buffer
    count: number of elements to send
    data_type: data type of elements being sent
    root: source of data
    comm: communicator

 * Descrp: broadcasts using a scatter followed by a recursive-doubling (rdb)
           allgather.

 * Author: MPICH / modified by Ahmad Faraj

 ****************************************************************************/
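/* Illustration (added here for clarity, not part of the original header):
   with num_procs = 4 and root = 0, the message is split into 4 chunks of
   scatter_size bytes. Scatter phase (binomial tree): rank 0 sends chunks
   2-3 to rank 2, then chunk 1 to rank 1; rank 2 forwards chunk 3 to
   rank 3. Allgather phase (recursive doubling): step 1 pairs (0,1) and
   (2,3) swap single chunks; step 2 pairs (0,2) and (1,3) swap the
   two-chunk halves they now hold, after which every rank has the whole
   buffer. */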
int
smpi_coll_tuned_bcast_scatter_rdb_allgather(void *buff, int count, MPI_Datatype
                                            data_type, int root, MPI_Comm comm)
{
  MPI_Aint extent;
  MPI_Status status;

  int i, j, k, src, dst, rank, num_procs, send_offset, recv_offset;
  int mask, relative_rank, curr_size, recv_size = 0, send_size, nbytes;
  int scatter_size, tree_root, relative_dst, dst_tree_root;
  int my_tree_root, offset, tmp_mask, num_procs_completed;
  int tag = COLL_TAG_BCAST;
  rank = smpi_comm_rank(comm);
  num_procs = smpi_comm_size(comm);
  extent = smpi_datatype_get_extent(data_type);

  nbytes = extent * count;
  scatter_size = (nbytes + num_procs - 1) / num_procs;  // ceiling division
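  // e.g. (illustrative): nbytes = 10, num_procs = 4 -> scatter_size = 3,
  // so ranks 0-2 own 3-byte chunks and rank 3 owns the 1-byte remainder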
  curr_size = (rank == root) ? nbytes : 0;      // root starts with all the data
  relative_rank = (rank >= root) ? rank - root : rank - root + num_procs;
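  /* Phase 1: binomial-tree scatter. Ranks are renumbered relative to the
     root, so relative rank 0 is the tree root. Each non-root waits for its
     data from the parent obtained by clearing the lowest set bit of its
     relative rank. */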
  mask = 0x1;
  while (mask < num_procs) {
    if (relative_rank & mask) {
      src = rank - mask;
      if (src < 0)
        src += num_procs;
      recv_size = nbytes - relative_rank * scatter_size;
      // recv_size is larger than what might actually be sent by the
      // sender. We don't need to compute the exact value because MPI
      // allows you to post a larger recv.

      if (recv_size <= 0)
        curr_size = 0;          // this process doesn't receive any data
      // because of uneven division
      else {
        smpi_mpi_recv((char *) buff + relative_rank * scatter_size, recv_size,
                      MPI_BYTE, src, tag, comm, &status);
        curr_size = smpi_mpi_get_count(&status, MPI_BYTE);
      }
      break;
    }
    mask <<= 1;
  }
  // This process is responsible for all processes that have bits
  // set from the LSB up to (but not including) mask. Because of
  // the "not including", we start by shifting mask back down
  // one.

  mask >>= 1;
  while (mask > 0) {
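    // e.g. (illustrative): with num_procs = 8, the root (relative rank 0)
    // leaves the loop above with mask = 8 and now sends the upper half of
    // what it still owns to relative ranks 4, 2 and 1 in turn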
    if (relative_rank + mask < num_procs) {
      send_size = curr_size - scatter_size * mask;
      // mask is also the size of this process's subtree

      if (send_size > 0) {
        dst = rank + mask;
        if (dst >= num_procs)
          dst -= num_procs;
        smpi_mpi_send((char *) buff + scatter_size * (relative_rank + mask),
                      send_size, MPI_BYTE, dst, tag, comm);

        curr_size -= send_size;
      }
    }
    mask >>= 1;
  }
  // done with the scatter, now do the allgather

  mask = 0x1;
  i = 0;
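  /* Phase 2: recursive-doubling allgather. In step i, each process
     exchanges everything it has gathered so far with the partner whose
     relative rank differs in bit i, doubling the data it owns each step. */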
  while (mask < num_procs) {
    relative_dst = relative_rank ^ mask;

    dst = (relative_dst + root) % num_procs;

    /* find offset into send and recv buffers.
       zero out the least significant "i" bits of relative_rank and
       relative_dst to find root of src and dst
       subtrees. Use ranks of roots as index to send from
       and recv into buffer */

    dst_tree_root = relative_dst >> i;
    dst_tree_root <<= i;

    my_tree_root = relative_rank >> i;
    my_tree_root <<= i;

    send_offset = my_tree_root * scatter_size;
    recv_offset = dst_tree_root * scatter_size;
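    // e.g. (illustrative): at step i = 1, relative ranks 2 and 3 both have
    // my_tree_root = 2, so both send starting at offset 2 * scatter_size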
    if (relative_dst < num_procs) {
      smpi_mpi_sendrecv((char *) buff + send_offset, curr_size, MPI_BYTE, dst,
                        tag, (char *) buff + recv_offset, scatter_size * mask,
                        MPI_BYTE, dst, tag, comm, &status);
      recv_size = smpi_mpi_get_count(&status, MPI_BYTE);
      curr_size += recv_size;
    }
    /* if some processes in this process's subtree in this step
       did not have any destination process to communicate with
       because of non-power-of-two, we need to send them the
       data that they would normally have received from those
       processes. That is, the haves in this subtree must send to
       the havenots. We use a logarithmic recursive-halving algorithm
       for this. */
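    /* e.g. (illustrative): with num_procs = 6 at step mask = 4, relative
       ranks 2 and 3 would need partners 6 and 7, which do not exist; so
       ranks 0 and 1, which just received the data of the [4,5] subtree,
       forward it to ranks 2 and 3 below. */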
    if (dst_tree_root + mask > num_procs) {
      num_procs_completed = num_procs - my_tree_root - mask;
      /* num_procs_completed is the number of processes in this
         subtree that have all the data. Send data to others
         in a tree fashion. First find root of current tree
         that is being divided into two. k is the number of
         least-significant bits in this process's rank that
         must be zeroed out to find the rank of the root */

      j = mask;
      k = 0;
      while (j) {
        j >>= 1;
        k++;
      }
      k--;                      // k is now log2(mask)
      offset = scatter_size * (my_tree_root + mask);
      tmp_mask = mask >> 1;

      while (tmp_mask) {
        relative_dst = relative_rank ^ tmp_mask;
        dst = (relative_dst + root) % num_procs;

        tree_root = relative_rank >> k;
        tree_root <<= k;
        /* send only if this proc has data and destination
           doesn't have data. */

        if ((relative_dst > relative_rank)
            && (relative_rank < tree_root + num_procs_completed)
            && (relative_dst >= tree_root + num_procs_completed)) {
          smpi_mpi_send((char *) buff + offset, recv_size, MPI_BYTE, dst, tag,
                        comm);

          /* recv_size was set in the previous
             receive. that's the amount of data to be
             sent now. */
        }
        /* recv only if this proc. doesn't have data and sender
           has data */
        else if ((relative_dst < relative_rank)
                 && (relative_dst < tree_root + num_procs_completed)
                 && (relative_rank >= tree_root + num_procs_completed)) {
          smpi_mpi_recv((char *) buff + offset,
                        scatter_size * num_procs_completed, MPI_BYTE, dst, tag,
                        comm, &status);

          /* num_procs_completed is also equal to the number of processes
             whose data we don't have */
          recv_size = smpi_mpi_get_count(&status, MPI_BYTE);
          curr_size += recv_size;
        }
        tmp_mask >>= 1;
        k--;
      }
    }
    i++;
    mask <<= 1;
  }

  return MPI_SUCCESS;
}
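/* Usage sketch (illustrative, not from the original file): in SimGrid this
   routine is normally reached through the collective selector rather than
   called directly, e.g. by running with
   --cfg=smpi/bcast:scatter_rdb_allgather. A direct call would look like:

     int buf[1024];
     smpi_coll_tuned_bcast_scatter_rdb_allgather(buf, 1024, MPI_INT, 0,
                                                 MPI_COMM_WORLD);
*/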