1 #include "colls_private.h"
3 /*****************************************************************************
5 Copyright (c) 2006, Ahmad Faraj & Xin Yuan,
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
11 * Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
14 * Redistributions in binary form must reproduce the above copyright notice,
15 this list of conditions and the following disclaimer in the documentation
16 and/or other materials provided with the distribution.
18 * Neither the name of the Florida State University nor the names of its
19 contributors may be used to endorse or promote products derived from this
20 software without specific prior written permission.
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
23 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
26 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *************************************************************************
34 * Any results obtained from executing this software require the *
35 * acknowledgment and citation of the software and its owners. *
36 * The full citation is given below: *
38 * A. Faraj and X. Yuan. "Automatic Generation and Tuning of MPI *
39 * Collective Communication Routines." The 19th ACM International *
40 * Conference on Supercomputing (ICS), Cambridge, Massachusetts, *
42 *************************************************************************
44 *****************************************************************************/
46 /*****************************************************************************
48 * Function: bcast_scatter_LR_allgather
53 buff: send input buffer
54 count: number of elements to send
55 data_type: data type of elements being sent
59 * Description: broadcasts using a scatter followed by an LR (logical ring) allgather.
61 * Author: MPICH / modified by Ahmad Faraj
63 ****************************************************************************/
65 smpi_coll_tuned_bcast_scatter_LR_allgather(void *buff, int count,
66 MPI_Datatype data_type, int root,
71 int i, src, dst, rank, num_procs;
72 int mask, relative_rank, curr_size, recv_size, send_size, nbytes;
73 int scatter_size, left, right, next_src, *recv_counts, *disps;
74 int tag = COLL_TAG_BCAST;
76 rank = smpi_comm_rank(comm);
77 num_procs = smpi_comm_size(comm);
78 extent = smpi_datatype_get_extent(data_type);
81 nbytes = extent * count;
82 scatter_size = (nbytes + num_procs - 1) / num_procs; // ceiling division
83 curr_size = (rank == root) ? nbytes : 0; // root starts with all the data
84 relative_rank = (rank >= root) ? rank - root : rank - root + num_procs;
87 while (mask < num_procs) {
88 if (relative_rank & mask) {
92 recv_size = nbytes - relative_rank * scatter_size;
93 // recv_size is larger than what might actually be sent by the
94 // sender. We don't need to compute the exact value because MPI
95 // allows you to post a larger recv.
97 curr_size = 0; // this process doesn't receive any data
98 // because of uneven division
100 smpi_mpi_recv((char *) buff + relative_rank * scatter_size, recv_size,
101 MPI_BYTE, src, tag, comm, &status);
102 curr_size = smpi_mpi_get_count(&status, MPI_BYTE);
109 // This process is responsible for all processes that have bits
110 // set from the LSB upto (but not including) mask. Because of
111 // the "not including", we start by shifting mask back down
116 if (relative_rank + mask < num_procs) {
117 send_size = curr_size - scatter_size * mask;
118 // mask is also the size of this process's subtree
122 if (dst >= num_procs)
124 smpi_mpi_send((char *) buff + scatter_size * (relative_rank + mask),
125 send_size, MPI_BYTE, dst, tag, comm);
127 curr_size -= send_size;
133 // done scatter now do allgather
134 recv_counts = (int *) xbt_malloc(sizeof(int) * num_procs);
135 disps = (int *) xbt_malloc(sizeof(int) * num_procs);
137 for (i = 0; i < num_procs; i++) {
138 recv_counts[i] = nbytes - i * scatter_size;
139 if (recv_counts[i] > scatter_size)
140 recv_counts[i] = scatter_size;
141 if (recv_counts[i] < 0)
146 for (i = 1; i < num_procs; i++)
147 disps[i] = disps[i - 1] + recv_counts[i - 1];
149 left = (num_procs + rank - 1) % num_procs;
150 right = (rank + 1) % num_procs;
155 for (i = 1; i < num_procs; i++) {
156 smpi_mpi_sendrecv((char *) buff + disps[(src - root + num_procs) % num_procs],
157 recv_counts[(src - root + num_procs) % num_procs],
158 MPI_BYTE, right, tag,
160 disps[(next_src - root + num_procs) % num_procs],
161 recv_counts[(next_src - root + num_procs) % num_procs],
162 MPI_BYTE, left, tag, comm, &status);
164 next_src = (num_procs + next_src - 1) % num_procs;