#include "colls.h"

/*****************************************************************************

Copyright (c) 2006, Ahmad Faraj & Xin Yuan,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the name of the Florida State University nor the names of its
    contributors may be used to endorse or promote products derived from this
    software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

  *************************************************************************
  *    Any results obtained from executing this software require the     *
  *    acknowledgment and citation of the software and its owners.       *
  *    The full citation is given below:                                 *
  *                                                                      *
  *    A. Faraj and X. Yuan. "Automatic Generation and Tuning of MPI     *
  *    Collective Communication Routines." The 19th ACM International    *
  *    Conference on Supercomputing (ICS), Cambridge, Massachusetts,     *
  *    June 20-22, 2005.                                                 *
  *************************************************************************

*****************************************************************************/
/*****************************************************************************

 * Function: bcast_scatter_LR_allgather

 * Return: int

 * Inputs:
    buff: send input buffer
    count: number of elements to send
    data_type: data type of elements being sent
    root: rank of the process from which the data is broadcast
    comm: communicator

 * Descrp: broadcasts using a scatter followed by an LR (ring) allgather.

 * Author: MPIH / modified by Ahmad Faraj

 ****************************************************************************/
int
smpi_coll_tuned_bcast_scatter_LR_allgather(void *buff, int count,
                                           MPI_Datatype data_type, int root,
                                           MPI_Comm comm)
{
  MPI_Aint extent;
  MPI_Status status;
  int i, src, dst, rank, num_procs;
  int mask, relative_rank, curr_size, recv_size, send_size, nbytes;
  int scatter_size, left, right, next_src, *recv_counts, *disps;
  int tag = 1, success = 0, failure = 1;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &num_procs);
  MPI_Type_extent(data_type, &extent);

  nbytes = extent * count;
  scatter_size = (nbytes + num_procs - 1) / num_procs;   // ceiling division
  curr_size = (rank == root) ? nbytes : 0;   // root starts with all the data
  relative_rank = (rank >= root) ? rank - root : rank - root + num_procs;
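
  // Phase 1: scatter the message over the processes with a binomial tree
  // rooted at 'root'.  After this phase the process with relative rank r
  // holds the scatter_size-byte block that starts at offset r * scatter_size
  // (the trailing blocks may be shorter, or empty, when nbytes is not a
  // multiple of num_procs).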
  mask = 0x1;
  while (mask < num_procs) {
    if (relative_rank & mask) {
      src = rank - mask;
      if (src < 0)
        src += num_procs;
      recv_size = nbytes - relative_rank * scatter_size;
      // recv_size is larger than what might actually be sent by the
      // sender. We don't need to compute the exact value because MPI
      // allows you to post a larger recv.
      if (recv_size <= 0)
        curr_size = 0;          // this process doesn't receive any data
                                // because of uneven division
      else {
        MPI_Recv((char *) buff + relative_rank * scatter_size, recv_size,
                 MPI_BYTE, src, tag, comm, &status);
        MPI_Get_count(&status, MPI_BYTE, &curr_size);
      }
      break;
    }
    mask <<= 1;
  }
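
  // curr_size now counts the bytes this process holds: its own block plus
  // the blocks that still have to be forwarded to its subtree below.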

  // This process is responsible for all processes that have bits
  // set from the LSB up to (but not including) mask.  Because of
  // the "not including", we start by shifting mask back down one.
  mask >>= 1;
  while (mask > 0) {
    if (relative_rank + mask < num_procs) {
      send_size = curr_size - scatter_size * mask;
      // mask is also the size of this process's subtree
      if (send_size > 0) {
        dst = rank + mask;
        if (dst >= num_procs)
          dst -= num_procs;
        MPI_Send((char *) buff + scatter_size * (relative_rank + mask),
                 send_size, MPI_BYTE, dst, tag, comm);
        curr_size -= send_size;
      }
    }
    mask >>= 1;
  }

  // done scatter now do allgather
  recv_counts = (int *) malloc(sizeof(int) * num_procs);
  if (!recv_counts) {
    printf("bcast-scatter-LR-allgather:95: cannot allocate memory\n");
    MPI_Finalize();
    exit(failure);
  }

  disps = (int *) malloc(sizeof(int) * num_procs);
  if (!disps) {
    printf("bcast-scatter-LR-allgather:103: cannot allocate memory\n");
    MPI_Finalize();
    exit(failure);
  }
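
  // recv_counts[i] and disps[i] give the size and the byte offset of the
  // block owned by the process whose relative rank (distance from root) is i.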
  for (i = 0; i < num_procs; i++) {
    recv_counts[i] = nbytes - i * scatter_size;
    if (recv_counts[i] > scatter_size)
      recv_counts[i] = scatter_size;
    if (recv_counts[i] < 0)
      recv_counts[i] = 0;
  }

  disps[0] = 0;
  for (i = 1; i < num_procs; i++)
    disps[i] = disps[i - 1] + recv_counts[i - 1];
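
  // Phase 2: ring (LR) allgather.  In step i every process sends the block
  // it obtained in the previous step to its right neighbour and receives a
  // new block from its left neighbour; after num_procs - 1 steps each
  // process holds the complete message.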
  left = (num_procs + rank - 1) % num_procs;
  right = (rank + 1) % num_procs;

  src = rank;
  next_src = left;

  for (i = 1; i < num_procs; i++) {
    MPI_Sendrecv((char *) buff + disps[(src - root + num_procs) % num_procs],
                 recv_counts[(src - root + num_procs) % num_procs],
                 MPI_BYTE, right, tag,
                 (char *) buff +
                 disps[(next_src - root + num_procs) % num_procs],
                 recv_counts[(next_src - root + num_procs) % num_procs],
                 MPI_BYTE, left, tag, comm, &status);
    src = next_src;
    next_src = (num_procs + next_src - 1) % num_procs;
  }

  free(recv_counts);
  free(disps);

  return success;
}
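
/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * it assumes this file is linked into an ordinary MPI program and that the
 * project header included above provides the MPI declarations.  The macro
 * guard BCAST_SCATTER_LR_ALLGATHER_EXAMPLE is hypothetical and keeps the
 * example out of regular builds.
 */
#ifdef BCAST_SCATTER_LR_ALLGATHER_EXAMPLE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
  int rank, i, n = 1024;
  int *data;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  data = (int *) malloc(n * sizeof(int));
  if (rank == 0)                        /* only the root fills the buffer */
    for (i = 0; i < n; i++)
      data[i] = i;
  else
    memset(data, 0, n * sizeof(int));

  /* broadcast from rank 0 with the scatter + LR-allgather algorithm */
  smpi_coll_tuned_bcast_scatter_LR_allgather(data, n, MPI_INT, 0,
                                             MPI_COMM_WORLD);

  if (data[n - 1] != n - 1)
    printf("rank %d: broadcast produced an unexpected value\n", rank);

  free(data);
  MPI_Finalize();
  return 0;
}
#endif /* BCAST_SCATTER_LR_ALLGATHER_EXAMPLE */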