#include "colls_private.h"
#include <math.h>           /* for cbrt() used in is_3dmesh() */

/*****************************************************************************

Copyright (c) 2006, Ahmad Faraj & Xin Yuan,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the name of the Florida State University nor the names of its
    contributors may be used to endorse or promote products derived from this
    software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

  *************************************************************************
  *     Any results obtained from executing this software require the     *
  *     acknowledgment and citation of the software and its owners.       *
  *     The full citation is given below:                                 *
  *                                                                       *
  *     A. Faraj and X. Yuan. "Automatic Generation and Tuning of MPI     *
  *     Collective Communication Routines." The 19th ACM International    *
  *     Conference on Supercomputing (ICS), Cambridge, Massachusetts,     *
  *     June 20-22, 2005.                                                 *
  *************************************************************************

*****************************************************************************/

/*****************************************************************************
 * Function: is_3dmesh
 * Return: int (1 if a factoring was found, 0 otherwise)
 * num: the number of processes in a communicator
 * i: x dimension
 * j: y dimension
 * k: z dimension
 * Description: takes the number of processes and tries to factor it into an
 * x*y*z mesh, storing the dimensions in *i, *j and *k
 ****************************************************************************/
static int is_3dmesh(int num, int *i, int *j, int *k)
{
  int x, max = num / 3;
  x = cbrt(num);
  *i = *j = *k = 0;
  while (x <= max) {
    if ((num % (x * x)) == 0) {
      *i = *j = x;
      *k = num / (x * x);
      return 1;
    }
    x++;
  }
  return 0;
}
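
/* Worked example (illustrative comment, not from the original sources):
 * is_3dmesh(12, &x, &y, &z) starts searching at x = (int) cbrt(12) = 2 and
 * finds 12 % (2 * 2) == 0, so it reports a 2 x 2 x 3 mesh; is_3dmesh(64, ...)
 * reports 4 x 4 x 4. For num < 3 there is no candidate x <= num / 3, so the
 * function returns 0. */
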
/*****************************************************************************
 * Function: smpi_coll_tuned_allgather_3dmesh
 * Return: int
 * send_buff: send input buffer
 * send_count: number of elements to send
 * send_type: data type of elements being sent
 * recv_buff: receive output buffer
 * recv_count: number of elements to receive
 * recv_type: data type of elements being received
 * comm: communicator
 * Description: Function realizes the allgather operation using the 3dmesh
 * algorithm. Allgather communication occurs first in the x dimension, then
 * in the y dimension, and finally in the z dimension. Communication in each
 * dimension follows the "simple" scheme.
 * Author: Ahmad Faraj
 ****************************************************************************/
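
/* Illustration (added comment, not from the original sources): with
 * num_procs = 8 factored as a 2 x 2 x 2 mesh, ranks 0-3 form the z = 0 plane
 * and ranks 4-7 the z = 1 plane. Phase 1 exchanges single blocks within each
 * row, phase 2 exchanges whole rows within each column (after which each rank
 * owns its full plane), and phase 3 exchanges complete planes, leaving every
 * rank with all 8 blocks. */
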
int smpi_coll_tuned_allgather_3dmesh(void *send_buff, int send_count,
                                     MPI_Datatype send_type, void *recv_buff,
                                     int recv_count, MPI_Datatype recv_type,
                                     MPI_Comm comm)
{
  MPI_Request *req, *req_ptr;
  MPI_Aint extent;

  int i, src, dst, rank, num_procs, block_size, my_z_base;
  int my_z, X, Y, Z, send_offset, recv_offset;
  int two_dsize, my_row_base, my_col_base, src_row_base, src_z_base, num_reqs;
  int tag = COLL_TAG_ALLGATHER;

  rank = smpi_comm_rank(comm);
  num_procs = smpi_comm_size(comm);
  extent = smpi_datatype_get_extent(send_type);

  if (!is_3dmesh(num_procs, &X, &Y, &Z))
    THROWF(arg_error, 0, "allgather_3dmesh algorithm can't be used with this number of processes!");

  // size the request array for the largest of the three phases
  num_reqs = X;
  if (Y > X)
    num_reqs = Y;
  if (Z > Y)
    num_reqs = Z;

  two_dsize = X * Y;
  my_z = rank / two_dsize;

  my_row_base = (rank / X) * X;
  my_col_base = (rank % Y) + (my_z * two_dsize);
  my_z_base = my_z * two_dsize;

  block_size = extent * send_count;
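
  // rank r's block occupies bytes [r * block_size, (r + 1) * block_size) of
  // recv_buff, so every offset below is a multiple of block_size
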
  req = (MPI_Request *) xbt_malloc(num_reqs * sizeof(MPI_Request));
  req_ptr = req;

  // do local allgather/local copy
  recv_offset = rank * block_size;
  smpi_datatype_copy(send_buff, send_count, send_type,
                     (char *) recv_buff + recv_offset, recv_count, recv_type);
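
  // Phase 1: row-wise exchange in the x dimension: post receives for the
  // single blocks of the Y - 1 row neighbours, then send our own block to
  // each of them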
  for (i = 0; i < Y; i++) {
    src = i + my_row_base;
    if (src == rank)
      continue;
    recv_offset = src * block_size;
    *(req_ptr++) = smpi_mpi_irecv((char *) recv_buff + recv_offset, send_count,
                                  recv_type, src, tag, comm);
  }

  for (i = 0; i < Y; i++) {
    dst = i + my_row_base;
    if (dst == rank)
      continue;
    smpi_mpi_send(send_buff, send_count, send_type, dst, tag, comm);
  }

  smpi_mpi_waitall(Y - 1, req, MPI_STATUSES_IGNORE);
  req_ptr = req;

  // Phase 2: column-wise exchange of whole rows; it does not matter here
  // whether we index with i * X or i * Y, since X == Y
  for (i = 0; i < X; i++) {
    src = (i * Y + my_col_base);
    if (src == rank)
      continue;
    src_row_base = (src / X) * X;
    recv_offset = src_row_base * block_size;
    *(req_ptr++) = smpi_mpi_irecv((char *) recv_buff + recv_offset,
                                  recv_count * Y, recv_type, src, tag, comm);
  }

  send_offset = my_row_base * block_size;

  for (i = 0; i < X; i++) {
    dst = (i * Y + my_col_base);
    if (dst == rank)
      continue;
    smpi_mpi_send((char *) recv_buff + send_offset, send_count * Y, send_type,
                  dst, tag, comm);
  }

  smpi_mpi_waitall(X - 1, req, MPI_STATUSES_IGNORE);
  req_ptr = req;
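
  // Phase 3: exchange complete x-y planes along the z dimension; afterwards
  // every rank holds all num_procs blocks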
  for (i = 1; i < Z; i++) {
    src = (rank + i * two_dsize) % num_procs;
    src_z_base = (src / two_dsize) * two_dsize;
    recv_offset = src_z_base * block_size;
    *(req_ptr++) = smpi_mpi_irecv((char *) recv_buff + recv_offset,
                                  recv_count * two_dsize, recv_type, src, tag,
                                  comm);
  }

  for (i = 1; i < Z; i++) {
    dst = (rank + i * two_dsize) % num_procs;
    send_offset = my_z_base * block_size;
    smpi_mpi_send((char *) recv_buff + send_offset, send_count * two_dsize,
                  send_type, dst, tag, comm);
  }

  smpi_mpi_waitall(Z - 1, req, MPI_STATUSES_IGNORE);

  free(req);

  return MPI_SUCCESS;
}
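
/* Usage sketch (an illustration, not part of the original file): SMPI can
 * substitute this routine for MPI_Allgather when the communicator size
 * factors as an X x Y x Z mesh with X == Y, e.g.
 *
 *   smpi_coll_tuned_allgather_3dmesh(sbuf, count, MPI_INT,
 *                                    rbuf, count, MPI_INT, comm);
 *
 * For process counts that is_3dmesh() rejects, the THROWF above raises
 * arg_error and a different allgather implementation must be selected. */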