#include "colls_private.h"
/*****************************************************************************

Copyright (c) 2006, Ahmad Faraj & Xin Yuan,
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the name of the Florida State University nor the names of its
    contributors may be used to endorse or promote products derived from this
    software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *************************************************************************
  * Any results obtained from executing this software require the        *
  * acknowledgment and citation of the software and its owners.          *
  * The full citation is given below:                                    *
  *                                                                      *
  * A. Faraj and X. Yuan. "Automatic Generation and Tuning of MPI        *
  * Collective Communication Routines." The 19th ACM International       *
  * Conference on Supercomputing (ICS), Cambridge, Massachusetts,        *
  * June 20-22, 2005.                                                    *
  *************************************************************************

*****************************************************************************/
/*****************************************************************************
 * Function: allgather_bruck
 * Return: int
 * Inputs:
 *   send_buff: send input buffer
 *   send_count: number of elements to send
 *   send_type: data type of elements being sent
 *   recv_buff: receive output buffer
 *   recv_count: number of elements to receive
 *   recv_type: data type of elements being received
 *   comm: communicator over which the allgather is performed
 * Descrp: Function realizes the allgather operation using the Bruck
 *         algorithm.
 * Comment: Original Bruck algorithm from MPICH is slightly modified by
 *          Ahmad Faraj.
 ****************************************************************************/
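/*
 * Illustrative trace of the routine below (assuming num_procs = 6 and
 * recv_count = 1, and writing [k] for the block contributed by rank k;
 * all block indices are taken modulo 6):
 *
 *   after local copy    rank r holds [r] in tmp_buff
 *   step pof2 = 1       recv 1 block  from (r+1) % 6  ->  [r, r+1]
 *   step pof2 = 2       recv 2 blocks from (r+2) % 6  ->  [r, r+1, r+2, r+3]
 *   remainder = 2       recv 2 blocks from (r+4) % 6  ->  [r, ..., r+5]
 *
 * The two final self send-receives then rotate tmp_buff by r blocks so that
 * recv_buff ends up in canonical order [0, 1, 2, 3, 4, 5] on every rank.
 */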
int smpi_coll_tuned_allgather_bruck(void *send_buff, int send_count,
                                    MPI_Datatype send_type, void *recv_buff,
                                    int recv_count, MPI_Datatype recv_type,
                                    MPI_Comm comm)
{
  // MPI variables
  MPI_Status status;
  MPI_Aint recv_extent;

  // local int variables
  int src, dst, rank, num_procs, count, remainder;
  int tag = 1;                  // internal message tag (assumed fixed value)
  int pof2 = 1;

  // local string variables
  char *tmp_buff;
  char *send_ptr = (char *) send_buff;
  char *recv_ptr = (char *) recv_buff;

  // get size of the communicator, followed by rank
  num_procs = smpi_comm_size(comm);
  rank = smpi_comm_rank(comm);

  // get size of single element's type for recv buffer
  recv_extent = smpi_datatype_get_extent(recv_type);
  count = recv_count;

  tmp_buff = (char *) xbt_malloc(num_procs * recv_count * recv_extent);
  if (!tmp_buff) {
    printf("allgather-bruck:54: cannot allocate memory\n");
    return MPI_ERR_OTHER;
  }
  // perform a local copy: the own block goes to the start of tmp_buff
  MPIR_Localcopy(send_ptr, send_count, send_type, tmp_buff, recv_count,
                 recv_type);

  // doubling phase: each step exchanges all blocks held so far with a
  // partner pof2 ranks away
  while (pof2 <= (num_procs / 2)) {
    src = (rank + pof2) % num_procs;
    dst = (rank - pof2 + num_procs) % num_procs;

    MPIC_Sendrecv(tmp_buff, count, recv_type, dst, tag,
                  tmp_buff + count * recv_extent, count, recv_type,
                  src, tag, comm, &status);
    count += count;
    pof2 *= 2;
  }

  // extra step for non-power-of-two process counts
  remainder = num_procs - pof2;
  if (remainder) {
    src = (rank + pof2) % num_procs;
    dst = (rank - pof2 + num_procs) % num_procs;

    MPIC_Sendrecv(tmp_buff, remainder * recv_count, recv_type, dst, tag,
                  tmp_buff + count * recv_extent, remainder * recv_count,
                  recv_type, src, tag, comm, &status);
  }

  // rotate the result into recv_buff so that block i lands at offset
  // i * recv_count, as the allgather semantics require
  MPIC_Sendrecv(tmp_buff, (num_procs - rank) * recv_count, recv_type, rank,
                tag, recv_ptr + rank * recv_count * recv_extent,
                (num_procs - rank) * recv_count, recv_type, rank, tag, comm,
                &status);

  MPIC_Sendrecv(tmp_buff + (num_procs - rank) * recv_count * recv_extent,
                rank * recv_count, recv_type, rank, tag, recv_ptr,
                rank * recv_count, recv_type, rank, tag, comm, &status);

  free(tmp_buff);
  return MPI_SUCCESS;
}
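/*
 * Minimal usage sketch (illustrative only; `data`, `gathered` and `comm` are
 * assumed names): the routine follows the MPI_Allgather calling convention.
 *
 *   int data = rank;
 *   int *gathered = xbt_new(int, num_procs);
 *   smpi_coll_tuned_allgather_bruck(&data, 1, MPI_INT,
 *                                   gathered, 1, MPI_INT, comm);
 *   // afterwards gathered[i] == i on every rank
 *   xbt_free(gathered);
 */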
/*#include "ompi_bindings.h"

int ompi_coll_tuned_alltoall_intra_pairwise(void *sbuf, int scount,
                                            struct ompi_datatype_t *sdtype,
                                            void* rbuf, int rcount,
                                            struct ompi_datatype_t *rdtype,
                                            struct ompi_communicator_t *comm,
                                            mca_coll_base_module_t *module)
{
    int line = -1, err = 0;
    int rank, size, step;
    int sendto, recvfrom;
    void *tmpsend, *tmprecv;
    ptrdiff_t lb, sext, rext;

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "coll:tuned:alltoall_intra_pairwise rank %d", rank));

    err = ompi_datatype_get_extent (sdtype, &lb, &sext);
    if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
    err = ompi_datatype_get_extent (rdtype, &lb, &rext);
    if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
    // Perform pairwise exchange - starting from 1 so the local copy is last
    for (step = 1; step < size + 1; step++) {

        // Determine sender and receiver for this step.
        sendto   = (rank + step) % size;
        recvfrom = (rank + size - step) % size;

        // Determine sending and receiving locations
        tmpsend = (char*)sbuf + sendto * sext * scount;
        tmprecv = (char*)rbuf + recvfrom * rext * rcount;

        // send and receive
        err = ompi_coll_tuned_sendrecv( tmpsend, scount, sdtype, sendto,
                                        MCA_COLL_BASE_TAG_ALLTOALL,
                                        tmprecv, rcount, rdtype, recvfrom,
                                        MCA_COLL_BASE_TAG_ALLTOALL,
                                        comm, MPI_STATUS_IGNORE, rank);
        if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl; }
    }

    return MPI_SUCCESS;

 err_hndl:
    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line,