src/smpi/colls/allgatherv-ompi-bruck.c
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2009      University of Houston. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "colls_private.h"
#define  MCA_COLL_BASE_TAG_ALLGATHERV 444
/*
 * ompi_coll_tuned_allgatherv_intra_bruck
 *
 * Function:     allgatherv using O(log(N)) steps.
 * Accepts:      Same arguments as MPI_Allgatherv
 * Returns:      MPI_SUCCESS or error code
 *
 * Description:  Variation of the all-to-all algorithm described by Bruck et al. in
 *               "Efficient Algorithms for All-to-all Communications
 *                in Multiport Message-Passing Systems"
 * Note:         Unlike the allgather implementation, we rely on
 *               indexed datatypes to select buffers appropriately.
 *               The only additional memory requirement is for the creation of
 *               temporary datatypes.
 * Example on 7 nodes (memory layout need not be in order)
 *   Initial set up:
 *    #     0      1      2      3      4      5      6
 *         [0]    [ ]    [ ]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [1]    [ ]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [2]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [3]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [4]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [ ]    [5]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [ ]    [ ]    [6]
 *   Step 0: send message to (rank - 2^0), receive message from (rank + 2^0)
 *    #     0      1      2      3      4      5      6
 *         [0]    [ ]    [ ]    [ ]    [ ]    [ ]    [0]
 *         [1]    [1]    [ ]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [2]    [2]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [3]    [3]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [4]    [4]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [5]    [5]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [ ]    [6]    [6]
 *   Step 1: send message to (rank - 2^1), receive message from (rank + 2^1).
 *           message contains all blocks from (rank) .. (rank + 2^1) with
 *           wrap around.
 *    #     0      1      2      3      4      5      6
 *         [0]    [ ]    [ ]    [ ]    [0]    [0]    [0]
 *         [1]    [1]    [ ]    [ ]    [ ]    [1]    [1]
 *         [2]    [2]    [2]    [ ]    [ ]    [ ]    [2]
 *         [3]    [3]    [3]    [3]    [ ]    [ ]    [ ]
 *         [ ]    [4]    [4]    [4]    [4]    [ ]    [ ]
 *         [ ]    [ ]    [5]    [5]    [5]    [5]    [ ]
 *         [ ]    [ ]    [ ]    [6]    [6]    [6]    [6]
 *   Step 2: send message to (rank - 2^2), receive message from (rank + 2^2).
 *           message size is "all remaining blocks"
 *    #     0      1      2      3      4      5      6
 *         [0]    [0]    [0]    [0]    [0]    [0]    [0]
 *         [1]    [1]    [1]    [1]    [1]    [1]    [1]
 *         [2]    [2]    [2]    [2]    [2]    [2]    [2]
 *         [3]    [3]    [3]    [3]    [3]    [3]    [3]
 *         [4]    [4]    [4]    [4]    [4]    [4]    [4]
 *         [5]    [5]    [5]    [5]    [5]    [5]    [5]
 *         [6]    [6]    [6]    [6]    [6]    [6]    [6]
 */
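/*
 * Worked example (derived from the loop below) for the 7-node layout above,
 * seen from rank 0:
 *   step 0: distance = 1, blockcount = 1, send to 6, receive from 1
 *   step 1: distance = 2, blockcount = 2, send to 5, receive from 2
 *   step 2: distance = 4, blockcount = 3 (= size - distance), send to 3,
 *           receive from 4
 * so ceil(log2(7)) = 3 exchanges are enough to gather all 7 blocks.
 */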
int smpi_coll_tuned_allgatherv_ompi_bruck(void *sbuf, int scount,
                                          MPI_Datatype sdtype,
                                          void *rbuf, int *rcounts,
                                          int *rdispls,
                                          MPI_Datatype rdtype,
                                          MPI_Comm comm)
{
   int rank, size;
   int sendto, recvfrom, distance, blockcount, i;
   int *new_rcounts = NULL, *new_rdispls = NULL;
   int *new_scounts = NULL, *new_sdispls = NULL;
   ptrdiff_t slb, rlb, sext, rext;
   char *tmpsend = NULL, *tmprecv = NULL;
   MPI_Datatype new_rdtype, new_sdtype;

   size = smpi_comm_size(comm);
   rank = smpi_comm_rank(comm);

   XBT_DEBUG("coll:tuned:allgatherv_ompi_bruck rank %d", rank);

   smpi_datatype_extent (sdtype, &slb, &sext);

   smpi_datatype_extent (rdtype, &rlb, &rext);

   /* Initialization step:
      - if send buffer is not MPI_IN_PLACE, copy send buffer to block rank of
        the receive buffer.
   */
   tmprecv = (char*) rbuf + rdispls[rank] * rext;
   if (MPI_IN_PLACE != sbuf) {
      tmpsend = (char*) sbuf;
      smpi_datatype_copy(tmpsend, scount, sdtype,
                         tmprecv, rcounts[rank], rdtype);
   }

   /* Communication step:
      At every step i, rank r:
      - doubles the distance
      - sends a message of blockcount blocks (rbuf[r] .. rbuf[r + 2^i])
        to rank (r - distance)
      - receives a message of blockcount blocks
        (rbuf[r + distance] ... rbuf[(r + distance) + 2^i]) from
        rank (r + distance)
      - blockcount doubles until the last step, when only the remaining data
        is exchanged.
   */
   blockcount = 1;
   tmpsend = (char*) rbuf;

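   /* One allocation of 4*size ints is carved into the four count/displacement
      arrays below; the single free(new_rcounts) at the end releases all of
      them at once. */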
   new_rcounts = (int*) calloc(4*size, sizeof(int));
   new_rdispls = new_rcounts + size;
   new_scounts = new_rdispls + size;
   new_sdispls = new_scounts + size;

   for (distance = 1; distance < size; distance<<=1) {

      recvfrom = (rank + distance) % size;
      sendto = (rank - distance + size) % size;

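      /* Each step exchanges 'distance' blocks, except the final step where
         only the size - distance blocks not yet received remain. */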
      if (distance <= (size >> 1)) {
         blockcount = distance;
      } else {
         blockcount = size - distance;
      }

      /* create send and receive datatypes */
      for (i = 0; i < blockcount; i++) {
          const int tmp_srank = (rank + i) % size;
          const int tmp_rrank = (recvfrom + i) % size;
          new_scounts[i] = rcounts[tmp_srank];
          new_sdispls[i] = rdispls[tmp_srank];
          new_rcounts[i] = rcounts[tmp_rrank];
          new_rdispls[i] = rdispls[tmp_rrank];
      }
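      /* Build indexed datatypes over rdtype from the caller's counts and
         displacements, so the non-contiguous blocks are sent from and
         received into rbuf directly, without an intermediate pack buffer. */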
      smpi_datatype_indexed(blockcount, new_scounts, new_sdispls,
                            rdtype, &new_sdtype);
      smpi_datatype_indexed(blockcount, new_rcounts, new_rdispls,
                            rdtype, &new_rdtype);

      smpi_datatype_commit(&new_sdtype);
      smpi_datatype_commit(&new_rdtype);

      /* Sendreceive: both sides transfer one element of the derived type,
         rooted at rbuf, since the block displacements are encoded in the
         datatypes themselves. */
      smpi_mpi_sendrecv(rbuf, 1, new_sdtype, sendto,
                        MCA_COLL_BASE_TAG_ALLGATHERV,
                        rbuf, 1, new_rdtype, recvfrom,
                        MCA_COLL_BASE_TAG_ALLGATHERV,
                        comm, MPI_STATUS_IGNORE);
      smpi_datatype_free(&new_sdtype);
      smpi_datatype_free(&new_rdtype);

   }

   free(new_rcounts);

   return MPI_SUCCESS;

}
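/*
 * Usage sketch (illustrative only, not part of this file's build): a plain MPI
 * program exercising MPI_Allgatherv with per-rank block sizes, the call that
 * an implementation such as the one above serves.  The block sizes and values
 * below are arbitrary choices for the example.
 *
 *   #include <mpi.h>
 *   #include <stdlib.h>
 *
 *   int main(int argc, char **argv)
 *   {
 *     int rank, size, i;
 *     MPI_Init(&argc, &argv);
 *     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *     MPI_Comm_size(MPI_COMM_WORLD, &size);
 *
 *     int scount = rank + 1;                      // rank r contributes r+1 ints
 *     int *sbuf  = malloc(scount * sizeof(int));
 *     for (i = 0; i < scount; i++)
 *       sbuf[i] = rank;
 *
 *     int *rcounts = malloc(size * sizeof(int));  // how much each rank sends
 *     int *rdispls = malloc(size * sizeof(int));  // where each block lands in rbuf
 *     int total = 0;
 *     for (i = 0; i < size; i++) {
 *       rcounts[i] = i + 1;
 *       rdispls[i] = total;
 *       total += rcounts[i];
 *     }
 *     int *rbuf = malloc(total * sizeof(int));
 *
 *     MPI_Allgatherv(sbuf, scount, MPI_INT, rbuf, rcounts, rdispls, MPI_INT,
 *                    MPI_COMM_WORLD);
 *
 *     free(sbuf); free(rcounts); free(rdispls); free(rbuf);
 *     MPI_Finalize();
 *     return 0;
 *   }
 */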