src/smpi/colls/allgatherv/allgatherv-ompi-bruck.cpp

/* Copyright (c) 2013-2017. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2009      University of Houston. All rights reserved.
 *
 * Additional copyrights may follow
 */

#include "../colls_private.hpp"
/*
 * ompi_coll_tuned_allgatherv_intra_bruck
 *
 * Function:     allgatherv using O(log(N)) steps.
 * Accepts:      Same arguments as MPI_Allgatherv
 * Returns:      MPI_SUCCESS or error code
 *
 * Description:  Variation of the all-to-all algorithm described by Bruck et al. in
 *               "Efficient Algorithms for All-to-all Communications
 *                in Multiport Message-Passing Systems"
 * Note:         Unlike the allgather implementation, we rely on an
 *               indexed datatype to select the buffers appropriately.
 *               The only additional memory requirement is for the creation of
 *               temporary datatypes.
 * Example on 7 nodes (memory layout need not be in-order)
 *   Initial setup:
 *    #     0      1      2      3      4      5      6
 *         [0]    [ ]    [ ]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [1]    [ ]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [2]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [3]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [4]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [ ]    [5]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [ ]    [ ]    [6]
 *   Step 0: send message to (rank - 2^0), receive message from (rank + 2^0)
 *    #     0      1      2      3      4      5      6
 *         [0]    [ ]    [ ]    [ ]    [ ]    [ ]    [0]
 *         [1]    [1]    [ ]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [2]    [2]    [ ]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [3]    [3]    [ ]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [4]    [4]    [ ]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [5]    [5]    [ ]
 *         [ ]    [ ]    [ ]    [ ]    [ ]    [6]    [6]
 *   Step 1: send message to (rank - 2^1), receive message from (rank + 2^1).
 *           message contains the 2^1 blocks (rank) .. (rank + 2^1 - 1), so
 *           after this step each rank holds blocks (rank) .. (rank + 2^2 - 1)
 *           with wrap around.
 *    #     0      1      2      3      4      5      6
 *         [0]    [ ]    [ ]    [ ]    [0]    [0]    [0]
 *         [1]    [1]    [ ]    [ ]    [ ]    [1]    [1]
 *         [2]    [2]    [2]    [ ]    [ ]    [ ]    [2]
 *         [3]    [3]    [3]    [3]    [ ]    [ ]    [ ]
 *         [ ]    [4]    [4]    [4]    [4]    [ ]    [ ]
 *         [ ]    [ ]    [5]    [5]    [5]    [5]    [ ]
 *         [ ]    [ ]    [ ]    [6]    [6]    [6]    [6]
 *   Step 2: send message to (rank - 2^2), receive message from (rank + 2^2).
 *           message size is "all remaining blocks"
 *    #     0      1      2      3      4      5      6
 *         [0]    [0]    [0]    [0]    [0]    [0]    [0]
 *         [1]    [1]    [1]    [1]    [1]    [1]    [1]
 *         [2]    [2]    [2]    [2]    [2]    [2]    [2]
 *         [3]    [3]    [3]    [3]    [3]    [3]    [3]
 *         [4]    [4]    [4]    [4]    [4]    [4]    [4]
 *         [5]    [5]    [5]    [5]    [5]    [5]    [5]
 *         [6]    [6]    [6]    [6]    [6]    [6]    [6]
 */
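
/* Usage sketch (illustrative only, not part of the upstream code): an
 * application reaches this routine through the plain MPI_Allgatherv call,
 * provided SMPI's collective selector is set to the "ompi_bruck" allgatherv
 * algorithm (selector name inferred from the class name below). A minimal
 * caller could look like this:
 *
 *   int me, world;
 *   MPI_Comm_rank(MPI_COMM_WORLD, &me);
 *   MPI_Comm_size(MPI_COMM_WORLD, &world);
 *   int* recvcounts = new int[world];
 *   int* displs     = new int[world];
 *   int total       = 0;
 *   for (int r = 0; r < world; r++) {   // rank r contributes r + 1 ints
 *     recvcounts[r] = r + 1;
 *     displs[r]     = total;
 *     total        += recvcounts[r];
 *   }
 *   int* sendbuf = new int[me + 1];
 *   for (int k = 0; k <= me; k++)
 *     sendbuf[k] = me;
 *   int* recvbuf = new int[total];
 *   MPI_Allgatherv(sendbuf, me + 1, MPI_INT,
 *                  recvbuf, recvcounts, displs, MPI_INT, MPI_COMM_WORLD);
 *   // (cleanup of the buffers omitted for brevity)
 */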

namespace simgrid {
namespace smpi {

int Coll_allgatherv_ompi_bruck::allgatherv(void *sbuf, int scount,
                                           MPI_Datatype sdtype,
                                           void *rbuf, int *rcounts,
                                           int *rdispls,
                                           MPI_Datatype rdtype,
                                           MPI_Comm comm)
{
   int sendto, recvfrom, blockcount, i;
   unsigned int distance;
   ptrdiff_t slb, rlb, sext, rext;
   char *tmpsend = NULL, *tmprecv = NULL;
   MPI_Datatype new_rdtype = MPI_DATATYPE_NULL, new_sdtype = MPI_DATATYPE_NULL;

   unsigned int size = comm->size();
   unsigned int rank = comm->rank();

   XBT_DEBUG("coll:tuned:allgatherv_ompi_bruck rank %u", rank);

   sdtype->extent(&slb, &sext);

   rdtype->extent(&rlb, &rext);

   /* Initialization step:
      - if the send buffer is not MPI_IN_PLACE, copy the send buffer into the
        block of the receive buffer that belongs to this rank.
   */
   tmprecv = (char*) rbuf + rdispls[rank] * rext;
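   /* tmprecv now points at this rank's own block inside rbuf; with MPI_IN_PLACE
      the caller's contribution is already stored there, so no copy is needed. */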
   if (MPI_IN_PLACE != sbuf) {
      tmpsend = (char*) sbuf;
      Datatype::copy(tmpsend, scount, sdtype,
                     tmprecv, rcounts[rank], rdtype);
   }

   /* Communication step:
      At every step i, rank r:
      - doubles the distance
      - sends message with blockcount blocks, (rbuf[rank] .. rbuf[rank + 2^i])
        to rank (r - distance)
      - receives message of blockcount blocks,
        (rbuf[r + distance] ... rbuf[(r+distance) + 2^i]) from
        rank (r + distance)
      - blockcount doubles until the last step, when only the remaining data is
        exchanged.
   */
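   /* For example, with size = 7 the loop below runs for distance = 1, 2, 4
      (ceil(log2(7)) = 3 steps) and blockcount is 1, 2 and finally 7 - 4 = 3,
      which matches the "all remaining blocks" step illustrated above. */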
   int* new_rcounts = new int[4 * size];
   int* new_rdispls = new_rcounts + size;
   int* new_scounts = new_rdispls + size;
   int* new_sdispls = new_scounts + size;
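   /* A single allocation of 4 * size ints is partitioned into the four scratch
      arrays above; only new_rcounts is delete[]d at the end of the function. */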

   for (distance = 1; distance < size; distance <<= 1) {

      recvfrom = (rank + distance) % size;
      sendto = (rank - distance + size) % size;

      if (distance <= (size >> 1)) {
         blockcount = distance;
      } else {
         blockcount = size - distance;
      }

      /* create send and receive datatypes */
      for (i = 0; i < blockcount; i++) {
          const int tmp_srank = (rank + i) % size;
          const int tmp_rrank = (recvfrom + i) % size;
          new_scounts[i] = rcounts[tmp_srank];
          new_sdispls[i] = rdispls[tmp_srank];
          new_rcounts[i] = rcounts[tmp_rrank];
          new_rdispls[i] = rdispls[tmp_rrank];
      }
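      /* The two indexed datatypes describe the blockcount blocks directly at
         their (possibly non-contiguous) displacements inside rbuf, so the
         exchange below needs no packing buffer: both send and receive use rbuf
         as the base address with a count of 1. */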
      Datatype::create_indexed(blockcount, new_scounts, new_sdispls,
                               rdtype, &new_sdtype);
      Datatype::create_indexed(blockcount, new_rcounts, new_rdispls,
                               rdtype, &new_rdtype);

      new_sdtype->commit();
      new_rdtype->commit();

      /* Sendreceive */
      Request::sendrecv(rbuf, 1, new_sdtype, sendto,
                        COLL_TAG_ALLGATHERV,
                        rbuf, 1, new_rdtype, recvfrom,
                        COLL_TAG_ALLGATHERV,
                        comm, MPI_STATUS_IGNORE);
      Datatype::unref(new_sdtype);
      Datatype::unref(new_rdtype);
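      /* The temporary datatypes are released after each exchange; the next
         iteration (next distance) builds a fresh pair. */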

   }

   delete[] new_rcounts;

   return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid