1 /* Copyright (c) 2013-2022. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "../colls_private.hpp"
9 namespace simgrid::smpi {

// SMP-aware allgather ("NTS" variant). Visible structure: (1) every rank
// copies its own block into rbuf and performs an intra-node exchange so each
// SMP node assembles its node segment; (2) the leader (intra_rank == 0) of
// each node runs a logical-ring allgather with the other node leaders,
// forwarding each received node-sized segment down its own node through a
// pipelined linear chain (leader -> rank+1 -> ... -> last intra rank).
//
// NOTE(review): this extract is missing several interior source lines —
// e.g. the end of the signature (`MPI_Comm comm)`), the declarations/
// assignments of `num_core` and `rank`, the `dst =` / `src =` /
// `recv_offset =` / `send_offset =` left-hand sides of some expressions,
// closing braces, and the final return. Comments below describe only what is
// visible; confirm against the full file.
11 int allgather__SMP_NTS(const void *sbuf, int scount,
12 MPI_Datatype stype, void *rbuf,
13 int rcount, MPI_Datatype rtype,
16 int src, dst, comm_size, rank;
17 comm_size = comm->size();
// Extents are used to compute byte offsets into rbuf/sbuf for each block.
19 MPI_Aint rextent, sextent;
20 rextent = rtype->get_extent();
21 sextent = stype->get_extent();
22 int tag = COLL_TAG_ALLGATHER;
24 int i, send_offset, recv_offset;
25 int intra_rank, inter_rank;
// Lazily initialize the SMP topology info (leaders communicator) if the
// communicator has not been set up for SMP-aware collectives yet.
27 if(comm->get_leaders_comm()==MPI_COMM_NULL){
// With a uniform topology every SMP node hosts the same number of processes;
// that count becomes num_core (its declaration is not visible in this chunk).
31 if (comm->is_uniform()){
32 num_core = comm->get_intra_comm()->size();
// Position of this rank inside its node, and the index of its node.
36 intra_rank = rank % num_core;
37 inter_rank = rank / num_core;
38 int inter_comm_size = (comm_size + num_core - 1) / num_core;
39 int num_core_in_current_smp = num_core;
// Hard requirement of this algorithm: comm_size must be a multiple of
// num_core. NOTE(review): given this throw, the "last SMP node has fewer
// processes" adjustment further below looks unreachable — confirm intent.
41 if(comm_size%num_core)
42 throw std::invalid_argument(xbt::string_printf(
43 "allgather SMP NTS algorithm can't be used with non multiple of NUM_CORE=%d number of processes!", num_core));
45 /* for too small number of processes, use default implementation */
46 if (comm_size <= num_core) {
47 XBT_INFO("MPI_allgather_SMP_NTS: comm_size <= num_core, use default MPI_allgather.");
48 allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
52 // the last SMP node may have fewer number of running processes than all others
53 if (inter_rank == (inter_comm_size - 1)) {
54 num_core_in_current_smp = comm_size - (inter_rank * num_core);
56 //copy corresponding message from sbuf to rbuf
// Self-sendrecv: places this rank's own contribution at its slot in rbuf.
57 recv_offset = rank * rextent * rcount;
58 Request::sendrecv(sbuf, scount, stype, rank, tag,
59 ((char *) rbuf + recv_offset), rcount, rtype, rank, tag, comm,
62 //gather to root of each SMP
// Intra-node exchange: each iteration pairs this rank with a rotated
// neighbor inside the same node (dst forward, src backward, modulo the
// node size), so after the loop every rank holds the node's full segment.
// (The `dst =` / `src =` left-hand sides are among the missing lines.)
64 for (i = 1; i < num_core_in_current_smp; i++) {
67 (inter_rank * num_core) + (intra_rank + i) % (num_core_in_current_smp);
69 (inter_rank * num_core) + (intra_rank - i +
70 num_core_in_current_smp) %
71 (num_core_in_current_smp);
72 recv_offset = src * rextent * rcount;
74 Request::sendrecv(sbuf, scount, stype, dst, tag,
75 ((char *) rbuf + recv_offset), rcount, rtype, src, tag, comm,
80 // INTER-SMP-ALLGATHER
81 // Every root of each SMP node post INTER-Sendrecv, then do INTRA-Bcast for each receiving message
82 // Use logical ring algorithm
// Node-leader branch: run the ring among leaders and feed segments into the
// intra-node broadcast chain.
85 if (intra_rank == 0) {
// One recv and one send request per remote node segment.
// NOTE(review): raw new[]/delete[] — a std::vector would be safer if this
// block is ever rewritten with the full file in view.
86 auto* rrequest_array = new MPI_Request[inter_comm_size - 1];
87 auto* srequest_array = new MPI_Request[inter_comm_size - 1];
// Ring neighbors: receive from the previous node's leader, send to the next.
89 src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
90 dst = ((inter_rank + 1) % inter_comm_size) * num_core;
92 // post all inter Irecv
// Segment i is the node segment of the leader i+1 hops behind on the ring;
// a distinct tag (tag + i) keeps the pipelined messages apart.
// (The `recv_offset =` left-hand side is among the missing lines.)
93 for (i = 0; i < inter_comm_size - 1; i++) {
95 ((inter_rank - i - 1 +
96 inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
97 rrequest_array[i] = Request::irecv((char *)rbuf + recv_offset, rcount * num_core,
98 rtype, src, tag + i, comm);
101 // send first message
// Start the pipeline by sending this node's own segment to the next leader.
// (The `send_offset =` left-hand side is among the missing lines.)
104 inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
105 srequest_array[0] = Request::isend((char *)rbuf + send_offset, scount * num_core,
106 stype, dst, tag, comm);
108 // loop : recv-inter , send-inter, send-intra (linear-bcast)
// Steady state: wait for segment i from the previous leader, forward it to
// the next leader (tag + i + 1), and push it down the intra-node chain.
109 for (i = 0; i < inter_comm_size - 2; i++) {
111 ((inter_rank - i - 1 +
112 inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
113 Request::wait(&rrequest_array[i], MPI_STATUS_IGNORE);
114 srequest_array[i + 1] = Request::isend((char *)rbuf + recv_offset, scount * num_core,
115 stype, dst, tag + i + 1, comm);
// Intra-node forwarding is only needed when the node has more than one rank.
116 if (num_core_in_current_smp > 1) {
117 Request::send((char *)rbuf + recv_offset, scount * num_core,
118 stype, (rank + 1), tag + i + 1, comm);
122 // recv last message and send_intra
// Final segment: no further inter-node forwarding, only the intra-node push.
124 ((inter_rank - i - 1 +
125 inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
126 //recv_offset = ((inter_rank + 1) % inter_comm_size) * num_core * sextent * scount;
127 //i=inter_comm_size-2;
128 Request::wait(&rrequest_array[i], MPI_STATUS_IGNORE);
129 if (num_core_in_current_smp > 1) {
130 Request::send((char *)rbuf + recv_offset, scount * num_core,
131 stype, (rank + 1), tag + i + 1, comm);
// Drain all outstanding inter-node sends before releasing the arrays.
134 Request::waitall(inter_comm_size - 1, srequest_array, MPI_STATUSES_IGNORE);
135 delete[] rrequest_array;
136 delete[] srequest_array;
138 // last rank of each SMP
// Tail of the intra-node chain: only receives each segment from rank - 1.
139 else if (intra_rank == (num_core_in_current_smp - 1)) {
140 for (i = 0; i < inter_comm_size - 1; i++) {
142 ((inter_rank - i - 1 +
143 inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
144 Request::recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
145 rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
148 // intermediate rank of each SMP
// Middle of the chain: receive each segment from rank - 1, forward to rank + 1.
150 for (i = 0; i < inter_comm_size - 1; i++) {
152 ((inter_rank - i - 1 +
153 inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
154 Request::recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
155 rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
156 Request::send((char *) rbuf + recv_offset, (scount * num_core), stype,
157 (rank + 1), tag + i + 1, comm);
164 } // namespace simgrid::smpi