1 /* Copyright (c) 2013-2021. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "../colls_private.hpp"
/* Hierarchical ("loosely coupled") logical-ring allgather.
 *
 * Ranks are grouped into nodes of num_core processes each; in every round the
 * designated process of a node forwards one segment around an inter-node ring
 * while the other processes exchange segments inside the node.  After
 * inter_comm_size rounds every rank holds all comm_size segments in rbuf.
 *
 * NOTE(review): this is an elided excerpt — the declarations of rank,
 * comm_size, num_core, status, src, dst, src_seg and dst_seg, the
 * non-uniform/else branches, and the function tail (return statement, closing
 * brace) fall on source lines not shown here.  Comments below only describe
 * what the visible code establishes.
 */
13 int allgather__loosely_lr(const void *sbuf, int scount,
14 MPI_Datatype stype, void *rbuf,
15 int rcount, MPI_Datatype rtype,
19 int tag = COLL_TAG_ALLGATHER;
20 int i, j, send_offset, recv_offset;
21 int intra_rank, inter_rank, inter_comm_size, intra_comm_size;
22 int inter_dst, inter_src;
24 comm_size = comm->size();
/* Lazily build the leader communicator describing the node topology. */
26 if(comm->get_leaders_comm()==MPI_COMM_NULL){
/* Uniform topology: every node hosts the same number of processes. */
30 if (comm->is_uniform()){
31 num_core = comm->get_intra_comm()->size();
/* The algorithm assumes comm_size is an exact multiple of the node size. */
34 if(comm_size%num_core)
35 throw std::invalid_argument(xbt::string_printf(
36 "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes!",
40 MPI_Aint rextent, sextent;
41 rextent = rtype->get_extent();
42 sextent = stype->get_extent();
43 MPI_Request inter_rrequest;
/* NOTE(review): fixed capacity of 128 outstanding requests per array is
 * unchecked here — presumably bounded by intra_comm_size / inter_comm_size;
 * verify against the callers' maximum node size. */
44 MPI_Request rrequest_array[128];
45 MPI_Request srequest_array[128];
46 MPI_Request inter_srequest_array[128];
49 int rrequest_count = 0;
50 int srequest_count = 0;
51 int inter_srequest_count = 0;
/* Decompose the flat rank into (node, core) coordinates. */
55 intra_rank = rank % num_core;
56 inter_rank = rank / num_core;
57 inter_comm_size = (comm_size + num_core - 1) / num_core;
58 intra_comm_size = num_core;
/* Seed rbuf with this rank's own contribution via a local self sendrecv. */
62 //copy corresponding message from sbuf to rbuf
63 recv_offset = rank * rextent * rcount;
64 Request::sendrecv(sbuf, scount, stype, rank, tag,
65 (char *)rbuf + recv_offset, rcount, rtype, rank, tag, comm, &status);
68 int inter_send_offset, inter_recv_offset;
72 inter_srequest_count = 0;
/* One round per node: circulate segments around the inter-node ring. */
74 for (i = 0; i < inter_comm_size; i++) {
76 // inter_communication
/* Ring neighbours: same core index on the next/previous node. */
78 inter_dst = (rank + intra_comm_size) % comm_size;
79 inter_src = (rank - intra_comm_size + comm_size) % comm_size;
/* Segment indices for this round (dst_seg/src_seg assignments are split
 * across elided lines; the + comm_size-style bias keeps the modulo
 * argument non-negative). */
82 ((inter_rank - 1 - i +
83 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
86 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
88 inter_send_offset = dst_seg * sextent * scount;
89 inter_recv_offset = src_seg * rextent * rcount;
91 for (j = 0; j < intra_comm_size; j++) {
93 // inter communication
/* Only the core whose index matches j drives the inter-node ring this
 * sub-step; the last round needs no further forwarding. */
94 if (intra_rank == j) {
95 if (i != inter_comm_size - 1) {
97 inter_rrequest = Request::irecv((char*)rbuf + inter_recv_offset, rcount, rtype, inter_src, tag, comm);
98 inter_srequest_array[inter_srequest_count++] =
99 Request::isend((char*)rbuf + inter_send_offset, scount, stype, inter_dst, tag, comm);
/* All-to-all style exchange with peer core j on the same node. */
102 //intra_communication
103 src = inter_rank * intra_comm_size + j;
104 dst = inter_rank * intra_comm_size + j;
108 inter_comm_size) % inter_comm_size) * intra_comm_size + j;
111 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
113 send_offset = dst_seg * sextent * scount;
114 recv_offset = src_seg * rextent * rcount;
/* Skip the self pair; post non-blocking intra-node transfers otherwise. */
117 if (j != intra_rank) {
119 rrequest_array[rrequest_count++] = Request::irecv((char *)rbuf + recv_offset, rcount, rtype, src, tag, comm);
120 srequest_array[srequest_count++] = Request::isend((char *)rbuf + send_offset, scount, stype, dst, tag, comm);
/* The received inter-node segment must land before the next round reads
 * it, so wait on the ring receive (sends are drained at the end). */
126 // wait for inter communication to finish for these rounds (# of round equals num_core)
127 if (i != inter_comm_size - 1) {
128 Request::wait(&inter_rrequest, &status);
/* Drain all outstanding intra-node and inter-node send requests. */
133 Request::waitall(rrequest_count, rrequest_array, MPI_STATUSES_IGNORE);
134 Request::waitall(srequest_count, srequest_array, MPI_STATUSES_IGNORE);
135 Request::waitall(inter_srequest_count, inter_srequest_array, MPI_STATUSES_IGNORE);