1 /* Copyright (c) 2013-2017. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "../colls_private.h"
// Hierarchical "loosely synchronized" logical-ring allgather.
// Visible structure: each rank copies its own segment into rbuf, then for
// inter_comm_size rounds it (a) forwards one segment around an inter-node
// ring via nonblocking send/recv and (b) exchanges segments with the other
// ranks of its own node, finally draining all pending requests with waitall.
//
// NOTE(review): this extract is missing several original source lines —
// the tail of the parameter list (presumably `MPI_Comm comm)`), the
// declarations of rank/num_core/status/src/dst/dst_seg/src_seg, some else
// branches, closing braces and the return statement. Comments below only
// describe what the visible lines establish; confirm against the full file.
13 int Coll_allgather_loosely_lr::allgather(void *sbuf, int scount,
14 MPI_Datatype stype, void *rbuf,
15 int rcount, MPI_Datatype rtype,
// Tag shared by every message of this collective instance.
19 int tag = COLL_TAG_ALLGATHER;
20 int i, j, send_offset, recv_offset;
// Position of this rank inside its node (intra) and among nodes (inter).
21 int intra_rank, inter_rank, inter_comm_size, intra_comm_size;
// Ring neighbors for the inter-node exchange.
22 int inter_dst, inter_src;
// comm_size is used below but its declaration is not in this extract.
24 comm_size = comm->size();
// Lazily build the leaders communicator if the hierarchy was not set up yet
// (the body of this if is not visible in this extract).
26 if(comm->get_leaders_comm()==MPI_COMM_NULL){
// Uniform hierarchy: every node hosts the same number of processes, so the
// intra-communicator size can serve as NUM_CORE.
30 if (comm->is_uniform()){
31 num_core = comm->get_intra_comm()->size();
// The algorithm requires comm_size to be a multiple of the per-node core
// count; otherwise rank%num_core / rank/num_core would not form a grid.
34 if(comm_size%num_core)
35 THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
// Extents are used to convert element counts into byte offsets in rbuf/sbuf.
38 MPI_Aint rextent, sextent;
39 rextent = rtype->get_extent();
40 sextent = stype->get_extent();
41 MPI_Request inter_rrequest;
// Fixed-capacity request pools — assumes at most 128 outstanding requests
// per category (i.e. bounded intra_comm_size / ring rounds); not checked here.
42 MPI_Request rrequest_array[128];
43 MPI_Request srequest_array[128];
44 MPI_Request inter_srequest_array[128];
47 int rrequest_count = 0;
48 int srequest_count = 0;
49 int inter_srequest_count = 0;
// Map the flat rank onto a (node, core) grid: ranks are numbered
// node-major, num_core consecutive ranks per node.
53 intra_rank = rank % num_core;
54 inter_rank = rank / num_core;
// Ceiling division; with the multiplicity check above this is comm_size/num_core.
55 inter_comm_size = (comm_size + num_core - 1) / num_core;
56 intra_comm_size = num_core;
60 //copy corresponding message from sbuf to rbuf
// Self send/recv places this rank's own contribution at its slot in rbuf.
61 recv_offset = rank * rextent * rcount;
62 Request::sendrecv(sbuf, scount, stype, rank, tag,
63 (char *)rbuf + recv_offset, rcount, rtype, rank, tag, comm, &status);
66 int inter_send_offset, inter_recv_offset;
70 inter_srequest_count = 0;
// One ring round per node; each round moves one more segment around the
// inter-node ring while the intra-node exchange runs alongside it.
72 for (i = 0; i < inter_comm_size; i++) {
74 // inter_communication
// Ring neighbors: the rank with the same core index on the next/previous
// node (stride of one whole node, wrapping around comm_size).
76 inter_dst = (rank + intra_comm_size) % comm_size;
77 inter_src = (rank - intra_comm_size + comm_size) % comm_size;
// Segment indices for round i (the assignments' left-hand sides are on
// lines missing from this extract — presumably dst_seg/src_seg).
80 ((inter_rank - 1 - i +
81 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
84 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
86 inter_send_offset = dst_seg * sextent * scount;
87 inter_recv_offset = src_seg * rextent * rcount;
// Intra-node sweep: each iteration pairs with core j of the same node.
89 for (j = 0; j < intra_comm_size; j++) {
91 // inter communication
// Only the iteration matching our own core index drives the ring step,
// and the last round (i == inter_comm_size-1) skips it: by then every
// node already holds all segments, so no further forwarding is needed.
92 if (intra_rank == j) {
93 if (i != inter_comm_size - 1) {
95 inter_rrequest = Request::irecv((char *)rbuf + inter_recv_offset, rcount, rtype,
96 inter_src, tag, comm);
97 inter_srequest_array[inter_srequest_count++] = Request::isend((char *)rbuf + inter_send_offset, scount, stype,
98 inter_dst, tag, comm);
101 //intra_communication
// Peer is core j of this rank's own node (same value for send and recv).
102 src = inter_rank * intra_comm_size + j;
103 dst = inter_rank * intra_comm_size + j;
// Segment indices for the intra exchange (left-hand sides and the leading
// factors are on lines missing from this extract).
107 inter_comm_size) % inter_comm_size) * intra_comm_size + j;
110 inter_comm_size) % inter_comm_size) * intra_comm_size + intra_rank;
112 send_offset = dst_seg * sextent * scount;
113 recv_offset = src_seg * rextent * rcount;
// No self-exchange: ranks only trade segments with the other node cores.
116 if (j != intra_rank) {
118 rrequest_array[rrequest_count++] = Request::irecv((char *)rbuf + recv_offset, rcount, rtype, src, tag, comm);
119 srequest_array[srequest_count++] = Request::isend((char *)rbuf + send_offset, scount, stype, dst, tag, comm);
125 // wait for inter communication to finish for these rounds (# of round equals num_core)
// The ring receive of this round must complete before the next round reads
// and forwards that segment; the send side is drained at the end instead.
126 if (i != inter_comm_size - 1) {
127 Request::wait(&inter_rrequest, &status);
// Drain every outstanding intra request and the accumulated ring sends
// before returning (the return statement is past this extract).
132 Request::waitall(rrequest_count, rrequest_array, MPI_STATUSES_IGNORE);
133 Request::waitall(srequest_count, srequest_array, MPI_STATUSES_IGNORE);
134 Request::waitall(inter_srequest_count, inter_srequest_array, MPI_STATUSES_IGNORE);