/* Copyright (c) 2013-2020. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.hpp"

namespace simgrid {
namespace smpi {
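/* Hierarchical (SMP-aware) allgather, in three phases:
 *  1. INTRA-SMP-ALLGATHER: processes of the same SMP node exchange their blocks,
 *     so every local process holds the data of its whole node.
 *  2. INTER-SMP-ALLGATHER: the leader (intra_rank 0) of each node exchanges the
 *     gathered node data with every other leader using non-blocking requests.
 *  3. INTRA-BCAST: each leader forwards the complete buffer to the other
 *     processes of its node with a flat tree.
 */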
int allgather__smp_simple(const void *send_buf, int scount,
                          MPI_Datatype stype, void *recv_buf,
                          int rcount, MPI_Datatype rtype,
                          MPI_Comm comm)
{
  int src, dst, comm_size, rank;
  comm_size = comm->size();

  // Build the SMP sub-communicators (leaders + intra-node) if not done yet
  if (comm->get_leaders_comm() == MPI_COMM_NULL) {
    comm->init_smp();
  }

  // num_core = number of processes per SMP node (1 if the placement is not uniform)
  int num_core = 1;
  if (comm->is_uniform()) {
    num_core = comm->get_intra_comm()->size();
  }

  if (comm_size % num_core)
    throw std::invalid_argument(xbt::string_printf(
        "allgather SMP simple algorithm can't be used when the number of processes is not a multiple of NUM_CORE=%d!",
        num_core));
  rank = comm->rank();
  MPI_Aint rextent, sextent;
  rextent = rtype->get_extent();
  sextent = stype->get_extent();
  int tag = COLL_TAG_ALLGATHER;
  MPI_Status status;
  int i, send_offset, recv_offset;
  int intra_rank, inter_rank;
  intra_rank = rank % num_core;
  inter_rank = rank / num_core;
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
  int num_core_in_current_smp = num_core;

  // the last SMP node may host fewer running processes than the others
  if (inter_rank == (inter_comm_size - 1)) {
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }
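  // Illustrative example: with num_core = 4 and comm_size = 12, rank 9 gets
  // inter_rank = 2 and intra_rank = 1, and there are inter_comm_size = 3 SMP nodes.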
  // INTRA-SMP-ALLGATHER
  // Copy the local block to its final position in recv_buf, ...
  recv_offset = rank * rextent * rcount;
  Request::sendrecv(send_buf, scount, stype, rank, tag,
                    ((char *) recv_buf + recv_offset), rcount, rtype, rank, tag,
                    comm, &status);
  // ... then exchange blocks with every other process of the same SMP node
  for (i = 1; i < num_core_in_current_smp; i++) {
    dst = (inter_rank * num_core) + (intra_rank + i) % num_core_in_current_smp;
    src = (inter_rank * num_core) +
          (intra_rank - i + num_core_in_current_smp) % num_core_in_current_smp;
    recv_offset = src * rextent * rcount;

    Request::sendrecv(send_buf, scount, stype, dst, tag,
                      ((char *) recv_buf + recv_offset), rcount, rtype, src, tag,
                      comm, &status);
  }
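  // At this point every process holds the blocks of all processes of its own SMP
  // node, already stored at their final offsets in recv_buf.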
  // INTER-SMP-ALLGATHER
  // The leader of each SMP node exchanges its node's data with every other leader,
  // then the result is broadcast inside each node (see INTRA-BCAST below).
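  // For step i, the leader of node j receives from the leader of node
  // (j - i) mod inter_comm_size and sends to the leader of node
  // (j + i) mod inter_comm_size. All receives are posted first, then all sends,
  // and the requests are completed with a single waitall.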
  if (intra_rank == 0) {
    int num_req = (inter_comm_size - 1) * 2;
    MPI_Request* reqs = new MPI_Request[num_req];
    MPI_Request* req_ptr = reqs;
    MPI_Status* stat = new MPI_Status[num_req];

    // Post one receive per remote SMP node, into that node's slot of recv_buf
    for (i = 1; i < inter_comm_size; i++) {
      src = ((inter_rank - i + inter_comm_size) % inter_comm_size) * num_core;
      recv_offset = (src * sextent * scount);
      *(req_ptr++) = Request::irecv(((char *) recv_buf + recv_offset), (rcount * num_core), rtype,
                                    src, tag, comm);
    }
    // Send the num_core blocks of this node to every other SMP leader
    for (i = 1; i < inter_comm_size; i++) {
      dst = ((inter_rank + i) % inter_comm_size) * num_core;
      send_offset = (rank * sextent * scount);
      *(req_ptr++) = Request::isend(((char *) recv_buf + send_offset), (scount * num_core), stype,
                                    dst, tag, comm);
    }
    Request::waitall(num_req, reqs, stat);
    delete[] reqs;
    delete[] stat;
  }
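  // Leaders now hold the full gathered buffer (comm_size blocks); the remaining
  // step forwards it to the non-leader processes of each node.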
  // INTRA-BCAST (flat tree)
  if (intra_rank == 0) {
    // The leader sends the whole buffer to every other process of its SMP node
    for (i = 1; i < num_core_in_current_smp; i++) {
      Request::send(recv_buf, (scount * comm_size), stype, (rank + i), tag, comm);
    }
  } else {
    // Non-leaders receive the whole buffer from their node's leader
    Request::recv(recv_buf, (rcount * comm_size), rtype, (inter_rank * num_core),
                  tag, comm, &status);
  }

  return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid