/* Copyright (c) 2013-2017. The SimGrid Team.
 * All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "../colls_private.hpp"
/* IMPLEMENTED BY PITCH PATARASUK
   Non-topology-specific (however, the number of cores per node needs to be changed)
   all-reduce operation designed for SMP clusters.
   It uses 2-layer communication: binomial trees for both intra-node and
   inter-node communication. */

/* ** NOTE **
   Use -DMPICH2 if this code does not compile.
   MPICH1 code also works on MPICH2 on our cluster and the performance is similar.
   This code assumes a commutative and associative reduce operator (MPI_SUM, MPI_MAX, etc.).
*/
//#include <star-reduction.c>
/*
This function performs the all-reduce operation as follows:
1) binomial-tree reduce inside each SMP node
2) binomial-tree reduce between the roots of the SMP nodes (inter-node communication)
3) binomial-tree bcast between the roots of the SMP nodes (inter-node communication)
4) binomial-tree bcast inside each SMP node
*/
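
/* Each phase is a binomial tree, so on the critical path the reduce part takes
   roughly ceil(log2(num_core)) + ceil(log2(inter_comm_size)) point-to-point steps,
   and the broadcast part about the same again. */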
namespace simgrid{
namespace smpi{
int Coll_allreduce_smp_binomial::allreduce(void *send_buf, void *recv_buf,
                                           int count, MPI_Datatype dtype,
                                           MPI_Op op, MPI_Comm comm)
{
  int comm_size, rank;
  void *tmp_buf;
  int tag = COLL_TAG_ALLREDUCE;
  int mask, src, dst;
  if (comm->get_leaders_comm() == MPI_COMM_NULL) {
    comm->init_smp();
  }
  int num_core = 1;
  if (comm->is_uniform()) {
    num_core = comm->get_intra_comm()->size();
  }
  MPI_Status status;

  comm_size = comm->size();
  rank      = comm->rank();
  MPI_Aint extent, lb;
  dtype->extent(&lb, &extent);
  tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
  /* compute intra and inter ranking */
  int intra_rank, inter_rank;
  intra_rank = rank % num_core;
  inter_rank = rank / num_core;
  /* number of processes taking part in the inter-node communication =>
     should be equal to the number of machines */
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
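
  /* illustrative example (assuming num_core = 2 and comm_size = 6):
       rank:       0 1 2 3 4 5
       intra_rank: 0 1 0 1 0 1   (position inside the SMP node)
       inter_rank: 0 0 1 1 2 2   (index of the SMP node)
     => inter_comm_size = 3, one representative (intra_rank == 0) per node */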
  /* copy input buffer to output buffer */
  Request::sendrecv(send_buf, count, dtype, rank, tag,
                    recv_buf, count, dtype, rank, tag, comm, &status);
  /* start binomial reduce intra-communication inside each SMP node */
  mask = 1;
  while (mask < num_core) {
    if ((mask & intra_rank) == 0) {
      src = (inter_rank * num_core) + (intra_rank | mask);
      if (src < comm_size) {
        Request::recv(tmp_buf, count, dtype, src, tag, comm, &status);
        if (op != MPI_OP_NULL) op->apply(tmp_buf, recv_buf, &count, dtype);
      }
    } else {
      dst = (inter_rank * num_core) + (intra_rank & (~mask));
      Request::send(recv_buf, count, dtype, dst, tag, comm);
      break;
    }
    mask <<= 1;
  }
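
  /* after this loop the root of each SMP node (intra_rank == 0) holds the
     reduction of its node's contributions in recv_buf */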
  /* start binomial reduce inter-communication between SMP nodes:
     each node has only one process (its root) that communicates with other nodes */
  if (intra_rank == 0) {
    mask = 1;
    while (mask < inter_comm_size) {
      if ((mask & inter_rank) == 0) {
        src = (inter_rank | mask) * num_core;
        if (src < comm_size) {
          Request::recv(tmp_buf, count, dtype, src, tag, comm, &status);
          if (op != MPI_OP_NULL) op->apply(tmp_buf, recv_buf, &count, dtype);
        }
      } else {
        dst = (inter_rank & (~mask)) * num_core;
        Request::send(recv_buf, count, dtype, dst, tag, comm);
        break;
      }
      mask <<= 1;
    }
  }
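
  /* the globally reduced result now resides in recv_buf of rank 0, the root of the first SMP node */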
  /* start binomial broadcast inter-communication between SMP nodes:
     each node has only one process (its root) that communicates with other nodes */
  if (intra_rank == 0) {
    mask = 1;
    while (mask < inter_comm_size) {
      if (inter_rank & mask) {
        src = (inter_rank - mask) * num_core;
        Request::recv(recv_buf, count, dtype, src, tag, comm, &status);
        break;
      }
      mask <<= 1;
    }
    mask >>= 1;

    while (mask > 0) {
      if (inter_rank < inter_comm_size) {
        dst = (inter_rank + mask) * num_core;
        if (dst < comm_size) {
          Request::send(recv_buf, count, dtype, dst, tag, comm);
        }
      }
      mask >>= 1;
    }
  }
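
  /* every SMP-node root now holds the complete result; it only remains to
     broadcast it to the other processes of each node */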
  /* start binomial broadcast intra-communication inside each SMP node */
  int num_core_in_current_smp = num_core;
  if (inter_rank == (inter_comm_size - 1)) {
    /* the last SMP node may contain fewer than num_core processes */
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }
  mask = 1;
  while (mask < num_core_in_current_smp) {
    if (intra_rank & mask) {
      src = (inter_rank * num_core) + (intra_rank - mask);
      Request::recv(recv_buf, count, dtype, src, tag, comm, &status);
      break;
    }
    mask <<= 1;
  }
  mask >>= 1;

  while (mask > 0) {
    dst = (inter_rank * num_core) + (intra_rank + mask);
    if (dst < comm_size) {
      Request::send(recv_buf, count, dtype, dst, tag, comm);
    }
    mask >>= 1;
  }

  smpi_free_tmp_buffer(tmp_buf);
  return MPI_SUCCESS;
}
}
}
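
/* Note: with SMPI this algorithm is typically chosen at runtime through the
   collective selector, e.g. --cfg=smpi/allreduce:smp_binomial (assuming the
   option name used by the SimGrid version at hand). */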