/* Copyright (c) 2013-2017. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation. All rights reserved.
 * Copyright (c) 2004-2009 The University of Tennessee and The University
 *                         of Tennessee Research Foundation. All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart. All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 *
 * Additional copyrights may follow
 */

/* -*- Mode: C; c-basic-offset:4 ; -*- */
/* Copyright (c) 2001-2014, The Ohio State University. All rights
 * reserved.
 *
 * This file is part of the MVAPICH2 software package developed by the
 * team members of The Ohio State University's Network-Based Computing
 * Laboratory (NBCL), headed by Professor Dhabaleswar K. (DK) Panda.
 *
 * For detailed copyright and licensing information, please refer to the
 * copyright file COPYRIGHT in the top level MVAPICH2 directory.
 */
/*
 * (C) 2001 by Argonne National Laboratory.
 *     See COPYRIGHT in top-level directory.
 */
#include "../colls_private.hpp"

extern int (*MV2_Bcast_function)(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm_ptr);

extern int (*MV2_Bcast_intra_node_function)(void* buffer, int count, MPI_Datatype datatype, int root,
                                            MPI_Comm comm_ptr);

extern int zcpy_knomial_factor;
extern int mv2_pipelined_zcpy_knomial_factor;
extern int bcast_segment_size;
extern int mv2_inter_node_knomial_factor;
extern int mv2_intra_node_knomial_factor;
/* mv2_bcast_two_level_system_size is fixed by a #define below, so it is not
 * declared extern here. */

#define INTRA_NODE_ROOT 0
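
/* The original MVAPICH2 code dispatches to many internal broadcast variants.
 * In this SMPI port, each variant is remapped onto an existing SMPI
 * collective implementation through the macros below. */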
#define MPIR_Pipelined_Bcast_Zcpy_MV2 Coll_bcast_mpich::bcast
#define MPIR_Pipelined_Bcast_MV2 Coll_bcast_mpich::bcast
#define MPIR_Bcast_binomial_MV2 Coll_bcast_binomial_tree::bcast
#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 Coll_bcast_scatter_LR_allgather::bcast
#define MPIR_Bcast_scatter_doubling_allgather_MV2 Coll_bcast_scatter_rdb_allgather::bcast
#define MPIR_Bcast_scatter_ring_allgather_MV2 Coll_bcast_scatter_LR_allgather::bcast
#define MPIR_Shmem_Bcast_MV2 Coll_bcast_mpich::bcast
#define MPIR_Bcast_tune_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast
#define MPIR_Bcast_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast
#define MPIR_Knomial_Bcast_intra_node_MV2 Coll_bcast_mvapich2_knomial_intra_node::bcast
#define MPIR_Bcast_intra_MV2 Coll_bcast_mvapich2_intra_node::bcast
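
/* A minimal usage sketch (hypothetical, for illustration only): a tuning
 * layer would point the function pointers at one of the remapped variants
 * before the first broadcast, e.g.
 *
 *     MV2_Bcast_function            = MPIR_Bcast_binomial_MV2;
 *     MV2_Bcast_intra_node_function = MPIR_Shmem_Bcast_MV2;
 *
 * In SMPI, the actual assignment is done by the MVAPICH2 collective-selector
 * logic, not in this file; the functions below merely fall back to
 * Coll_bcast_mpich::bcast whenever the pointers are still NULL. */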
#define mv2_bcast_two_level_system_size 64
#define mv2_bcast_short_msg 16384
#define mv2_bcast_large_msg (512 * 1024)
#define mv2_knomial_intra_node_threshold 131072
#define mv2_scatter_rd_inter_leader_bcast 1
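
/* Inter-node phase of the two-level broadcast: if the root is not a node
 * leader, it first ships the buffer to its node's leader; the node leaders
 * then broadcast among themselves over the leaders' communicator. The
 * intra-node phase is handled separately by the caller. */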
namespace simgrid {
namespace smpi {

int Coll_bcast_mvapich2_inter_node::bcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
    int rank;
    int mpi_errno = MPI_SUCCESS;
    MPI_Comm shmem_comm, leader_comm;
    int local_rank, local_size, global_rank = -1;
    int leader_root, leader_of_root;
    rank = comm->rank();
    // comm_size = comm->size();
    if (MV2_Bcast_function == NULL) {
        MV2_Bcast_function = Coll_bcast_mpich::bcast;
    }

    if (MV2_Bcast_intra_node_function == NULL) {
        MV2_Bcast_intra_node_function = Coll_bcast_mpich::bcast;
    }

    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
        comm->init_smp();
    }
    shmem_comm = comm->get_intra_comm();
    local_rank = shmem_comm->rank();
    local_size = shmem_comm->size();

    leader_comm = comm->get_leaders_comm();

    if ((local_rank == 0) && (local_size > 1)) {
        global_rank = leader_comm->rank();
    }

    int* leaders_map = comm->get_leaders_map();
    leader_of_root   = comm->group()->rank(leaders_map[root]);
    leader_root      = leader_comm->group()->rank(leaders_map[root]);
    /* If the root is not its node's leader, move the data to that leader first. */
    if (local_size > 1) {
        if ((local_rank == 0) && (root != rank) && (leader_root == global_rank)) {
            Request::recv(buffer, count, datatype, root, COLL_TAG_BCAST, comm, MPI_STATUS_IGNORE);
        }
        if ((local_rank != 0) && (root == rank)) {
            Request::send(buffer, count, datatype, leader_of_root, COLL_TAG_BCAST, comm);
        }
    }
#if defined(_MCST_SUPPORT_)
    /* Hardware-multicast path from the original MVAPICH2 code; _MCST_SUPPORT_
     * is never defined in SMPI, so this block is compiled out. */
    if (comm_ptr->ch.is_mcast_ok) {
        mpi_errno = MPIR_Mcast_inter_node_MV2(buffer, count, datatype, root, comm_ptr, errflag);
        if (mpi_errno == MPI_SUCCESS) {
            goto fn_exit;
        }
    }
#endif
/*
    if (local_rank == 0) {
        leader_comm = comm->get_leaders_comm();
        root = leader_root;
    }

    if (MV2_Bcast_function == &MPIR_Pipelined_Bcast_MV2) {
        mpi_errno = MPIR_Pipelined_Bcast_MV2(buffer, count, datatype, root, comm);
    } else if (MV2_Bcast_function == &MPIR_Bcast_scatter_ring_allgather_shm_MV2) {
        mpi_errno = MPIR_Bcast_scatter_ring_allgather_shm_MV2(buffer, count, datatype, leader_root, comm);
    } else */
    {
        /* Only the node leaders take part in the inter-node broadcast. */
        if (local_rank == 0) {
            /* if (MV2_Bcast_function == &MPIR_Knomial_Bcast_inter_node_wrapper_MV2) {
                mpi_errno = MPIR_Knomial_Bcast_inter_node_wrapper_MV2(buffer, count, datatype, leader_root, comm);
            } else */
            mpi_errno = MV2_Bcast_function(buffer, count, datatype, leader_root, leader_comm);
        }
    }

    return mpi_errno;
}
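
/* Intra-node k-nomial broadcast: a generalization of the binomial tree in
 * which every inner node forwards the message to up to
 * (mv2_intra_node_knomial_factor - 1) children per round, giving a tree of
 * depth O(log_k(local_size)). */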
int Coll_bcast_mvapich2_knomial_intra_node::bcast(void* buffer, int count, MPI_Datatype datatype, int root,
                                                  MPI_Comm comm)
{
    int local_size = 0, rank;
    int mpi_errno        = MPI_SUCCESS;
    MPI_Request* reqarray = NULL;
    MPI_Status* starray   = NULL;
    int src, dst, mask, relative_rank;
    int k;
    if (MV2_Bcast_function == NULL) {
        MV2_Bcast_function = Coll_bcast_mpich::bcast;
    }

    if (MV2_Bcast_intra_node_function == NULL) {
        MV2_Bcast_intra_node_function = Coll_bcast_mpich::bcast;
    }

    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
        comm->init_smp();
    }

    local_size = comm->size();
    rank       = comm->rank();
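
    /* Scratch arrays for one round of non-blocking sends: the k-nomial tree
     * issues at most (mv2_intra_node_knomial_factor - 1) isends per round,
     * so 2 * factor entries leave ample head-room. */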
    reqarray = (MPI_Request*)xbt_malloc(2 * mv2_intra_node_knomial_factor * sizeof(MPI_Request));
    starray  = (MPI_Status*)xbt_malloc(2 * mv2_intra_node_knomial_factor * sizeof(MPI_Status));
    /* intra-node k-nomial bcast */
    if (local_size > 1) {
        relative_rank = (rank >= root) ? rank - root : rank - root + local_size;
        mask          = 0x1;

        /* Receive phase: each non-root rank waits for the message from its
         * parent in the k-nomial tree. */
        while (mask < local_size) {
            if (relative_rank % (mv2_intra_node_knomial_factor * mask)) {
                src = relative_rank / (mv2_intra_node_knomial_factor * mask) *
                          (mv2_intra_node_knomial_factor * mask) + root;
                if (src >= local_size) {
                    src -= local_size;
                }

                Request::recv(buffer, count, datatype, src, COLL_TAG_BCAST, comm, MPI_STATUS_IGNORE);
                break;
            }
            mask *= mv2_intra_node_knomial_factor;
        }
        mask /= mv2_intra_node_knomial_factor;
        /* Send phase: forward to up to (k - 1) children per round, completing
         * the round's non-blocking sends before moving down the tree. */
        while (mask > 0) {
            int reqs = 0;
            for (k = 1; k < mv2_intra_node_knomial_factor; k++) {
                if (relative_rank + mask * k < local_size) {
                    dst = rank + mask * k;
                    if (dst >= local_size) {
                        dst -= local_size;
                    }
                    reqarray[reqs++] = Request::isend(buffer, count, datatype, dst, COLL_TAG_BCAST, comm);
                }
            }
            Request::waitall(reqs, reqarray, starray);

            mask /= mv2_intra_node_knomial_factor;
        }
    }
    xbt_free(reqarray);
    xbt_free(starray);
    return mpi_errno;
}
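
/* Top-level SMP-aware broadcast: choose, based on message size and
 * communicator size, between the two-level (inter-node followed by
 * intra-node) scheme above and a flat single-level algorithm. */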
int Coll_bcast_mvapich2_intra_node::bcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
    int mpi_errno = MPI_SUCCESS;
    int comm_size;
    int two_level_bcast = 1;
    size_t nbytes       = 0;
    int is_homogeneous, is_contig;
    MPI_Aint type_size;
    void* tmp_buf = NULL;
    MPI_Comm shmem_comm;

    if (count == 0)
        return MPI_SUCCESS;
    if (MV2_Bcast_function == NULL) {
        MV2_Bcast_function = Coll_bcast_mpich::bcast;
    }

    if (MV2_Bcast_intra_node_function == NULL) {
        MV2_Bcast_intra_node_function = Coll_bcast_mpich::bcast;
    }

    if (comm->get_leaders_comm() == MPI_COMM_NULL) {
        comm->init_smp();
    }

    comm_size = comm->size();
    // rank = comm->rank();
    /* In SMPI, datatypes are always treated as contiguous here; the original
     * MPICH handle-kind test is kept for reference. */
/*
    if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN)*/
    is_contig = 1;
/*  else {
        MPID_Datatype_get_ptr(datatype, dtp);
        is_contig = dtp->is_contig;
    }
*/
    is_homogeneous = 1;
#ifdef MPID_HAS_HETERO
    if (comm_ptr->is_hetero)
        is_homogeneous = 0;
#endif
    /* MPI_Type_size() might not give the accurate size of the packed
     * datatype for heterogeneous systems (because of padding, encoding,
     * etc). On the other hand, MPI_Pack_size() can become very
     * expensive, depending on the implementation, especially for
     * heterogeneous systems. We want to use MPI_Type_size() wherever
     * possible, and MPI_Pack_size() in other places.
     */
    // if (is_homogeneous) {
    type_size = datatype->size();
    //} else {
    /*    MPIR_Pack_size_impl(1, datatype, &type_size);*/
    //}
    nbytes = (size_t)(count) * (type_size);
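    /* two_level_bcast defaults to 1, so systems larger than
     * mv2_bcast_two_level_system_size always use the two-level scheme. On
     * smaller systems it only pays off for medium-sized messages; short and
     * large ones take the flat algorithms in the else-branch below. */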
    if (comm_size <= mv2_bcast_two_level_system_size) {
        if (nbytes > mv2_bcast_short_msg && nbytes < mv2_bcast_large_msg) {
            two_level_bcast = 1;
        } else {
            two_level_bcast = 0;
        }
    }
    if (two_level_bcast == 1
#if defined(_MCST_SUPPORT_)
        || comm_ptr->ch.is_mcast_ok
#endif
       ) {

        if (not is_contig || not is_homogeneous) {
            tmp_buf = (void*)smpi_get_tmp_sendbuffer(nbytes);
            /* TODO: Pipeline the packing and communication */
            /* position = 0; */
            /* if (rank == root) {
                mpi_errno = MPIR_Pack_impl(buffer, count, datatype, tmp_buf, nbytes, &position);
                if (mpi_errno)
                    MPIU_ERR_POP(mpi_errno);
            } */
        }
        shmem_comm = comm->get_intra_comm();
        if (not is_contig || not is_homogeneous) {
            mpi_errno = MPIR_Bcast_inter_node_helper_MV2(tmp_buf, nbytes, MPI_BYTE, root, comm);
        } else {
            mpi_errno = MPIR_Bcast_inter_node_helper_MV2(buffer, count, datatype, root, comm);
        }
        /* We are now done with the inter-node phase */
        if (nbytes <= mv2_knomial_intra_node_threshold) {
            if (not is_contig || not is_homogeneous) {
                mpi_errno = MPIR_Shmem_Bcast_MV2(tmp_buf, nbytes, MPI_BYTE, root, shmem_comm);
            } else {
                mpi_errno = MPIR_Shmem_Bcast_MV2(buffer, count, datatype, root, shmem_comm);
            }
        } else {
            if (not is_contig || not is_homogeneous) {
                mpi_errno = MPIR_Knomial_Bcast_intra_node_MV2(tmp_buf, nbytes, MPI_BYTE, INTRA_NODE_ROOT, shmem_comm);
            } else {
                mpi_errno = MPIR_Knomial_Bcast_intra_node_MV2(buffer, count, datatype, INTRA_NODE_ROOT, shmem_comm);
            }
        }
    } else {
        /* Flat, single-level algorithms for small systems */
        if (nbytes <= mv2_bcast_short_msg) {
            mpi_errno = MPIR_Bcast_binomial_MV2(buffer, count, datatype, root, comm);
        } else {
            if (mv2_scatter_rd_inter_leader_bcast) {
                mpi_errno = MPIR_Bcast_scatter_ring_allgather_MV2(buffer, count, datatype, root, comm);
            } else {
                mpi_errno = MPIR_Bcast_scatter_doubling_allgather_MV2(buffer, count, datatype, root, comm);
            }
        }
    }

    return mpi_errno;
}

} // namespace smpi
} // namespace simgrid