/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "colls_private.h"
/*****************************************************************************

 * Function: alltoall_rdb

 * Return: int

 * Inputs:
    send_buff: send input buffer
    send_count: number of elements to send
    send_type: data type of elements being sent
    recv_buff: receive output buffer
    recv_count: number of elements to receive
    recv_type: data type of elements being received

 * Descrp: Function realizes the alltoall operation using the recursive
           doubling algorithm.

 * Author: MPICH / slightly modified by Ahmad Faraj.

 ****************************************************************************/
int smpi_coll_tuned_alltoall_rdb(void *send_buff, int send_count,
                                 MPI_Datatype send_type,
                                 void *recv_buff, int recv_count,
                                 MPI_Datatype recv_type, MPI_Comm comm)
{
  /* MPI variables */
  MPI_Status status;
  MPI_Aint send_increment, recv_increment, extent;

  int dst_tree_root, rank_tree_root, send_offset, recv_offset;
  int rank, num_procs, j, k, dst, curr_size, max_size;
  int last_recv_count = 0, tmp_mask, tree_root, num_procs_completed;
  int tag = COLL_TAG_ALLTOALL, mask = 1, i = 0;

  char *tmp_buff;
  char *send_ptr = (char *) send_buff;
  char *recv_ptr = (char *) recv_buff;
  num_procs = smpi_comm_size(comm);
  rank = smpi_comm_rank(comm);
  send_increment = smpi_datatype_get_extent(send_type);
  recv_increment = smpi_datatype_get_extent(recv_type);
  extent = smpi_datatype_get_extent(recv_type);

  send_increment *= (send_count * num_procs);
  recv_increment *= (recv_count * num_procs);

  max_size = num_procs * recv_increment;

  tmp_buff = (char *) smpi_get_tmp_sendbuffer(max_size);

  curr_size = send_count * num_procs;
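  /* Start by copying this process's own send buffer into its slot of the
     temporary buffer; every recursive doubling exchange works on tmp_buff. */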
  smpi_mpi_sendrecv(send_ptr, curr_size, send_type, rank, tag,
                    tmp_buff + (rank * recv_increment),
                    curr_size, recv_type, rank, tag, comm, &status);
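  /* Recursive doubling: in round i the partner is rank XOR 2^i, and the
     amount of data exchanged doubles from one round to the next. */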
  while (mask < num_procs) {
    dst = rank ^ mask;

    dst_tree_root = dst >> i;
    dst_tree_root <<= i;

    rank_tree_root = rank >> i;
    rank_tree_root <<= i;

    send_offset = rank_tree_root * send_increment;
    recv_offset = dst_tree_root * recv_increment;
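    /* Exchange everything received so far with the partner for this round;
       partners beyond num_procs do not exist when the process count is not a
       power of two, so they are skipped here. */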
    if (dst < num_procs) {
      smpi_mpi_sendrecv(tmp_buff + send_offset, curr_size, send_type, dst, tag,
                        tmp_buff + recv_offset, mask * recv_count * num_procs,
                        recv_type, dst, tag, comm, &status);

      last_recv_count = smpi_mpi_get_count(&status, recv_type);
      curr_size += last_recv_count;
    }
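    /* If num_procs is not a power of two, the subtree rooted at dst_tree_root
       is incomplete, so the data held by its existing members has to be
       forwarded within the current subtree in an extra tree-structured phase. */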
    if (dst_tree_root + mask > num_procs) {

      num_procs_completed = num_procs - rank_tree_root - mask;
      /* num_procs_completed is the number of processes in this
         subtree that have all the data. Send data to others
         in a tree fashion. First find root of current tree
         that is being divided into two. k is the number of
         least-significant bits in this process's rank that
         must be zeroed out to find the rank of the root */

      j = mask;
      k = 0;
      while (j) {
        j >>= 1;
        k++;
      }
      k--;

      tmp_mask = mask >> 1;
      while (tmp_mask) {
        dst = rank ^ tmp_mask;

        tree_root = rank >> k;
        tree_root <<= k;
        /* send only if this proc has data and destination
           doesn't have data. at any step, multiple processes
           can send if they have the data */

        if ((dst > rank)
            && (rank < tree_root + num_procs_completed)
            && (dst >= tree_root + num_procs_completed)) {
          smpi_mpi_send(tmp_buff + dst_tree_root * send_increment,
                        last_recv_count, send_type, dst, tag, comm);
        }
        /* recv only if this proc. doesn't have data and sender
           has data */

        else if ((dst < rank)
                 && (dst < tree_root + num_procs_completed)
                 && (rank >= tree_root + num_procs_completed)) {
          smpi_mpi_recv(tmp_buff + dst_tree_root * send_increment,
                        mask * num_procs * send_count, send_type, dst,
                        tag, comm, &status);

          last_recv_count = smpi_mpi_get_count(&status, send_type);
          curr_size += last_recv_count;
        }

        tmp_mask >>= 1;
        k--;
      }
    }

    mask <<= 1;
    i++;
  }
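  /* tmp_buff now holds the complete send buffer of every process; copy, for
     each peer i, the block that peer addressed to this rank into position i
     of the receive buffer, using local sendrecv copies. */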
  for (i = 0; i < num_procs; i++)
    smpi_mpi_sendrecv(tmp_buff + (rank + i * num_procs) * send_count * extent,
                      send_count, send_type, rank, tag,
                      recv_ptr + (i * recv_count * extent),
                      recv_count, recv_type, rank, tag, comm, &status);

  smpi_free_tmp_buffer(tmp_buff);
  return MPI_SUCCESS;
}