/*****************************************************************************

 * Function: alltoall_rdb

 * Return: int

 * Inputs:
     send_buff: send input buffer
     send_count: number of elements to send
     send_type: data type of elements being sent
     recv_buff: receive output buffer
     recv_count: number of elements to receive
     recv_type: data type of elements being received
     comm: communicator

 * Descrp: Function realizes the all-to-all operation using the recursive
           doubling algorithm.

 * Author: MPICH / slightly modified by Ahmad Faraj.

 ****************************************************************************/
int smpi_coll_tuned_alltoall_rdb(void *send_buff, int send_count,
                                 MPI_Datatype send_type,
                                 void *recv_buff, int recv_count,
                                 MPI_Datatype recv_type, MPI_Comm comm)
{
  /* MPI variables */
  MPI_Status status;
  MPI_Aint send_increment, recv_increment, extent;

  int dst_tree_root, rank_tree_root, send_offset, recv_offset;
  int rank, num_procs, j, k, dst, curr_size, max_size;
  int last_recv_count, tmp_mask, tree_root, num_procs_completed;
  int tag = 1, mask = 1, i = 0;

  char *tmp_buff;
  char *send_ptr = (char *) send_buff;
  char *recv_ptr = (char *) recv_buff;
  MPI_Comm_size(comm, &num_procs);
  MPI_Comm_rank(comm, &rank);
  MPI_Type_extent(send_type, &send_increment);
  MPI_Type_extent(recv_type, &recv_increment);
  MPI_Type_extent(recv_type, &extent);

  /* stride of one process's full contribution (num_procs blocks) */
  send_increment *= (send_count * num_procs);
  recv_increment *= (recv_count * num_procs);

  max_size = num_procs * recv_increment;

  tmp_buff = (char *) xbt_malloc(max_size);

  curr_size = send_count * num_procs;

  /* copy the local send buffer into this rank's slot of the temporary buffer */
  MPI_Sendrecv(send_ptr, curr_size, send_type, rank, tag,
               tmp_buff + (rank * recv_increment),
               curr_size, recv_type, rank, tag, comm, &status);
  /* recursive doubling: in step i, exchange everything gathered so far with
     the partner whose rank differs in bit i */
  while (mask < num_procs) {
    dst = rank ^ mask;

    dst_tree_root = dst >> i;
    dst_tree_root <<= i;

    rank_tree_root = rank >> i;
    rank_tree_root <<= i;

    send_offset = rank_tree_root * send_increment;
    recv_offset = dst_tree_root * recv_increment;

    if (dst < num_procs) {
      MPI_Sendrecv(tmp_buff + send_offset, curr_size, send_type, dst, tag,
                   tmp_buff + recv_offset, mask * recv_count * num_procs,
                   recv_type, dst, tag, comm, &status);

      MPI_Get_count(&status, recv_type, &last_recv_count);
      curr_size += last_recv_count;
    }
    /* if the partner subtree is incomplete (non-power-of-two number of
       processes), some processes in this subtree had no partner in this
       step and must be given the just-received data */
    if (dst_tree_root + mask > num_procs) {

      num_procs_completed = num_procs - rank_tree_root - mask;
      /* num_procs_completed is the number of processes in this
         subtree that have all the data. Send data to others
         in a tree fashion. First find root of current tree
         that is being divided into two. k is the number of
         least-significant bits in this process's rank that
         must be zeroed out to find the rank of the root */
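      /* For example, with num_procs = 6 at the mask = 4 step: ranks 0-1
         exchange with ranks 4-5, while ranks 2-3 have no partner (ranks 6-7
         do not exist). Here num_procs_completed = 6 - 0 - 4 = 2, so ranks 0
         and 1 forward the freshly received data to ranks 2 and 3 in the
         loop below. */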
      j = mask;
      k = 0;
      while (j) {
        j >>= 1;
        k++;
      }
      k--;

      tmp_mask = mask >> 1;

      while (tmp_mask) {
        dst = rank ^ tmp_mask;

        tree_root = rank >> k;
        tree_root <<= k;
        /* send only if this proc has data and destination
           doesn't have data. at any step, multiple processes
           can send if they have the data */
        if ((dst > rank)
            && (rank < tree_root + num_procs_completed)
            && (dst >= tree_root + num_procs_completed)) {
          MPI_Send(tmp_buff + dst_tree_root * send_increment,
                   last_recv_count, send_type, dst, tag, comm);
        }
        /* recv only if this proc. doesn't have data and sender
           has data */
        else if ((dst < rank)
                 && (dst < tree_root + num_procs_completed)
                 && (rank >= tree_root + num_procs_completed)) {
          MPI_Recv(tmp_buff + dst_tree_root * send_increment,
                   mask * num_procs * send_count, send_type, dst,
                   tag, comm, &status);

          MPI_Get_count(&status, send_type, &last_recv_count);
          curr_size += last_recv_count;
        }

        tmp_mask >>= 1;
        k--;
      }
    }

    mask <<= 1;
    i++;
  }
  /* extract, for every source rank i, the block addressed to this rank from
     the temporary buffer and place it in slot i of the receive buffer */
  for (i = 0; i < num_procs; i++)
    MPI_Sendrecv(tmp_buff + (rank + i * num_procs) * send_count * extent,
                 send_count, send_type, rank, tag,
                 recv_ptr + (i * recv_count * extent),
                 recv_count, recv_type, rank, tag, comm, &status);

  free(tmp_buff);
  return MPI_SUCCESS;
}
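
/* A minimal usage sketch, assuming MPI is already initialized and that this
   translation unit is compiled within SMPI (so xbt_malloc() is available).
   The helper name example_alltoall_rdb and the one-int-per-peer payload are
   purely illustrative. */
static void example_alltoall_rdb(MPI_Comm comm)
{
  int rank, num_procs, i;
  int *send_vals, *recv_vals;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &num_procs);

  send_vals = (int *) xbt_malloc(num_procs * sizeof(int));
  recv_vals = (int *) xbt_malloc(num_procs * sizeof(int));

  for (i = 0; i < num_procs; i++)
    send_vals[i] = rank * num_procs + i;   /* block destined for rank i */

  smpi_coll_tuned_alltoall_rdb(send_vals, 1, MPI_INT, recv_vals, 1, MPI_INT, comm);

  /* on return, recv_vals[i] holds the value that rank i sent to this rank */

  free(send_vals);
  free(recv_vals);
}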