1 /* Copyright (c) 2013-2014. The SimGrid Team.
2 * All rights reserved. */
4 /* This program is free software; you can redistribute it and/or modify it
5 * under the terms of the license (GNU LGPL) which comes with this package. */
7 #include "colls_private.h"
10 /*****************************************************************************
12 * Function: alltoall_2dmesh_shoot
17 send_buff: send input buffer
18 send_count: number of elements to send
19 send_type: data type of elements being sent
20 recv_buff: receive output buffer
21 recv_count: number of elements to receive
22 recv_type: data type of elements being received
25 * Descrp: Function realizes the alltoall operation using the 2dmesh
26 algorithm. It actually performs allgather operation in x dimension
27 then in the y dimension. Each node then extracts the needed data.
28 The communication in each dimension follows "simple."
32 ****************************************************************************/
/* Checks whether `num` processes can be arranged as a 2-D mesh of
 * X rows by Y columns (X * Y == num); on success the two factors are
 * stored through `i` and `j` and a nonzero value is returned.
 * NOTE(review): the body is elided in this excerpt (original lines
 * 34-55 are missing) — semantics inferred from the call on original
 * line 75; confirm against the full source file. */
33 static int alltoall_check_is_2dmesh(int num, int *i, int *j)
/* Alltoall over a 2-D (X x Y) process mesh: an allgather-style exchange
 * along the row dimension, a local extraction step, then an exchange
 * along the column dimension ("simple" pattern in each dimension).
 *
 * NOTE(review): this listing is an excerpt that keeps the original
 * file's line numbers, and the gaps in that numbering mark elided
 * lines (opening brace, the early return when num_procs is not a 2-D
 * mesh, the assignment of num_reqs, the resets of req_ptr, the
 * self-rank skips inside the loops, the `else` keywords before the
 * second sendrecv of each if, closing braces, the frees of
 * reqs/statuses and the final return). Only the visible lines are
 * documented below; confirm every elided part against the full source. */
56 int smpi_coll_tuned_alltoall_2dmesh(void *send_buff, int send_count,
57 MPI_Datatype send_type,
58 void *recv_buff, int recv_count,
59 MPI_Datatype recv_type, MPI_Comm comm)
/* NOTE(review): stray double semicolon on the reqs declaration below —
 * harmless but worth cleaning up in the full source. */
61 MPI_Status *statuses, s;
62 MPI_Request *reqs, *req_ptr;;
65 char *tmp_buff1, *tmp_buff2;
66 int i, j, src, dst, rank, num_procs, count, num_reqs;
67 int X, Y, send_offset, recv_offset;
68 int my_row_base, my_col_base, src_row_base, block_size;
69 int tag = COLL_TAG_ALLTOALL;
/* Rank/size of the communicator; `extent` (declared on an elided line)
 * is the byte extent of one send_type element. */
71 rank = smpi_comm_rank(comm);
72 num_procs = smpi_comm_size(comm);
73 extent = smpi_datatype_get_extent(send_type);
/* Bail out unless num_procs factors as an X x Y mesh; the action taken
 * on failure (presumably return with an error / fallback) is on an
 * elided line. */
75 if (!alltoall_check_is_2dmesh(num_procs, &X, &Y))
/* First rank of this process's mesh row, and this process's column
 * index within the row. */
78 my_row_base = (rank / Y) * Y;
79 my_col_base = rank % Y;
/* Bytes of one per-destination block (send_count elements). */
81 block_size = extent * send_count;
/* tmp_buff1 holds the full send buffers gathered from every row peer
 * (Y contributions of num_procs blocks each); tmp_buff2 stages the Y
 * blocks packed for one column peer. */
83 tmp_buff1 = (char *) smpi_get_tmp_sendbuffer(block_size * num_procs * Y);
84 tmp_buff2 = (char *) smpi_get_tmp_recvbuffer(block_size * Y);
/* NOTE(review): num_reqs is used here without a visible assignment —
 * presumably set on an elided line (likely max(X, Y)); verify. */
90 statuses = (MPI_Status *) xbt_malloc(num_reqs * sizeof(MPI_Status));
91 reqs = (MPI_Request *) xbt_malloc(num_reqs * sizeof(MPI_Request));
/* Phase 1 — row exchange: each rank sends its entire send buffer
 * (num_procs blocks) to every other rank in its row and receives the
 * peers' full buffers into tmp_buff1, slotted by the peer's column
 * index. req_ptr is presumably reset to reqs on an elided line, and
 * the own rank presumably skipped (hence Y - 1 in the waitall). */
95 count = send_count * num_procs;
97 for (i = 0; i < Y; i++) {
98 src = i + my_row_base;
102 recv_offset = (src % Y) * block_size * num_procs;
103 *(req_ptr++) = smpi_mpi_irecv(tmp_buff1 + recv_offset, count, recv_type, src, tag, comm);
106 for (i = 0; i < Y; i++) {
107 dst = i + my_row_base;
110 smpi_mpi_send(send_buff, count, send_type, dst, tag, comm);
113 smpi_mpi_waitall(Y - 1, reqs, statuses);
/* Phase 2 — local extraction: copy the blocks destined for this rank
 * (one per row peer) out of tmp_buff1 — or straight from send_buff for
 * the rank's own contribution — into recv_buff. The `else` pairing the
 * two sendrecv calls is on an elided line. */
116 for (i = 0; i < Y; i++) {
117 send_offset = (rank * block_size) + (i * block_size * num_procs);
118 recv_offset = (my_row_base * block_size) + (i * block_size);
120 if (i + my_row_base == rank)
121 smpi_mpi_sendrecv((char *) send_buff + recv_offset, send_count, send_type,
123 (char *) recv_buff + recv_offset, recv_count, recv_type,
124 rank, tag, comm, &s);
127 smpi_mpi_sendrecv(tmp_buff1 + send_offset, send_count, send_type,
129 (char *) recv_buff + recv_offset, recv_count, recv_type,
130 rank, tag, comm, &s);
/* Phase 3 — column exchange: receive from each peer in this column the
 * Y blocks of that peer's row, placed in recv_buff at the sender's
 * row-base offset. Self presumably skipped on elided lines (hence
 * X - 1 in the waitall); req_ptr presumably reset again. */
134 for (i = 0; i < X; i++) {
135 src = (i * Y + my_col_base);
138 src_row_base = (src / Y) * Y;
140 *(req_ptr++) = smpi_mpi_irecv((char *) recv_buff + src_row_base * block_size, recv_count * Y,
141 recv_type, src, tag, comm);
/* For each column peer: pack into tmp_buff2 the Y blocks this row
 * gathered that are destined for that peer (one per row member, taken
 * from tmp_buff1, or from send_buff for this rank's own contribution),
 * then ship the packed buffer. recv_offset is presumably reset to 0
 * before the inner loop on an elided line; the `else` before the
 * second sendrecv is also elided. */
144 for (i = 0; i < X; i++) {
145 dst = (i * Y + my_col_base);
150 for (j = 0; j < Y; j++) {
151 send_offset = (dst + j * num_procs) * block_size;
153 if (j + my_row_base == rank)
154 smpi_mpi_sendrecv((char *) send_buff + dst * block_size, send_count,
155 send_type, rank, tag, tmp_buff2 + recv_offset, recv_count,
156 recv_type, rank, tag, comm, &s);
158 smpi_mpi_sendrecv(tmp_buff1 + send_offset, send_count, send_type,
160 tmp_buff2 + recv_offset, recv_count, recv_type,
161 rank, tag, comm, &s);
163 recv_offset += block_size;
166 smpi_mpi_send(tmp_buff2, send_count * Y, send_type, dst, tag, comm);
168 smpi_mpi_waitall(X - 1, reqs, statuses);
/* Cleanup: release the SMPI temporary buffers; the frees of reqs and
 * statuses and the final return are on elided lines past this excerpt. */
171 smpi_free_tmp_buffer(tmp_buff1);
172 smpi_free_tmp_buffer(tmp_buff2);