/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "colls_private.h"
int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
#ifndef BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE
#define BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE 1024
#endif
#ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
#define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
#endif
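
/*
 * Arrival-pattern-aware pipelined broadcast, "wait" variant.
 *
 * Every non-root rank announces its arrival to rank 0 with a 1-byte
 * message.  Rank 0 probes for these signals, records the arrivals in a
 * header (a list of ranks in arrival order, terminated by -1), and
 * pipelines the payload in fixed-size segments to the first arrived
 * rank, which relays header and data along the chain.  If root is not
 * rank 0, the payload is first moved to rank 0, which then acts as the
 * distributing root.  The "wait" part: each relay waits for a segment
 * to be fully received before forwarding it to its successor.
 */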
/* Non-topology-specific pipelined linear-bcast function */
int smpi_coll_tuned_bcast_arrival_pattern_aware_wait(void *buf, int count,
                                                     MPI_Datatype datatype,
                                                     int root, MPI_Comm comm)
{
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int rank, size;
  int i, j, k;
  int tag = -COLL_TAG_BCAST;
  int will_send[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int sent_count;
  int header_index;
  int flag_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int already_sent[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int header_buf[BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE];
  char temp_buf[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int max_node = BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE;
  int header_size = BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE;
  MPI_Aint extent;
  extent = smpi_datatype_get_extent(datatype);

  /* source and destination */
  int to, from;

  rank = smpi_comm_rank(comm);
  size = smpi_comm_size(comm);
  /* segment is segment size in number of elements (not bytes) */
  int segment = bcast_arrival_pattern_aware_wait_segment_size_in_byte / extent;
  segment = segment == 0 ? 1 : segment;
  /* pipeline length */
  int pipe_length = count / segment;

  /* buffer offset between consecutive segments when sending and receiving
     data = segment size in bytes */
  int increment = segment * extent;
  /* if the input size is not divisible by the segment size,
     the small remainder is handled with the native implementation */
  int remainder = count % segment;
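  /* example: with the default 8192-byte segment and an 8-byte extent,
     segment = 1024 elements; count = 5000 then gives pipe_length = 4
     and remainder = 904 elements for the fall-back bcast at the end */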
  /* if root is not zero, send to rank zero first;
     this could be made faster by using logical src/dst instead */
  if (root != 0) {
    if (rank == root) {
      smpi_mpi_send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      smpi_mpi_recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
  /* value == 0 means root has not sent data (or header) to the node yet */
  for (i = 0; i < max_node; i++) {
    already_sent[i] = 0;
  }
  /* when a message is smaller than a block size => no pipeline */
  if (count <= segment) {
    segment = count;
    pipe_length = 1;
  }
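  /* segment now equals count, so the (count > segment) guard on the
     remainder fall-back at the end of the function stays false */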
  /* start pipeline bcast */

  send_request_array =
      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  recv_request_array =
      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  send_status_array =
      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
  recv_status_array =
      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
  /* root */
  if (rank == 0) {
    sent_count = 0;

    for (i = 0; i < BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE; i++)
      will_send[i] = 0;

    while (sent_count < (size - 1)) {
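      /* each iteration: probe for newly arrived ranks, record them in
         header_buf in arrival order, then pipeline the data to the first
         of them, which forwards it along the chain */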
      /* loop k times to let more processes arrive before starting to send data */
      for (k = 0; k < 3; k++) {
        for (i = 1; i < size; i++) {
          if ((already_sent[i] == 0) && (will_send[i] == 0)) {
            smpi_mpi_iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                            &temp_status_array[i]);
            if (flag_array[i] == 1) {
              will_send[i] = 1;
              /* consume the 1-byte arrival message */
              smpi_mpi_recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                            &status);
              /* restart the scan to catch earlier ranks that arrived late */
              i = 0;
            }
          }
        }
      }

      header_index = 0;

      /* register the ranks whose 1-byte message has arrived */
      for (i = 1; i < size; i++) {
        /* message arrived */
        if ((will_send[i] == 1) && (already_sent[i] == 0)) {
          header_buf[header_index] = i;
          header_index++;
          sent_count++;

          /* will send in the next step */
          already_sent[i] = 1;
        }
      }
      /* send header followed by data */
      if (header_index != 0) {
        header_buf[header_index] = -1;
        to = header_buf[0];

        /* send header */
        smpi_mpi_send(header_buf, header_size, MPI_INT, to, tag, comm);
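        /* only the first arrived rank of this round (header_buf[0]) gets
           the data from root; the others receive it relayed along the
           chain encoded in the header */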
        /* send data - pipeline */
        for (i = 0; i < pipe_length; i++) {
          send_request_array[i] = smpi_mpi_isend((char *)buf + (i * increment), segment, datatype, to, tag, comm);
        }
        smpi_mpi_waitall((pipe_length), send_request_array, send_status_array);
      }
      /* end - send header followed by data */
      /* randomly MPI_Send to one node */
      /* this part has been commented out - performance-wise */
      else if (2 == 3) {
        /* search for the first node that never received data before */
        for (i = 0; i < size; i++) {
          if (i == root)
            continue;
          if (already_sent[i] == 0) {
            header_buf[0] = i;
            header_buf[1] = -1;
            to = i;

            smpi_mpi_send(header_buf, header_size, MPI_INT, to, tag, comm);

            /* still need to chop data so that we can use the same non-root code */
            for (j = 0; j < pipe_length; j++) {
              smpi_mpi_send((char *)buf + (j * increment), segment, datatype, to, tag, comm);
            }
            already_sent[i] = 1;
            sent_count++;
            break;
          }
        }
      }
    }                           /* end - while (sent_count < size-1) loop */
  }
  /* end - root */
  /* non-root */
  else {
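    /* non-root: announce arrival to rank 0, learn the chain position from
       the forwarded header, then receive and relay the pipelined segments */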
    /* send 1-byte message to root */
    smpi_mpi_send(temp_buf, 1, MPI_CHAR, 0, tag, comm);
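    /* the content of temp_buf is irrelevant; the message itself is the
       arrival signal that rank 0 probes for */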
    /* wait for header forward when required */
    request = smpi_mpi_irecv(header_buf, header_size, MPI_INT, MPI_ANY_SOURCE, tag, comm);
    smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
    /* search for this rank's position in the header */
    int myordering = 0;
    while (rank != header_buf[myordering]) {
      myordering++;
    }

    to = header_buf[myordering + 1];
    if (myordering == 0) {
      from = 0;
    } else {
      from = header_buf[myordering - 1];
    }
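    /* to == -1 (the header terminator) means this rank is the last link
       of the chain and has nobody to forward to */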
    /* send header when required */
    if (to != -1) {
      smpi_mpi_send(header_buf, header_size, MPI_INT, to, tag, comm);
    }
    /* receive data: post all segment receives up front */
    for (i = 0; i < pipe_length; i++) {
      recv_request_array[i] = smpi_mpi_irecv((char *)buf + (i * increment), segment, datatype, from, tag, comm);
    }
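    /* "wait" variant: each segment is forwarded only after it has fully
       arrived, so no segment is relayed before it is received */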
    /* send data */
    if (to != -1) {
      for (i = 0; i < pipe_length; i++) {
        smpi_mpi_wait(&recv_request_array[i], MPI_STATUS_IGNORE);
        send_request_array[i] = smpi_mpi_isend((char *)buf + (i * increment), segment, datatype, to, tag, comm);
      }
      smpi_mpi_waitall((pipe_length), send_request_array, send_status_array);
    }
    /* last node in the chain: just wait for all receives to complete */
    else {
      smpi_mpi_waitall((pipe_length), recv_request_array, recv_status_array);
    }
  }
  free(send_request_array);
  free(recv_request_array);
  free(send_status_array);
  free(recv_status_array);
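  /* end pipeline; the arrays above came from xbt_malloc, which aborts on
     allocation failure, so no NULL checks were needed */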
  /* when count is not divisible by block size, use default BCAST for the remainder */
  if ((remainder != 0) && (count > segment)) {
    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait uses default MPI_bcast.");
    smpi_mpi_bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}