/* Copyright (c) 2013-2017. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.hpp"
int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;
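/* tunable pipeline segment size, in bytes; converted to a number of
 * elements of the broadcast datatype inside bcast() below */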
#ifndef BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE
#define BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE 1024
#endif

#ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
#define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
#endif
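/* Algorithm overview:
 * - each non-root process announces its arrival by sending a 1-byte message
 *   to the root;
 * - the root repeatedly probes for these messages, batches the ranks that
 *   have arrived into a header (a rank list terminated by -1) describing a
 *   forwarding chain, sends the header to the first rank of the chain, then
 *   pipelines the data to it segment by segment;
 * - every other rank in the chain forwards the header and the segments to
 *   its successor, overlapping receive and forward. */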
/* Non-topology-specific pipelined linear-bcast function */
int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
                                                 MPI_Datatype datatype,
                                                 int root, MPI_Comm comm)
{
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
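  /* one slot per peer rank, filled by the root's iprobe calls below */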
  int rank, size;
  int i, j, k;
  int tag = -COLL_TAG_BCAST;
  int will_send[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int header_index;
  int flag_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int already_sent[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int header_buf[BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE];
  char temp_buf[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int max_node = BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE;
  int header_size = BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE;
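  /* NOTE: the bookkeeping arrays above are statically sized, so at most
   * BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE ranks are supported */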
  MPI_Aint extent;
  extent = datatype->get_extent();

  /* source and destination */
  int to, from;

  rank = comm->rank();
  size = comm->size();
  /* segment is the segment size in number of elements (not bytes) */
  int segment = bcast_arrival_pattern_aware_wait_segment_size_in_byte / extent;
  segment = segment == 0 ? 1 : segment;

  /* pipeline length: number of full segments */
  int pipe_length = count / segment;

  /* buffer offset used when sending and receiving data: the segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size,
     the small remainder is handled by the native implementation */
  int remainder = count % segment;
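  /* Example: with the default 8192-byte segment and a 4-byte datatype such
   * as MPI_INT, segment = 2048 elements; count = 10000 then gives
   * pipe_length = 4 and remainder = 1808, the latter handled by the
   * fallback bcast at the end of this function. */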
  /* if root is not zero, send the data to rank zero first.
     This can be made faster by using logical src, dst instead. */
  if (root != 0) {
    if (rank == root) {
      Request::send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      Request::recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
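  /* from here on, rank 0 holds the data and acts as the effective root */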
  /* value == 0 means root has not sent data (or header) to the node yet */
  for (i = 0; i < max_node; i++) {
    already_sent[i] = 0;
  }
  /* when the message is smaller than a block size => no pipeline */
  if (count <= segment) {
    segment = count;
    pipe_length = 1;
  }
  /* start pipeline bcast */
  send_request_array =
      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  recv_request_array =
      (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  send_status_array =
      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
  recv_status_array =
      (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
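  /* sized to (size + pipe_length) entries, although each pipelined burst
   * below uses at most pipe_length of them */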
  /* root */
  if (rank == 0) {
    int sent_count = 0;

    for (i = 0; i < BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE; i++)
      will_send[i] = 0;

    while (sent_count < (size - 1)) {
      /* loop k times to let more processes arrive before starting to send data */
      for (k = 0; k < 3; k++) {
        for (i = 1; i < size; i++) {
          if ((already_sent[i] == 0) && (will_send[i] == 0)) {
            Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                            &temp_status_array[i]);
            if (flag_array[i] == 1) {
              will_send[i] = 1;
              /* recv the 1-byte arrival message */
              Request::recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm, &status);
            }
          }
        }
      }
      header_index = 0;

      /* collect the newly arrived nodes into the header */
      for (i = 1; i < size; i++) {
        /* message arrived */
        if ((will_send[i] == 1) && (already_sent[i] == 0)) {
          header_buf[header_index] = i;
          header_index++;
          sent_count++;

          /* will send in the next step */
          already_sent[i] = 1;
        }
      }
      /* send header followed by data */
      if (header_index != 0) {
        header_buf[header_index] = -1;
        to = header_buf[0];

        /* send header */
        Request::send(header_buf, header_size, MPI_INT, to, tag, comm);

        /* send data - pipeline */
        for (i = 0; i < pipe_length; i++) {
          send_request_array[i] = Request::isend((char *)buf + (i * increment), segment, datatype, to, tag, comm);
        }
        Request::waitall(pipe_length, send_request_array, send_status_array);
      }
      /* end - send header followed by data */
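      /* the root only ever sends to header_buf[0]; that rank forwards the
       * header and the data along the chain (see the non-root code below) */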
      /* randomly MPI_Send to one node */
      /* this part is disabled because it did not pay off performance-wise */
      else if (0) {
        /* search for the first node that never received data before */
        for (i = 0; i < size; i++) {
          if (already_sent[i] == 0) {
            header_buf[0] = i;
            header_buf[1] = -1;
            to = i;

            Request::send(header_buf, header_size, MPI_INT, to, tag, comm);

            /* still need to chop the data so that we can use the same non-root code */
            for (j = 0; j < pipe_length; j++) {
              Request::send((char *)buf + (j * increment), segment, datatype, to, tag, comm);
            }
          }
        }
      }
    } /* end - while (sent_count < size-1) loop */
  }
  /* non-root nodes */
  else {
    /* send 1-byte message to root */
    Request::send(temp_buf, 1, MPI_CHAR, 0, tag, comm);
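    /* this is the arrival announcement that the root's iprobe loop detects */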
    /* wait for the header forward when required */
    request = Request::irecv(header_buf, header_size, MPI_INT, MPI_ANY_SOURCE, tag, comm);
    Request::wait(&request, MPI_STATUS_IGNORE);

    /* search for this rank's position in the header */
    int myordering = 0;
    while (rank != header_buf[myordering]) {
      myordering++;
    }

    /* determine the successor and predecessor in the chain */
    to = header_buf[myordering + 1];
    if (myordering == 0) {
      from = 0;
    } else {
      from = header_buf[myordering - 1];
    }
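    /* myordering == 0 means this rank heads the chain and receives directly
     * from the root; to == -1 (the header terminator) marks the tail */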
    /* forward the header when there is a successor */
    if (to != -1) {
      Request::send(header_buf, header_size, MPI_INT, to, tag, comm);
    }
    /* receive data - pipeline */
    for (i = 0; i < pipe_length; i++) {
      recv_request_array[i] = Request::irecv((char *)buf + (i * increment), segment, datatype, from, tag, comm);
    }

    /* forward each segment to the successor as soon as it has arrived */
    if (to != -1) {
      for (i = 0; i < pipe_length; i++) {
        Request::wait(&recv_request_array[i], MPI_STATUS_IGNORE);
        send_request_array[i] = Request::isend((char *)buf + (i * increment), segment, datatype, to, tag, comm);
      }
      Request::waitall(pipe_length, send_request_array, send_status_array);
    }
    /* tail of the chain: receive only */
    else {
      Request::waitall(pipe_length, recv_request_array, recv_status_array);
    }
  }
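  /* waiting for each segment before forwarding it is presumably what gives
   * this "wait" variant its name; receives and forwards of consecutive
   * segments still overlap across the pipeline */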
  free(send_request_array);
  free(recv_request_array);
  free(send_status_array);
  free(recv_status_array);
  /* when count is not divisible by the block size, use the default bcast
     for the remainder */
  if ((remainder != 0) && (count > segment)) {
    XBT_WARN("MPI_bcast_arrival_pattern_aware_wait: using default MPI_bcast for the remainder.");
    Colls::bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}