int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192;

#ifndef BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE
#define BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE 1024
#endif

#ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
#define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
#endif
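/*
 * Arrival-pattern-aware broadcast, in outline (derived from the code below):
 *   1. Every non-root rank announces its arrival by sending a 1-byte message
 *      to the root.
 *   2. The root repeatedly probes for these announcements, groups the ranks
 *      that have arrived into a chain, and sends the chain head a header
 *      (the list of ranks in the chain, terminated by -1) followed by the
 *      data, pipelined in fixed-size segments.
 *   3. Each non-root receives the header, locates itself in the chain, then
 *      receives segments from its predecessor and forwards them to its
 *      successor (if any).
 */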
/* Non-topology-specific pipelined linear-bcast function */
int smpi_coll_tuned_bcast_arrival_pattern_aware_wait(void *buf, int count,
                                                     MPI_Datatype datatype,
                                                     int root, MPI_Comm comm)
{
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int rank, size;
  int i, j, k;
  int tag = 1;      /* assumed tag value; any tag reserved for this collective works */
  int will_send[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int sent_count;
  int header_index;
  int flag_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int already_sent[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int header_buf[BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE];
  char temp_buf[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int max_node = BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE;
  int header_size = BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE;
  /* MPI_Type_extent was removed in MPI-3.0; use MPI_Type_get_extent instead */
  MPI_Aint lb, extent;
  MPI_Type_get_extent(datatype, &lb, &extent);

  /* source and destination */
  int to, from;

  /* rank and size must come from comm, not MPI_COMM_WORLD,
     since root is a rank in comm */
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  /* segment is the segment size in number of elements (not bytes) */
  int segment = bcast_arrival_pattern_aware_wait_segment_size_in_byte / extent;

  /* number of pipeline stages */
  int pipe_length = count / segment;

  /* buffer offset between consecutive segments = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size,
     the small remainder is handled with the native implementation */
  int remainder = count % segment;
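  /* Illustrative numbers (assuming a 4-byte MPI_INT and the default 8192-byte
     segment size): segment = 8192 / 4 = 2048 elements, so for count = 10000
     we get pipe_length = 4, increment = 8192 bytes, and remainder = 1808
     elements, which are broadcast separately at the end. */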
  /* if root is not zero, move the data to rank zero first;
     this could be made faster by using logical src/dst ranks instead */
  if (root != 0) {
    if (rank == root) {
      MPI_Send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      MPI_Recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
  /* value == 0 means root has not sent data (or header) to the node yet */
  for (i = 0; i < max_node; i++) {
    already_sent[i] = 0;
  }

  /* when the message is smaller than one block, there is no pipeline */
  if (count <= segment) {
    segment = count;
    pipe_length = 1;
  }
  /* start pipeline bcast */
  send_request_array =
      (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
  recv_request_array =
      (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
  send_status_array =
      (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
  recv_status_array =
      (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
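  /* (size + pipe_length) over-allocates: the paths below index at most
     pipe_length entries of each array, so the extra size slots are harmless
     headroom. */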
  /* root */
  if (rank == 0) {
    sent_count = 0;

    for (i = 0; i < BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE; i++)
      will_send[i] = 0;

    while (sent_count < (size - 1)) {
      /* loop k times to let more processes arrive before starting to send data */
      for (k = 0; k < 3; k++) {
        for (i = 1; i < size; i++) {
          if ((already_sent[i] == 0) && (will_send[i] == 0)) {
            MPI_Iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                       &temp_status_array[i]);
            if (flag_array[i] == 1) {
              will_send[i] = 1;
              /* consume the 1-byte arrival message */
              MPI_Recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                       &status);
            }
          }
        }
      }
      header_index = 0;

      /* collect the ranks whose 1-byte arrival message was received this round */
      for (i = 1; i < size; i++) {
        if ((will_send[i] == 1) && (already_sent[i] == 0)) {
          header_buf[header_index] = i;
          header_index++;
          sent_count++;

          /* will send in the next step */
          already_sent[i] = 1;
        }
      }
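      /* header_buf now lists, in probe order, the ranks served this round;
         a -1 terminator is appended below. The first listed rank receives
         directly from the root, and each rank forwards to its successor in
         the list, forming a pipelined chain. */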
      /* send header followed by data */
      if (header_index != 0) {
        header_buf[header_index] = -1;
        to = header_buf[0];

        /* send header */
        MPI_Send(header_buf, header_size, MPI_INT, to, tag, comm);

        /* send data - pipelined */
        for (i = 0; i < pipe_length; i++) {
          MPI_Isend((char *)buf + (i * increment), segment, datatype, to, tag,
                    comm, &send_request_array[i]);
        }
        MPI_Waitall(pipe_length, send_request_array, send_status_array);
      }
      /* end - send header followed by data */

      /* fallback: MPI_Send to one node that has not received data yet;
         this part is disabled for performance reasons */
#if 0
      /* search for the first node that never received data before */
      for (i = 0; i < size; i++) {
        if (already_sent[i] == 0) {
          header_buf[0] = i;
          header_buf[1] = -1;
          to = i;

          /* send header */
          MPI_Send(header_buf, header_size, MPI_INT, to, tag, comm);

          /* still need to chop data so that we can use the same non-root code */
          for (j = 0; j < pipe_length; j++) {
            MPI_Send((char *)buf + (j * increment), segment, datatype, to, tag,
                     comm);
          }
        }
      }
#endif
    }                           /* end - while (sent_count < size - 1) loop */
  }
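  /* At this point every non-root rank has been assigned to a chain and the
     full message has been pipelined to each chain head. */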
  /* non-root */
  else {
    /* send 1-byte message to the root to announce arrival */
    MPI_Send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

    /* wait for the header to be forwarded when required */
    MPI_Irecv(header_buf, header_size, MPI_INT, MPI_ANY_SOURCE, tag, comm,
              &request);
    MPI_Wait(&request, MPI_STATUS_IGNORE);
    /* search for this rank's position in the chain */
    int myordering = 0;
    while (rank != header_buf[myordering]) {
      myordering++;
    }

    to = header_buf[myordering + 1];
    if (myordering == 0) {
      from = 0;
    } else {
      from = header_buf[myordering - 1];
    }

    /* forward the header when required (i.e. when there is a successor) */
    if (to != -1) {
      MPI_Send(header_buf, header_size, MPI_INT, to, tag, comm);
    }
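    /* from is this rank's predecessor in the chain (the root for the first
       listed rank); to is its successor, or -1 at the tail of the chain. */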
    /* receive data */
    for (i = 0; i < pipe_length; i++) {
      MPI_Irecv((char *)buf + (i * increment), segment, datatype, from, tag,
                comm, &recv_request_array[i]);
    }
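    /* Forwarding below overlaps with reception: while segment i is being
       sent downstream, the receives posted above for segments i+1 onward
       can still make progress. */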
    /* forward data to the successor, when there is one */
    if (to != -1) {
      for (i = 0; i < pipe_length; i++) {
        MPI_Wait(&recv_request_array[i], MPI_STATUS_IGNORE);
        MPI_Isend((char *)buf + (i * increment), segment, datatype, to, tag,
                  comm, &send_request_array[i]);
      }
      MPI_Waitall(pipe_length, send_request_array, send_status_array);
    }

    /* make sure every segment has arrived */
    MPI_Waitall(pipe_length, recv_request_array, recv_status_array);
  }
  free(send_request_array);
  free(recv_request_array);
  free(send_status_array);
  free(recv_status_array);

  /* when count is not divisible by the block size, use the default bcast
     for the remainder */
  if ((remainder != 0) && (count > segment)) {
    MPI_Bcast((char *)buf + (pipe_length * increment), remainder, datatype,
              root, comm);
  }

  return MPI_SUCCESS;
}
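/* Usage sketch (illustrative, not from the original source): call it like
   MPI_Bcast from an MPI program. fill_data is a hypothetical init helper.

     int data[10000];
     if (rank == 0)
       fill_data(data, 10000);
     smpi_coll_tuned_bcast_arrival_pattern_aware_wait(data, 10000, MPI_INT,
                                                      0, MPI_COMM_WORLD);

   On return, every rank in the communicator holds root's contents of buf. */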