/* Copyright (c) 2013-2017. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.h"

static int bcast_NTSL_segment_size_in_byte = 8192;

#define HEADER_SIZE 1024
#define MAX_NODE 1024

namespace simgrid {
namespace smpi {
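/* Overview (as implemented below): every non-root process first sends a
 * 1-byte "I have arrived" message to the root.  The root repeatedly probes
 * for these messages and chains the ranks that have already arrived into
 * header_buf, terminated by -1.  It then ships the header followed by the
 * data to the first rank of the chain, and each rank forwards both to its
 * successor, so data flows to processes in their arrival order. */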
/* Non-topology-specific pipelined linear-bcast function */
int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count,
                                            MPI_Datatype datatype, int root,
                                            MPI_Comm comm)
{
  int tag = -COLL_TAG_BCAST;

  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[MAX_NODE];
  int rank, size;
  int i, j;

  int sent_count;
  int header_index;
  int flag_array[MAX_NODE];
  int already_sent[MAX_NODE];
  int to_clean[MAX_NODE];
  int header_buf[HEADER_SIZE];
  char temp_buf[MAX_NODE];

  MPI_Aint extent = datatype->get_extent();

  /* destination */
  int to;

  rank = comm->rank();
  size = comm->size();
  /* segment is segment size in number of elements (not bytes) */
  int segment = bcast_NTSL_segment_size_in_byte / extent;
  segment = segment == 0 ? 1 : segment;

  /* pipeline length */
  int pipe_length = count / segment;

  /* buffer offset used when sending and receiving data = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size,
     the small remainder is handled by the native implementation */
  int remainder = count % segment;
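  /* For example (sample values, not from the original source): with the
   * default 8192-byte segments and a 4-byte datatype such as MPI_INT,
   * segment = 2048 elements, so count = 10000 gives pipe_length = 4 and
   * remainder = 1808. */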
  /* if root is not zero, send to rank zero first;
     this can be modified to make it faster by using logical src, dst. */
  if (root != 0) {
    if (rank == root) {
      Request::send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      Request::recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
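  /* From here on, rank 0 acts as the logical root: the exchange above moved
   * the data there when the caller's root was some other rank. */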
  /* value == 0 means root has not sent data (or the header) to the node yet */
  for (i = 0; i < MAX_NODE; i++) {
    already_sent[i] = 0;
    to_clean[i] = 0;
  }
  /* when a message is smaller than a block size => no pipeline */
  if (count <= segment) {
    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {
        for (i = 1; i < size; i++) {
          Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                          MPI_STATUS_IGNORE);
        }

        header_index = 0;
        /* recv 1-byte message */
        for (i = 1; i < size; i++) {
          /* message has arrived */
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            Request::recv(temp_buf, 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send in the next step */
            already_sent[i] = 1;
          }
        }
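        /* header_buf now lists the ranks that arrived in this round, in
         * arrival order; it is terminated by -1 below, and each rank will
         * forward the header and the data to its successor in this chain. */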
        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];
          Request::send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          Request::send(buf, count, datatype, to, tag, comm);
        }
        /* no one has arrived yet: MPI_Send to one node directly */
        else {
          /* search for the first node that never received data before */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              Request::send(header_buf, HEADER_SIZE, MPI_INT, i, tag, comm);
              Request::send(buf, count, datatype, i, tag, comm);
              already_sent[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      }                         /* while loop */
    }

    /* non-root */
    else {
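      /* Non-root side of the single-block mode: announce arrival, wait for
       * the header and the data, locate this rank's position in the chain,
       * and forward both to the successor if there is one. */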
      /* send 1-byte message to root */
      Request::send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for header and data, forward when required */
      Request::recv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
                    &status);
      Request::recv(buf, count, datatype, MPI_ANY_SOURCE, tag, comm, &status);

      /* search for this rank's position in the chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }
      /* send header followed by data */
      if (header_buf[myordering + 1] != -1) {
        Request::send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                      tag, comm);
        Request::send(buf, count, datatype, header_buf[myordering + 1], tag, comm);
      }
    }
  }

  /* pipeline bcast */
  else {
    send_request_array =
        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
    recv_request_array =
        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
    send_status_array =
        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
    recv_status_array =
        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
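    /* The arrays are sized generously, one slot per peer plus one per
     * pipeline segment, although only the first pipe_length slots are
     * actually used below. */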
    if (rank == 0) {
      //double start2 = MPI_Wtime();
      sent_count = 0;

      while (sent_count < (size - 1)) {
        //start = MPI_Wtime();
        for (i = 1; i < size; i++) {
          Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                          &temp_status_array[i]);
        }
        //total = MPI_Wtime() - start;
        //printf("Iprobe time = %.2f\n",total);

        header_index = 0;
        /* recv 1-byte message */
        for (i = 1; i < size; i++) {
          /* message has arrived */
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            Request::recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                          &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send in the next step */
            already_sent[i] = 1;
          }
        }
        //total = MPI_Wtime() - start;
        //printf("Recv 1-byte time = %.2f\n",total);
        /* debugging output, disabled:
        if (header_index != 0) {
          printf("header index = %d node = ", header_index);
          for (i = 0; i < header_index; i++) {
            printf("%d ", header_buf[i]);
          }
          printf("\n");
        }
        */
        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];

          //start = MPI_Wtime();

          /* send header */
          Request::send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

          //total = MPI_Wtime() - start;
          //printf("\tSend header to %d time = %.2f\n",to,total);

          //start = MPI_Wtime();

          /* send data - non-pipeline case (disabled: the pipelined path
             below is always taken) */
          if (0 == 1) {
            //if (header_index == 1) {
            Request::send(buf, count, datatype, to, tag, comm);
          }
          /* send data - pipeline */
          else {
            for (i = 0; i < pipe_length; i++) {
              Request::send((char *)buf + (i * increment), segment, datatype, to, tag, comm);
            }
            //Request::waitall((pipe_length), send_request_array, send_status_array);
          }
          //total = MPI_Wtime() - start;
          //printf("\tSend data to %d time = %.2f\n",to,total);
        }
        /* no one has arrived yet: MPI_Send to one node directly */
        else {
          /* search for the first node that never received data before */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              to = i;

              //start = MPI_Wtime();
              Request::send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

              /* still need to chop data so that we can use the same non-root code */
              for (j = 0; j < pipe_length; j++) {
                Request::send((char *)buf + (j * increment), segment, datatype, to, tag,
                              comm);
              }

              //Request::send(buf,count,datatype,to,tag,comm);
              //Request::wait(&request,MPI_STATUS_IGNORE);

              //total = MPI_Wtime() - start;
              //printf("SEND TO SINGLE node %d time = %.2f\n",i,total);

              already_sent[i] = 1;
              to_clean[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      }                         /* while loop */
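      /* Nodes served through the fallback path above still sent their 1-byte
       * arrival message; drain those now so they do not linger in the
       * message queue. */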
      for (i = 0; i < size; i++)
        if (to_clean[i] != 0)
          Request::recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                        &status);
      //total = MPI_Wtime() - start2;
      //printf("Node zero iter = %d time = %.2f\n",iteration,total);
    }

    /* non-root */
    else {
      /* send 1-byte message to root */
      Request::send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for header, forward when required */
      request = Request::irecv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm);
      Request::wait(&request, MPI_STATUS_IGNORE);

      /* search for this rank's position in the chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }
      /* send header when required */
      if (header_buf[myordering + 1] != -1) {
        Request::send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                      tag, comm);
      }

      /* receive data */

      /* direct receive from root, disabled: the pipelined path below is
         always taken */
      if (0 == -1) {
        //if (header_buf[1] == -1) {
        request = Request::irecv(buf, count, datatype, 0, tag, comm);
        Request::wait(&request, MPI_STATUS_IGNORE);
        //printf("\t\tnode %d ordering = %d receive data from root\n",rank,myordering);
      } else {
        for (i = 0; i < pipe_length; i++) {
          recv_request_array[i] = Request::irecv((char *)buf + (i * increment), segment, datatype, MPI_ANY_SOURCE,
                                                 tag, comm);
        }
      }
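      /* Forwarding overlaps with reception: when a successor exists, each
       * segment is sent on as soon as its own receive completes; a node at
       * the end of the chain only has to wait for its receives. */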
      /* send data */
      if (header_buf[myordering + 1] != -1) {
        for (i = 0; i < pipe_length; i++) {
          Request::wait(&recv_request_array[i], MPI_STATUS_IGNORE);
          send_request_array[i] = Request::isend((char *)buf + (i * increment), segment, datatype,
                                                 header_buf[myordering + 1], tag, comm);
        }
        Request::waitall(pipe_length, send_request_array, send_status_array);
      } else {
        Request::waitall(pipe_length, recv_request_array, recv_status_array);
      }
    }
    free(send_request_array);
    free(recv_request_array);
    free(send_status_array);
    free(recv_status_array);
  }                             /* end pipeline */
  /* when count is not divisible by block size, use default BCAST for the remainder */
  if ((remainder != 0) && (count > segment)) {
    XBT_WARN("MPI_bcast_arrival_pattern_aware uses default MPI_bcast.");
    Colls::bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}

}
}