#include <mpi.h>
#include <stdlib.h>             /* for malloc/free */

static int bcast_NTSL_segment_size_in_byte = 8192;

#define HEADER_SIZE 1024
#define MAX_NODE 1024           /* upper bound on communicator size, needed for
                                   the fixed-size arrays below (value assumed;
                                   the original define is not shown) */
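
/*
 * Arrival-pattern-aware broadcast: instead of using a fixed tree, the root
 * probes for 1-byte "ready" messages and serves nodes in the order in which
 * they arrive.  Each batch of arrivals is recorded in header_buf (terminated
 * by -1); the header and the data are sent to the first node of the batch,
 * which forwards both down the chain.  Messages that fit in one segment take
 * a non-pipelined path; larger messages are chopped into segments and
 * pipelined through the chain.
 */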
/* Non-topology-specific pipelined linear-bcast function */
int smpi_coll_tuned_bcast_arrival_pattern_aware(void *buf, int count,
                                                MPI_Datatype datatype, int root,
                                                MPI_Comm comm)
{
  int tag = 50;                 /* message tag (arbitrary choice; the original
                                   value is not shown) */
  MPI_Status status;
  MPI_Request request;
  MPI_Request *send_request_array;
  MPI_Request *recv_request_array;
  MPI_Status *send_status_array;
  MPI_Status *recv_status_array;

  MPI_Status temp_status_array[MAX_NODE];

  int rank, size;
  int i, j;

  int sent_count;               /* number of non-root nodes served so far */
  int header_index;             /* number of arrivals in the current batch */
  int flag_array[MAX_NODE];     /* Iprobe results, one per rank */
  int already_sent[MAX_NODE];   /* 1 once a node has been served */
  int header_buf[HEADER_SIZE];  /* forwarding chain, terminated by -1 */
  char temp_buf[MAX_NODE];      /* scratch space for 1-byte ready messages */

  /* destination of the current header/data transfer */
  int to;
  MPI_Aint extent;
  MPI_Type_extent(datatype, &extent);  /* deprecated in MPI-3; MPI_Type_get_extent
                                          is the modern equivalent */

  /* rank and size must come from the communicator we actually operate on */
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  /* segment size in number of elements (not bytes) */
  int segment = bcast_NTSL_segment_size_in_byte / extent;

  /* pipeline length */
  int pipe_length = count / segment;

  /* buffer offset between consecutive segments = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size, the small
     remainder is handled by the native implementation at the end */
  int remainder = count % segment;
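
  /* Worked example (assuming 4-byte elements, e.g. MPI_INT, and
     count = 10000): segment = 8192 / 4 = 2048 elements,
     pipe_length = 10000 / 2048 = 4 segments, increment = 8192 bytes,
     and remainder = 10000 % 2048 = 1808 elements for the final bcast. */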

  /* if root is not zero, relocate the data to rank zero first.
     This can be made faster by using logical src, dst. */
  if (root != 0) {
    if (rank == root) {
      MPI_Send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      MPI_Recv(buf, count, datatype, root, tag, comm, &status);
    }
  }

  /* value == 0 means root has not sent data (or the header) to the node yet */
  for (i = 0; i < MAX_NODE; i++) {
    already_sent[i] = 0;
  }

  /* when the message is no larger than one block => no pipeline */
  if (count <= segment) {
    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {
        /* probe every other rank for a 1-byte ready message */
        for (i = 1; i < size; i++) {
          MPI_Iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                     &temp_status_array[i]);
        }

        header_index = 0;

        /* receive the 1-byte ready messages that have arrived */
        for (i = 1; i < size; i++) {
          /* message has arrived */
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            MPI_Recv(temp_buf, 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send to this node in the next step */
            already_sent[i] = 1;
          }
        }

        /* send the header followed by the data to the first arrival;
           the chain nodes forward them onwards */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];
          MPI_Send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          MPI_Send(buf, count, datatype, to, tag, comm);
        }

        /* no new arrivals: send to one node directly */
        else {
          /* search for the first node that has not received data yet */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              MPI_Send(header_buf, HEADER_SIZE, MPI_INT, i, tag, comm);
              MPI_Send(buf, count, datatype, i, tag, comm);
              already_sent[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      }                         /* while loop */
    }

    /* non-root ranks */
    else {
      /* send a 1-byte ready message to root */
      MPI_Send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header and data, then forward when required */
      MPI_Recv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
               &status);
      MPI_Recv(buf, count, datatype, MPI_ANY_SOURCE, tag, comm, &status);

      /* find this rank's position in the forwarding chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* send the header followed by the data to the next node, if any */
      if (header_buf[myordering + 1] != -1) {
        MPI_Send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                 tag, comm);
        MPI_Send(buf, count, datatype, header_buf[myordering + 1], tag, comm);
      }
    }
  }
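
  /* Pipelined path: the root again serves nodes in arrival order, but the
     payload is chopped into pipe_length segments so that a chain node can
     forward segment i while segment i+1 is still in flight. */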
  else {
    send_request_array =
        (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
    recv_request_array =
        (MPI_Request *) malloc((size + pipe_length) * sizeof(MPI_Request));
    send_status_array =
        (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));
    recv_status_array =
        (MPI_Status *) malloc((size + pipe_length) * sizeof(MPI_Status));

    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {
        /* probe every other rank for a 1-byte ready message */
        for (i = 1; i < size; i++) {
          MPI_Iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                     &temp_status_array[i]);
        }

        header_index = 0;

        /* receive the 1-byte ready messages that have arrived */
        for (i = 1; i < size; i++) {
          /* message has arrived */
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            MPI_Recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                     &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;

            /* will send to this node in the next step */
            already_sent[i] = 1;
          }
        }

        /* send the header followed by the data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];

          /* send header */
          MPI_Send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

          /* send the data, always chopped into segments so that every
             receiver can use the same pipelined receive code */
          for (i = 0; i < pipe_length; i++) {
            MPI_Send((char *)buf + (i * increment), segment, datatype, to, tag, comm);
          }
        }

        /* no new arrivals: send to one node directly */
        else {
          /* search for the first node that has not received data yet */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              to = i;

              MPI_Send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

              /* still chop the data so that we can use the same non-root code */
              for (j = 0; j < pipe_length; j++) {
                MPI_Send((char *)buf + (j * increment), segment, datatype, to, tag,
                         comm);
              }

              already_sent[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      }                         /* while loop */
    }

    /* non-root ranks */
    else {
      /* send a 1-byte ready message to root */
      MPI_Send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header; forward it when required */
      MPI_Irecv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm,
                &request);
      MPI_Wait(&request, MPI_STATUS_IGNORE);

      /* find this rank's position in the forwarding chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* forward the header to the next node, if any */
      if (header_buf[myordering + 1] != -1) {
        MPI_Send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1],
                 tag, comm);
      }

      /* receive the data in segments (the root always sends chopped data) */
      for (i = 0; i < pipe_length; i++) {
        MPI_Irecv((char *)buf + (i * increment), segment, datatype, MPI_ANY_SOURCE,
                  tag, comm, &recv_request_array[i]);
      }

      /* forward each segment to the next node in the chain as it arrives */
      if (header_buf[myordering + 1] != -1) {
        for (i = 0; i < pipe_length; i++) {
          MPI_Wait(&recv_request_array[i], MPI_STATUS_IGNORE);
          MPI_Isend((char *)buf + (i * increment), segment, datatype,
                    header_buf[myordering + 1], tag, comm,
                    &send_request_array[i]);
        }
        MPI_Waitall(pipe_length, send_request_array, send_status_array);
      }

      /* last node in the chain: still has to complete its own receives */
      else {
        for (i = 0; i < pipe_length; i++)
          MPI_Wait(&recv_request_array[i], MPI_STATUS_IGNORE);
      }
    }

    free(send_request_array);
    free(recv_request_array);
    free(send_status_array);
    free(recv_status_array);
  }                             /* end of pipelined path */

  /* when count is not divisible by the block size, use the default
     BCAST for the remainder */
  if ((remainder != 0) && (count > segment)) {
    MPI_Bcast((char *)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}
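
/* Tuning note: bcast_NTSL_segment_size_in_byte (8192 above) trades pipeline
   depth against per-message overhead; a larger segment means fewer, larger
   messages and a shorter pipeline. */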