/* Copyright (c) 2013-2023. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "../colls_private.hpp"

static int bcast_NTSL_segment_size_in_byte = 8192;

#define HEADER_SIZE 1024
#define MAX_NODE 1024
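
/* As used below: HEADER_SIZE bounds the length of the arrival-order chain that one
   header message can carry, and MAX_NODE bounds the communicator size supported by
   the static bookkeeping arrays. */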

namespace simgrid::smpi {
/* Non-topology-specific pipelined linear-bcast function */
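/* Sketch of the scheme, as implemented below: every non-root rank first sends a
 * 1-byte "ready" message to rank 0. Rank 0 keeps probing for these messages,
 * records the senders in header_buf in their order of arrival, and sends that
 * header followed by the data to the first rank of the batch. Each receiver looks
 * up its own position in the header and forwards header and data to the next rank
 * listed, so the broadcast chain follows the observed arrival pattern. Messages
 * larger than one segment are additionally split and pipelined. */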
int bcast__arrival_pattern_aware(void *buf, int count,
                                 MPI_Datatype datatype, int root,
                                 MPI_Comm comm)
{
  int tag = -COLL_TAG_BCAST;

  MPI_Status status;
  MPI_Request request;

  MPI_Status temp_status_array[MAX_NODE];

  int rank, size, i, j, sent_count, header_index, to;

  int flag_array[MAX_NODE];
  int already_sent[MAX_NODE];
  int to_clean[MAX_NODE];
  int header_buf[HEADER_SIZE];
  char temp_buf[MAX_NODE];

  MPI_Aint extent = datatype->get_extent();

  rank = comm->rank();
  size = comm->size();

  /* segment is the segment size in number of elements (not bytes) */
  int segment = bcast_NTSL_segment_size_in_byte / extent;
  segment = segment == 0 ? 1 : segment;

  /* pipeline length: number of full segments in the message */
  int pipe_length = count / segment;

  /* buffer offset between consecutive segments = segment size in bytes */
  int increment = segment * extent;

  /* if the input size is not divisible by the segment size, the small
     remainder will be handled with the native implementation */
  int remainder = count % segment;
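
  /* only pipe_length * segment elements go through the code below; the remainder,
     if any, is broadcast with the default algorithm at the end of this function */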

  /* if root is not zero, relocate the data to rank zero first
     (this could be made faster by using logical src/dst ranks instead) */
  if (root != 0) {
    if (rank == root) {
      Request::send(buf, count, datatype, 0, tag, comm);
    } else if (rank == 0) {
      Request::recv(buf, count, datatype, root, tag, comm, &status);
    }
  }
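
  /* from here on rank 0 acts as the root: the arrival-pattern bookkeeping below is
     driven by rank 0, whatever the original root was */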

  /* value == 0 means root has not sent data (or the header) to the node yet */
  for (i = 0; i < MAX_NODE; i++)
    already_sent[i] = to_clean[i] = 0;

  /* when the message is smaller than one block => no pipelining */
  if (count <= segment) {
    if (rank == 0) {
      sent_count = 0;

      while (sent_count < (size - 1)) {
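        /* each round: probe every rank, batch those whose "ready" message has
           arrived, and ship header + data to the head of that batch; if nobody is
           ready yet, push to the first rank not served so far */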
        for (i = 1; i < size; i++) {
          Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i], MPI_STATUSES_IGNORE);
        }

        header_index = 0;
        /* recv 1-byte message */
        for (i = 1; i < size; i++) {
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            Request::recv(temp_buf, 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;
            already_sent[i] = 1; /* will send in the next step */
          }
        }
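
        /* header_buf[0 .. header_index-1] now lists, in arrival order, the ranks
           whose "ready" message was picked up in this round */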

        /* send header followed by data to the first rank of the batch */
        if (header_index != 0) {
          header_buf[header_index] = -1; /* -1 terminates the forwarding chain */
          to = header_buf[0];
          Request::send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);
          Request::send(buf, count, datatype, to, tag, comm);
        }

        /* nobody is ready yet: MPI_Send to one node */
        else {
          /* search for the first node that never received data before */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              Request::send(header_buf, HEADER_SIZE, MPI_INT, i, tag, comm);
              Request::send(buf, count, datatype, i, tag, comm);
              already_sent[i] = 1;
              sent_count++;
              break;
            }
          }
        }
      } /* while loop */
    }

    /* non-root */
    else {
      /* send 1-byte "ready" message to root */
      Request::send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for header and data, forward when required */
      Request::recv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm, &status);
      Request::recv(buf, count, datatype, MPI_ANY_SOURCE, tag, comm, &status);

      /* search for this rank's position in the arrival-order chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* send header followed by data to the next rank in the chain, if any */
      if (header_buf[myordering + 1] != -1) {
        Request::send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1], tag, comm);
        Request::send(buf, count, datatype, header_buf[myordering + 1], tag, comm);
      }
    }
  }
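
  /* pipeline bcast: same arrival-pattern scheme, but the payload is cut into
     pipe_length segments of `segment` elements so that forwarding can overlap with
     reception */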
  else {
    auto* send_request_array = new MPI_Request[size + pipe_length];
    auto* recv_request_array = new MPI_Request[size + pipe_length];
    auto* send_status_array = new MPI_Status[size + pipe_length];
    auto* recv_status_array = new MPI_Status[size + pipe_length];

    if (rank == 0) {
      //double start2 = MPI_Wtime();
      sent_count = 0;

      while (sent_count < (size - 1)) {
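        /* same arrival-driven loop as in the small-message case above, except that
           the data goes out as pipe_length separate segments */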
        //start = MPI_Wtime();
        for (i = 1; i < size; i++) {
          Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i], &temp_status_array[i]);
        }
        //total = MPI_Wtime() - start;
        //printf("Iprobe time = %.2f\n",total);

        header_index = 0;

        /* recv 1-byte message */
        for (i = 1; i < size; i++) {
          if ((flag_array[i] == 1) && (already_sent[i] == 0)) {
            Request::recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm, &status);
            header_buf[header_index] = i;
            header_index++;
            sent_count++;
            already_sent[i] = 1; /* will send in the next step */
          }
        }
        //total = MPI_Wtime() - start;
        //printf("Recv 1-byte time = %.2f\n",total);

        /* debug output, kept disabled:
        if (header_index != 0) {
          printf("header index = %d node = ", header_index);
          for (i = 0; i < header_index; i++)
            printf("%d ", header_buf[i]);
          printf("\n");
        }
        */

        /* send header followed by data */
        if (header_index != 0) {
          header_buf[header_index] = -1;
          to = header_buf[0];

          //start = MPI_Wtime();

          /* send header */
          Request::send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

          //total = MPI_Wtime() - start;
          //printf("\tSend header to %d time = %.2f\n",to,total);

          //start = MPI_Wtime();

          /* send data - non-pipeline case, kept disabled */
          if (0 == 1) {
            //if (header_index == 1) {
            Request::send(buf, count, datatype, to, tag, comm);
          }

          /* send data - pipeline: blocking sends, one segment at a time */
          else {
            for (i = 0; i < pipe_length; i++) {
              Request::send((char *)buf + (i * increment), segment, datatype, to, tag, comm);
            }
            //Request::waitall((pipe_length), send_request_array, send_status_array);
          }
          //total = MPI_Wtime() - start;
          //printf("\tSend data to %d time = %.2f\n",to,total);
        }

        /* nobody is ready yet: MPI_Send to one node */
        else {
          /* search for the first node that never received data before */
          for (i = 1; i < size; i++) {
            if (already_sent[i] == 0) {
              header_buf[0] = i;
              header_buf[1] = -1;
              to = i;

              //start = MPI_Wtime();
              Request::send(header_buf, HEADER_SIZE, MPI_INT, to, tag, comm);

              /* still need to chop the data so that we can use the same non-root code */
              for (j = 0; j < pipe_length; j++) {
                Request::send((char *)buf + (j * increment), segment, datatype, to, tag, comm);
              }

              //Request::send(buf,count,datatype,to,tag,comm);
              //Request::wait(&request,MPI_STATUS_IGNORE);

              //total = MPI_Wtime() - start;
              //printf("SEND TO SINGLE node %d time = %.2f\n",i,total);

              already_sent[i] = 1;
              to_clean[i] = 1; /* its "ready" message has not been received yet */
              sent_count++;
              break;
            }
          }
        }
      } /* while loop */

      /* drain the pending "ready" messages of the nodes that were pushed to */
      for (i = 0; i < size; i++)
        if (to_clean[i] != 0)
          Request::recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm, &status);
      //total = MPI_Wtime() - start2;
      //printf("Node zero iter = %d time = %.2f\n",iteration,total);
    }

    /* non-root */
    else {
      /* send 1-byte "ready" message to root */
      Request::send(temp_buf, 1, MPI_CHAR, 0, tag, comm);

      /* wait for the header; forward it when required */
      request = Request::irecv(header_buf, HEADER_SIZE, MPI_INT, MPI_ANY_SOURCE, tag, comm);
      Request::wait(&request, MPI_STATUS_IGNORE);

      /* search for this rank's position in the arrival-order chain */
      int myordering = 0;
      while (rank != header_buf[myordering]) {
        myordering++;
      }

      /* send header when required */
      if (header_buf[myordering + 1] != -1) {
        Request::send(header_buf, HEADER_SIZE, MPI_INT, header_buf[myordering + 1], tag, comm);
      }

      /* receive data - pipeline (the whole-message variant is kept disabled: the
         root always sends in segments on this path) */
      //if (header_buf[1] == -1) {
      //  request = Request::irecv(buf, count, datatype, 0, tag, comm);
      //  Request::wait(&request, MPI_STATUS_IGNORE);
      //  printf("\t\tnode %d ordering = %d receive data from root\n",rank,myordering);
      //}
      for (i = 0; i < pipe_length; i++) {
        recv_request_array[i] =
            Request::irecv((char *)buf + (i * increment), segment, datatype, MPI_ANY_SOURCE, tag, comm);
      }
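
      /* all segment receives are posted up front as non-blocking requests, so the
         forwarding below can start as soon as each segment has arrived */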

      /* send data */
      if (header_buf[myordering + 1] != -1) {
        for (i = 0; i < pipe_length; i++) {
          Request::wait(&recv_request_array[i], MPI_STATUS_IGNORE);
          send_request_array[i] = Request::isend((char *)buf + (i * increment), segment, datatype,
                                                 header_buf[myordering + 1], tag, comm);
        }
        Request::waitall(pipe_length, send_request_array, send_status_array);
      } else {
        Request::waitall(pipe_length, recv_request_array, recv_status_array);
      }
    }

    delete[] send_request_array;
    delete[] recv_request_array;
    delete[] send_status_array;
    delete[] recv_status_array;
  } /* end pipeline */

  /* when count is not divisible by the block size, use the default bcast for the remainder */
  if ((remainder != 0) && (count > segment)) {
    XBT_INFO("MPI_bcast_arrival_pattern_aware: count is not divisible by block size, use default MPI_bcast for remainder.");
    colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
  }

  return MPI_SUCCESS;
}

} // namespace simgrid::smpi