#ifndef BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE
#define BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE 128
#endif

#ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
#define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
#endif
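/* HEADER_SIZE bounds the header message: the list of ranks in one arrival set
   plus its -1 terminator. MAX_NODE bounds the number of processes the
   per-rank bookkeeping arrays below can track. */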
/* Arrival-pattern-aware broadcast: the root scatters the buffer over the
   processes in the order in which they signal their arrival; each arrival
   set then completes the broadcast among itself with a ring allgather. */
int smpi_coll_tuned_bcast_arrival_scatter(void *buf, int count,
                                          MPI_Datatype datatype, int root,
                                          MPI_Comm comm)
{
  MPI_Status temp_status_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];

  int flag_array[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int already_sent[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int header_buf[BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE];
  char temp_buf[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int will_send[BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE];
  int max_node = BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE;
  int header_size = BCAST_ARRIVAL_PATTERN_AWARE_HEADER_SIZE;
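  /* per-rank bookkeeping (indexed by rank, bounded by max_node):
       flag_array[i]   - MPI_Iprobe result: has rank i signalled its arrival?
       already_sent[i] - root has already scheduled/sent data for rank i
       will_send[i]    - rank i arrived during the current polling round
       temp_buf[i]     - scratch byte for the 1-byte arrival messages
     header_buf holds the ranks of the current set, terminated by -1 */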
  MPI_Aint extent;
  MPI_Type_extent(datatype, &extent);
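  /* note: MPI_Type_extent is deprecated since MPI-2.0; MPI_Type_get_extent is
     the modern replacement */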
  /* source and destination */
  int to, from;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  /* message too small to give every process at least one element:
     fall back to the default broadcast */
  if (count < size)
    return MPI_Bcast(buf, count, datatype, root, comm);
  /* if root is not zero send to rank zero first
     this can be modified to make it faster by using logical src, dst. */
  if (root != 0) {
    if (rank == root) {
      MPI_Send(buf, count, datatype, 0, tag - 1, comm);
    } else if (rank == 0) {
      MPI_Recv(buf, count, datatype, root, tag - 1, comm, &status);
    }
  }
  /* value == 0 means root has not sent data (or header) to the node yet */
  for (i = 0; i < max_node; i++) {
    already_sent[i] = 0;
  }
  for (i = 0; i < max_node; i++)
    will_send[i] = 0;
  /* root */
  if (rank == 0) {
    sent_count = 0;

    while (sent_count < (size - 1)) {
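      /* each pass over this loop handles one "arrival set": the root first
         polls for 1-byte arrival signals, then scatters the buffer over the
         ranks that arrived in this round */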
      for (k = 0; k < 3; k++) {
        for (i = 1; i < size; i++) {
          if ((already_sent[i] == 0) && (will_send[i] == 0)) {
            MPI_Iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
                       &temp_status_array[i]);
            if (flag_array[i] == 1) {
              will_send[i] = 1;
              MPI_Recv(&temp_buf[i], 1, MPI_CHAR, i, tag, comm,
                       &status);
            }
          }
        }
      }

      header_index = 0;
      /* recv 1-byte message in this round */
      for (i = 1; i < size; i++) {
        if ((will_send[i] == 1) && (already_sent[i] == 0)) {
          header_buf[header_index] = i;
          header_index++;
          sent_count++;

          /* will send in the next step */
          already_sent[i] = 1;
        }
      }
      /* debug: list the ranks that arrived in this round
      if (header_index != 0) {
        printf("header index = %d node = ", header_index);
        for (i = 0; i < header_index; i++) {
          printf("%d ", header_buf[i]);
        }
        printf("\n");
      }
      */
      /* send header followed by data */
      if (header_index != 0) {
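        /* the header is the ordered list of ranks in this set; -1 marks its
           end so that receivers can count the set size */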
        header_buf[header_index] = -1;
        for (i = 0; i < header_index; i++) {
          to = header_buf[i];
          MPI_Send(header_buf, header_size, MPI_INT, to, header_tag, comm);
        }
        curr_remainder = count % header_index;
        curr_size = (count / header_index);
        curr_increment = curr_size * extent;
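        /* the buffer is split into header_index blocks of curr_size elements;
           whatever does not divide evenly (curr_remainder) is appended to the
           last block */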
        for (i = 0; i < header_index; i++) {
          to = header_buf[i];
          if ((i == (header_index - 1)) || (curr_size == 0))
            curr_size += curr_remainder;
          //printf("Root send to %d index %d\n",to,(i*curr_increment));
          MPI_Send((char *) buf + (i * curr_increment), curr_size, datatype, to,
                   tag, comm);
        }
      }
    }                           /* while (sent_count < size-1) */
  }

  /* non-root */
  else {
    /* send 1-byte message to root */
    MPI_Send(temp_buf, 1, MPI_CHAR, 0, tag, comm);
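    /* this byte is the arrival signal that the root's MPI_Iprobe loop above
       is waiting for */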
    /* wait for the header from the root */
    MPI_Recv(header_buf, header_size, MPI_INT, 0, header_tag, comm, &status);
    /* find this rank's position (ordering) in the header */
    int myordering = 0;
    while (rank != header_buf[myordering]) {
      myordering++;
    }
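    /* count how many ranks are in this set; the header is terminated by -1 */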
    int total_nodes = 0;
    while (header_buf[total_nodes] != -1) {
      total_nodes++;
    }
    curr_remainder = count % total_nodes;
    curr_size = (count / total_nodes);
    curr_increment = curr_size * extent;
    int recv_size = curr_size;
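    /* mirror the root's split: total_nodes blocks of curr_size elements, with
       the remainder folded into the last block */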
    if (myordering == (total_nodes - 1))
      recv_size += curr_remainder;
    MPI_Recv((char *) buf + (myordering * curr_increment), recv_size, datatype,
             0, tag, comm, &status);
    /* at this point all nodes in this set perform an all-gather operation */

    /* ring neighbours follow the arrival ordering, wrapping around at both
       ends so header_buf is never indexed out of range */
    if (myordering == (total_nodes - 1))
      to = header_buf[0];
    else
      to = header_buf[myordering + 1];
    if (myordering == 0)
      from = header_buf[total_nodes - 1];
    else
      from = header_buf[myordering - 1];
    /* the last segment may have a larger size since it also includes the remainder */
    int last_segment_ptr = (total_nodes - 1) * (count / total_nodes) * extent;
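    /* ring allgather: in step i each rank forwards the block it obtained i
       steps earlier to its successor and receives the next-older block from
       its predecessor, so after total_nodes - 1 steps every rank holds the
       whole buffer */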
    for (i = 0; i < total_nodes - 1; i++) {
      send_offset =
          ((myordering - i + total_nodes) % total_nodes) * curr_increment;
      recv_offset =
          ((myordering - i - 1 + total_nodes) % total_nodes) * curr_increment;
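      /* the block starting at last_segment_ptr is the one that carries the
         remainder, so its element count differs */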
      if (send_offset != last_segment_ptr)
        send_count = curr_size;
      else
        send_count = curr_size + curr_remainder;

      if (recv_offset != last_segment_ptr)
        recv_count = curr_size;
      else
        recv_count = curr_size + curr_remainder;
      //printf("\t\tnode %d sent_to %d recv_from %d send_size %d recv_size %d\n",rank,to,from,send_count,recv_count);
      //printf("\tnode %d sent_offset %d send_count %d\n",rank,send_offset,send_count);
      MPI_Sendrecv((char *) buf + send_offset, send_count, datatype, to,
                   tag + i, (char *) buf + recv_offset, recv_count, datatype,
                   from, tag + i, comm, &status);
    }
  }

  return MPI_SUCCESS;
}
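/* Usage sketch (illustrative only; buffer size and communicator are
   assumptions, not taken from this file): with count >= size,

     int data[1024];
     smpi_coll_tuned_bcast_arrival_scatter(data, 1024, MPI_INT, 0,
                                           MPI_COMM_WORLD);

   leaves every rank's data[] equal to the contents of rank 0's data[]. */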