/* NOTE(review): garbled extraction — every line below is prefixed with its
 * original file line number, and many original lines are missing (parameter
 * tail `MPI_Comm comm`, the declarations of `tag`, `status` and `request`,
 * several `recv_offset`/`send_offset` assignment heads, closing braces, and
 * everything past original line 134).  Restore from the upstream source
 * before compiling; comments here describe only what is visible. */
/* SMP-aware ("node-topology-sensitive") allgather: each process first copies
 * its own block into rbuf and exchanges blocks inside its SMP node, then the
 * root of each node runs a logical-ring inter-node exchange, forwarding each
 * received node-sized chunk down a linear intra-node pipeline (rank -> rank+1). */
6 int smpi_coll_tuned_allgather_SMP_NTS(void *sbuf, int scount,
7 MPI_Datatype stype, void *rbuf,
8 int rcount, MPI_Datatype rtype,
/* (original lines 9-10 missing — presumably the trailing `MPI_Comm comm)`
 * parameter and the opening brace) */
11 int src, dst, comm_size, rank;
12 MPI_Comm_size(comm, &comm_size);
13 MPI_Comm_rank(comm, &rank);
14 MPI_Aint rextent, sextent;
/* MPI_Type_extent is the deprecated MPI-1 form of MPI_Type_get_extent */
15 MPI_Type_extent(rtype, &rextent);
16 MPI_Type_extent(stype, &sextent);
/* fixed-size request arrays — silently assumes inter_comm_size <= 128;
 * TODO confirm an upstream guard enforces this */
19 MPI_Request rrequest_array[128];
20 MPI_Request srequest_array[128];
23 int i, send_offset, recv_offset;
/* intra_rank: position inside the SMP node; inter_rank: index of the node.
 * Assumes ranks are laid out node-by-node in blocks of NUM_CORE — TODO confirm */
24 int intra_rank, inter_rank;
25 intra_rank = rank % NUM_CORE;
26 inter_rank = rank / NUM_CORE;
/* number of SMP nodes, rounded up so a partially-filled last node counts */
27 int inter_comm_size = (comm_size + NUM_CORE - 1) / NUM_CORE;
28 int num_core_in_current_smp = NUM_CORE;
30 /* for too small number of processes, use default implementation */
31 if (comm_size <= NUM_CORE) {
32 return MPI_Allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
/* (closing brace of this guard — original line 33 — missing) */
34 // the last SMP node may have fewer number of running processes than all others
35 if (inter_rank == (inter_comm_size - 1)) {
36 num_core_in_current_smp = comm_size - (inter_rank * NUM_CORE);
38 //copy corresponding message from sbuf to rbuf
39 recv_offset = rank * rextent * rcount;
/* self-sendrecv: local copy of this rank's contribution into its slot of
 * rbuf (original lines 42-43 with `&status);` missing) */
40 MPI_Sendrecv(sbuf, scount, stype, rank, tag,
41 ((char *) rbuf + recv_offset), rcount, rtype, rank, tag, comm,
44 //gather to root of each SMP
/* intra-node allgather via a pairwise sendrecv ring: at step i, send own
 * block to the core i ahead and receive from the core i behind (modulo the
 * node size).  Original lines 47-48 (`dst =` head) and 50 (`src =` head)
 * are missing from this extraction. */
46 for (i = 1; i < num_core_in_current_smp; i++) {
49 (inter_rank * NUM_CORE) + (intra_rank + i) % (num_core_in_current_smp);
51 (inter_rank * NUM_CORE) + (intra_rank - i +
52 num_core_in_current_smp) %
53 (num_core_in_current_smp);
54 recv_offset = src * rextent * rcount;
/* (original lines 55 and 58-61 — `&status);` and closing brace — missing) */
56 MPI_Sendrecv(sbuf, scount, stype, dst, tag,
57 ((char *) rbuf + recv_offset), rcount, rtype, src, tag, comm,
62 // INTER-SMP-ALLGATHER
63 // Every root of each SMP node post INTER-Sendrecv, then do INTRA-Bcast for each receiving message
64 // Use logical ring algorithm
/* node root (intra_rank == 0) drives the inter-node ring: receive from the
 * previous node's root, send to the next node's root */
67 if (intra_rank == 0) {
68 src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * NUM_CORE;
69 dst = ((inter_rank + 1) % inter_comm_size) * NUM_CORE;
71 // post all inter Irecv
/* iteration i receives the node-sized chunk originating i+1 nodes "behind"
 * us on the ring; original line 73 (`recv_offset =` head) is missing.
 * NOTE(review): the offset uses sextent*scount while the Irecv uses
 * rcount/rtype — visible as-is upstream, but worth confirming */
72 for (i = 0; i < inter_comm_size - 1; i++) {
74 ((inter_rank - i - 1 +
75 inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
76 MPI_Irecv((char *) rbuf + recv_offset, rcount * NUM_CORE, rtype, src,
77 tag + i, comm, &rrequest_array[i]);
/* first ring send: this node's own gathered block (original lines 78-82,
 * including the `send_offset =` assignment head, missing) */
83 inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
84 MPI_Isend((char *) rbuf + send_offset, scount * NUM_CORE, stype, dst, tag,
85 comm, &srequest_array[0]);
87 // loop : recv-inter , send-inter, send-intra (linear-bcast)
/* steady-state pipeline: wait for chunk i, forward it along the inter-node
 * ring, and kick off the intra-node linear broadcast by sending to rank+1
 * (original line 89, the `recv_offset =` head, is missing) */
88 for (i = 0; i < inter_comm_size - 2; i++) {
90 ((inter_rank - i - 1 +
91 inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
92 MPI_Wait(&rrequest_array[i], &status);
93 MPI_Isend((char *) rbuf + recv_offset, scount * NUM_CORE, stype, dst,
94 tag + i + 1, comm, &srequest_array[i + 1]);
/* skip the intra-node send when this node holds a single process */
95 if (num_core_in_current_smp > 1) {
96 MPI_Isend((char *) rbuf + recv_offset, scount * NUM_CORE, stype,
97 (rank + 1), tag + i + 1, comm, &request);
101 // recv last message and send_intra
/* final ring step: no further inter-node forward, only the intra-node send
 * (original line 102, the `recv_offset =` head, is missing; the two
 * commented lines below look like leftover debugging alternatives) */
103 ((inter_rank - i - 1 +
104 inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
105 //recv_offset = ((inter_rank + 1) % inter_comm_size) * NUM_CORE * sextent * scount;
106 //i=inter_comm_size-2;
107 MPI_Wait(&rrequest_array[i], &status);
108 if (num_core_in_current_smp > 1) {
109 MPI_Isend((char *) rbuf + recv_offset, scount * NUM_CORE, stype,
110 (rank + 1), tag + i + 1, comm, &request);
113 // last rank of each SMP
/* tail of the intra-node pipeline: only receives forwarded chunks from
 * rank-1, never forwards further */
114 else if (intra_rank == (num_core_in_current_smp - 1)) {
115 for (i = 0; i < inter_comm_size - 1; i++) {
/* (original line 116, the `recv_offset =` head, is missing) */
117 ((inter_rank - i - 1 +
118 inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
119 MPI_Irecv((char *) rbuf + recv_offset, (rcount * NUM_CORE), rtype,
120 rank - 1, tag + i + 1, comm, &request);
121 MPI_Wait(&request, &status);
124 // intermediate rank of each SMP
/* middle of the pipeline: receive each chunk from rank-1, then forward it
 * to rank+1.  The fragment ends mid-loop here — the function's remainder
 * (closing braces, final waits/return) is outside this view. */
126 for (i = 0; i < inter_comm_size - 1; i++) {
128 ((inter_rank - i - 1 +
129 inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
130 MPI_Irecv((char *) rbuf + recv_offset, (rcount * NUM_CORE), rtype,
131 rank - 1, tag + i + 1, comm, &request);
132 MPI_Wait(&request, &status);
133 MPI_Isend((char *) rbuf + recv_offset, (scount * NUM_CORE), stype,
134 (rank + 1), tag + i + 1, comm, &request);