#include "colls_private.h"

#ifndef NUM_CORE
#define NUM_CORE 8              /* default cores per SMP node when the platform reports none */
#endif
int smpi_coll_tuned_allgather_SMP_NTS(void *sbuf, int scount,
                                      MPI_Datatype stype, void *rbuf,
                                      int rcount, MPI_Datatype rtype,
                                      MPI_Comm comm)
{
  int src, dst, comm_size, rank;
  comm_size = smpi_comm_size(comm);
  rank = smpi_comm_rank(comm);
  MPI_Aint rextent, sextent;
  rextent = smpi_datatype_get_extent(rtype);
  sextent = smpi_datatype_get_extent(stype);
  int tag = COLL_TAG_ALLGATHER;
  int i, send_offset, recv_offset;
  int intra_rank, inter_rank;
  int num_core = simcall_host_get_core(SIMIX_host_self());
  // Use the core count reported by the platform; if it is one, the platform
  // may be simulated with 1 node = 1 core, so fall back to the default
  // NUM_CORE.
  if (num_core == 1) num_core = NUM_CORE;
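
  /* Rank-to-node mapping used below: ranks are assumed to be laid out
   * node-by-node, so with num_core = 4, ranks 0-3 share SMP node 0,
   * ranks 4-7 share node 1, and so on.  intra_rank is the slot inside the
   * node, inter_rank the node index. */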
  intra_rank = rank % num_core;
  inter_rank = rank / num_core;
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
  int num_core_in_current_smp = num_core;
  if (comm_size % num_core)
    THROWF(arg_error, 0, "allgather SMP NTS algorithm can't be used with a number of processes that is not a multiple of NUM_CORE=%d!", num_core);
  /* for too small a number of processes, use the default implementation */
  if (comm_size <= num_core) {
    XBT_WARN("MPI_allgather_SMP_NTS falls back to default MPI_allgather.");
    smpi_mpi_allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
    return MPI_SUCCESS;
  }
  // the last SMP node may have fewer running processes than the others
  if (inter_rank == (inter_comm_size - 1)) {
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }
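
  /* Note: the THROWF above already rejects any comm_size that is not a
   * multiple of num_core, so num_core_in_current_smp always equals num_core
   * here; the adjustment is kept for safety. */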
  // copy this rank's own contribution from sbuf into its slot in rbuf
  recv_offset = rank * rextent * rcount;
  smpi_mpi_sendrecv(sbuf, scount, stype, rank, tag,
                    ((char *) rbuf + recv_offset), rcount, rtype, rank, tag,
                    comm, MPI_STATUS_IGNORE);
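
  /* A sendrecv to self acts as a local copy here, while also honoring the
   * potentially different send and receive datatypes. */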
  // gather to the root of each SMP node
  for (i = 1; i < num_core_in_current_smp; i++) {
    dst =
        (inter_rank * num_core) + (intra_rank + i) % (num_core_in_current_smp);
    src =
        (inter_rank * num_core) + (intra_rank - i +
                                   num_core_in_current_smp) %
        (num_core_in_current_smp);
    recv_offset = src * rextent * rcount;
    smpi_mpi_sendrecv(sbuf, scount, stype, dst, tag,
                      ((char *) rbuf + recv_offset), rcount, rtype, src, tag,
                      comm, MPI_STATUS_IGNORE);
  }
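
  /* In step i, every core sends its sbuf to the core i slots ahead and
   * receives from the core i slots behind (modulo the node size), so after
   * num_core_in_current_smp - 1 steps each core of the node holds all local
   * contributions.  E.g. with 4 cores, core 1 receives from cores 0, 3, 2
   * in successive steps. */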
  // INTER-SMP-ALLGATHER
  // The root of each SMP node posts INTER Sendrecvs, then does an INTRA
  // Bcast for each received message.  A logical ring algorithm is used
  // among the roots.
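
  /* Pipelined forwarding: as soon as the root receives a node-sized block
   * from its ring predecessor, it forwards it to its ring successor and, in
   * parallel, pushes it down a linear chain of its local cores
   * (rank -> rank + 1 -> ...), overlapping inter- and intra-node traffic. */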
  // root of each SMP node
  if (intra_rank == 0) {
    MPI_Request *rrequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
    MPI_Request *srequest_array = xbt_new(MPI_Request, inter_comm_size - 1);

    src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
    dst = ((inter_rank + 1) % inter_comm_size) * num_core;
    // post all inter-node Irecvs
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      rrequest_array[i] = smpi_mpi_irecv((char *)rbuf + recv_offset,
                                         rcount * num_core, rtype, src,
                                         tag + i, comm);
    }
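
    /* All inter_comm_size - 1 receives are pre-posted at once; tag + i ties
     * each receive to ring step i, so successive blocks arriving from the
     * same predecessor cannot be mismatched. */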
    // send the first message
    send_offset =
        ((inter_rank +
          inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
    srequest_array[0] = smpi_mpi_isend((char *)rbuf + send_offset,
                                       scount * num_core, stype, dst, tag,
                                       comm);
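
    /* send_offset reduces to inter_rank * num_core * sextent * scount, i.e.
     * the block this node just assembled in the intra-node phase. */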
    // loop: recv-inter, send-inter, send-intra (linear bcast)
    for (i = 0; i < inter_comm_size - 2; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
      srequest_array[i + 1] = smpi_mpi_isend((char *)rbuf + recv_offset,
                                             scount * num_core, stype, dst,
                                             tag + i + 1, comm);
      if (num_core_in_current_smp > 1) {
        smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
                      stype, (rank + 1), tag + i + 1, comm);
      }
    }
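
    /* The last incoming block is not forwarded inter-node: after
     * inter_comm_size - 1 ring steps it is the successor's own block, so
     * only the intra-node chain still needs it. */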
    // receive the last message and feed the intra-node chain
    // (at this point i == inter_comm_size - 2, so the offset below equals
    //  ((inter_rank + 1) % inter_comm_size) * num_core * sextent * scount)
    recv_offset =
        ((inter_rank - i - 1 +
          inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
    smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
    if (num_core_in_current_smp > 1) {
      smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
                    stype, (rank + 1), tag + i + 1, comm);
    }

    smpi_mpi_waitall(inter_comm_size - 1, srequest_array, MPI_STATUSES_IGNORE);
    xbt_free(rrequest_array);
    xbt_free(srequest_array);
  }
  // last rank of each SMP node
  else if (intra_rank == (num_core_in_current_smp - 1)) {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
    }
  }
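
  /* Non-root cores receive each inter-node block from rank - 1; intermediate
   * cores forward it to rank + 1, while the last core of the node only sinks
   * the chain.  The tag + i + 1 values match the ones the root used when it
   * injected block i into the chain. */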
  // intermediate rank of each SMP node
  else {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
      smpi_mpi_send((char *) rbuf + recv_offset, (scount * num_core), stype,
                    (rank + 1), tag + i + 1, comm);
    }
  }
  return MPI_SUCCESS;
}