/* Copyright (c) 2013-2014. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "colls_private.h"
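
/* SMP-aware allgather (NTS variant): ranks first exchange their blocks inside
 * their own SMP node, then the node roots run a logical-ring allgather across
 * nodes while forwarding each received block down a linear intra-node
 * pipeline (rank -> rank + 1). */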
int smpi_coll_tuned_allgather_SMP_NTS(void *sbuf, int scount,
                                      MPI_Datatype stype, void *rbuf,
                                      int rcount, MPI_Datatype rtype,
                                      MPI_Comm comm)
{
  int src, dst, comm_size, rank;
  comm_size = smpi_comm_size(comm);
  rank = smpi_comm_rank(comm);
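  // datatype extents convert block indices into byte offsets inside sbuf/rbuf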
  MPI_Aint rextent, sextent;
  rextent = smpi_datatype_get_extent(rtype);
  sextent = smpi_datatype_get_extent(stype);
  int tag = COLL_TAG_ALLGATHER;

  int i, send_offset, recv_offset;
  int intra_rank, inter_rank;
  if (smpi_comm_get_leaders_comm(comm) == MPI_COMM_NULL) {
    smpi_comm_init_smp(comm);
  }
  int num_core = 1;
  if (smpi_comm_is_uniform(comm)) {
    num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
  }
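
  // Ranks are assumed to be laid out node by node: inter_rank selects the SMP
  // node, intra_rank the core inside that node.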
  intra_rank = rank % num_core;
  inter_rank = rank / num_core;
  int inter_comm_size = (comm_size + num_core - 1) / num_core;
  int num_core_in_current_smp = num_core;
  if (comm_size % num_core)
    THROWF(arg_error, 0, "allgather SMP NTS algorithm can't be used with a number of processes that is not a multiple of NUM_CORE=%d!", num_core);
  /* for too small a number of processes, use the default implementation */
  if (comm_size <= num_core) {
    XBT_WARN("MPI_allgather_SMP_NTS falls back to default MPI_allgather.");
    smpi_mpi_allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
    return MPI_SUCCESS;
  }
  // the last SMP node may have fewer running processes than the others
  if (inter_rank == (inter_comm_size - 1)) {
    num_core_in_current_smp = comm_size - (inter_rank * num_core);
  }
  // copy the local contribution from sbuf to its slot in rbuf
  recv_offset = rank * rextent * rcount;
  smpi_mpi_sendrecv(sbuf, scount, stype, rank, tag,
                    ((char *) rbuf + recv_offset), rcount, rtype, rank, tag,
                    comm, MPI_STATUS_IGNORE);
  // gather inside each SMP node (every rank ends up with the whole node's data)
  for (i = 1; i < num_core_in_current_smp; i++) {
    dst =
        (inter_rank * num_core) + (intra_rank + i) % (num_core_in_current_smp);
    src =
        (inter_rank * num_core) + (intra_rank - i +
                                   num_core_in_current_smp) %
        (num_core_in_current_smp);
    recv_offset = src * rextent * rcount;

    smpi_mpi_sendrecv(sbuf, scount, stype, dst, tag,
                      ((char *) rbuf + recv_offset), rcount, rtype, src, tag,
                      comm, MPI_STATUS_IGNORE);
  }
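
  // At this point every rank's rbuf holds all blocks contributed by its own SMP node.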
  // INTER-SMP-ALLGATHER
  // Each SMP-node root posts the inter-node sends/receives, then every received
  // message is broadcast inside the node (intra-node pipeline).
  // A logical ring algorithm is used between the node roots.
  // root of each SMP node
  if (intra_rank == 0) {
    MPI_Request *rrequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
    MPI_Request *srequest_array = xbt_new(MPI_Request, inter_comm_size - 1);

    src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
    dst = ((inter_rank + 1) % inter_comm_size) * num_core;
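    // src and dst are the roots (intra_rank == 0) of the previous and next SMP
    // nodes on the logical ring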
    // post all inter-node Irecvs
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      rrequest_array[i] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount * num_core,
                                         rtype, src, tag + i, comm);
    }
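
    // All ring-step receives are pre-posted; the per-step tag (tag + i)
    // matches the corresponding send issued by the previous node root.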
    // send the first message: this node's own block, to the next ring neighbour
    send_offset =
        ((inter_rank +
          inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
    srequest_array[0] = smpi_mpi_isend((char *)rbuf + send_offset, scount * num_core,
                                       stype, dst, tag, comm);
    // loop: recv-inter, send-inter, send-intra (linear bcast down the node)
    for (i = 0; i < inter_comm_size - 2; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
      srequest_array[i + 1] = smpi_mpi_isend((char *)rbuf + recv_offset, scount * num_core,
                                             stype, dst, tag + i + 1, comm);
      if (num_core_in_current_smp > 1) {
        smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
                      stype, (rank + 1), tag + i + 1, comm);
      }
    }
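
    // Only inter_comm_size - 2 blocks are both forwarded on the ring and pushed
    // intra-node here; the final block (dst's own data) is handled below and is
    // never re-sent on the inter-node ring.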
    // recv the last message and send it intra-node
    recv_offset =
        ((inter_rank - i - 1 +
          inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
    //recv_offset = ((inter_rank + 1) % inter_comm_size) * num_core * sextent * scount;
    //i=inter_comm_size-2;
    smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
    if (num_core_in_current_smp > 1) {
      smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
                    stype, (rank + 1), tag + i + 1, comm);
    }

    smpi_mpi_waitall(inter_comm_size - 1, srequest_array, MPI_STATUSES_IGNORE);
    xbt_free(rrequest_array);
    xbt_free(srequest_array);
  }
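
  // Non-root ranks of a node only take part in the intra-node pipeline: each of
  // the inter_comm_size - 1 remote blocks arrives from rank - 1 and, except on
  // the last core of the node, is forwarded to rank + 1.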
  // last rank of each SMP node
  else if (intra_rank == (num_core_in_current_smp - 1)) {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
    }
  }
  // intermediate rank of each SMP node
  else {
    for (i = 0; i < inter_comm_size - 1; i++) {
      recv_offset =
          ((inter_rank - i - 1 +
            inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
      smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
                    rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
      smpi_mpi_send((char *) rbuf + recv_offset, (scount * num_core), stype,
                    (rank + 1), tag + i + 1, comm);
    }
  }

  return MPI_SUCCESS;
}