1 /* selector with default/naive Simgrid algorithms. These should not be trusted for performance evaluations */
3 /* Copyright (c) 2009-2010, 2013-2018. The SimGrid Team.
4 * All rights reserved. */
6 /* This program is free software; you can redistribute it and/or modify it
7 * under the terms of the license (GNU LGPL) which comes with this package. */
9 #include "colls_private.hpp"
10 #include "smpi_process.hpp"
// Default broadcast: delegate straight to the binomial-tree implementation.
// NOTE(review): this chunk looks truncated by extraction (stray embedded line
// numbers, braces not visible) -- only comments are added, code is untouched.
15 int Coll_bcast_default::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
17   return Coll_bcast_binomial_tree::bcast(buf, count, datatype, root, comm);
// Default barrier: delegate to OpenMPI's basic linear barrier algorithm.
// NOTE(review): braces not visible in this truncated chunk; comments only.
20 int Coll_barrier_default::barrier(MPI_Comm comm)
22   return Coll_barrier_ompi_basic_linear::barrier(comm);
// Default gather: non-root ranks send their buffer to root; root copies its
// own contribution locally, posts one persistent irecv per other rank, then
// starts and waits on them all.
// NOTE(review): this chunk appears truncated by extraction (stray embedded
// line numbers; the rank-vs-root branch, `lb`/`recvext`/`index` declarations,
// index++ and cleanup lines are not visible). Only comments are added here;
// all visible code tokens are left untouched.
26 int Coll_gather_default::gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
27 void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
29 int system_tag = COLL_TAG_GATHER;
33 int rank = comm->rank();
34 int size = comm->size();
// Non-root path (presumably guarded by `rank != root` -- guard not visible
// in this chunk, TODO confirm against the full file):
36 // Send buffer to root
37 Request::send(sendbuf, sendcount, sendtype, root, system_tag, comm);
// Root path: query the receive datatype extent so per-rank byte offsets into
// recvbuf can be computed below.
39 recvtype->extent(&lb, &recvext);
40 // Local copy from root
41 Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char*>(recvbuf) + root * recvcount * recvext,
43 // Receive buffers from senders
44 MPI_Request *requests = xbt_new(MPI_Request, size - 1);
// One persistent receive per non-root rank (the `src != root` guard and the
// index increment are not visible here -- TODO confirm).
46 for (int src = 0; src < size; src++) {
48 requests[index] = Request::irecv_init(static_cast<char*>(recvbuf) + src * recvcount * recvext, recvcount, recvtype,
49 src, system_tag, comm);
53 // Wait for completion of irecv's.
54 Request::startall(size - 1, requests);
55 Request::waitall(size - 1, requests, MPI_STATUS_IGNORE);
// Drop our references on the persistent requests once they have completed.
56 for (int src = 0; src < size-1; src++) {
57 Request::unref(&requests[src]);
// Default reduce_scatter: implemented naively as a full reduce to rank 0
// followed by a scatterv of the reduced result back to all ranks.
// NOTE(review): truncated chunk -- the `count` declaration, the displs[]
// fill inside the loop, closing braces and the final return of `ret` are not
// visible. Comments only; visible code untouched.
64 int Coll_reduce_scatter_default::reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op,
67 int rank = comm->rank();
69 /* arbitrarily choose root as rank 0 */
70 int size = comm->size();
// Build displacement table and the total element count across all ranks
// (displs[i] assignment presumably sits in this loop -- not visible here).
72 int *displs = xbt_new(int, size);
73 for (int i = 0; i < size; i++) {
75 count += recvcounts[i];
// Temporary buffer large enough to hold the fully reduced vector.
77 void *tmpbuf = static_cast<void*>(smpi_get_tmp_sendbuffer(count*datatype->get_extent()));
78 int ret = MPI_SUCCESS;
// Step 1: reduce everything onto rank 0's tmpbuf.
80 ret = Coll_reduce_default::reduce(sendbuf, tmpbuf, count, datatype, op, 0, comm);
// Step 2: scatter each rank's slice of the reduced vector back to it.
82 ret = Colls::scatterv(tmpbuf, recvcounts, displs, datatype, recvbuf, recvcounts[rank], datatype, 0, comm);
84 smpi_free_tmp_buffer(tmpbuf);
// Default allgather: each rank copies its own contribution locally, then
// posts a persistent isend/irecv pair toward every other rank, starts them
// all, and waits for completion.
// NOTE(review): truncated chunk -- the `lb`/`index` declarations, the
// `other != rank` guard, index increments, closing braces and cleanup are
// not visible. Comments only; visible code untouched.
89 int Coll_allgather_default::allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
90 void *recvbuf,int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
92 int system_tag = COLL_TAG_ALLGATHER;
95 MPI_Request *requests;
97 int rank = comm->rank();
98 int size = comm->size();
99 // FIXME: check for errors
100 recvtype->extent(&lb, &recvext);
101 // Local copy from self
102 Datatype::copy(sendbuf, sendcount, sendtype, static_cast<char *>(recvbuf) + rank * recvcount * recvext, recvcount,
104 // Send/Recv buffers to/from others;
105 requests = xbt_new(MPI_Request, 2 * (size - 1));
// One isend + one irecv per peer (self is presumably skipped -- guard not
// visible here, TODO confirm).
107 for (int other = 0; other < size; other++) {
109 requests[index] = Request::isend_init(sendbuf, sendcount, sendtype, other, system_tag,comm);
111 requests[index] = Request::irecv_init(static_cast<char *>(recvbuf) + other * recvcount * recvext, recvcount, recvtype,
112 other, system_tag, comm);
116 // Wait for completion of all comms.
117 Request::startall(2 * (size - 1), requests);
118 Request::waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
// Release our references on all 2*(size-1) persistent requests.
119 for (int other = 0; other < 2*(size-1); other++) {
120 Request::unref(&requests[other]);
// Default allgatherv: same scheme as allgather, but per-rank receive counts
// and displacements come from the recvcounts[] / displs[] arrays.
// NOTE(review): truncated chunk -- the `lb`/`index` declarations, the
// `other != rank` guard, index increments, closing braces and cleanup are
// not visible. Comments only; visible code untouched.
126 int Coll_allgatherv_default::allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
127 int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm)
129 int system_tag = COLL_TAG_ALLGATHERV;
131 MPI_Aint recvext = 0;
133 int rank = comm->rank();
134 int size = comm->size();
135 recvtype->extent(&lb, &recvext);
136 // Local copy from self
// Each rank's slice lands at its own displacement, scaled by the extent.
137 Datatype::copy(sendbuf, sendcount, sendtype,
138 static_cast<char *>(recvbuf) + displs[rank] * recvext,recvcounts[rank], recvtype);
139 // Send buffers to others;
140 MPI_Request *requests = xbt_new(MPI_Request, 2 * (size - 1));
// One isend + one irecv per peer (self presumably skipped -- guard not
// visible here, TODO confirm).
142 for (int other = 0; other < size; other++) {
145 Request::isend_init(sendbuf, sendcount, sendtype, other, system_tag, comm);
147 requests[index] = Request::irecv_init(static_cast<char *>(recvbuf) + displs[other] * recvext, recvcounts[other],
148 recvtype, other, system_tag, comm);
152 // Wait for completion of all comms.
153 Request::startall(2 * (size - 1), requests);
154 Request::waitall(2 * (size - 1), requests, MPI_STATUS_IGNORE);
// Release our references on all 2*(size-1) persistent requests.
155 for (int other = 0; other < 2*(size-1); other++) {
156 Request::unref(&requests[other]);
// Default scatter: non-root ranks block-receive their slice from root; root
// copies its own slice locally (unless MPI_IN_PLACE) and posts one persistent
// isend per other rank, then starts and waits on them all.
// NOTE(review): truncated chunk -- the rank-vs-root branch, `lb`/`index`
// declarations, the `dst != root` guard, index increments, closing braces and
// cleanup are not visible. Comments only; visible code untouched.
162 int Coll_scatter_default::scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
163 void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
165 int system_tag = COLL_TAG_SCATTER;
167 MPI_Aint sendext = 0;
168 MPI_Request *requests;
170 int rank = comm->rank();
171 int size = comm->size();
// Non-root path (presumably guarded by `rank != root` -- guard not visible):
173 // Recv buffer from root
174 Request::recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE);
// Root path: query send datatype extent so per-destination offsets into
// sendbuf can be computed below.
176 sendtype->extent(&lb, &sendext);
177 // Local copy from root
// Skip the local copy when the caller asked for in-place receive.
178 if(recvbuf!=MPI_IN_PLACE){
179 Datatype::copy(static_cast<char *>(sendbuf) + root * sendcount * sendext,
180 sendcount, sendtype, recvbuf, recvcount, recvtype);
182 // Send buffers to receivers
183 requests = xbt_new(MPI_Request, size - 1);
// One persistent send per non-root destination (guard / index++ not visible).
185 for(int dst = 0; dst < size; dst++) {
187 requests[index] = Request::isend_init(static_cast<char *>(sendbuf) + dst * sendcount * sendext, sendcount, sendtype,
188 dst, system_tag, comm);
192 // Wait for completion of isend's.
193 Request::startall(size - 1, requests);
194 Request::waitall(size - 1, requests, MPI_STATUS_IGNORE);
// Release our references on the persistent requests once completed.
195 for (int dst = 0; dst < size-1; dst++) {
196 Request::unref(&requests[dst]);
// Default reduce: non-commutative operations are handed off to OpenMPI's
// basic linear reduce (which preserves rank order); otherwise non-root ranks
// send to root, and root applies `op` to each incoming buffer as it arrives
// (waitany order), accumulating into recvbuf.
// NOTE(review): truncated chunk -- `lb`/`index` declarations, the rank-vs-root
// branch, the `src != root` guard, requests[index] assignment context, loop
// closings and final return are not visible. Comments only; visible code
// untouched.
205 int Coll_reduce_default::reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
208 int system_tag = COLL_TAG_REDUCE;
210 MPI_Aint dataext = 0;
212 char* sendtmpbuf = static_cast<char *>(sendbuf);
214 int rank = comm->rank();
215 int size = comm->size();
218 //non commutative case, use a working algo from openmpi
219 if (op != MPI_OP_NULL && not op->is_commutative()) {
220 return Coll_reduce_ompi_basic_linear::reduce(sendtmpbuf, recvbuf, count, datatype, op, root, comm);
// MPI_IN_PLACE: the caller's data lives in recvbuf, so stage it into a
// temporary send buffer (freed at the end of the function).
223 if( sendbuf == MPI_IN_PLACE ) {
224 sendtmpbuf = static_cast<char *>(smpi_get_tmp_sendbuffer(count*datatype->get_extent()));
225 Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
// Non-root path (guard presumably `rank != root` -- not visible here):
229 // Send buffer to root
230 Request::send(sendtmpbuf, count, datatype, root, system_tag, comm);
// Root path: extent is needed to size the per-sender temporary buffers.
232 datatype->extent(&lb, &dataext);
233 // Local copy from root
234 if (sendtmpbuf != nullptr && recvbuf != nullptr)
235 Datatype::copy(sendtmpbuf, count, datatype, recvbuf, count, datatype);
236 // Receive buffers from senders
237 MPI_Request *requests = xbt_new(MPI_Request, size - 1);
238 void **tmpbufs = xbt_new(void *, size - 1);
240 for (int src = 0; src < size; src++) {
// When replaying a trace, use shared scratch buffers instead of real
// allocations to keep the memory footprint down.
242 if (not smpi_process()->replaying())
243 tmpbufs[index] = xbt_malloc(count * dataext);
245 tmpbufs[index] = smpi_get_tmp_sendbuffer(count * dataext);
247 Request::irecv_init(tmpbufs[index], count, datatype, src, system_tag, comm);
251 // Wait for completion of irecv's.
252 Request::startall(size - 1, requests);
// Fold each contribution into recvbuf in arrival order (valid because the
// non-commutative case was already dispatched above).
253 for (int src = 0; src < size - 1; src++) {
254 index = Request::waitany(size - 1, requests, MPI_STATUS_IGNORE);
255 XBT_DEBUG("finished waiting any request with index %d", index);
256 if(index == MPI_UNDEFINED) {
259 Request::unref(&requests[index]);
// NOTE(review): the inner `if(op!=MPI_OP_NULL)` makes the outer `if(op)`
// redundant -- one of the two checks could be dropped in a code change.
261 if(op) /* op can be MPI_OP_NULL that does nothing */
262 if(op!=MPI_OP_NULL) op->apply( tmpbufs[index], recvbuf, &count, datatype);
// Free the per-sender scratch buffers.
264 for(index = 0; index < size - 1; index++) {
265 smpi_free_tmp_buffer(tmpbufs[index]);
// Release the staging buffer allocated for MPI_IN_PLACE above.
271 if( sendbuf == MPI_IN_PLACE ) {
272 smpi_free_tmp_buffer(sendtmpbuf);
// Default allreduce: naive reduce-to-rank-0 followed by a broadcast of the
// result from rank 0.
// NOTE(review): truncated chunk -- the `ret` declaration, braces and the
// final return are not visible. Comments only; visible code untouched.
277 int Coll_allreduce_default::allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
280 ret = Colls::reduce(sendbuf, recvbuf, count, datatype, op, 0, comm);
282 ret = Colls::bcast(recvbuf, count, datatype, 0, comm);
// Default alltoall: delegate to the OpenMPI-derived alltoall implementation.
// NOTE(review): braces not visible in this truncated chunk; comments only.
286 int Coll_alltoall_default::alltoall( void *sbuf, int scount, MPI_Datatype sdtype, void* rbuf, int rcount, MPI_Datatype rdtype, MPI_Comm comm)
288 return Coll_alltoall_ompi::alltoall(sbuf, scount, sdtype, rbuf, rcount, rdtype, comm);
// Default alltoallv: copy the local slice directly, then post all persistent
// receives first and all sends second, start everything, and wait. Requests
// for zero-count peers are skipped, so `count` (not 2*(size-1)) is the number
// actually posted.
// NOTE(review): this definition is truncated both internally (declarations of
// `lb`, `i`, `count`, several braces and the count++ increments are not
// visible) and at the end -- the trailing cleanup/return lies beyond this
// chunk. Comments only; visible code untouched.
293 int Coll_alltoallv_default::alltoallv(void *sendbuf, int *sendcounts, int *senddisps, MPI_Datatype sendtype,
294 void *recvbuf, int *recvcounts, int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
// NOTE(review): hard-coded tag 889 here, unlike the COLL_TAG_* constants used
// by the other collectives in this file -- confirm whether a COLL_TAG_ALLTOALLV
// constant should be used instead.
296 int system_tag = 889;
300 MPI_Aint sendext = 0;
301 MPI_Aint recvext = 0;
302 MPI_Request *requests;
305 int rank = comm->rank();
306 int size = comm->size();
307 XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank);
308 sendtype->extent(&lb, &sendext);
309 recvtype->extent(&lb, &recvext);
310 /* Local copy from self */
311 int err = Datatype::copy(static_cast<char *>(sendbuf) + senddisps[rank] * sendext, sendcounts[rank], sendtype,
312 static_cast<char *>(recvbuf) + recvdisps[rank] * recvext, recvcounts[rank], recvtype);
// Only exchange with peers if the local copy succeeded and we are not alone.
313 if (err == MPI_SUCCESS && size > 1) {
314 /* Initiate all send/recv to/from others. */
// Worst-case allocation: one send + one recv per peer; zero-count entries
// are skipped below, so only the first `count` slots end up used.
315 requests = xbt_new(MPI_Request, 2 * (size - 1));
317 /* Create all receives that will be posted first */
318 for (i = 0; i < size; ++i) {
319 if (i != rank && recvcounts[i] != 0) {
320 requests[count] = Request::irecv_init(static_cast<char *>(recvbuf) + recvdisps[i] * recvext,
321 recvcounts[i], recvtype, i, system_tag, comm);
// else-branch: nothing to receive from this peer, just trace the skip.
324 XBT_DEBUG("<%d> skip request creation [src = %d, recvcounts[src] = %d]", rank, i, recvcounts[i]);
327 /* Now create all sends */
328 for (i = 0; i < size; ++i) {
329 if (i != rank && sendcounts[i] != 0) {
330 requests[count] = Request::isend_init(static_cast<char *>(sendbuf) + senddisps[i] * sendext,
331 sendcounts[i], sendtype, i, system_tag, comm);
// else-branch: nothing to send to this peer, just trace the skip.
334 XBT_DEBUG("<%d> skip request creation [dst = %d, sendcounts[dst] = %d]", rank, i, sendcounts[i]);
337 /* Wait for them all. */
338 Request::startall(count, requests);
339 XBT_DEBUG("<%d> wait for %d requests", rank, count);
340 Request::waitall(count, requests, MPI_STATUS_IGNORE);
// Release references on the requests that were actually created.
341 for(i = 0; i < count; i++) {
342 if(requests[i]!=MPI_REQUEST_NULL)
343 Request::unref(&requests[i]);