X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/61427a88a76a2c2ef25d0d5b8132995c6f550e5c..5ed37babb2fa9097abe82df299c0aa259ed84d5a:/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp

diff --git a/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp b/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp
index 2736daf63e..c26616934a 100644
--- a/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp
+++ b/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2023. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -88,7 +88,7 @@
  *         [02b]          [12b]         [22b]
  *
  *        COMPUTATION PHASE 0 (a)
- *         Step 0: rank r sends block ra to rank (r+1) and receives bloc (r-1)a
+ *         Step 0: rank r sends block ra to rank (r+1) and receives block (r-1)a
  *                 from rank (r-1) [with wraparound].
  *    #     0              1             2
  *         [00a]        [00a+10a]       [20a]
@@ -98,7 +98,7 @@
  *       [22a+02a]        [12a]         [22a]
  *         [02b]          [12b]         [22b]
  *
- *         Step 1: rank r sends block (r-1)a to rank (r+1) and receives bloc
+ *         Step 1: rank r sends block (r-1)a to rank (r+1) and receives block
  *                 (r-2)a from rank (r-1) [with wraparound].
  *    #     0              1             2
  *         [00a]        [00a+10a]    [00a+10a+20a]
@@ -109,7 +109,7 @@
  *         [02b]          [12b]         [22b]
  *
  *        COMPUTATION PHASE 1 (b)
- *         Step 0: rank r sends block rb to rank (r+1) and receives bloc (r-1)b
+ *         Step 0: rank r sends block rb to rank (r+1) and receives block (r-1)b
  *                 from rank (r-1) [with wraparound].
  *    #     0              1             2
  *         [00a]        [00a+10a]       [20a]
@@ -119,7 +119,7 @@
  *       [22a+02a]        [12a]         [22a]
  *       [22b+02b]        [12b]         [22b]
  *
- *         Step 1: rank r sends block (r-1)b to rank (r+1) and receives bloc
+ *         Step 1: rank r sends block (r-1)b to rank (r+1) and receives block
  *                 (r-2)b from rank (r-1) [with wraparound].
  *    #     0              1             2
  *         [00a]        [00a+10a]    [00a+10a+20a]
@@ -153,15 +153,13 @@
       EARLY_BLOCK_COUNT = EARLY_BLOCK_COUNT + 1;            \
    }                                                        \
 
-#include "../colls_private.h"
+#include "../colls_private.hpp"
 
-namespace simgrid{
-namespace smpi{
-int
-Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
-                                              MPI_Datatype dtype,
-                                              MPI_Op op,
-                                              MPI_Comm comm)
+namespace simgrid::smpi {
+int allreduce__ompi_ring_segmented(const void *sbuf, void *rbuf, int count,
+                                   MPI_Datatype dtype,
+                                   MPI_Op op,
+                                   MPI_Comm comm)
 {
    int ret = MPI_SUCCESS;
    int line;
@@ -172,11 +170,11 @@ Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
    int block_count;
    unsigned int inbi;
    size_t typelng;
-   char *tmpsend = NULL, *tmprecv = NULL;
-   char *inbuf[2] = {NULL, NULL};
+   char *tmpsend = nullptr, *tmprecv = nullptr;
+   unsigned char* inbuf[2] = {nullptr, nullptr};
    ptrdiff_t true_extent, extent;
    ptrdiff_t block_offset, max_real_segsize;
-   MPI_Request reqs[2] = {NULL, NULL};
+   MPI_Request reqs[2] = {nullptr, nullptr};
    const size_t segsize = 1 << 20; /* 1 MB */
    int size = comm->size();
    int rank = comm->rank();
@@ -205,8 +203,7 @@ Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
    /* Special case for count less than size * segcount - use regular ring */
    if (count < size * segcount) {
      XBT_DEBUG( "coll:tuned:allreduce_ring_segmented rank %d/%d, count %d, switching to regular ring", rank, size, count);
-     return (Coll_allreduce_lr::allreduce(sbuf, rbuf, count, dtype, op,
-                                          comm));
+     return (allreduce__lr(sbuf, rbuf, count, dtype, op, comm));
    }
 
    /* Determine the number of phases of the algorithm */
@@ -232,11 +229,19 @@ Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
    max_real_segsize = true_extent + (max_segcount - 1) * extent;
 
    /* Allocate and initialize temporary buffers */
-   inbuf[0] = (char*)smpi_get_tmp_sendbuffer(max_real_segsize);
-   if (NULL == inbuf[0]) { ret = -1; line = __LINE__; goto error_hndl; }
+   inbuf[0] = smpi_get_tmp_sendbuffer(max_real_segsize);
+   if (nullptr == inbuf[0]) {
+     ret  = -1;
+     line = __LINE__;
+     goto error_hndl;
+   }
    if (size > 2) {
-      inbuf[1] = (char*)smpi_get_tmp_recvbuffer(max_real_segsize);
-      if (NULL == inbuf[1]) { ret = -1; line = __LINE__; goto error_hndl; }
+     inbuf[1] = smpi_get_tmp_recvbuffer(max_real_segsize);
+     if (nullptr == inbuf[1]) {
+       ret  = -1;
+       line = __LINE__;
+       goto error_hndl;
+     }
    }
 
    /* Handle MPI_IN_PLACE */
@@ -264,7 +269,7 @@ Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
           - wait on block (r + 1)
          - compute on block (r + 1)
          - send block (r + 1) to rank (r + 1)
-         Note that we must be careful when computing the begining of buffers and
+         Note that we must be careful when computing the beginning of buffers and
          for send operations and computation we must compute the exact block size.
    */
    send_to = (rank + 1) % size;
@@ -378,17 +383,16 @@ Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
 
    }
 
-   if (NULL != inbuf[0]) smpi_free_tmp_buffer(inbuf[0]);
-   if (NULL != inbuf[1]) smpi_free_tmp_buffer(inbuf[1]);
+   smpi_free_tmp_buffer(inbuf[0]);
+   smpi_free_tmp_buffer(inbuf[1]);
 
    return MPI_SUCCESS;
 
  error_hndl:
    XBT_DEBUG("%s:%4d\tRank %d Error occurred %d\n", __FILE__, line, rank, ret);
-   if (NULL != inbuf[0]) smpi_free_tmp_buffer(inbuf[0]);
-   if (NULL != inbuf[1]) smpi_free_tmp_buffer(inbuf[1]);
+   smpi_free_tmp_buffer(inbuf[0]);
+   smpi_free_tmp_buffer(inbuf[1]);
    return ret;
 }
-}
-}
+} // namespace simgrid::smpi
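
Note (reviewer's sketch, not part of the diff): the "Step 0 / Step 1" walk-through in the comment block above reduces to simple modular index arithmetic. The following standalone C++ snippet (no MPI, hypothetical variable names) prints which block each rank sends and receives at every step of one computation phase for the 3-rank example:

#include <cstdio>

int main()
{
  const int size = 3; // three ranks, as in the walk-through above
  for (int step = 0; step < size - 1; step++) {
    for (int r = 0; r < size; r++) {
      int send_block = (r - step + size) % size;     // Step 0: block r, Step 1: block (r-1), ...
      int recv_block = (r - step - 1 + size) % size; // block arriving from rank (r-1)
      std::printf("step %d: rank %d sends block %d to rank %d and receives block %d from rank %d\n",
                  step, r, send_block, (r + 1) % size, recv_block, (r - 1 + size) % size);
    }
  }
  return 0;
}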
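
Note (reviewer's sketch, not part of the diff): the EARLY_BLOCK_COUNT macro touched in the hunk at line 153 splits a count that does not divide evenly across blocks by giving the first few ("early") blocks one extra element. A minimal standalone illustration of that arithmetic, with hypothetical helper and variable names:

#include <cstdio>

// Number of elements assigned to block `block_id` when `count` elements are
// split as evenly as possible over `num_blocks` blocks: the first
// (count % num_blocks) "early" blocks each get one extra element.
static int block_count_for(int count, int num_blocks, int block_id)
{
  int base  = count / num_blocks;
  int early = count % num_blocks;
  return base + (block_id < early ? 1 : 0);
}

int main()
{
  const int count = 10, num_blocks = 3; // hypothetical values for illustration
  for (int b = 0; b < num_blocks; b++)
    std::printf("block %d holds %d elements\n", b, block_count_for(count, num_blocks, b));
  // prints 4, 3, 3: block 0 is the single "early" block here
  return 0;
}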