From: Arnaud Giersch
Date: Thu, 5 Dec 2019 12:36:39 +0000 (+0100)
Subject: Reindent.
X-Git-Tag: v3.25~336^2~1
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/6adf284aaee012249d40eb03d231b6e696354e15

Reindent.
---

diff --git a/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp b/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp
index d317ed5799..db5e79aaba 100644
--- a/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp
+++ b/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp
@@ -22,88 +22,79 @@ int alltoallv__ompi_basic_linear(const void *sbuf, const int *scounts, const int
                                  MPI_Datatype rdtype, MPI_Comm comm)
 {
-    int i, size, rank;
-    char *psnd, *prcv;
-    int nreqs;
-    ptrdiff_t sext, rext;
-    MPI_Request *preq;
-    size = comm->size();
-    rank = comm->rank();
-    MPI_Request* ireqs = new MPI_Request[size * 2];
-    XBT_DEBUG(
-                 "coll:tuned:alltoallv_intra_basic_linear rank %d", rank);
-
-    sext=sdtype->get_extent();
-    rext=rdtype->get_extent();
-
-    /* Simple optimization - handle send to self first */
-    psnd = ((char *) sbuf) + (sdisps[rank] * sext);
-    prcv = ((char *) rbuf) + (rdisps[rank] * rext);
-    if (0 != scounts[rank]) {
-        Datatype::copy(psnd, scounts[rank], sdtype,
-                       prcv, rcounts[rank], rdtype);
-    }
-
-    /* If only one process, we're done. */
-    if (1 == size) {
-        return MPI_SUCCESS;
-    }
-
-    /* Now, initiate all send/recv to/from others. */
-    nreqs = 0;
-    preq = ireqs;
-
-    /* Post all receives first */
-    for (i = 0; i < size; ++i) {
-        if (i == rank) {
-            continue;
-        }
-
-        prcv = ((char *) rbuf) + (rdisps[i] * rext);
-
-        *preq = Request::irecv_init(prcv, rcounts[i], rdtype,
-                                    i, COLL_TAG_ALLTOALLV, comm
-                                    );
-        preq++;
-        ++nreqs;
-
-    }
+  int i, size, rank;
+  char *psnd, *prcv;
+  int nreqs;
+  ptrdiff_t sext, rext;
+  MPI_Request* preq;
+  size = comm->size();
+  rank = comm->rank();
+  MPI_Request* ireqs = new MPI_Request[size * 2];
+  XBT_DEBUG("coll:tuned:alltoallv_intra_basic_linear rank %d", rank);
+
+  sext = sdtype->get_extent();
+  rext = rdtype->get_extent();
+
+  /* Simple optimization - handle send to self first */
+  psnd = ((char*)sbuf) + (sdisps[rank] * sext);
+  prcv = ((char*)rbuf) + (rdisps[rank] * rext);
+  if (0 != scounts[rank]) {
+    Datatype::copy(psnd, scounts[rank], sdtype, prcv, rcounts[rank], rdtype);
+  }
+
+  /* If only one process, we're done. */
+  if (1 == size) {
+    return MPI_SUCCESS;
+  }

-    /* Now post all sends */
-    for (i = 0; i < size; ++i) {
-        if (i == rank) {
-            continue;
-        }
+  /* Now, initiate all send/recv to/from others. */
+  nreqs = 0;
+  preq = ireqs;

-        psnd = ((char *) sbuf) + (sdisps[i] * sext);
-        *preq=Request::isend_init(psnd, scounts[i], sdtype,
-                                  i, COLL_TAG_ALLTOALLV, comm
-                                  );
-        preq++;
-        ++nreqs;
+  /* Post all receives first */
+  for (i = 0; i < size; ++i) {
+    if (i == rank) {
+      continue;
     }

-    /* Start your engines. This will never return an error. */
-    Request::startall(nreqs, ireqs);
+    prcv = ((char*)rbuf) + (rdisps[i] * rext);

-    /* Wait for them all. If there's an error, note that we don't care
-     * what the error was -- just that there *was* an error. The PML
-     * will finish all requests, even if one or more of them fail.
-     * i.e., by the end of this call, all the requests are free-able.
-     * So free them anyway -- even if there was an error, and return the
-     * error after we free everything. */
-    Request::waitall(nreqs, ireqs,
-                     MPI_STATUSES_IGNORE);
+    *preq = Request::irecv_init(prcv, rcounts[i], rdtype, i, COLL_TAG_ALLTOALLV, comm);
+    preq++;
+    ++nreqs;
+  }

-    /* Free the requests. */
-    for (i = 0; i < nreqs; ++i) {
-        if(ireqs[i]!=MPI_REQUEST_NULL)
-            Request::unref(&ireqs[i]);
+  /* Now post all sends */
+  for (i = 0; i < size; ++i) {
+    if (i == rank) {
+      continue;
     }
-    delete[] ireqs;
-    return MPI_SUCCESS;
+    psnd = ((char*)sbuf) + (sdisps[i] * sext);
+    *preq = Request::isend_init(psnd, scounts[i], sdtype, i, COLL_TAG_ALLTOALLV, comm);
+    preq++;
+    ++nreqs;
+  }
+
+  /* Start your engines. This will never return an error. */
+  Request::startall(nreqs, ireqs);
+
+  /* Wait for them all. If there's an error, note that we don't care
+   * what the error was -- just that there *was* an error. The PML
+   * will finish all requests, even if one or more of them fail.
+   * i.e., by the end of this call, all the requests are free-able.
+   * So free them anyway -- even if there was an error, and return the
+   * error after we free everything. */
+  Request::waitall(nreqs, ireqs, MPI_STATUSES_IGNORE);
+
+  /* Free the requests. */
+  for (i = 0; i < nreqs; ++i) {
+    if (ireqs[i] != MPI_REQUEST_NULL)
+      Request::unref(&ireqs[i]);
+  }
+  delete[] ireqs;
+
+  return MPI_SUCCESS;
 }

 }
 }

-
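
The reindented routine above is SMPI's port of Open MPI's basic linear alltoallv: it copies the rank's own block locally, posts persistent receives and sends to every other peer (irecv_init/isend_init), starts them all, waits for completion, and frees the requests. For reference, a minimal MPI caller that exercises MPI_Alltoallv with per-peer counts might look like the sketch below; it is not part of this commit, and the collective-selector value mentioned afterwards is an assumption derived from the function name rather than something this diff confirms.

/* Hypothetical test program (not from this commit): rank r sends (i + 1)
 * ints to rank i, so counts and displacements differ per peer. */
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::vector<int> scounts(size), sdisps(size), rcounts(size), rdisps(size);
  int stotal = 0, rtotal = 0;
  for (int i = 0; i < size; ++i) {
    scounts[i] = i + 1;    /* we send i+1 ints to rank i */
    sdisps[i]  = stotal;
    stotal += scounts[i];
    rcounts[i] = rank + 1; /* every peer sends rank+1 ints to us */
    rdisps[i]  = rtotal;
    rtotal += rcounts[i];
  }
  std::vector<int> sbuf(stotal, rank);
  std::vector<int> rbuf(rtotal, -1);

  MPI_Alltoallv(sbuf.data(), scounts.data(), sdisps.data(), MPI_INT,
                rbuf.data(), rcounts.data(), rdisps.data(), MPI_INT, MPI_COMM_WORLD);

  printf("rank %d received %d ints\n", rank, rtotal);
  MPI_Finalize();
  return 0;
}

Under SMPI such a program would be compiled with smpicxx and run with smpirun, e.g. smpirun -np 4 -platform <platform.xml> -hostfile <hosts> --cfg=smpi/alltoallv:ompi_basic_linear ./alltoallv_test, where the platform/hostfile arguments are placeholders and the ompi_basic_linear selector name is assumed from the alltoallv__ompi_basic_linear function name.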