-/* Copyright (c) 2013-2019. The SimGrid Team.
+/* Copyright (c) 2013-2021. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.hpp"
/*
- * Barrier is ment to be a synchronous operation, as some BTLs can mark
+ * Barrier is meant to be a synchronous operation, as some BTLs can mark
* a request done before its passed to the NIC and progress might not be made
* elsewhere we cannot allow a process to exit the barrier until its last
* [round of] sends are completed.
/*
* Simple double ring version of barrier
*
- * synchronous gurantee made by last ring of sends are synchronous
+ * The synchronous guarantee is made by the last ring of sends being synchronous
*
*/
-namespace simgrid{
-namespace smpi{
-int Coll_barrier_ompi_doublering::barrier(MPI_Comm comm)
+namespace simgrid {
+namespace smpi {
+int barrier__ompi_doublering(MPI_Comm comm)
{
int rank, size;
int left, right;
right = ((rank+1)%size);
if (rank > 0) { /* receive message from the left */
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* Send message to the right */
- Request::send((void*)NULL, 0, MPI_BYTE, right,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, right, COLL_TAG_BARRIER, comm);
/* root needs to receive from the last node */
if (rank == 0) {
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* Allow nodes to exit */
if (rank > 0) { /* post Receive from left */
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* send message to the right one */
- Request::send((void*)NULL, 0, MPI_BYTE, right,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, right, COLL_TAG_BARRIER, comm);
/* rank 0 post receive from the last node */
if (rank == 0) {
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
return MPI_SUCCESS;
* To make synchronous, uses sync sends and sync sendrecvs
*/
-int Coll_barrier_ompi_recursivedoubling::barrier(MPI_Comm comm)
+int barrier__ompi_recursivedoubling(MPI_Comm comm)
{
int rank, size, adjsize;
int mask, remote;
if (rank >= adjsize) {
/* send message to lower ranked node */
remote = rank - adjsize;
- Request::sendrecv(NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, remote,
+ COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
} else if (rank < (size - adjsize)) {
/* receive message from high level rank */
- Request::recv((void*)NULL, 0, MPI_BYTE, rank+adjsize,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
-
+ Request::recv(nullptr, 0, MPI_BYTE, rank + adjsize, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
}
if (remote >= adjsize) continue;
/* post receive from the remote node */
- Request::sendrecv(NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, remote,
+ COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
}
if (rank < (size - adjsize)) {
/* send enter message to higher ranked node */
remote = rank + adjsize;
- Request::send((void*)NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm);
-
+ Request::send(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, comm);
}
}
* To make synchronous, uses sync sends and sync sendrecvs
*/
-int Coll_barrier_ompi_bruck::barrier(MPI_Comm comm)
+int barrier__ompi_bruck(MPI_Comm comm)
{
int rank, size;
int distance, to, from;
to = (rank + distance) % size;
/* send message to lower ranked node */
- Request::sendrecv(NULL, 0, MPI_BYTE, to,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, from,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, to, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, from, COLL_TAG_BARRIER,
+ comm, MPI_STATUS_IGNORE);
}
return MPI_SUCCESS;
* To make synchronous, uses sync sends and sync sendrecvs
*/
/* special case for two processes */
-int Coll_barrier_ompi_two_procs::barrier(MPI_Comm comm)
+int barrier__ompi_two_procs(MPI_Comm comm)
{
int remote;
"ompi_coll_tuned_barrier_ompi_two_procs rank %d", remote);
remote = (remote + 1) & 0x1;
- Request::sendrecv(NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER,
+ comm, MPI_STATUS_IGNORE);
return (MPI_SUCCESS);
}
/* copied function (with appropriate renaming) starts here */
-int Coll_barrier_ompi_basic_linear::barrier(MPI_Comm comm)
+int barrier__ompi_basic_linear(MPI_Comm comm)
{
int i;
int size = comm->size();
/* All non-root send & receive zero-length message. */
if (rank > 0) {
- Request::send (NULL, 0, MPI_BYTE, 0,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, 0, COLL_TAG_BARRIER, comm);
- Request::recv (NULL, 0, MPI_BYTE, 0,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, 0, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* The root collects and broadcasts the messages. */
requests = new MPI_Request[size];
for (i = 1; i < size; ++i) {
- requests[i] = Request::irecv(NULL, 0, MPI_BYTE, i, COLL_TAG_BARRIER, comm);
+ requests[i] = Request::irecv(nullptr, 0, MPI_BYTE, i, COLL_TAG_BARRIER, comm);
}
Request::waitall( size-1, requests+1, MPI_STATUSES_IGNORE );
for (i = 1; i < size; ++i) {
- requests[i] = Request::isend(NULL, 0, MPI_BYTE, i,
- COLL_TAG_BARRIER,
- comm
- );
+ requests[i] = Request::isend(nullptr, 0, MPI_BYTE, i, COLL_TAG_BARRIER, comm);
}
Request::waitall( size-1, requests+1, MPI_STATUSES_IGNORE );
delete[] requests;
* Another recursive doubling type algorithm, but in this case
* we go up the tree and back down the tree.
*/
-int Coll_barrier_ompi_tree::barrier(MPI_Comm comm)
+int barrier__ompi_tree(MPI_Comm comm)
{
int rank, size, depth;
int jump, partner;
partner = rank ^ jump;
if (!(partner & (jump-1)) && partner < size) {
if (partner > rank) {
- Request::recv (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
} else if (partner < rank) {
- Request::send (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm);
}
}
}
partner = rank ^ jump;
if (!(partner & (jump-1)) && partner < size) {
if (partner > rank) {
- Request::send (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm);
} else if (partner < rank) {
- Request::recv (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
}
}