-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2022. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "../colls_private.h"
-#include "src/smpi/smpi_win.hpp"
+#include "../colls_private.hpp"
+#include "smpi_win.hpp"
/*****************************************************************************
* Descrp: Function works when P is a power of two. In each of the P - 1
          phases, nodes communicate their data in pairs.
- * Auther: Ahmad Faraj
+ * Author: Ahmad Faraj
****************************************************************************/
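/* Example of the pairing schedule: with P = 4 ranks, phase i pairs rank r with
 * rank r ^ i, so phase 1 exchanges (0,1) and (2,3), phase 2 exchanges (0,2)
 * and (1,3), phase 3 exchanges (0,3) and (1,2); phase 0 is each rank's local
 * copy of its own block. */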
namespace simgrid{
namespace smpi{
-int Coll_alltoall_pair_rma::alltoall(void *send_buff, int send_count, MPI_Datatype send_type,
- void *recv_buff, int recv_count, MPI_Datatype recv_type,
- MPI_Comm comm)
+int alltoall__pair_rma(const void *send_buff, int send_count, MPI_Datatype send_type,
+ void *recv_buff, int recv_count, MPI_Datatype recv_type,
+ MPI_Comm comm)
{
MPI_Aint send_chunk, recv_chunk;
send_chunk = send_type->get_extent();
recv_chunk = recv_type->get_extent();
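/* Expose the whole receive buffer (one block per peer, displacement unit =
 * extent of recv_type, default window info) as an RMA window on comm, so the
 * pairwise steps of this variant can target peers' blocks with one-sided
 * operations instead of point-to-point sendrecv. */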
- win=new Win(recv_buff, num_procs * recv_chunk * send_count, recv_chunk, 0,
- comm);
+ win = new Win(recv_buff, num_procs * recv_chunk * send_count, recv_chunk, nullptr, comm);
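/* From here on, send_chunk and recv_chunk hold the size in bytes of one
 * per-peer block (datatype extent times element count). */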
send_chunk *= send_count;
recv_chunk *= recv_count;
}
-int Coll_alltoall_pair::alltoall(void *send_buff, int send_count,
- MPI_Datatype send_type,
- void *recv_buff, int recv_count,
- MPI_Datatype recv_type, MPI_Comm comm)
+int alltoall__pair(const void *send_buff, int send_count,
+ MPI_Datatype send_type,
+ void *recv_buff, int recv_count,
+ MPI_Datatype recv_type, MPI_Comm comm)
{
MPI_Aint send_chunk, recv_chunk;
num_procs = comm->size();
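/* rank ^ i stays inside [0, num_procs) for every i only when num_procs is a
 * power of two; (n & (n - 1)) == 0 is the usual bit test for that property. */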
if ((num_procs & (num_procs - 1)) != 0)
- THROWF(arg_error,0, "alltoall pair algorithm can't be used with non power of two number of processes ! ");
+ throw std::invalid_argument("alltoall pair algorithm can't be used with non power of two number of processes!");
send_chunk = send_type->get_extent();
recv_chunk = recv_type->get_extent();
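/* One step per possible XOR distance: step i exchanges blocks with rank ^ i
 * (step 0 is the rank's own block). The combined sendrecv lets both partners
 * send and receive at once, so no send/receive ordering deadlock can occur. */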
for (i = 0; i < num_procs; i++) {
src = dst = rank ^ i;
- Request::sendrecv(send_ptr + dst * send_chunk, send_count, send_type, dst, tag,
- recv_ptr + src * recv_chunk, recv_count, recv_type, src, tag,
- comm, &s);
+ Request::sendrecv(send_ptr + dst * send_chunk, send_count, send_type, dst, tag, recv_ptr + src * recv_chunk,
+ recv_count, recv_type, src, tag, comm, &s);
}
return MPI_SUCCESS;