-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "../colls_private.h"
+#include "../colls_private.hpp"
/* IMPLEMENTED BY PITCH PATARASUK
- Non-topoloty-specific (however, number of cores/node need to be changed)
+ Non-topology-specific (however, number of cores/node need to be changed)
all-reduce operation designed for smp clusters
It uses 2-layer communication: binomial for intra-communication
and rdb for inter-communication*/
-
/* ** NOTE **
Use -DMPICH2 if this code does not compile.
MPICH1 code also works on MPICH2 on our cluster and the performance is similar.
*/
namespace simgrid{
namespace smpi{
-int Coll_allreduce_smp_rdb::allreduce(void *send_buf, void *recv_buf, int count,
- MPI_Datatype dtype, MPI_Op op,
- MPI_Comm comm)
+int allreduce__smp_rdb(const void *send_buf, void *recv_buf, int count,
+ MPI_Datatype dtype, MPI_Op op,
+ MPI_Comm comm)
{
int comm_size, rank;
- void *tmp_buf;
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
#else
MPI_User_function *uop;
- struct MPIR_OP *op_ptr;
+ MPIR_OP *op_ptr;
op_ptr = MPIR_ToPointer(op);
uop = op_ptr->op;
#endif
rank = comm->rank();
MPI_Aint extent;
extent = dtype->get_extent();
- tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
+ unsigned char* tmp_buf = smpi_get_tmp_sendbuffer(count * extent);
/* compute intra and inter ranking */
int intra_rank, inter_rank;