/* Copyright (c) 2013-2020. The SimGrid Team.
 * All rights reserved.                                                     */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
#include "../colls_private.hpp"
/* IMPLEMENTED BY PITCH PATARASUK
   Non-topology-specific (however, the number of cores per node needs to be changed)
   all-reduce operation designed for SMP clusters.
   It uses 2-layer communication: binomial for both intra-communication and
   inter-communication.
   The communications are done in a pipelined fashion. */
/* Default segment size (in bytes) used to pipeline the reduction;
   it is typically overridden by a command-line argument. */
int allreduce_smp_binomial_pipeline_segment_size = 4096;
namespace simgrid{
namespace smpi{
-int Coll_allreduce_smp_binomial_pipeline::allreduce(void *send_buf,
- void *recv_buf, int count,
- MPI_Datatype dtype,
- MPI_Op op, MPI_Comm comm)
+int allreduce__smp_binomial_pipeline(const void *send_buf,
+ void *recv_buf, int count,
+ MPI_Datatype dtype,
+ MPI_Op op, MPI_Comm comm)
{
int comm_size, rank;
- void *tmp_buf;
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
rank = comm->rank();
MPI_Aint extent;
extent = dtype->get_extent();
- tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
+ unsigned char* tmp_buf = smpi_get_tmp_sendbuffer(count * extent);
int intra_rank, inter_rank;
intra_rank = rank % num_core;