-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
/* IMPLEMENTED BY PITCH PATARASUK
Non-topology-specific (however, the number of cores per node needs to be changed)
all-reduce operation designed for smp clusters
It uses 2-layer communication: binomial for intra-communication
and rdb for inter-communication*/
-/* change number of core per smp-node
- we assume that number of core per process will be the same for all implementations */
-#ifndef NUM_CORE
-#define NUM_CORE 8
-#endif
/* ** NOTE **
Use -DMPICH2 if this code does not compile.
{
int comm_size, rank;
void *tmp_buf;
- int tag = 50;
+ int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+ smpi_comm_init_smp(comm);
+ }
+ int num_core=1;
+ if (smpi_comm_is_uniform(comm)){
+ num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+ }
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
uop = op_ptr->op;
#endif
*/
- MPI_Comm_size(comm, &comm_size);
- MPI_Comm_rank(comm, &rank);
+ comm_size = smpi_comm_size(comm);
+ rank = smpi_comm_rank(comm);
MPI_Aint extent;
- MPI_Type_extent(dtype, &extent);
- tmp_buf = (void *) malloc(count * extent);
+ extent = smpi_datatype_get_extent(dtype);
+ tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
/* compute intra and inter ranking */
int intra_rank, inter_rank;
int inter_comm_size = (comm_size + num_core - 1) / num_core;
/* copy input buffer to output buffer */
- MPI_Sendrecv(send_buf, count, dtype, rank, tag,
+ smpi_mpi_sendrecv(send_buf, count, dtype, rank, tag,
recv_buf, count, dtype, rank, tag, comm, &status);
/* start binomial reduce intra communication inside each SMP node */
if ((mask & intra_rank) == 0) {
src = (inter_rank * num_core) + (intra_rank | mask);
if (src < comm_size) {
- MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
- star_reduction(op, tmp_buf, recv_buf, &count, &dtype);
+ smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, recv_buf, &count, &dtype);
}
} else {
dst = (inter_rank * num_core) + (intra_rank & (~mask));
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
break;
}
mask <<= 1;
if (inter_rank < 2 * rem) {
if (inter_rank % 2 == 0) {
dst = rank + num_core;
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
newrank = -1;
} else {
src = rank - num_core;
- MPI_Recv(tmp_buf, count, dtype, src, tag, comm, &status);
- star_reduction(op, tmp_buf, recv_buf, &count, &dtype);
+ smpi_mpi_recv(tmp_buf, count, dtype, src, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, recv_buf, &count, &dtype);
newrank = inter_rank / 2;
}
} else {
dst *= num_core;
/* exchange data in rdb manner */
- MPI_Sendrecv(recv_buf, count, dtype, dst, tag, tmp_buf, count, dtype,
+ smpi_mpi_sendrecv(recv_buf, count, dtype, dst, tag, tmp_buf, count, dtype,
dst, tag, comm, &status);
- star_reduction(op, tmp_buf, recv_buf, &count, &dtype);
+ smpi_op_apply(op, tmp_buf, recv_buf, &count, &dtype);
mask <<= 1;
}
}
*/
if (inter_rank < 2 * rem) {
if (inter_rank % 2) {
- MPI_Send(recv_buf, count, dtype, rank - num_core, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, rank - num_core, tag, comm);
} else {
- MPI_Recv(recv_buf, count, dtype, rank + num_core, tag, comm, &status);
+ smpi_mpi_recv(recv_buf, count, dtype, rank + num_core, tag, comm, &status);
}
}
}
while (mask < num_core_in_current_smp) {
if (intra_rank & mask) {
src = (inter_rank * num_core) + (intra_rank - mask);
- MPI_Recv(recv_buf, count, dtype, src, tag, comm, &status);
+ smpi_mpi_recv(recv_buf, count, dtype, src, tag, comm, &status);
break;
}
mask <<= 1;
while (mask > 0) {
dst = (inter_rank * num_core) + (intra_rank + mask);
if (dst < comm_size) {
- MPI_Send(recv_buf, count, dtype, dst, tag, comm);
+ smpi_mpi_send(recv_buf, count, dtype, dst, tag, comm);
}
mask >>= 1;
}
- free(tmp_buf);
+ smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}