-#include "colls.h"
+/* Copyright (c) 2013-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "colls_private.h"
/* IMPLEMENTED BY PITCH PATARASUK
   Non-topology-specific (however, the number of cores per node needs to be changed)
   all-reduce operation designed for SMP clusters.
   It uses 2-layer communication: binomial for both intra-node and
   inter-node communication.
   The communications are done in a pipeline fashion */
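/* Illustrative sketch (not part of the patch): the two-level rank mapping the
   algorithm relies on, assuming MPI ranks are packed node by node; the same
   expressions reappear verbatim in the function body below. */
static inline void rank_decompose(int rank, int num_core,
                                  int *intra_rank, int *inter_rank)
{
  *intra_rank = rank % num_core; /* position of the process inside its SMP node */
  *inter_rank = rank / num_core; /* index of the SMP node in the cluster */
}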
-/* change number of core per smp-node
- we assume that number of core per process will be the same for all implementations */
-#ifndef NUM_CORE
-#define NUM_CORE 8
-#endif
+
/* this is a default segment size for pipelining,
   but it is typically passed as a command line argument.
   This code assumes a commutative and associative reduce operator
   (MPI_SUM, MPI_MAX, etc.). */
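/* Sketch of the pipelining arithmetic described above (illustrative only; the
   actual segment size is configured elsewhere in this file): a message of
   `count` elements is cut into segments of `pcount` elements, and each phase
   of the loops below moves one segment, at byte offset phase * pcount * extent. */
static inline int pipeline_length(int count, int seg_count)
{
  /* number of full pipeline phases; a trailing partial segment, if any,
     would require one extra phase */
  return count / seg_count;
}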
-#ifndef MPICH2
-extern void *MPIR_ToPointer();
-
-struct MPIR_OP {
- MPI_User_function *op;
- int commute, permanent;
-};
-
-#else
-extern MPI_User_function *MPIR_Op_table[];
-#endif
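/* The block removed above resolved the raw MPI_User_function behind an MPI_Op
   handle by hand (MPIR_ToPointer on MPICH1, MPIR_Op_table on MPICH2). SMPI
   replaces every such call site with smpi_op_apply(); a sketch of the call
   shape, matching its uses later in this patch: */
static inline void reduce_segment(MPI_Op op, void *in, void *inout,
                                  int len, MPI_Datatype dtype)
{
  /* accumulates in[i] into inout[i] for len elements of type dtype; the
     operand order is irrelevant here since the operator is assumed commutative */
  smpi_op_apply(op, in, inout, &len, &dtype);
}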
-
/*
This function performs the all-reduce operation as follows, in a pipeline fashion:
1) binomial_tree reduce inside each SMP node
2) binomial_tree reduce between the roots of the SMP nodes
3) binomial_tree bcast between the roots of the SMP nodes
4) binomial_tree bcast inside each SMP node for the final result
*/
int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf,
                                                    void *recv_buf, int count,
                                                    MPI_Datatype dtype,
                                                    MPI_Op op, MPI_Comm comm)
{
int comm_size, rank;
void *tmp_buf;
- int tag = 50;
+ int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
-
- MPI_User_function *uop;
-#ifndef MPICH2
- struct MPIR_OP *op_ptr = MPIR_ToPointer(op);
- uop = (MPI_User_function *) op_ptr->op;
-#else
- uop = MPIR_Op_table[op % 16 - 1];
-#endif
-
- MPI_Comm_size(comm, &comm_size);
- MPI_Comm_rank(comm, &rank);
+  /* build the SMP topology (intra-node and leaders communicators) on first use */
+  if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){
+    smpi_comm_init_smp(comm);
+  }
+  int num_core=1;
+  if (smpi_comm_is_uniform(comm)){
+    /* every node has the same core count: read it off the intra-node communicator */
+    num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm));
+  }
+
+ comm_size = smpi_comm_size(comm);
+ rank = smpi_comm_rank(comm);
MPI_Aint extent;
- MPI_Type_extent(dtype, &extent);
- tmp_buf = (void *) malloc(count * extent);
+ extent = smpi_datatype_get_extent(dtype);
+ tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent);
int intra_rank, inter_rank;
intra_rank = rank % num_core;
int inter_comm_size = (comm_size + num_core - 1) / num_core; /* ceiling division: number of SMP nodes */
/* copy input buffer to output buffer */
- MPI_Sendrecv(send_buf, count, dtype, rank, tag,
+ smpi_mpi_sendrecv(send_buf, count, dtype, rank, tag,
recv_buf, count, dtype, rank, tag, comm, &status);
/* compute pipe length */
src = (inter_rank * num_core) + (intra_rank | mask);
if (src < comm_size) {
recv_offset = phase * pcount * extent;
- MPI_Recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
- (*uop) (tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
+ smpi_mpi_recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
}
} else {
send_offset = phase * pcount * extent;
dst = (inter_rank * num_core) + (intra_rank & (~mask));
- MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+ smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
break;
}
mask <<= 1;
src = (inter_rank | mask) * num_core;
if (src < comm_size) {
recv_offset = (phase - 1) * pcount * extent;
- MPI_Recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
- (*uop) (tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
+ smpi_mpi_recv(tmp_buf, pcount, dtype, src, tag, comm, &status);
+ smpi_op_apply(op, tmp_buf, (char *)recv_buf + recv_offset, &pcount, &dtype);
}
} else {
dst = (inter_rank & (~mask)) * num_core;
send_offset = (phase - 1) * pcount * extent;
- MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+ smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
break;
}
mask <<= 1;
if (inter_rank & mask) {
src = (inter_rank - mask) * num_core;
recv_offset = (phase - 2) * pcount * extent;
- MPI_Recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
+ smpi_mpi_recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
&status);
break;
}
if (dst < comm_size) {
//printf("Node %d send to node %d when mask is %d\n", rank, dst, mask);
send_offset = (phase - 2) * pcount * extent;
- MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+ smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
}
}
mask >>= 1;
if (intra_rank & mask) {
src = (inter_rank * num_core) + (intra_rank - mask);
recv_offset = (phase - 3) * pcount * extent;
- MPI_Recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
+ smpi_mpi_recv((char *)recv_buf + recv_offset, pcount, dtype, src, tag, comm,
&status);
break;
}
dst = (inter_rank * num_core) + (intra_rank + mask);
if (dst < comm_size) {
send_offset = (phase - 3) * pcount * extent;
- MPI_Send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
+ smpi_mpi_send((char *)recv_buf + send_offset, pcount, dtype, dst, tag, comm);
}
mask >>= 1;
}
}
} // for phase
- free(tmp_buf);
+ smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
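/* Usage note (an assumption about SMPI's runtime algorithm selection, not part
   of this patch): such a collective is normally chosen via the command line
   rather than called directly, along the lines of:
     smpirun --cfg=smpi/allreduce:smp_binomial_pipeline -np 16 ./my_app
*/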