smp_rsag_lr smp_rsag_rab redbcast)
ADD_TEST(smpi-allreduce-coll-${ALLREDUCE_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/allreduce:${ALLREDUCE_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/allreduce_coll.tesh)
ENDFOREACH()
- FOREACH (ALLTOALL_COLL 2dmesh 3dmesh pair pair_one_barrier pair_light_barrier pair_mpi_barrier rdb
- ring ring_light_barrier ring_mpi_barrier ring_one_barrier
+# One tesh-driven test per alltoall collective implementation (selected via --cfg smpi/alltoall).
+ FOREACH (ALLTOALL_COLL 2dmesh 3dmesh pair pair_one_barrier pair_light_barrier
+ pair_mpi_barrier rdb ring ring_light_barrier
+ ring_mpi_barrier ring_one_barrier
simple bruck basic_linear pairwise)
ADD_TEST(smpi-alltoall-coll-${ALLTOALL_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/alltoall:${ALLTOALL_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoall_coll.tesh)
ENDFOREACH()
- FOREACH (ALLTOALLV_COLL default bruck pairwise)
+# One tesh-driven test per alltoallv collective implementation; the pair* variants are new.
+ FOREACH (ALLTOALLV_COLL default pair pair_light_barrier pair_mpi_barrier pair_one_barrier bruck pairwise)
ADD_TEST(smpi-alltoallv-coll-${ALLTOALLV_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/alltoallv:${ALLTOALLV_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoallv_coll.tesh)
ENDFOREACH()
FOREACH (BCAST_COLL default arrival_nb arrival_pattern_aware arrival_pattern_aware_wait arrival_scatter
src/smpi/colls/alltoall-ring-mpi-barrier.c
src/smpi/colls/alltoall-ring-one-barrier.c
src/smpi/colls/alltoall-simple.c
+ src/smpi/colls/alltoallv-pair.c
+ src/smpi/colls/alltoallv-pair-light-barrier.c
+ src/smpi/colls/alltoallv-pair-mpi-barrier.c
+ src/smpi/colls/alltoallv-pair-one-barrier.c
src/smpi/colls/alltoallv-pairwise.c
src/smpi/colls/alltoallv-bruck.c
src/smpi/colls/bcast-arrival-nb.c
--- /dev/null
+#include "colls_private.h"
+/*****************************************************************************
+
+ * Function: smpi_coll_tuned_alltoallv_pair_light_barrier
+
+ * Return: int (MPI_SUCCESS)
+
+ * Inputs:
+ send_buff: send input buffer
+ send_counts: per-destination number of elements to send
+ send_disps: per-destination displacements into send_buff (in extents)
+ send_type: data type of elements being sent
+ recv_buff: receive output buffer
+ recv_counts: per-source number of elements to receive
+ recv_disps: per-source displacements into recv_buff (in extents)
+ recv_type: data type of elements being received
+ comm: communicator
+
+ * Descrp: Function works in P - 1 steps. In step i, node j exchanges data
+ with node i ^ j. Light barriers (a one-byte handshake with the
+ next partner) are inserted between communications in different
+ phases. NOTE(review): the rank ^ i pairing only reaches every
+ peer when the communicator size is a power of two; no runtime
+ check is performed here.
+
+ * Author: Ahmad Faraj
+
+ ****************************************************************************/
+int
+smpi_coll_tuned_alltoallv_pair_light_barrier(void *send_buff, int *send_counts, int *send_disps,
+ MPI_Datatype send_type,
+ void *recv_buff, int *recv_counts, int *recv_disps,
+ MPI_Datatype recv_type,
+ MPI_Comm comm)
+{
+ MPI_Aint send_chunk, recv_chunk;
+ MPI_Status s;
+ int i, src, dst, rank, num_procs, next_partner;
+ int tag = 1; /*, failure = 0; */
+
+ char send_sync = 'a', recv_sync = 'b';
+ char *send_ptr = (char *) send_buff;
+ char *recv_ptr = (char *) recv_buff;
+
+ rank = smpi_comm_rank(comm);
+ num_procs = smpi_comm_size(comm);
+ send_chunk = smpi_datatype_get_extent(send_type);
+ recv_chunk = smpi_datatype_get_extent(recv_type);
+
+ /* phase 0: copy this rank's own slot (exchange with itself) */
+ smpi_mpi_sendrecv(send_ptr + send_disps[rank] * send_chunk, send_counts[rank], send_type, rank, tag,
+ recv_ptr + recv_disps[rank] * recv_chunk, recv_counts[rank], recv_type, rank, tag,
+ comm, &s);
+
+ for (i = 1; i < num_procs; i++) {
+ src = dst = rank ^ i;
+
+ /* Fix: receive count must be indexed by the source rank (src == dst
+ here, so behavior is unchanged, but this matches the other
+ alltoallv pair variants and the semantics of recv_counts). */
+ smpi_mpi_sendrecv(send_ptr + send_disps[dst] * send_chunk, send_counts[dst], send_type,
+ dst, tag, recv_ptr + recv_disps[src] * recv_chunk, recv_counts[src],
+ recv_type, src, tag, comm, &s);
+
+ if ((i + 1) < num_procs) {
+ next_partner = rank ^ (i + 1);
+ /* light barrier: one-byte exchange with the next phase's partner */
+ smpi_mpi_sendrecv(&send_sync, 1, MPI_CHAR, next_partner, tag,
+ &recv_sync, 1, MPI_CHAR, next_partner, tag, comm, &s);
+ }
+ }
+ return MPI_SUCCESS;
+}
--- /dev/null
+#include "colls_private.h"
+/*****************************************************************************
+
+ * Function: smpi_coll_tuned_alltoallv_pair_mpi_barrier
+
+ * Return: int (MPI_SUCCESS)
+
+ * Inputs:
+ send_buff: send input buffer
+ send_counts: per-destination number of elements to send
+ send_disps: per-destination displacements into send_buff (in extents)
+ send_type: data type of elements being sent
+ recv_buff: receive output buffer
+ recv_counts: per-source number of elements to receive
+ recv_disps: per-source displacements into recv_buff (in extents)
+ recv_type: data type of elements being received
+ comm: communicator
+
+ * Descrp: Function works when P is power of two. In each phase of P - 1
+ phases, nodes in pair communicate their data. MPI barriers are
+ inserted between each two phases.
+ NOTE(review): the rank ^ i pairing assumes a power-of-two
+ communicator size; no runtime check is performed here.
+
+ * Author: Ahmad Faraj
+
+ ****************************************************************************/
+int
+smpi_coll_tuned_alltoallv_pair_mpi_barrier(void *send_buff, int *send_counts, int *send_disps,
+ MPI_Datatype send_type,
+ void *recv_buff, int *recv_counts, int *recv_disps,
+ MPI_Datatype recv_type, MPI_Comm comm)
+{
+ MPI_Status s;
+ MPI_Aint send_chunk, recv_chunk;
+ int i, src, dst, rank, num_procs;
+ int tag = 101;
+ char *send_ptr = (char *) send_buff;
+ char *recv_ptr = (char *) recv_buff;
+
+ rank = smpi_comm_rank(comm);
+ num_procs = smpi_comm_size(comm);
+ send_chunk = smpi_datatype_get_extent(send_type);
+ recv_chunk = smpi_datatype_get_extent(recv_type);
+
+ for (i = 0; i < num_procs; i++) {
+ src = dst = rank ^ i;
+ /* full barrier before every phase (including the i == 0 self-copy) */
+ smpi_mpi_barrier(comm);
+ smpi_mpi_sendrecv(send_ptr + send_disps[dst] * send_chunk, send_counts[dst], send_type, dst,
+ tag, recv_ptr + recv_disps[src] * recv_chunk, recv_counts[src], recv_type,
+ src, tag, comm, &s);
+ }
+ return MPI_SUCCESS;
+}
--- /dev/null
+#include "colls_private.h"
+/*****************************************************************************
+
+ * Function: smpi_coll_tuned_alltoallv_pair_one_barrier
+
+ * Return: int (MPI_SUCCESS)
+
+ * Inputs:
+ send_buff: send input buffer
+ send_counts: per-destination number of elements to send
+ send_disps: per-destination displacements into send_buff (in extents)
+ send_type: data type of elements being sent
+ recv_buff: receive output buffer
+ recv_counts: per-source number of elements to receive
+ recv_disps: per-source displacements into recv_buff (in extents)
+ recv_type: data type of elements being received
+ comm: communicator
+
+ * Descrp: Function works when P is power of two. In each phase of P - 1
+ phases, nodes in pair communicate their data. A single barrier
+ is issued before the exchanges begin.
+ NOTE(review): the rank ^ i pairing assumes a power-of-two
+ communicator size; no runtime check is performed here.
+
+ * Author: Ahmad Faraj
+
+ ****************************************************************************/
+int
+smpi_coll_tuned_alltoallv_pair_one_barrier(void *send_buff, int *send_counts, int *send_disps,
+ MPI_Datatype send_type,
+ void *recv_buff, int *recv_counts, int *recv_disps, MPI_Datatype recv_type, MPI_Comm comm)
+{
+
+ MPI_Aint send_chunk, recv_chunk;
+ MPI_Status s;
+ int i, src, dst, rank, num_procs;
+ int tag = 1;
+
+ char *send_ptr = (char *) send_buff;
+ char *recv_ptr = (char *) recv_buff;
+
+ rank = smpi_comm_rank(comm);
+ num_procs = smpi_comm_size(comm);
+ send_chunk = smpi_datatype_get_extent(send_type);
+ recv_chunk = smpi_datatype_get_extent(recv_type);
+
+ /* the single synchronization point of this variant */
+ smpi_mpi_barrier(comm);
+ for (i = 0; i < num_procs; i++) {
+ src = dst = rank ^ i;
+ smpi_mpi_sendrecv(send_ptr + send_disps[dst] * send_chunk, send_counts[dst], send_type, dst,
+ tag, recv_ptr + recv_disps[src] * recv_chunk, recv_counts[src], recv_type,
+ src, tag, comm, &s);
+ }
+
+ return MPI_SUCCESS;
+}
+/* Registry of alltoallv implementations: each COLL_APPLY entry becomes a
+   selectable algorithm (cf. the --cfg smpi/alltoallv:NAME tests above). */
#define COLL_ALLTOALLVS(action, COLL_sep) \
COLL_APPLY(action, COLL_ALLTOALLV_SIG, bruck) COLL_sep \
+COLL_APPLY(action, COLL_ALLTOALLV_SIG, pair) COLL_sep \
+COLL_APPLY(action, COLL_ALLTOALLV_SIG, pair_light_barrier) COLL_sep \
+COLL_APPLY(action, COLL_ALLTOALLV_SIG, pair_mpi_barrier) COLL_sep \
+COLL_APPLY(action, COLL_ALLTOALLV_SIG, pair_one_barrier) COLL_sep \
COLL_APPLY(action, COLL_ALLTOALLV_SIG, pairwise)
COLL_ALLTOALLVS(COLL_PROTO, COLL_NOsep)