smp_simple spreading_simple)
ADD_TEST(smpi-allgather-coll-${ALLGATHER_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/allgather:${ALLGATHER_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/allgather_coll.tesh)
ENDFOREACH()
- FOREACH (ALLGATHERV_COLL default pair ring)
+ FOREACH (ALLGATHERV_COLL default GB pair ring)
ADD_TEST(smpi-allgatherv-coll-${ALLGATHERV_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/allgatherv:${ALLGATHERV_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/allgatherv_coll.tesh)
ENDFOREACH()
FOREACH (ALLREDUCE_COLL default lr NTS rab1 rab2 rab_rdb
src/smpi/colls/allgather-SMP-NTS.c
src/smpi/colls/allgather-smp-simple.c
src/smpi/colls/allgather-spreading-simple.c
+ src/smpi/colls/allgatherv-GB.c
src/smpi/colls/allgatherv-pair.c
src/smpi/colls/allgatherv-ring.c
src/smpi/colls/allreduce-lr.c
--- /dev/null
+#include "colls_private.h"
+
+// Allgatherv - gather/bcast algorithm: gather all contributions to rank 0
+// with gatherv, then broadcast the assembled receive buffer to every rank.
+int smpi_coll_tuned_allgatherv_GB(void *send_buff, int send_count,
+                                  MPI_Datatype send_type, void *recv_buff,
+                                  int *recv_counts, int *recv_disps, MPI_Datatype recv_type,
+                                  MPI_Comm comm)
+{
+  smpi_mpi_gatherv(send_buff, send_count, send_type, recv_buff, recv_counts,
+                   recv_disps, recv_type, 0, comm);
+  int num_procs, i, current, max = 0;
+  num_procs = smpi_comm_size(comm);
+  // The broadcast must cover the full extent of the receive buffer, i.e. the
+  // largest recv_disps[i] + recv_counts[i] over all ranks. Displacements are
+  // not required to be monotonically increasing, so we cannot just use the
+  // extent of the last rank.
+  for (i = 0; i < num_procs; i++) {
+    current = recv_disps[i] + recv_counts[i];
+    if (current > max) max = current;
+  }
+  // Fix: broadcast "max" (the computed maximum extent), not "current", which
+  // only holds the extent of the last rank iterated.
+  mpi_coll_bcast_fun(recv_buff, max, recv_type, 0, comm);
+
+  return MPI_SUCCESS;
+}
MPI_Datatype recv_type, MPI_Comm comm)
#define COLL_ALLGATHERVS(action, COLL_sep) \
+COLL_APPLY(action, COLL_ALLGATHERV_SIG, GB) COLL_sep \
COLL_APPLY(action, COLL_ALLGATHERV_SIG, pair) COLL_sep \
COLL_APPLY(action, COLL_ALLGATHERV_SIG, ring)