simple bruck basic_linear pairwise)
ADD_TEST(smpi-alltoall-coll-${ALLTOALL_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/alltoall:${ALLTOALL_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoall_coll.tesh)
ENDFOREACH()
+ FOREACH (ALLTOALLV_COLL default)
+ ADD_TEST(smpi-alltoallv-coll-${ALLTOALLV_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/alltoallv:${ALLTOALLV_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoallv_coll.tesh)
+ ENDFOREACH()
FOREACH (BCAST_COLL default arrival_nb arrival_pattern_aware arrival_pattern_aware_wait arrival_scatter
binomial_tree flattree flattree_pipeline NTSB NTSL NTSL_Isend scatter_LR_allgather
scatter_rdb_allgather SMP_binary SMP_binomial SMP_linear)
MPI_Op op, MPI_Comm comm));
-/** \ingroup MPI alltoallcollectives
+/** \ingroup MPI alltoall
* \brief The list of all available alltoall collectives
*/
XBT_PUBLIC_DATA(s_mpi_coll_description_t) mpi_coll_alltoall_description[];
(void *, int, MPI_Datatype, void *, int, MPI_Datatype,
MPI_Comm));
+/** \ingroup MPI alltoallv
+ * \brief The list of all available alltoallv collectives
+ */
+XBT_PUBLIC_DATA(s_mpi_coll_description_t) mpi_coll_alltoallv_description[];
+XBT_PUBLIC_DATA(int (*mpi_coll_alltoallv_fun)
+ (void *, int*, int*, MPI_Datatype, void *, int*, int*, MPI_Datatype,
+ MPI_Comm));
+
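For context, this pointer is the hook through which SMPI dispatches the actual call once the option is parsed. A minimal sketch of the intended call site, assuming a wrapper along these lines (the function name and exact wrapper signature are illustrative, not part of this patch):

    int smpi_mpi_alltoallv(void *sendbuf, int *sendcounts, int *senddispls,
                           MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
                           int *recvdispls, MPI_Datatype recvtype, MPI_Comm comm)
    {
      /* dispatch through the pointer filled in from the "smpi/alltoallv" option */
      return mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddispls, sendtype,
                                    recvbuf, recvcounts, recvdispls, recvtype, comm);
    }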
/** \ingroup MPI bcast
* \brief The list of all available bcast collectives
{
_sg_cfg_cb__coll("alltoall", mpi_coll_alltoall_description, name, pos);
}
+static void _sg_cfg_cb__coll_alltoallv(const char *name, int pos)
+{
+ _sg_cfg_cb__coll("alltoallv", mpi_coll_alltoallv_description, name, pos);
+}
static void _sg_cfg_cb__coll_bcast(const char *name, int pos)
{
_sg_cfg_cb__coll("bcast", mpi_coll_bcast_description, name, pos);
xbt_cfgelm_string, &default_value, 1, 1, &_sg_cfg_cb__coll_alltoall,
NULL);
+ default_value = xbt_strdup("default");
+ xbt_cfg_register(&_sg_cfg_set, "smpi/alltoallv",
+ "Which collective to use for alltoallv",
+ xbt_cfgelm_string, &default_value, 1, 1, &_sg_cfg_cb__coll_alltoallv,
+ NULL);
+
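Once registered, the implementation can be selected at run time like any other SimGrid option, e.g. by passing --cfg=smpi/alltoallv:default on the smpirun command line; the tesh test registered above exercises exactly this path.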
default_value = xbt_strdup("default");
xbt_cfg_register(&_sg_cfg_set, "smpi/bcast",
"Which collective to use for bcast",
{NULL, NULL, NULL} /* this array must be NULL terminated */
};
+s_mpi_coll_description_t mpi_coll_alltoallv_description[] = {
+ {"default",
+ "Ompi alltoallv default collective",
+ smpi_coll_basic_alltoallv},
+
+ {NULL, NULL, NULL} /* this array must be NULL terminated */
+};
+
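The NULL sentinel is what stops the lookup: find_coll_description (used below when the function pointers are wired up) is expected to scan the table roughly like this (a sketch with simplified error handling; the exact xbt_die message is assumed):

    int find_coll_description(s_mpi_coll_description_t *table, const char *name)
    {
      int i;
      for (i = 0; table[i].name; i++)  /* the {NULL, NULL, NULL} entry ends the scan */
        if (!strcmp(name, table[i].name))
          return i;
      xbt_die("Collective '%s' not found", name);  /* assumed error path */
    }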
s_mpi_coll_description_t mpi_coll_bcast_description[] = {
{"default",
"allgather default collective",
int (*mpi_coll_allgather_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm);
int (*mpi_coll_allreduce_fun)(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm);
int (*mpi_coll_alltoall_fun)(void *, int, MPI_Datatype, void*, int, MPI_Datatype, MPI_Comm);
+int (*mpi_coll_alltoallv_fun)(void *, int*, int*, MPI_Datatype, void*, int*, int*, MPI_Datatype, MPI_Comm);
int (*mpi_coll_bcast_fun)(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm com);
int (*mpi_coll_reduce_fun)(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
void*, int, MPI_Datatype, MPI_Comm))
mpi_coll_alltoall_description[alltoall_id].coll;
+ int alltoallv_id = find_coll_description(mpi_coll_alltoallv_description,
+ sg_cfg_get_string("smpi/alltoallv"));
+ mpi_coll_alltoallv_fun = (int (*)(void *, int*, int*, MPI_Datatype,
+ void*, int*, int*, MPI_Datatype, MPI_Comm))
+ mpi_coll_alltoallv_description[alltoallv_id].coll;
+
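The cast is needed because the description table presumably stores each implementation behind a generic pointer in its coll field; the selected entry is converted back to the full alltoallv signature before being stored in mpi_coll_alltoallv_fun.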
int bcast_id = find_coll_description(mpi_coll_bcast_description,
sg_cfg_get_string("smpi/bcast"));
mpi_coll_bcast_fun = (int (*)(void *buf, int count, MPI_Datatype datatype, \
int MPI_Init(int *argc, char ***argv)
{
- int allgather_id = find_coll_description(mpi_coll_allgather_description,
- sg_cfg_get_string("smpi/allgather"));
- mpi_coll_allgather_fun = (int (*)(void *, int, MPI_Datatype,
- void*, int, MPI_Datatype, MPI_Comm))
- mpi_coll_allgather_description[allgather_id].coll;
-
- int allreduce_id = find_coll_description(mpi_coll_allreduce_description,
- sg_cfg_get_string("smpi/allreduce"));
- mpi_coll_allreduce_fun = (int (*)(void *sbuf, void *rbuf, int rcount, \
- MPI_Datatype dtype, MPI_Op op, MPI_Comm comm))
- mpi_coll_allreduce_description[allreduce_id].coll;
-
- int alltoall_id = find_coll_description(mpi_coll_alltoall_description,
- sg_cfg_get_string("smpi/alltoall"));
- mpi_coll_alltoall_fun = (int (*)(void *, int, MPI_Datatype,
- void*, int, MPI_Datatype, MPI_Comm))
- mpi_coll_alltoall_description[alltoall_id].coll;
-
- int bcast_id = find_coll_description(mpi_coll_bcast_description,
- sg_cfg_get_string("smpi/bcast"));
- mpi_coll_bcast_fun = (int (*)(void *buf, int count, MPI_Datatype datatype, \
- int root, MPI_Comm com))
- mpi_coll_bcast_description[bcast_id].coll;
-
- int reduce_id = find_coll_description(mpi_coll_reduce_description,
- sg_cfg_get_string("smpi/reduce"));
- mpi_coll_reduce_fun = (int (*)(void *buf, void *rbuf, int count, MPI_Datatype datatype, \
- MPI_Op op, int root, MPI_Comm comm))
- mpi_coll_reduce_description[reduce_id].coll;
return PMPI_Init(argc, argv);
}
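The lookups deleted here duplicate the selection already performed where the mpi_coll_*_fun pointers are initialized (see the earlier hunk), so MPI_Init can shrink to a plain PMPI_Init call.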
add_executable(alltoall2 alltoall2.c)
add_executable(alltoall_basic alltoall_basic.c)
add_executable(alltoall_coll alltoall_coll.c)
- add_executable(alltoallv alltoallv.c)
+ add_executable(alltoallv_coll alltoallv_coll.c)
add_executable(allreduce allreduce.c)
add_executable(allreduce_coll allreduce_coll.c)
add_executable(bcast bcast.c)
target_link_libraries(alltoall2 simgrid)
target_link_libraries(alltoall_basic simgrid)
target_link_libraries(alltoall_coll simgrid)
- target_link_libraries(alltoallv simgrid)
+ target_link_libraries(alltoallv_coll simgrid)
target_link_libraries(allreduce simgrid)
target_link_libraries(allreduce_coll simgrid)
target_link_libraries(bcast simgrid)
${CMAKE_CURRENT_SOURCE_DIR}/alltoall_coll.c
${CMAKE_CURRENT_SOURCE_DIR}/bcast_coll.c
${CMAKE_CURRENT_SOURCE_DIR}/reduce_coll.c
- ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv_coll.c
${CMAKE_CURRENT_SOURCE_DIR}/get_processor_name.c
${CMAKE_CURRENT_SOURCE_DIR}/pingpong.c
${CMAKE_CURRENT_SOURCE_DIR}/bcast.c
<2> rdisp: (#3): [0][2][4]
after MPI_Alltoallv :
- <0> rbuf: (#9): [0][-1][-2][-3][-4][-5][-6][-7][-8]
- <1> rbuf: (#9): [1][101][201][-3][-4][-5][-6][-7][-8]
- <2> rbuf: (#9): [3][4][103][104][203][204][-6][-7][-8]
+ <0> rbuf: (#9): [-1][-1][-1][-1][-1][-1][-1][-1][-1]
+ <1> rbuf: (#9): [1][101][201][-1][-1][-1][-1][-1][-1]
+ <2> rbuf: (#9): [3][4][103][104][203][204][-1][-1][-1]
*/
static void print_buffer_int(void *buf, int len, char *msg, int rank)
{
int tmp, *v;
- printf("**<%d> %s (#%d): ", rank, msg, len);
+ printf("[%d] %s (#%d): ", rank, msg, len);
for (tmp = 0; tmp < len; tmp++) {
v = buf;
printf("[%d]", v[tmp]);
{
MPI_Comm comm;
- int *sbuf, *rbuf, *erbuf;
+ int *sbuf, *rbuf;
int rank, size;
int *sendcounts, *recvcounts, *rdispls, *sdispls;
- int i, j, *p, err;
+ int i;
MPI_Init(&argc, &argv);
- err = 0;
comm = MPI_COMM_WORLD;
/* Create the buffer */
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);
- sbuf = (int *) malloc(size * size * sizeof(int));
- rbuf = (int *) malloc(size * size * sizeof(int));
- erbuf = (int *) malloc(size * size * sizeof(int)); // expected
- if (!sbuf || !rbuf) {
- fprintf(stderr, "Could not allocated buffers!\n");
- MPI_Abort(comm, 1);
- }
+ sbuf = (int *) xbt_malloc(size * size * sizeof(int));
+ rbuf = (int *) xbt_malloc(size * size * sizeof(int));
/* Load up the buffers */
for (i = 0; i < size * size; i++) {
sbuf[i] = i + 100 * rank;
- rbuf[i] = -i;
- erbuf[i] = -i;
+ rbuf[i] = -1;
}
/* Create and load the arguments to alltoallv */
- sendcounts = (int *) malloc(size * sizeof(int));
- recvcounts = (int *) malloc(size * sizeof(int));
- rdispls = (int *) malloc(size * sizeof(int));
- sdispls = (int *) malloc(size * sizeof(int));
- if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
- fprintf(stderr, "Could not allocate arg items!\n");
- MPI_Abort(comm, 1);
- }
+ sendcounts = (int *) xbt_malloc(size * sizeof(int));
+ recvcounts = (int *) xbt_malloc(size * sizeof(int));
+ rdispls = (int *) xbt_malloc(size * sizeof(int));
+ sdispls = (int *) xbt_malloc(size * sizeof(int));
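+ /* Rank p sends sendcounts[j] = j ints to each rank j, taken at offset
+  * sdispls[j] = j*(j+1)/2 of sbuf, and receives recvcounts[i] = p ints
+  * from each rank i at offset rdispls[i] = i*p of rbuf.  With 3 ranks,
+  * rank 1 thus gets sbuf[1] of every peer (1, 101 and 201), matching the
+  * expected "<1> rbuf: (#9): [1][101][201]..." line in the tesh file. */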
for (i = 0; i < size; i++) {
sendcounts[i] = i;
recvcounts[i] = rank;
rdispls[i] = i * rank;
sdispls[i] = (i * (i + 1)) / 2;
}
-
- /* debug */
- /*
- print_buffer_int( sbuf, size*size, strdup("sbuf:"),rank);
- print_buffer_int( sendcounts, size, strdup("scount:"),rank);
- print_buffer_int( recvcounts, size, strdup("rcount:"),rank);
- print_buffer_int( sdispls, size, strdup("sdisp:"),rank);
- print_buffer_int( rdispls, size, strdup("rdisp:"),rank);
- */
-
-
- /* debug : erbuf */
- /* debug
- for (i=0; i<size; i++) {
- for (j=0; j<rank; j++) {
- *(erbuf+j+ rdispls[i]) = i * 100 + (rank*(rank+1))/2 + j;
- }
- }
- */
-
-
- //print_buffer_int( erbuf, size*size, strdup("erbuf:"),rank);
-
+
+ print_buffer_int(sbuf, size * size, "sbuf:", rank);
+ print_buffer_int(sendcounts, size, "scount:", rank);
+ print_buffer_int(recvcounts, size, "rcount:", rank);
+ print_buffer_int(sdispls, size, "sdisp:", rank);
+ print_buffer_int(rdispls, size, "rdisp:", rank);
+
MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT,
rbuf, recvcounts, rdispls, MPI_INT, comm);
- // debug: print_buffer_int( rbuf, size*size, strdup("rbuf:"),rank);
-
-
- /* Check rbuf */
- for (i = 0; i < size; i++) {
- p = rbuf + rdispls[i];
- for (j = 0; j < rank; j++) {
- if (p[j] != i * 100 + (rank * (rank + 1)) / 2 + j) {
- fprintf(stderr, "** Error: <%d> got %d expected %d for %dth\n",
- rank, p[j], (i * (i + 1)) / 2 + j, j);
- err++;
- }
- }
- }
+ print_buffer_int(rbuf, size * size, "rbuf:", rank);
- /* Summary */
- if (err > 0) {
- printf("<%d> Alltoallv test: failure (%d errors).\n", rank, err);
- }
+ MPI_Barrier(MPI_COMM_WORLD);
if (0 == rank) {
- printf("* Alltoallv TEST COMPLETE.\n");
+ printf("Alltoallv TEST COMPLETE.\n");
}
free(sdispls);
free(rdispls);
+ free(recvcounts);
+ free(sendcounts);
free(rbuf);
free(sbuf);
- MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}