ADD_TEST(smpi-tracing-ptp ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/tracing/smpi_traced.tesh)
ADD_TEST(smpi-replay ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi --cd ${CMAKE_BINARY_DIR}/examples/smpi ${CMAKE_HOME_DIRECTORY}/examples/smpi/replay/smpi_replay.tesh)
endif()
+
+ FOREACH (ALLGATHER_COLL default GB loosely_lr lr NTSLR NTSLR_NB pair rdb RDB rhv ring SMP_NTS
+ smp_simple SMP_simple spreading_simple)
+ ADD_TEST(smpi-allgather-coll-${ALLGATHER_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/allgather:${ALLGATHER_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/allgather_coll.tesh)
+ ENDFOREACH()
+ FOREACH (ALLREDUCE_COLL default lr NTS rab1 rab2 rab_rsag rdb smp_binomial smp_rdb smp_rsag
+ smp_rsag_lr smp_rsag_rab redbcast)
+ ADD_TEST(smpi-allreduce-coll-${ALLREDUCE_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/allreduce:${ALLREDUCE_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/allreduce_coll.tesh)
+ ENDFOREACH()
+ FOREACH (ALLTOALL_COLL 2dmesh 3dmesh pair pair_light_barrier pair_mpi_barrier rdb
+ ring ring_light_barrier ring_mpi_barrier ring_one_barrier
+ simple bruck basic_linear pairwise)
+ ADD_TEST(smpi-alltoall-coll-${ALLTOALL_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/alltoall:${ALLTOALL_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/alltoall_coll.tesh)
+ ENDFOREACH()
+ FOREACH (BCAST_COLL default arrival_nb arrival_pattern_aware arrival_pattern_aware_wait arrival_scatter
+                     binomial_tree flattree flattree_pipeline NTSB NTSL NTSL_Isend scatter_LR_allgather
+                     scatter_rdb_allgather SMP_binary SMP_binomial SMP_linear TSB)
+   ADD_TEST(smpi-bcast-coll-${BCAST_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/bcast:${BCAST_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/bcast_coll.tesh)
+ ENDFOREACH()
+ FOREACH (REDUCE_COLL default arrival_pattern_aware binomial flat_tree NTSL scatter_gather)
+ ADD_TEST(smpi-reduce-coll-${REDUCE_COLL} ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg smpi/reduce:${REDUCE_COLL} --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce_coll.tesh)
+ ENDFOREACH()
+
endif()
# END TESH TESTS
xbt_cfgelm_string, &default_value, 1, 1, &_sg_cfg_cb__coll_alltoall,
NULL);
-
default_value = xbt_strdup("default");
xbt_cfg_register(&_sg_cfg_set, "smpi/bcast",
"Which collective to use for bcast",
if (Y > X)
num_reqs = Y;
- req = (MPI_Request *) malloc(num_reqs * sizeof(MPI_Request));
- if (!req) {
- printf("allgather-2dmesh-shoot.c:85: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
+ req = (MPI_Request *) xbt_malloc(num_reqs * sizeof(MPI_Request));
req_ptr = req;
comm);
}
- return 0;
+ return MPI_SUCCESS;
}
MPI_Comm_rank(comm, &rank);
MPI_Type_extent(dtype, &extent);
- tmp_buf = (void *) malloc(count * extent);
- if (!tmp_buf) {
- printf("Could not allocate memory for tmp_buf\n");
- return 1;
- }
+ tmp_buf = (void *) xbt_malloc(count * extent);
MPIR_Localcopy(sbuff, count, dtype, rbuff, count, dtype);
}
free(tmp_buf);
- return 0;
+ return MPI_SUCCESS;
}
MPI_Comm_rank(comm, &rank);
MPI_Type_extent(dtype, &extent);
- tmp_buf = (void *) malloc(count * extent);
- if (!tmp_buf) {
- printf("Could not allocate memory for tmp_buf\n");
- return 1;
- }
+ tmp_buf = (void *) xbt_malloc(count * extent);
MPIR_Localcopy(sbuff, count, dtype, rbuff, count, dtype);
}
free(tmp_buf);
- return 0;
+ return MPI_SUCCESS;
}
MPI_Comm_rank(comm, &rank);
MPI_Type_extent(dtype, &extent);
- tmp_buf = (void *) malloc(count * extent);
- if (!tmp_buf) {
- printf("Could not allocate memory for tmp_buf\n");
- return 1;
- }
+ tmp_buf = (void *) xbt_malloc(count * extent);
MPI_Sendrecv(sbuff, count, dtype, rank, tag, rbuff, count, dtype, rank, tag,
comm, &status);
}
free(tmp_buf);
- return 0;
+ return MPI_SUCCESS;
}
free(tmp_buf);
}
- return 0;
+ return MPI_SUCCESS;
}
free(recv);
}
- return 0;
+ return MPI_SUCCESS;
}
nprocs=smpi_comm_size(comm);
rank=smpi_comm_rank(comm);
- smpi_datatype_extent(dtype,&lb, &extent);
- tmp_buf = (void *) malloc(count * extent);
- if (!tmp_buf) {
- printf("Could not allocate memory for tmp_buf\n");
- return 1;
- }
+ smpi_datatype_extent(dtype, &lb, &extent);
+ tmp_buf = (void *) xbt_malloc(count * extent);
smpi_mpi_sendrecv(sbuff, count, dtype, rank, 500,
rbuff, count, dtype, rank, 500, comm, &status);
}
free(tmp_buf);
- return 0;
+ return MPI_SUCCESS;
}
{
smpi_mpi_reduce(buf, buf2, count, datatype, op, 0, comm);
smpi_mpi_bcast(buf2, count, datatype, 0, comm);
- return 0;
+ return MPI_SUCCESS;
}
int i, j, src, dst, rank, num_procs, count, num_reqs;
int X, Y, send_offset, recv_offset;
int my_row_base, my_col_base, src_row_base, block_size;
- int tag = 1, failure = 0, success = 1;
+ int tag = 1;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &num_procs);
MPI_Type_extent(send_type, &extent);
if (!alltoall_check_is_2dmesh(num_procs, &X, &Y))
- return failure;
+ return MPI_ERR_OTHER;
my_row_base = (rank / Y) * Y;
my_col_base = rank % Y;
if (!tmp_buff1) {
XBT_DEBUG("alltoall-2dmesh_shoot.c:88: cannot allocate memory");
MPI_Finalize();
- exit(failure);
+ exit(MPI_ERR_OTHER);
}
tmp_buff2 = (char *) malloc(block_size * Y);
if (!tmp_buff2) {
XBT_WARN("alltoall-2dmesh_shoot.c:88: cannot allocate memory");
MPI_Finalize();
- exit(failure);
+ exit(MPI_ERR_OTHER);
}
if (!reqs) {
XBT_WARN("alltoall-2dmesh_shoot.c:88: cannot allocate memory");
MPI_Finalize();
- exit(failure);
+ exit(MPI_ERR_OTHER);
}
req_ptr = reqs;
free(statuses);
free(tmp_buff1);
free(tmp_buff2);
- return success;
+ return MPI_SUCCESS;
}
MPI_Status status, *statuses;
int i, j, src, dst, rank, num_procs, num_reqs, X, Y, Z, block_size, count;
int my_z, two_dsize, my_row_base, my_col_base, my_z_base, src_row_base;
- int src_z_base, send_offset, recv_offset, tag = 1, failure = 0, success = 1;
+ int src_z_base, send_offset, recv_offset, tag = 1;
char *tmp_buff1, *tmp_buff2;
MPI_Type_extent(send_type, &extent);
if (!alltoall_check_is_3dmesh(num_procs, &X, &Y, &Z))
- return failure;
+ return MPI_ERR_OTHER;
num_reqs = X;
if (Y > X)
block_size = extent * send_count;
- tmp_buff1 = (char *) malloc(block_size * num_procs * two_dsize);
- if (!tmp_buff1) {
- printf("alltoall-3Dmesh:97: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
-
- tmp_buff2 = (char *) malloc(block_size * two_dsize);
- if (!tmp_buff2) {
- printf("alltoall-3Dmesh:105: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
+ tmp_buff1 = (char *) xbt_malloc(block_size * num_procs * two_dsize);
+ tmp_buff2 = (char *) xbt_malloc(block_size * two_dsize);
- statuses = (MPI_Status *) malloc(num_reqs * sizeof(MPI_Status));
- reqs = (MPI_Request *) malloc(num_reqs * sizeof(MPI_Request));
- if (!reqs) {
- printf("alltoall-3Dmesh:113: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
+ statuses = (MPI_Status *) xbt_malloc(num_reqs * sizeof(MPI_Status));
+ reqs = (MPI_Request *) xbt_malloc(num_reqs * sizeof(MPI_Request));
req_ptr = reqs;
free(statuses);
free(tmp_buff1);
free(tmp_buff2);
- return success;
+ return MPI_SUCCESS;
}
int *blocks_length, *disps;
int i, src, dst, rank, num_procs, count, remainder, block, position;
- int pack_size, tag = 1, pof2 = 1, success = 1, failure = 0;
+ int pack_size, tag = 1, pof2 = 1;
char *tmp_buff;
MPI_Type_extent(recv_type, &extent);
- tmp_buff = (char *) malloc(num_procs * recv_count * extent);
- if (!tmp_buff) {
- printf("alltoall-bruck:53: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
-
- disps = (int *) malloc(sizeof(int) * num_procs);
- if (!disps) {
- printf("alltoall-bruck:61: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
-
- blocks_length = (int *) malloc(sizeof(int) * num_procs);
- if (!blocks_length) {
- printf("alltoall-bruck:69: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
-
+ tmp_buff = (char *) xbt_malloc(num_procs * recv_count * extent);
+ disps = (int *) xbt_malloc(sizeof(int) * num_procs);
+ blocks_length = (int *) xbt_malloc(sizeof(int) * num_procs);
MPI_Sendrecv(send_ptr + rank * send_count * extent,
(num_procs - rank) * send_count, send_type, rank, tag,
recv_count, recv_type, rank, tag, comm, &status);
free(tmp_buff);
- return success;
+ return MPI_SUCCESS;
}
MPI_Aint send_chunk, recv_chunk;
MPI_Status s;
int i, src, dst, rank, num_procs, next_partner;
- int tag = 1, success = 1; /*, failure = 0; */
+ int tag = 1; /*, failure = 0; */
char send_sync = 'a', recv_sync = 'b';
char *send_ptr = (char *) send_buff;
&recv_sync, 1, MPI_CHAR, next_partner, tag, comm, &s);
}
}
- return success;
+ return MPI_SUCCESS;
}
MPI_Status s;
MPI_Aint send_chunk, recv_chunk;
int i, src, dst, rank, num_procs;
- int tag = 101, success = 1; /*, failure = 0, pof2 = 1; */
+ int tag = 101;
char *send_ptr = (char *) send_buff;
char *recv_ptr = (char *) recv_buff;
tag, recv_ptr + src * recv_chunk, recv_count, recv_type,
src, tag, comm, &s);
}
- return success;
+ return MPI_SUCCESS;
}
MPI_Aint send_chunk, recv_chunk;
MPI_Status s;
int i, src, dst, rank, num_procs;
- int tag = 1, success = 1; /*, failure = 0, pof2 = 1; */
+ int tag = 1;
char *send_ptr = (char *) send_buff;
char *recv_ptr = (char *) recv_buff;
src, tag, comm, &s);
}
- return success;
+ return MPI_SUCCESS;
}
MPI_Aint send_chunk, recv_chunk;
MPI_Status s;
int i, src, dst, rank, num_procs;
- int tag = 1, success = 1;
-
+ int tag = 1;
char *send_ptr = (char *) send_buff;
char *recv_ptr = (char *) recv_buff;
for (i = 0; i < num_procs; i++) {
src = dst = rank ^ i;
- MPI_Sendrecv(send_ptr + dst * send_chunk, send_count, send_type, dst,
- tag, recv_ptr + src * recv_chunk, recv_count, recv_type,
- src, tag, comm, &s);
+ MPI_Sendrecv(send_ptr + dst * send_chunk, send_count, send_type, dst, tag,
+ recv_ptr + src * recv_chunk, recv_count, recv_type, src, tag,
+ comm, &s);
}
- return success;
+ return MPI_SUCCESS;
}
int dst_tree_root, rank_tree_root, send_offset, recv_offset;
int rank, num_procs, j, k, dst, curr_size, max_size;
int last_recv_count, tmp_mask, tree_root, num_procs_completed;
- int tag = 1, mask = 1, success = 1, failure = 0, i = 0;
+ int tag = 1, mask = 1, i = 0;
char *tmp_buff;
char *send_ptr = (char *) send_buff;
max_size = num_procs * recv_increment;
- tmp_buff = (char *) malloc(max_size);
- if (!tmp_buff) {
- printf("alltoall-rdb:56: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
+ tmp_buff = (char *) xbt_malloc(max_size);
curr_size = send_count * num_procs;
recv_ptr + (i * recv_count * extent),
recv_count, recv_type, rank, tag, comm, &status);
free(tmp_buff);
- return success;
+ return MPI_SUCCESS;
}
MPI_Aint send_chunk, recv_chunk;
MPI_Status s;
int i, src, dst, rank, num_procs, next_dst, next_src;
- int tag = 1, success = 1; /*, failure = 0; */
+ int tag = 1;
char send_sync = 'a', recv_sync = 'b';
char *send_ptr = (char *) send_buff;
}
}
- return success;
+ return MPI_SUCCESS;
}
MPI_Status s;
MPI_Aint send_chunk, recv_chunk;
int i, src, dst, rank, num_procs;
- int tag = 1, success = 1; /*, failure = 0, pof2 = 1; */
+ int tag = 1;
char *send_ptr = (char *) send_buff;
char *recv_ptr = (char *) recv_buff;
src, tag, comm, &s);
}
- return success;
+ return MPI_SUCCESS;
}
MPI_Status s;
MPI_Aint send_chunk, recv_chunk;
int i, src, dst, rank, num_procs;
- int tag = 1, success = 1; /*, failure = 0, pof2 = 1; */
+ int tag = 1;
char *send_ptr = (char *) send_buff;
char *recv_ptr = (char *) recv_buff;
tag, recv_ptr + src * recv_chunk, recv_count, recv_type,
src, tag, comm, &s);
}
- return success;
+ return MPI_SUCCESS;
}
MPI_Status s;
MPI_Aint send_chunk, recv_chunk;
int i, src, dst, rank, num_procs;
- int tag = 1, success = 1; /*, failure = 0, pof2 = 1 */ ;
+ int tag = 11;
char *send_ptr = (char *) send_buff;
char *recv_ptr = (char *) recv_buff;
tag, recv_ptr + src * recv_chunk, recv_count, recv_type,
src, tag, comm, &s);
}
- return success;
+ return MPI_SUCCESS;
}
nreqs = 2 * (size - 1);
if (nreqs > 0) {
- req = (MPI_Request *) malloc(nreqs * sizeof(MPI_Request));
- statuses = (MPI_Status *) malloc(nreqs * sizeof(MPI_Status));
- if (!req || !statuses) {
- free(req);
- free(statuses);
- return 0;
- }
+ req = (MPI_Request *) xbt_malloc(nreqs * sizeof(MPI_Request));
+ statuses = (MPI_Status *) xbt_malloc(nreqs * sizeof(MPI_Status));
} else {
req = NULL;
statuses = NULL;
free((char *) req);
if (statuses)
free(statuses);
- return (1);
+ return MPI_SUCCESS;
}
MPI_Comm comm)
{
int src, dst, rank, num_procs, mask, relative_rank;
- int tag = 1, success = 0;
+ int tag = 1;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &num_procs);
mask >>= 1;
}
- return success;
+ return MPI_SUCCESS;
}
int i, src, dst, rank, num_procs;
int mask, relative_rank, curr_size, recv_size, send_size, nbytes;
int scatter_size, left, right, next_src, *recv_counts, *disps;
- int tag = 1, success = 0, failure = 1;
+ int tag = 1;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &num_procs);
}
// done scatter now do allgather
- recv_counts = (int *) malloc(sizeof(int) * num_procs);
- if (!recv_counts) {
- printf("bcast-scatter-LR-allgather:95: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
-
- disps = (int *) malloc(sizeof(int) * num_procs);
- if (!disps) {
- printf("bcast-scatter-LR-allgather:103: cannot allocate memory\n");
- MPI_Finalize();
- exit(failure);
- }
+ recv_counts = (int *) xbt_malloc(sizeof(int) * num_procs);
+ disps = (int *) xbt_malloc(sizeof(int) * num_procs);
for (i = 0; i < num_procs; i++) {
recv_counts[i] = nbytes - i * scatter_size;
free(recv_counts);
free(disps);
- return success;
+ return MPI_SUCCESS;
}
int mask, relative_rank, curr_size, recv_size, send_size, nbytes;
int scatter_size, tree_root, relative_dst, dst_tree_root;
int my_tree_root, offset, tmp_mask, num_procs_completed;
- int tag = 1, success = 0;
+ int tag = 1;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &num_procs);
i++;
}
- return success;
+ return MPI_SUCCESS;
}
fi
shift
+# steal --cfg and --log options
+while [ $# -gt 0 ]; do
+ case "$1" in
+ "--cfg="*|"--log="*)
+ for OPT in ${1#*=}
+ do
+ SIMOPTS="$SIMOPTS ${1%%=*}=$OPT"
+ done
+ shift 1
+ ;;
+ *)
+ PROC_ARGS="$PROC_ARGS $1"
+ shift
+ ;;
+ esac
+done
+
+
##-----------------------------------
# Basic checks on the provided arguments
echo " <argument value=\"${hosttraces[0]}\"/>" >> ${APPLICATIONTMP}
fi
else
- for ARG in $*; do
+ for ARG in $PROC_ARGS; do
echo " <argument value=\"${ARG}\"/>" >> ${APPLICATIONTMP}
done
fi
include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
- add_executable(alltoall2 alltoall2.c )
+ add_executable(allgather_coll allgather_coll.c)
+ add_executable(alltoall2 alltoall2.c)
add_executable(alltoall_basic alltoall_basic.c)
+ add_executable(alltoall_coll alltoall_coll.c)
add_executable(alltoallv alltoallv.c)
add_executable(allreduce allreduce.c)
+ add_executable(allreduce_coll allreduce_coll.c)
add_executable(bcast bcast.c)
+ add_executable(bcast_coll bcast_coll.c)
add_executable(compute compute.c)
add_executable(compute2 compute2.c)
add_executable(compute3 compute3.c)
add_executable(pingpong pingpong.c)
add_executable(scatter scatter.c)
add_executable(reduce reduce.c)
+ add_executable(reduce_coll reduce_coll.c)
add_executable(split split.c)
add_executable(smpi_sendrecv sendrecv.c)
add_executable(ttest01 ttest01.c)
add_executable(indexed_test indexed_test.c)
add_executable(struct_test struct_test.c)
+ target_link_libraries(allgather_coll simgrid)
target_link_libraries(alltoall2 simgrid)
target_link_libraries(alltoall_basic simgrid)
+ target_link_libraries(alltoall_coll simgrid)
target_link_libraries(alltoallv simgrid)
target_link_libraries(allreduce simgrid)
+ target_link_libraries(allreduce_coll simgrid)
target_link_libraries(bcast simgrid)
+ target_link_libraries(bcast_coll simgrid)
target_link_libraries(compute simgrid)
target_link_libraries(compute2 simgrid)
target_link_libraries(compute3 simgrid)
target_link_libraries(pingpong simgrid)
target_link_libraries(scatter simgrid)
target_link_libraries(reduce simgrid)
+ target_link_libraries(reduce_coll simgrid)
target_link_libraries(split simgrid)
target_link_libraries(smpi_sendrecv simgrid)
target_link_libraries(ttest01 simgrid)
set(tesh_files
${tesh_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgather_coll.tesh
+ ${CMAKE_CURRENT_SOURCE_DIR}/allreduce_coll.tesh
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoall_coll.tesh
${CMAKE_CURRENT_SOURCE_DIR}/bcast.tesh
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast_coll.tesh
${CMAKE_CURRENT_SOURCE_DIR}/compute.tesh
${CMAKE_CURRENT_SOURCE_DIR}/hvector.tesh
${CMAKE_CURRENT_SOURCE_DIR}/indexed.tesh
${CMAKE_CURRENT_SOURCE_DIR}/pt2pt.tesh
${CMAKE_CURRENT_SOURCE_DIR}/reduce.tesh
+ ${CMAKE_CURRENT_SOURCE_DIR}/reduce_coll.tesh
${CMAKE_CURRENT_SOURCE_DIR}/struct.tesh
${CMAKE_CURRENT_SOURCE_DIR}/vector.tesh
PARENT_SCOPE
)
set(examples_src
${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/allgather_coll.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/allreduce_coll.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/alltoall_coll.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/bcast_coll.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/reduce_coll.c
${CMAKE_CURRENT_SOURCE_DIR}/alltoallv.c
${CMAKE_CURRENT_SOURCE_DIR}/get_processor_name.c
${CMAKE_CURRENT_SOURCE_DIR}/pingpong.c
--- /dev/null
+/* Copyright (c) 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include "mpi.h"
+
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+#endif
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ int i;
+ int *sb;
+ int *rb;
+ int status;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ int count = 2;
+ sb = (int *) xbt_malloc(count * sizeof(int));
+ rb = (int *) xbt_malloc(count * size * sizeof(int));
+
+ for (i = 0; i < count; ++i)
+ sb[i] = rank * count + i;
+ for (i = 0; i < count * size; ++i)
+ rb[i] = 0;
+
+ printf("[%d] sndbuf=[", rank);
+ for (i = 0; i < count; i++)
+ printf("%d ", sb[i]);
+ printf("]\n");
+
+ status = MPI_Allgather(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
+
+ printf("[%d] rcvbuf=[", rank);
+ for (i = 0; i < count * size; i++)
+ printf("%d ", rb[i]);
+ printf("]\n");
+
+
+ if (rank == 0) {
+ if (status != MPI_SUCCESS) {
+ printf("allgather returned %d\n", status);
+ fflush(stdout);
+ }
+ }
+ free(sb);
+ free(rb);
+ MPI_Finalize();
+ return (EXIT_SUCCESS);
+}
--- /dev/null
+# Smpi Allgather collectives tests
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+
+p Test allgather
+$ ../../bin/smpirun -map -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../examples/msg/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ./allgather_coll
+> You requested to use 16 processes, but there is only 5 processes in your hostfile...
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [rank 2] -> Fafard
+> [rank 3] -> Ginette
+> [rank 4] -> Bourassa
+> [rank 5] -> Tremblay
+> [rank 6] -> Jupiter
+> [rank 7] -> Fafard
+> [rank 8] -> Ginette
+> [rank 9] -> Bourassa
+> [rank 10] -> Tremblay
+> [rank 11] -> Jupiter
+> [rank 12] -> Fafard
+> [rank 13] -> Ginette
+> [rank 14] -> Bourassa
+> [rank 15] -> Tremblay
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [1] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [2] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [3] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [4] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [5] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [6] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [7] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [8] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [9] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [10] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [11] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [12] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [13] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [14] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [15] rcvbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [0] sndbuf=[0 1 ]
+> [1] sndbuf=[2 3 ]
+> [2] sndbuf=[4 5 ]
+> [3] sndbuf=[6 7 ]
+> [4] sndbuf=[8 9 ]
+> [5] sndbuf=[10 11 ]
+> [6] sndbuf=[12 13 ]
+> [7] sndbuf=[14 15 ]
+> [8] sndbuf=[16 17 ]
+> [9] sndbuf=[18 19 ]
+> [10] sndbuf=[20 21 ]
+> [11] sndbuf=[22 23 ]
+> [12] sndbuf=[24 25 ]
+> [13] sndbuf=[26 27 ]
+> [14] sndbuf=[28 29 ]
+> [15] sndbuf=[30 31 ]
+
--- /dev/null
+/* Copyright (c) 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include "mpi.h"
+
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+#endif
+
+int main(int argc, char *argv[])
+{
+  int rank, size;
+  int i;
+  int *sb;
+  int *rb;
+  int status;
+
+  MPI_Init(&argc, &argv);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+  sb = (int *) xbt_malloc(size * sizeof(int));
+  rb = (int *) xbt_malloc(size * sizeof(int));
+
+  for (i = 0; i < size; ++i) {
+    sb[i] = rank*size + i;
+    rb[i] = 0;
+  }
+
+  printf("[%d] sndbuf=[", rank);
+  for (i = 0; i < size; i++)
+    printf("%d ", sb[i]);
+  printf("]\n");
+
+  status = MPI_Allreduce(sb, rb, size, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+
+  printf("[%d] rcvbuf=[", rank);
+  for (i = 0; i < size; i++)
+    printf("%d ", rb[i]);
+  printf("]\n");
+
+
+  if (rank == 0) {
+    if (status != MPI_SUCCESS) {
+      printf("allreduce returned %d\n", status);
+      fflush(stdout);
+    }
+  }
+  free(sb);
+  free(rb);
+  MPI_Finalize();
+  return (EXIT_SUCCESS);
+}
--- /dev/null
+# Smpi Allreduce collectives tests
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+
+p Test allreduce
+$ ../../bin/smpirun -map -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../examples/msg/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ./allreduce_coll
+> You requested to use 16 processes, but there is only 5 processes in your hostfile...
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [rank 2] -> Fafard
+> [rank 3] -> Ginette
+> [rank 4] -> Bourassa
+> [rank 5] -> Tremblay
+> [rank 6] -> Jupiter
+> [rank 7] -> Fafard
+> [rank 8] -> Ginette
+> [rank 9] -> Bourassa
+> [rank 10] -> Tremblay
+> [rank 11] -> Jupiter
+> [rank 12] -> Fafard
+> [rank 13] -> Ginette
+> [rank 14] -> Bourassa
+> [rank 15] -> Tremblay
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
+> [1] sndbuf=[16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [2] sndbuf=[32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 ]
+> [3] sndbuf=[48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 ]
+> [4] sndbuf=[64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 ]
+> [5] sndbuf=[80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 ]
+> [6] sndbuf=[96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 ]
+> [7] sndbuf=[112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 ]
+> [8] sndbuf=[128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 ]
+> [9] sndbuf=[144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 ]
+> [10] sndbuf=[160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 ]
+> [11] sndbuf=[176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 ]
+> [12] sndbuf=[192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 ]
+> [13] sndbuf=[208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 ]
+> [14] sndbuf=[224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 ]
+> [15] sndbuf=[240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 ]
+> [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [1] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [2] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [3] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [4] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [5] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [6] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [7] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [8] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [9] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [10] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [11] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [12] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [13] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [14] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [15] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+
sb[i] = rank + 1;
rb[i] = 0;
}
+
status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);
printf("[%d] rcvbuf=[", rank);
if (rank == 0) {
- if (status != 0) {
+ if (status != MPI_SUCCESS) {
printf("all_to_all returned %d\n", status);
fflush(stdout);
}
--- /dev/null
+/* Copyright (c) 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include "mpi.h"
+
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+#endif
+
+int main(int argc, char *argv[])
+{
+ int rank, size;
+ int i;
+ int *sb;
+ int *rb;
+ int status;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ sb = (int *) xbt_malloc(size * sizeof(int) * 2);
+ rb = (int *) xbt_malloc(size * sizeof(int) * 2);
+
+ for (i = 0; i < size; ++i) {
+ sb[i] = rank*size + i;
+ rb[i] = 0;
+ }
+
+ printf("[%d] sndbuf=[", rank);
+ for (i = 0; i < size; i++)
+ printf("%d ", sb[i]);
+ printf("]\n");
+
+ status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);
+
+ printf("[%d] rcvbuf=[", rank);
+ for (i = 0; i < size; i++)
+ printf("%d ", rb[i]);
+ printf("]\n");
+
+
+ if (rank == 0) {
+ if (status != MPI_SUCCESS) {
+ printf("all_to_all returned %d\n", status);
+ fflush(stdout);
+ }
+ }
+ free(sb);
+ free(rb);
+ MPI_Finalize();
+ return (EXIT_SUCCESS);
+}
--- /dev/null
+# Smpi Alltoall collectives tests
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+
+p Test all to all
+$ ../../bin/smpirun -map -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../examples/msg/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ./alltoall_coll -q
+> You requested to use 16 processes, but there is only 5 processes in your hostfile...
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [rank 2] -> Fafard
+> [rank 3] -> Ginette
+> [rank 4] -> Bourassa
+> [rank 5] -> Tremblay
+> [rank 6] -> Jupiter
+> [rank 7] -> Fafard
+> [rank 8] -> Ginette
+> [rank 9] -> Bourassa
+> [rank 10] -> Tremblay
+> [rank 11] -> Jupiter
+> [rank 12] -> Fafard
+> [rank 13] -> Ginette
+> [rank 14] -> Bourassa
+> [rank 15] -> Tremblay
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
+> [1] sndbuf=[16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [2] sndbuf=[32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 ]
+> [3] sndbuf=[48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 ]
+> [4] sndbuf=[64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 ]
+> [5] sndbuf=[80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 ]
+> [6] sndbuf=[96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 ]
+> [7] sndbuf=[112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 ]
+> [8] sndbuf=[128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 ]
+> [9] sndbuf=[144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 ]
+> [10] sndbuf=[160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 ]
+> [11] sndbuf=[176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 ]
+> [12] sndbuf=[192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 ]
+> [13] sndbuf=[208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 ]
+> [14] sndbuf=[224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 ]
+> [15] sndbuf=[240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 ]
+> [0] rcvbuf=[0 16 32 48 64 80 96 112 128 144 160 176 192 208 224 240 ]
+> [1] rcvbuf=[1 17 33 49 65 81 97 113 129 145 161 177 193 209 225 241 ]
+> [2] rcvbuf=[2 18 34 50 66 82 98 114 130 146 162 178 194 210 226 242 ]
+> [3] rcvbuf=[3 19 35 51 67 83 99 115 131 147 163 179 195 211 227 243 ]
+> [4] rcvbuf=[4 20 36 52 68 84 100 116 132 148 164 180 196 212 228 244 ]
+> [5] rcvbuf=[5 21 37 53 69 85 101 117 133 149 165 181 197 213 229 245 ]
+> [6] rcvbuf=[6 22 38 54 70 86 102 118 134 150 166 182 198 214 230 246 ]
+> [7] rcvbuf=[7 23 39 55 71 87 103 119 135 151 167 183 199 215 231 247 ]
+> [8] rcvbuf=[8 24 40 56 72 88 104 120 136 152 168 184 200 216 232 248 ]
+> [9] rcvbuf=[9 25 41 57 73 89 105 121 137 153 169 185 201 217 233 249 ]
+> [10] rcvbuf=[10 26 42 58 74 90 106 122 138 154 170 186 202 218 234 250 ]
+> [11] rcvbuf=[11 27 43 59 75 91 107 123 139 155 171 187 203 219 235 251 ]
+> [12] rcvbuf=[12 28 44 60 76 92 108 124 140 156 172 188 204 220 236 252 ]
+> [13] rcvbuf=[13 29 45 61 77 93 109 125 141 157 173 189 205 221 237 253 ]
+> [14] rcvbuf=[14 30 46 62 78 94 110 126 142 158 174 190 206 222 238 254 ]
+> [15] rcvbuf=[15 31 47 63 79 95 111 127 143 159 175 191 207 223 239 255 ]
+
--- /dev/null
+/* Copyright (c) 2009. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <mpi.h>
+
+/* Broadcast smoke test: every rank starts with 3, rank 0 switches to 17
+ * and broadcasts it, so each rank must print 17 afterwards. */
+int main(int argc, char **argv)
+{
+  int comm_size;
+  int my_rank;
+  int value = 3;
+  int err;
+
+  MPI_Init(&argc, &argv);
+  MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
+
+  if (my_rank == 0)
+    value = 17;
+
+  err = MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
+  printf("node %d has value %d after broadcast\n", my_rank, value);
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  /* Only the root reports a failed broadcast, keeping output deterministic. */
+  if (my_rank == 0 && err != MPI_SUCCESS) {
+    printf("bcast returned %d\n", err);
+    fflush(stdout);
+  }
+
+  MPI_Finalize();
+  return 0;
+}
--- /dev/null
+p Test Broadcast with more processes than hosts
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+
+$ ../../bin/smpirun -map -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../examples/msg/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ./bcast_coll
+> You requested to use 16 processes, but there is only 5 processes in your hostfile...
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [rank 2] -> Fafard
+> [rank 3] -> Ginette
+> [rank 4] -> Bourassa
+> [rank 5] -> Tremblay
+> [rank 6] -> Jupiter
+> [rank 7] -> Fafard
+> [rank 8] -> Ginette
+> [rank 9] -> Bourassa
+> [rank 10] -> Tremblay
+> [rank 11] -> Jupiter
+> [rank 12] -> Fafard
+> [rank 13] -> Ginette
+> [rank 14] -> Bourassa
+> [rank 15] -> Tremblay
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> node 0 has value 17 after broadcast
+> node 1 has value 17 after broadcast
+> node 2 has value 17 after broadcast
+> node 3 has value 17 after broadcast
+> node 4 has value 17 after broadcast
+> node 5 has value 17 after broadcast
+> node 6 has value 17 after broadcast
+> node 7 has value 17 after broadcast
+> node 8 has value 17 after broadcast
+> node 9 has value 17 after broadcast
+> node 10 has value 17 after broadcast
+> node 11 has value 17 after broadcast
+> node 12 has value 17 after broadcast
+> node 13 has value 17 after broadcast
+> node 14 has value 17 after broadcast
+> node 15 has value 17 after broadcast
+
+
--- /dev/null
+/* Copyright (c) 2009, 2010. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include "mpi.h"
+
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+#endif
+
+/* Reduce test: rank r sends [r*size .. r*size+size-1]; MPI_Reduce sums the
+ * buffers element-wise onto rank 0, while the other ranks keep their
+ * zero-filled rcvbuf (this is what the tesh expected output checks). */
+int main(int argc, char *argv[])
+{
+  int rank, size;
+  int i;
+  int *sb;
+  int *rb;
+  int status;
+
+  MPI_Init(&argc, &argv);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+  /* Use plain malloc: stdlib.h is included and the buffers are released
+   * with free() below; the previous xbt_malloc had no declaration in any
+   * of the included headers. */
+  sb = (int *) malloc(size * sizeof(int));
+  rb = (int *) malloc(size * sizeof(int));
+  if (sb == NULL || rb == NULL) {
+    fprintf(stderr, "malloc failed\n");
+    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+  }
+
+  for (i = 0; i < size; ++i) {
+    sb[i] = rank * size + i;
+    rb[i] = 0;
+  }
+
+  printf("[%d] sndbuf=[", rank);
+  for (i = 0; i < size; i++)
+    printf("%d ", sb[i]);
+  printf("]\n");
+
+  status = MPI_Reduce(sb, rb, size, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  printf("[%d] rcvbuf=[", rank);
+  for (i = 0; i < size; i++)
+    printf("%d ", rb[i]);
+  printf("]\n");
+
+
+  if (rank == 0) {
+    if (status != MPI_SUCCESS) {
+      /* Fixed: this program exercises MPI_Reduce, not all_to_all. */
+      printf("reduce returned %d\n", status);
+      fflush(stdout);
+    }
+  }
+  free(sb);
+  free(rb);
+  MPI_Finalize();
+  return (EXIT_SUCCESS);
+}
--- /dev/null
+# Smpi Reduce collective test
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+
+p Test reduce
+$ ../../bin/smpirun -map -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../examples/msg/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ./reduce_coll
+> You requested to use 16 processes, but there is only 5 processes in your hostfile...
+> [rank 0] -> Tremblay
+> [rank 1] -> Jupiter
+> [rank 2] -> Fafard
+> [rank 3] -> Ginette
+> [rank 4] -> Bourassa
+> [rank 5] -> Tremblay
+> [rank 6] -> Jupiter
+> [rank 7] -> Fafard
+> [rank 8] -> Ginette
+> [rank 9] -> Bourassa
+> [rank 10] -> Tremblay
+> [rank 11] -> Jupiter
+> [rank 12] -> Fafard
+> [rank 13] -> Ginette
+> [rank 14] -> Bourassa
+> [rank 15] -> Tremblay
+> [0.000000] [surf_config/INFO] Switching workstation model to compound since you changed the network and/or cpu model(s)
+> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
+> [1] sndbuf=[16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 ]
+> [2] sndbuf=[32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 ]
+> [3] sndbuf=[48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 ]
+> [4] sndbuf=[64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 ]
+> [5] sndbuf=[80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 ]
+> [6] sndbuf=[96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 ]
+> [7] sndbuf=[112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 ]
+> [8] sndbuf=[128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 ]
+> [9] sndbuf=[144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 ]
+> [10] sndbuf=[160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 ]
+> [11] sndbuf=[176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 ]
+> [12] sndbuf=[192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 ]
+> [13] sndbuf=[208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 ]
+> [14] sndbuf=[224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 ]
+> [15] sndbuf=[240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 ]
+> [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
+> [1] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [2] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [3] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [4] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [5] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [6] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [7] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [8] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [9] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [10] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [11] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [12] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [13] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [14] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]
+> [15] rcvbuf=[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ]