endif()
endif()
- ADD_TEST(smpi-energy ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
+ ADD_TEST(smpi-energy-thread ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
if(SMPI_F2C)
- ADD_TEST(smpi-energy-f77 ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
+ ADD_TEST(smpi-energy-f77-thread ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
endif()
if(SMPI_F90)
- ADD_TEST(smpi-energy-f90 ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
+ ADD_TEST(smpi-energy-f90-thread ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
+ endif()
+ if(CONTEXT_UCONTEXT)
+ ADD_TEST(smpi-energy-ucontext ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
+ if(SMPI_F2C)
+ ADD_TEST(smpi-energy-f77-ucontext ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
+ endif()
+ if(SMPI_F90)
+ ADD_TEST(smpi-energy-f90-ucontext ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
+ endif()
+ endif()
+ if(HAVE_RAWCTX)
+ ADD_TEST(smpi-energy-raw ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
+ if(SMPI_F2C)
+ ADD_TEST(smpi-energy-f77-raw ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
+ endif()
+ if(SMPI_F90)
+ ADD_TEST(smpi-energy-f90-raw ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
+ endif()
endif()
if(HAVE_TRACING)
teshsuite/smpi/mpich3-test/coll/CMakeLists.txt
teshsuite/smpi/mpich3-test/datatype/CMakeLists.txt
teshsuite/smpi/mpich3-test/group/CMakeLists.txt
+ teshsuite/smpi/mpich3-test/topo/CMakeLists.txt
teshsuite/smpi/mpich3-test/init/CMakeLists.txt
teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt
teshsuite/smpi/mpich3-test/f77/util/CMakeLists.txt
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/comm)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/group)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/topo)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/init)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt)
If you want to push the scalability limits of your code, you really
want to reduce the \b contexts/stack_size item. Its default value
-is 128 (in KiB), while our Chord simulation works with stacks as small
+is 8192 (in KiB), while our Chord simulation works with stacks as small
as 16 KiB, for example. For the thread factory, the default value
is the one of the system, if it is too large/small, it has to be set
with this parameter.
XBT_INFO("Get data: '%s'", data);
- MSG_storage_set_data(storage,strdup("Some user data"));
+ MSG_storage_set_data(storage, xbt_strdup("Some user data"));
data = MSG_storage_get_data(storage);
XBT_INFO("Set and get data: '%s'", data);
+ xbt_free(data);
xbt_free(storage_name);
}
#ifdef HAVE_TRACING
- if (TRACE_is_enabled()) {
- simcall_set_category(comm->s_comm, task->category);
- }
-#endif
+ if (TRACE_is_enabled()) {
+ simcall_set_category(act, task->category);
+ }
-#ifdef HAVE_TRACING
if (call_end)
TRACE_msg_task_put_end();
#endif
xbt_cfgelm_string, 1, 1, _sg_cfg_cb_context_factory, NULL);
xbt_cfg_setdefault_string(_sg_cfg_set, "contexts/factory", dflt_ctx_fact);
- /* stack size of contexts in Ko */
+ /* stack size of contexts in KiB */
xbt_cfg_register(&_sg_cfg_set, "contexts/stack_size",
- "Stack size of contexts in Kib",
+ "Stack size of contexts in KiB",
xbt_cfgelm_int, 1, 1, _sg_cfg_cb_context_stack_size, NULL);
- xbt_cfg_setdefault_int(_sg_cfg_set, "contexts/stack_size", 128);
+ xbt_cfg_setdefault_int(_sg_cfg_set, "contexts/stack_size", 8*1024);
/* No, it was not set yet (the above setdefault() changed this to 1). */
smx_context_stack_size_was_set = 0;
char* smx_context_factory_name = NULL; /* factory name specified by --cfg=contexts/factory:value */
smx_ctx_factory_initializer_t smx_factory_initializer_to_use = NULL;
-int smx_context_stack_size = 128 * 1024;
+int smx_context_stack_size;
int smx_context_stack_size_was_set = 0;
#ifdef HAVE_THREAD_LOCAL_STORAGE
static __thread smx_context_t smx_current_context_parallel;
xbt_dict_remove((xbt_dict_t) _sg_cfg_set,"contexts/factory");
}
+void *SIMIX_context_stack_new(void)
+{
+ return xbt_malloc0(smx_context_stack_size);
+}
+
+void SIMIX_context_stack_delete(void *stack)
+{
+ xbt_free(stack);
+}
+
/**
* \brief Returns whether some parallel threads are used
* for the user contexts.
/* if the user provided a function for the process then use it,
otherwise it is the context for maestro */
if (code) {
- context->malloced_stack = xbt_malloc0(smx_context_stack_size);
+ context->malloced_stack = SIMIX_context_stack_new();
context->stack_top =
raw_makecontext(context->malloced_stack, smx_context_stack_size,
(void_f_pvoid_t) smx_ctx_raw_wrapper, context);
context)->valgrind_stack_id);
#endif /* HAVE_VALGRIND_VALGRIND_H */
- free(((smx_ctx_raw_t) context)->malloced_stack);
+ SIMIX_context_stack_delete(((smx_ctx_raw_t) context)->malloced_stack);
+
}
smx_ctx_base_free(context);
}
#ifdef HAVE_VALGRIND_VALGRIND_H
unsigned int valgrind_stack_id; /* the valgrind stack id */
#endif
- char stack[0]; /* the thread stack (must remain the last element of the structure) */
+ char *stack; /* the thread stack */
} s_smx_ctx_sysv_t, *smx_ctx_sysv_t;
#ifdef CONTEXT_THREADS
static smx_ctx_sysv_t sysv_maestro_context;
static int smx_ctx_sysv_factory_finalize(smx_context_factory_t *factory);
-static smx_context_t
-smx_ctx_sysv_create_context_sized(size_t structure_size,
- xbt_main_func_t code, int argc,
- char **argv,
- void_pfn_smxprocess_t cleanup_func,
- smx_process_t process);
static void smx_ctx_sysv_free(smx_context_t context);
static smx_context_t
smx_ctx_sysv_create_context(xbt_main_func_t code, int argc, char **argv,
}
static smx_context_t
-smx_ctx_sysv_create_context_sized(size_t size, xbt_main_func_t code,
- int argc, char **argv,
- void_pfn_smxprocess_t cleanup_func,
- smx_process_t process)
+smx_ctx_sysv_create_context(xbt_main_func_t code, int argc, char **argv,
+ void_pfn_smxprocess_t cleanup_func,
+ smx_process_t process)
{
int ctx_addr[CTX_ADDR_LEN];
smx_ctx_sysv_t context =
- (smx_ctx_sysv_t) smx_ctx_base_factory_create_context_sized(size,
- code,
- argc,
- argv,
- cleanup_func,
- process);
+ (smx_ctx_sysv_t) smx_ctx_base_factory_create_context_sized(
+ sizeof(s_smx_ctx_sysv_t),
+ code,
+ argc,
+ argv,
+ cleanup_func,
+ process);
/* if the user provided a function for the process then use it,
otherwise it is the context for maestro */
if (code) {
+ context->stack = SIMIX_context_stack_new();
getcontext(&(context->uc));
context->uc.uc_link = NULL;
}
if(MC_is_active() && code)
- MC_new_stack_area(context, ((smx_context_t)context)->process->name,
- &(context->uc), size);
+ MC_new_stack_area(context->stack, ((smx_context_t)context)->process->name,
+ &(context->uc), smx_context_stack_size);
return (smx_context_t) context;
}
-static smx_context_t
-smx_ctx_sysv_create_context(xbt_main_func_t code, int argc, char **argv,
- void_pfn_smxprocess_t cleanup_func,
- smx_process_t process)
-{
-
- return smx_ctx_sysv_create_context_sized(sizeof(s_smx_ctx_sysv_t) +
- smx_context_stack_size,
- code, argc, argv, cleanup_func,
- process);
-
-}
-
static void smx_ctx_sysv_free(smx_context_t context)
{
VALGRIND_STACK_DEREGISTER(((smx_ctx_sysv_t)
context)->valgrind_stack_id);
#endif /* HAVE_VALGRIND_VALGRIND_H */
-
+ SIMIX_context_stack_delete(((smx_ctx_sysv_t)context)->stack);
}
smx_ctx_base_free(context);
}
void SIMIX_context_mod_init(void);
void SIMIX_context_mod_exit(void);
+void *SIMIX_context_stack_new(void);
+void SIMIX_context_stack_delete(void *stack);
+
void SIMIX_context_set_current(smx_context_t context);
smx_context_t SIMIX_context_get_current(void);
status=$?
if [ ${status} = 139 ]; then echo "ERROR: A segmentation fault was triggered.
-A common cause in SimGrid may be the use of a too small stack size for the simulated processes (default 128KiB).
+A common cause in SimGrid may be the use of a too small stack size for the simulated processes (default 8192 KiB).
Please see contexts/stack_size parameter , or http://simgrid.org/simgrid/latest/doc/options.html#options_virt_stacksize "; fi
if [ -z "${KEEP}" ] ; then
if (!strcmp(name, table[i].name)) {
return i;
}
- name_list = strdup(table[0].name);
+ if (!table[0].name)
+ xbt_die("No model is valid! This is a bug.");
+ name_list = xbt_strdup(table[0].name);
for (i = 1; table[i].name; i++) {
name_list = (char *) xbt_realloc(name_list, strlen(name_list) + strlen(table[i].name) + 3);
strcat(name_list, ", ");
elm = (my_elem_t) xbt_set_get_by_name(head, key);
xbt_test_log(" Found %s (under ID %u)\n",
elm ? elm->data : "(null)", elm ? elm->ID : -1);
+ if (elm == NULL)
+ THROWF(mismatch_error, 0,
+ "Got a null elm for name %s", key);
if (strcmp(key, elm->name))
THROWF(mismatch_error, 0, "The key (%s) is not the one expected (%s)",
key, elm->name);
elm = (my_elem_t) xbt_set_get_by_id(head, id);
xbt_test_log("Found %s (data %s)",
elm ? elm->name : "(null)", elm ? elm->data : "(null)");
+ if (elm == NULL)
+ THROWF(mismatch_error, 0,
+ "Got a null elm for id %d", id);
if (id != elm->ID)
THROWF(mismatch_error, 0,
"The found ID (%u) is not the one expected (%d)", elm->ID, id);
xbt_set_foreach(set, cursor, elm) {
xbt_test_assert(elm, "Dude ! Got a null elm during traversal!");
+ if (!elm)
+ continue;
xbt_test_log("Id(%u): %s->%s\n", elm->ID, elm->name, elm->data);
xbt_test_assert(!strcmp(elm->name, elm->data),
"Key(%s) != value(%s). Aborting", elm->name,
xbt_test_add("Search 123");
elm = (my_elem_t) xbt_set_get_by_name(set, "123");
xbt_test_assert(elm, "elm must be there");
- xbt_assert(!strcmp("123", elm->data));
+ xbt_assert(elm && !strcmp("123", elm->data));
search_not_found(set, "Can't be found");
search_not_found(set, "123 Can't be found");
char *data = MSG_storage_get_data(storage);
XBT_INFO("Get data: '%s'", data);
- MSG_storage_set_data(storage,strdup("Some data"));
+ MSG_storage_set_data(storage, xbt_strdup("Some data"));
data = MSG_storage_get_data(storage);
XBT_INFO("\tSet and get data: '%s'", data);
+ xbt_free(data);
}
int client(int argc, char *argv[])
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+if(enable_smpi AND enable_smpi_MPICH3_testsuite)
+ if(WIN32)
+ set(CMAKE_C_FLAGS "-include ${CMAKE_HOME_DIRECTORY}/include/smpi/smpi_main.h")
+ else()
+ set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+ set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpiff")
+ endif()
+
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+ include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi")
+ include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
+
+
+# add_executable(cartcreates cartcreates.c)
+# add_executable(cartmap1 cartmap1.c)
+# add_executable(cartshift1 cartshift1.c)
+# add_executable(cartsuball cartsuball.c)
+# add_executable(cartzero cartzero.c)
+# add_executable(dgraph_unwgt dgraph_unwgt.c)
+# add_executable(dims1 dims1.c)
+# add_executable(dims2 dims2.c)
+# add_executable(distgraph1 distgraph1.c)
+# add_executable(graphcr2 graphcr2.c)
+# add_executable(graphcr graphcr.c)
+# add_executable(graphmap1 graphmap1.c)
+# add_executable(neighb_coll neighb_coll.c)
+# add_executable(topodup topodup.c)
+# add_executable(topotest topotest.c)
+
+
+# target_link_libraries(cartcreates simgrid mtest_c)
+# target_link_libraries(cartmap1 simgrid mtest_c)
+# target_link_libraries(cartshift1 simgrid mtest_c)
+# target_link_libraries(cartsuball simgrid mtest_c)
+# target_link_libraries(cartzero simgrid mtest_c)
+# target_link_libraries(dgraph_unwgt simgrid mtest_c)
+# target_link_libraries(dims1 simgrid mtest_c)
+# target_link_libraries(dims2 simgrid mtest_c)
+# target_link_libraries(distgraph1 simgrid mtest_c)
+# target_link_libraries(graphcr2 simgrid mtest_c)
+# target_link_libraries(graphcr simgrid mtest_c)
+# target_link_libraries(graphmap1 simgrid mtest_c)
+# target_link_libraries(neighb_coll simgrid mtest_c)
+# target_link_libraries(topodup simgrid mtest_c)
+# target_link_libraries(topotest simgrid mtest_c)
+
+endif()
+
+set(tesh_files
+ ${tesh_files}
+ PARENT_SCOPE
+ )
+set(xml_files
+ ${xml_files}
+ PARENT_SCOPE
+ )
+set(examples_src
+ ${examples_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/cartcreates.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cartmap1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cartshift1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cartsuball.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cartzero.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dgraph_unwgt.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dims1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/dims2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/distgraph1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/graphcr2.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/graphcr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/graphmap1.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/neighb_coll.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ ${CMAKE_CURRENT_SOURCE_DIR}/topodup.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/topotest.c
+ PARENT_SCOPE
+ )
+set(bin_files
+ ${bin_files}
+ PARENT_SCOPE
+ )
+set(txt_files
+ ${txt_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/testlist
+ PARENT_SCOPE
+ )
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank;
+ int dims[2], periods[2];
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ /* Create a new cartesian communicator in a subset of the processes */
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if (size < 2) {
+ fprintf( stderr, "This test needs at least 2 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ dims[0] = size-1;
+ periods[0] = 1;
+ MPI_Cart_create( MPI_COMM_WORLD, 1, dims, periods, 0, &comm );
+
+ if (comm != MPI_COMM_NULL) {
+ int csize;
+ MPI_Comm_size( comm, &csize );
+ if (csize != dims[0]) {
+ errs++;
+ fprintf( stderr,
+ "Sizes is wrong in cart communicator. Is %d, should be %d\n",
+ csize, dims[0] );
+ }
+ MPI_Barrier( comm );
+
+ MPI_Comm_free( &comm );
+ }
+ else if (rank < dims[0]) {
+ errs++;
+ fprintf( stderr, "Communicator returned is null!" );
+ }
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int dims[2];
+ int periods[2];
+ int size, rank, newrank;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+    /* This defines a one-dimensional cartesian grid with a single point */
+ periods[0] = 1;
+ dims[0] = 1;
+
+ MPI_Cart_map( MPI_COMM_WORLD, 1, dims, periods, &newrank );
+ if (rank > 0) {
+ if (newrank != MPI_UNDEFINED) {
+ errs++;
+ printf( "rank outside of input communicator not UNDEFINED\n" );
+ }
+ }
+ else {
+ if (rank != newrank) {
+ errs++;
+ printf( "Newrank not defined and should be 0\n" );
+ }
+ }
+
+
+    /* As of MPI 2.1, a 0-dimensional topology is valid (it's also a
+       point) */
+ MPI_Cart_map( MPI_COMM_WORLD, 0, dims, periods, &newrank );
+ if (rank > 0) {
+ if (newrank != MPI_UNDEFINED) {
+ errs++;
+ printf( "rank outside of input communicator not UNDEFINED\n" );
+ }
+ }
+ else {
+ /* rank == 0 */
+ if (rank != newrank) {
+ errs++;
+ printf( "Newrank not defined and should be 0\n" );
+ }
+ }
+
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank;
+ int source, dest;
+ int dims[2], periods[2];
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ dims[0] = size;
+ periods[0] = 1;
+ MPI_Cart_create( MPI_COMM_WORLD, 1, dims, periods, 0, &comm );
+ MPI_Cart_shift( comm, 0, 1, &source, &dest );
+ if (source != ((rank - 1 + size) % size)) {
+ errs++;
+ printf( "source for shift 1 is %d\n", source );
+ }
+ if (dest != ((rank + 1) % size)) {
+ errs++;
+ printf( "dest for shift 1 is %d\n", dest );
+ }
+ MPI_Cart_shift( comm, 0, 0, &source, &dest );
+ if (source != rank) {
+ errs++;
+ printf( "Source for shift 0 is %d\n", source );
+ }
+ if (dest != rank) {
+ errs++;
+ printf( "Dest for shift 0 is %d\n", dest );
+ }
+ MPI_Cart_shift( comm, 0, -1, &source, &dest );
+ if (source != ((rank + 1) % size)) {
+ errs++;
+ printf( "source for shift -1 is %d\n", source );
+ }
+ if (dest != ((rank - 1 + size) % size)) {
+ errs++;
+ printf( "dest for shift -1 is %d\n", dest );
+ }
+
+ /* Now, with non-periodic */
+ MPI_Comm_free( &comm );
+ periods[0] = 0;
+ MPI_Cart_create( MPI_COMM_WORLD, 1, dims, periods, 0, &comm );
+ MPI_Cart_shift( comm, 0, 1, &source, &dest );
+ if ((rank > 0 && source != (rank - 1)) ||
+ (rank == 0 && source != MPI_PROC_NULL)) {
+ errs++;
+ printf( "source for non-periodic shift 1 is %d\n", source );
+ }
+ if ((rank < size-1 && dest != rank + 1) ||
+ ((rank == size-1) && dest != MPI_PROC_NULL)) {
+ errs++;
+ printf( "dest for non-periodic shift 1 is %d\n", dest );
+ }
+ MPI_Cart_shift( comm, 0, 0, &source, &dest );
+ if (source != rank) {
+ errs++;
+ printf( "Source for non-periodic shift 0 is %d\n", source );
+ }
+ if (dest != rank) {
+ errs++;
+ printf( "Dest for non-periodic shift 0 is %d\n", dest );
+ }
+ MPI_Cart_shift( comm, 0, -1, &source, &dest );
+ if ((rank < size - 1 && source != rank + 1) ||
+ (rank == size - 1 && source != MPI_PROC_NULL)) {
+
+ errs++;
+ printf( "source for non-periodic shift -1 is %d\n", source );
+ }
+ if ((rank > 0 && dest != rank - 1) ||
+ (rank == 0 && dest != MPI_PROC_NULL)) {
+ errs++;
+ printf( "dest for non-periodic shift -1 is %d\n", dest );
+ }
+ MPI_Comm_free( &comm );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, dims[2], periods[2], remain[2];
+ int result, rank;
+ MPI_Comm comm, newcomm;
+
+ MTest_Init( &argc, &argv );
+
+ /* First, create a 1-dim cartesian communicator */
+ periods[0] = 0;
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ dims[0] = size;
+ MPI_Cart_create( MPI_COMM_WORLD, 1, dims, periods, 0, &comm );
+
+ /* Now, extract a communicator with no dimensions */
+ remain[0] = 0;
+ MPI_Cart_sub( comm, remain, &newcomm );
+
+ MPI_Comm_rank(comm, &rank);
+
+ if (rank == 0) {
+ /* This should be congruent to MPI_COMM_SELF */
+ MPI_Comm_compare( MPI_COMM_SELF, newcomm, &result );
+ if (result != MPI_CONGRUENT) {
+ errs++;
+ printf( "cart sub to size 0 did not give self\n" );
+ }
+ MPI_Comm_free( &newcomm );
+ }
+ else if (newcomm != MPI_COMM_NULL) {
+ errs++;
+ printf( "cart sub to size 0 did not give null\n" );
+ }
+
+ /* Free the new communicator so that storage leak tests will
+ be happy */
+ MPI_Comm_free( &comm );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2008 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+ Check that the MPI implementation properly handles zero-dimensional
+ Cartesian communicators - the original standard implies that these
+ should be consistent with higher dimensional topologies and thus
+ these should work with any MPI implementation. MPI 2.1 made this
+ requirement explicit.
+*/
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank, ndims;
+ MPI_Comm comm, newcomm;
+
+ MTest_Init( &argc, &argv );
+
+ /* Create a new cartesian communicator in a subset of the processes */
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if (size < 2) {
+ fprintf( stderr, "This test needs at least 2 processes\n" );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ MPI_Cart_create( MPI_COMM_WORLD, 0, NULL, NULL, 0, &comm );
+
+ if (comm != MPI_COMM_NULL) {
+ int csize;
+ MPI_Comm_size( comm, &csize );
+ if (csize != 1) {
+ errs++;
+ fprintf( stderr,
+ "Sizes is wrong in cart communicator. Is %d, should be 1\n",
+ csize );
+ }
+
+ /* This function is not meaningful, but should not fail */
+ MPI_Dims_create(1, 0, NULL);
+
+ ndims = -1;
+ MPI_Cartdim_get(comm, &ndims);
+ if (ndims != 0) {
+ errs++;
+ fprintf(stderr, "MPI_Cartdim_get: ndims is %d, should be 0\n", ndims);
+ }
+
+ /* this function should not fail */
+ MPI_Cart_get(comm, 0, NULL, NULL, NULL);
+
+ MPI_Cart_rank(comm, NULL, &rank);
+ if (rank != 0) {
+ errs++;
+ fprintf(stderr, "MPI_Cart_rank: rank is %d, should be 0\n", rank);
+ }
+
+ /* this function should not fail */
+ MPI_Cart_coords(comm, 0, 0, NULL);
+
+ MPI_Cart_sub(comm, NULL, &newcomm);
+ ndims = -1;
+ MPI_Cartdim_get(newcomm, &ndims);
+ if (ndims != 0) {
+ errs++;
+ fprintf(stderr,
+ "MPI_Cart_sub did not return zero-dimensional communicator\n");
+ }
+
+ MPI_Barrier( comm );
+
+ MPI_Comm_free( &comm );
+ MPI_Comm_free( &newcomm );
+ }
+ else if (rank == 0) {
+ errs++;
+ fprintf( stderr, "Communicator returned is null!" );
+ }
+
+ MTest_Finalize( errs );
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "mpi.h"
+#include "mpitest.h"
+
+#define RING_NUM_NEIGHBORS 2
+
+static int validate_dgraph(MPI_Comm dgraph_comm)
+{
+ int comm_topo;
+ int src_sz, dest_sz;
+ int wgt_flag, ierr;
+ int srcs[RING_NUM_NEIGHBORS], dests[RING_NUM_NEIGHBORS];
+ int *src_wgts, *dest_wgts;
+
+ int world_rank, world_size;
+ int idx, nbr_sep;
+
+ comm_topo = MPI_UNDEFINED;
+ MPI_Topo_test(dgraph_comm, &comm_topo);
+ switch (comm_topo) {
+ case MPI_DIST_GRAPH :
+ break;
+ default:
+ fprintf(stderr, "dgraph_comm is NOT of type MPI_DIST_GRAPH\n");
+ return 0;
+ }
+
+ ierr = MPI_Dist_graph_neighbors_count(dgraph_comm,
+ &src_sz, &dest_sz, &wgt_flag);
+ if (ierr != MPI_SUCCESS) {
+ fprintf(stderr, "MPI_Dist_graph_neighbors_count() fails!\n");
+ return 0;
+ }
+/*
+ else
+ fprintf(stderr, "MPI_Dist_graph_neighbors_count() succeeds!\n");
+*/
+
+ if (wgt_flag) {
+ fprintf(stderr, "dgraph_comm is NOT created with MPI_UNWEIGHTED\n");
+ return 0;
+ }
+/*
+ else
+ fprintf(stderr, "dgraph_comm is created with MPI_UNWEIGHTED\n");
+*/
+ if (src_sz != RING_NUM_NEIGHBORS || dest_sz != RING_NUM_NEIGHBORS) {
+ fprintf(stderr, "source or destination edge array is not of size %d.\n",
+ RING_NUM_NEIGHBORS);
+ fprintf(stderr, "src_sz = %d, dest_sz = %d\n", src_sz, dest_sz);
+ return 0;
+ }
+
+ /*
+ src_wgts and dest_wgts could be anything, e.g. NULL, since
+ MPI_Dist_graph_neighbors_count() returns MPI_UNWEIGHTED.
+      Since this program has a Fortran77 version, and standard Fortran77
+      has no pointers or NULL, use MPI_UNWEIGHTED for the weight arrays.
+ */
+ src_wgts = MPI_UNWEIGHTED;
+ dest_wgts = MPI_UNWEIGHTED;
+ ierr = MPI_Dist_graph_neighbors(dgraph_comm,
+ src_sz, srcs, src_wgts,
+ dest_sz, dests, dest_wgts);
+ if (ierr != MPI_SUCCESS) {
+ fprintf(stderr, "MPI_Dist_graph_neighbors() fails!\n");
+ return 0;
+ }
+/*
+ else
+ fprintf(stderr, "MPI_Dist_graph_neighbors() succeeds!\n");
+*/
+
+ /*
+ Check if the neighbors returned from MPI are really
+ the nearest neighbors within a ring.
+ */
+ MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+ for (idx=0; idx < src_sz; idx++) {
+ nbr_sep = abs(srcs[idx] - world_rank);
+ if ( nbr_sep != 1 && nbr_sep != (world_size-1) ) {
+ fprintf(stderr, "srcs[%d]=%d is NOT a neighbor of my rank %d.\n",
+ idx, srcs[idx], world_rank);
+ return 0;
+ }
+ }
+ for (idx=0; idx < dest_sz; idx++) {
+ nbr_sep = abs(dests[idx] - world_rank);
+ if ( nbr_sep != 1 && nbr_sep != (world_size-1) ) {
+ fprintf(stderr, "dests[%d]=%d is NOT a neighbor of my rank %d.\n",
+ idx, dests[idx], world_rank);
+ return 0;
+ }
+ }
+
+ /*
+ fprintf(stderr, "dgraph_comm is of type MPI_DIST_GRAPH "
+ "of a bidirectional ring.\n");
+ */
+ return 1;
+}
+
+/*
+ Specify a distributed graph of a bidirectional ring of the MPI_COMM_WORLD,
+ i.e. everyone only talks to left and right neighbors.
+*/
+int main(int argc, char *argv[])
+{
+ MPI_Comm dgraph_comm;
+ int world_size, world_rank, ierr;
+ int errs = 0;
+
+ int src_sz, dest_sz;
+ int degs[1];
+ int srcs[RING_NUM_NEIGHBORS], dests[RING_NUM_NEIGHBORS];
+
+ MTest_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+ degs[0] = 2;
+ srcs[0] = world_rank;
+ dests[0] = world_rank-1 < 0 ? world_size-1 : world_rank-1 ;
+ dests[1] = world_rank+1 >= world_size ? 0 : world_rank+1 ;
+ ierr = MPI_Dist_graph_create(MPI_COMM_WORLD, 1, srcs, degs, dests,
+ MPI_UNWEIGHTED, MPI_INFO_NULL, 1,
+ &dgraph_comm);
+ if ( ierr != MPI_SUCCESS ) {
+ fprintf(stderr, "MPI_Dist_graph_create() fails!\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ return 1;
+ }
+ if (!validate_dgraph(dgraph_comm)) {
+ fprintf(stderr, "MPI_Dist_graph_create() does NOT create "
+ "a bidirectional ring graph!\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ return 1;
+ }
+ MPI_Comm_free(&dgraph_comm);
+
+ src_sz = 2;
+ srcs[0] = world_rank-1 < 0 ? world_size-1 : world_rank-1 ;
+ srcs[1] = world_rank+1 >= world_size ? 0 : world_rank+1 ;
+ dest_sz = 2;
+ dests[0] = world_rank-1 < 0 ? world_size-1 : world_rank-1 ;
+ dests[1] = world_rank+1 >= world_size ? 0 : world_rank+1 ;
+ ierr = MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,
+ src_sz, srcs, MPI_UNWEIGHTED,
+ dest_sz, dests, MPI_UNWEIGHTED,
+ MPI_INFO_NULL, 1, &dgraph_comm);
+ if ( ierr != MPI_SUCCESS ) {
+ fprintf(stderr, "MPI_Dist_graph_create_adjacent() fails!\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ return 1;
+ }
+ if (!validate_dgraph(dgraph_comm)) {
+ fprintf(stderr, "MPI_Dist_graph_create_adjacent() does NOT create "
+ "a bidirectional ring graph!\n");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ return 1;
+ }
+ MPI_Comm_free(&dgraph_comm);
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int prodof( int ndims, const int dims[] );
+int increasing( int ndims, const int dims[] );
+
+int prodof( int ndims, const int dims[] )
+{
+ int i, prod=1;
+ for (i=0; i<ndims; i++)
+ prod *= dims[i];
+ return prod;
+}
+
+int increasing( int ndims, const int dims[] )
+{
+ int i, err=0;
+ for (i=1; i<ndims; i++) {
+ if (dims[i] > dims[i-1]) {
+ printf ("%d = dims[%d] > dims[%d] = %d\n", dims[i], i,
+ i-1, dims[i-1] );
+ err = 1;
+ }
+ }
+ return err;
+}
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int dims[4], nnodes, ndims;
+
+ MTest_Init( &argc, &argv );
+
+ /* Test multiple dims create values. For each, make sure that the
+ product of dims is the number of input nodes */
+ nnodes = 2*3*5*7*11;
+ ndims = 2;
+ dims[0] = dims[1] = 0;
+ MPI_Dims_create( nnodes, ndims, dims );
+ if (prodof(ndims,dims) != nnodes) {
+ errs++;
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+ if (increasing( ndims, dims )) {
+ errs++;
+ printf( "dims create returned a decomposition with increasing dimensions (see MPI-1 standard section 6.5)\n" );
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+
+ /* Test multiple dims create values. For each, make sure that the
+ product of dims is the number of input nodes */
+ nnodes = 2*7;
+ ndims = 2;
+ dims[0] = dims[1] = 0;
+ MPI_Dims_create( nnodes, ndims, dims );
+ if (prodof(ndims,dims) != nnodes) {
+ errs++;
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+ if (increasing( ndims, dims )) {
+ errs++;
+ printf( "dims create returned a decomposition with increasing dimensions (see MPI-1 standard section 6.5)\n" );
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+
+ nnodes = 2*2*3*3*5*7*11;
+ ndims = 2;
+ dims[0] = dims[1] = 0;
+ MPI_Dims_create( nnodes, ndims, dims );
+ if (prodof(ndims,dims) != nnodes) {
+ errs++;
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+ if (increasing( ndims, dims )) {
+ errs++;
+ printf( "dims create returned a decomposition with increasing dimensions (see MPI-1 standard section 6.5)\n" );
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+
+ nnodes = 11;
+ ndims = 2;
+ dims[0] = dims[1] = 0;
+ MPI_Dims_create( nnodes, ndims, dims );
+ if (prodof(ndims,dims) != nnodes) {
+ errs++;
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+ if (increasing( ndims, dims )) {
+ errs++;
+ printf( "dims create returned a decomposition with increasing dimensions (see MPI-1 standard section 6.5)\n" );
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+
+ nnodes = 5*7*11;
+ ndims = 4;
+ dims[0] = dims[1] = dims[2] = dims[3] = 0;
+ MPI_Dims_create( nnodes, ndims, dims );
+ if (prodof(ndims,dims) != nnodes) {
+ errs++;
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+ if (increasing( ndims, dims )) {
+ errs++;
+ printf( "dims create returned a decomposition with increasing dimensions (see MPI-1 standard section 6.5)\n" );
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+
+ nnodes = 64;
+ ndims = 4;
+ dims[0] = dims[1] = dims[2] = dims[3] = 0;
+ MPI_Dims_create( nnodes, ndims, dims );
+ if (prodof(ndims,dims) != nnodes) {
+ errs++;
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+ if (increasing( ndims, dims )) {
+ errs++;
+ printf( "dims create returned a decomposition with increasing dimensions (see MPI-1 standard section 6.5)\n" );
+ printf( "dims create returned the wrong decomposition for %d in %d dims\n",
+ nnodes, ndims );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+int prodof( int, const int[] );
+/*
+ * Test edge cases of Dims_create
+ */
+/* Return the product of the first ndims entries of dims[]; used to
+   verify that a decomposition covers exactly nnodes processes. */
+int prodof( int ndims, const int dims[] )
+{
+ int i, prod=1;
+ for (i=0; i<ndims; i++)
+ prod *= dims[i];
+ return prod;
+}
+
+/* Exercise MPI_Dims_create in 2 and 4 dimensions, both with a free
+   dimension (entry 0, to be filled in) and with all dimensions already
+   specified (Dims_create must leave them alone). */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int dims[4], nnodes;
+
+ MTest_Init( &argc, &argv );
+
+ /* 2 dimensional tests */
+ for (nnodes=1; nnodes <= 32; nnodes = nnodes * 2) {
+ /* dims[0] == 0 is free; dims[1] fixed at nnodes forces a 1 x nnodes grid */
+ dims[0] = 0;
+ dims[1] = nnodes;
+
+ MPI_Dims_create( nnodes, 2, dims );
+ if (prodof(2, dims) != nnodes) {
+ errs++;
+ printf( "Dims_create returned the wrong decomposition. " );
+ printf( "Is [%d x %d], should be 1 x %d\n", dims[0], dims[1],
+ nnodes );
+ }
+
+ /* Try calling Dims_create with nothing to do (all dimensions
+ specified) */
+ dims[0] = 1;
+ dims[1] = nnodes;
+ MPI_Dims_create( nnodes, 2, dims );
+ if (prodof(2, dims) != nnodes) {
+ errs++;
+ printf( "Dims_create returned the wrong decomposition (all given). " );
+ printf( "Is [%d x %d], should be 1 x %d\n", dims[0], dims[1],
+ nnodes );
+ }
+
+ }
+
+ /* 4 dimensional tests */
+ for (nnodes=4; nnodes <= 32; nnodes = nnodes * 2) {
+ /* two free dimensions (entries 0 and 2); the fixed entries pin the rest */
+ dims[0] = 0;
+ dims[1] = nnodes/2;
+ dims[2] = 0;
+ dims[3] = 2;
+
+ MPI_Dims_create( nnodes, 4, dims );
+ if (prodof(4, dims) != nnodes) {
+ errs++;
+ printf( "Dims_create returned the wrong decomposition. " );
+ printf( "Is [%d x %d x %d x %d], should be 1 x %d x 1 x 2\n",
+ dims[0], dims[1], dims[2], dims[3],
+ nnodes/2 );
+ }
+
+ /* Try calling Dims_create with nothing to do (all dimensions
+ specified) */
+ dims[0] = 1;
+ dims[1] = nnodes/2;
+ dims[2] = 1;
+ dims[3] = 2;
+ MPI_Dims_create( nnodes, 4, dims );
+ if (prodof(4, dims) != nnodes) {
+ errs++;
+ printf( "Dims_create returned the wrong decomposition (all given). " );
+ printf( "Is [%d x %d x %d x %d], should be 1 x %d x 1 x 2\n",
+ dims[0], dims[1], dims[2], dims[3],
+ nnodes/2 );
+ }
+
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include "mpitest.h"
+
+#define NUM_GRAPHS 10
+#define MAX_WEIGHT 100
+
+/* convenience globals */
+int size, rank;
+
+/* We need MPI 2.2 to be able to compile the following routines. */
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+
+/* Maybe use a bit vector instead? */
+int **layout;
+
+#define MAX_LAYOUT_NAME_LEN 256
+char graph_layout_name[MAX_LAYOUT_NAME_LEN] = {'\0'};
+
+/* Fill the global layout[][] adjacency/weight matrix for test case
+   graph_num (built on rank 0, then broadcast to everyone) and set
+   graph_layout_name to a human-readable description. */
+static void create_graph_layout(int graph_num)
+{
+ int i, j;
+
+ if (rank == 0) {
+ switch (graph_num) {
+ case 0:
+ strncpy(graph_layout_name, "deterministic complete graph", MAX_LAYOUT_NAME_LEN);
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ layout[i][j] = (i + 2) * (j + 1);
+ break;
+ case 1:
+ strncpy(graph_layout_name, "every other edge deleted", MAX_LAYOUT_NAME_LEN);
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ layout[i][j] = (j % 2 ? (i + 2) * (j + 1) : 0);
+ break;
+ case 2:
+ strncpy(graph_layout_name, "only self-edges", MAX_LAYOUT_NAME_LEN);
+ /* NOTE(review): this runs only on rank 0, so the rank==rank test
+    only ever sets layout[0][0]; other self-edges stay 0 — looks
+    intentional upstream but worth confirming. */
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ if (i == rank && j == rank)
+ layout[i][j] = 10 * (i + 1);
+ else
+ layout[i][j] = 0;
+ }
+ }
+ break;
+ case 3:
+ strncpy(graph_layout_name, "no edges", MAX_LAYOUT_NAME_LEN);
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ layout[i][j] = 0;
+ break;
+ default:
+ strncpy(graph_layout_name, "a random incomplete graph", MAX_LAYOUT_NAME_LEN);
+ /* seed with graph_num so each graph is random but reproducible */
+ srand(graph_num);
+
+ /* Create a connectivity graph; layout[i,j]==w represents an outward
+ * connectivity from i to j with weight w, w==0 is no edge. */
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < size; j++) {
+ /* disable about a third of the edges */
+ if (((rand() * 1.0) / RAND_MAX) < 0.33)
+ layout[i][j] = 0;
+ else
+ layout[i][j] = rand() % MAX_WEIGHT;
+ }
+ }
+ break;
+ }
+ }
+
+ /* because of the randomization we must determine the graph on rank 0 and
+ * send the layout to all other processes */
+ MPI_Bcast(graph_layout_name, MAX_LAYOUT_NAME_LEN, MPI_CHAR, 0, MPI_COMM_WORLD);
+ for (i = 0; i < size; ++i) {
+ MPI_Bcast(layout[i], size, MPI_INT, 0, MPI_COMM_WORLD);
+ }
+}
+
+/* Check that a distributed-graph communicator reports the topology
+   described by the global layout[][] matrix: topo type, in/out degrees,
+   neighbor lists and edge weights.  The whole check is run twice, the
+   second time on a dup of comm (a dup must preserve the topology).
+   Returns the number of errors found. */
+static int verify_comm(MPI_Comm comm)
+{
+ int local_errs = 0;
+ int i, j;
+ int indegree, outdegree, weighted;
+ int *sources, *sweights, *destinations, *dweights;
+ int use_dup;
+ int topo_type = MPI_UNDEFINED;
+ MPI_Comm dupcomm = MPI_COMM_NULL;
+
+ sources = (int *) malloc(size * sizeof(int));
+ sweights = (int *) malloc(size * sizeof(int));
+ destinations = (int *) malloc(size * sizeof(int));
+ dweights = (int *) malloc(size * sizeof(int));
+
+ for (use_dup = 0; use_dup <= 1; ++use_dup) {
+ if (!use_dup) {
+ /* degrees are queried only on the first pass; the dup pass reuses
+    them, since duplication must not change the topology */
+ MPI_Dist_graph_neighbors_count(comm, &indegree, &outdegree, &weighted);
+ }
+ else {
+ MPI_Comm_dup(comm, &dupcomm);
+ comm = dupcomm; /* caller retains original comm value */
+ }
+
+ MPI_Topo_test(comm, &topo_type);
+ if (topo_type != MPI_DIST_GRAPH) {
+ fprintf(stderr, "topo_type != MPI_DIST_GRAPH\n");
+ ++local_errs;
+ }
+
+ /* column rank of layout = edges coming into this process */
+ j = 0;
+ for (i = 0; i < size; i++)
+ if (layout[i][rank])
+ j++;
+ if (j != indegree) {
+ fprintf(stderr, "indegree does not match, expected=%d got=%d, layout='%s'\n", indegree, j, graph_layout_name);
+ ++local_errs;
+ }
+
+ /* row rank of layout = edges going out of this process */
+ j = 0;
+ for (i = 0; i < size; i++)
+ if (layout[rank][i])
+ j++;
+ if (j != outdegree) {
+ fprintf(stderr, "outdegree does not match, expected=%d got=%d, layout='%s'\n", outdegree, j, graph_layout_name);
+ ++local_errs;
+ }
+
+ if ((indegree || outdegree) && (weighted == 0)) {
+ fprintf(stderr, "MPI_Dist_graph_neighbors_count thinks the graph is not weighted\n");
+ ++local_errs;
+ }
+
+
+ MPI_Dist_graph_neighbors(comm, indegree, sources, sweights, outdegree, destinations, dweights);
+
+ /* For each incoming and outgoing edge in the matrix, search if
+ * the query function listed it in the sources. */
+ for (i = 0; i < size; i++) {
+ if (layout[i][rank]) {
+ for (j = 0; j < indegree; j++) {
+ assert(sources[j] >= 0);
+ assert(sources[j] < size);
+ if (sources[j] == i)
+ break;
+ }
+ if (j == indegree) {
+ fprintf(stderr, "no edge from %d to %d specified\n", i, rank);
+ ++local_errs;
+ }
+ else {
+ if (sweights[j] != layout[i][rank]) {
+ fprintf(stderr, "incorrect weight for edge (%d,%d): %d instead of %d\n",
+ i, rank, sweights[j], layout[i][rank]);
+ ++local_errs;
+ }
+ }
+ }
+ if (layout[rank][i]) {
+ for (j = 0; j < outdegree; j++) {
+ assert(destinations[j] >= 0);
+ assert(destinations[j] < size);
+ if (destinations[j] == i)
+ break;
+ }
+ if (j == outdegree) {
+ fprintf(stderr, "no edge from %d to %d specified\n", rank, i);
+ ++local_errs;
+ }
+ else {
+ if (dweights[j] != layout[rank][i]) {
+ fprintf(stderr, "incorrect weight for edge (%d,%d): %d instead of %d\n",
+ rank, i, dweights[j], layout[rank][i]);
+ ++local_errs;
+ }
+ }
+ }
+ }
+
+ /* For each incoming and outgoing edge in the sources, we should
+ * have an entry in the matrix */
+ for (i = 0; i < indegree; i++) {
+ if (layout[sources[i]][rank] != sweights[i]) {
+ fprintf(stderr, "edge (%d,%d) has a weight %d instead of %d\n", i, rank,
+ sweights[i], layout[sources[i]][rank]);
+ ++local_errs;
+ }
+ }
+ for (i = 0; i < outdegree; i++) {
+ if (layout[rank][destinations[i]] != dweights[i]) {
+ fprintf(stderr, "edge (%d,%d) has a weight %d instead of %d\n", rank, i,
+ dweights[i], layout[rank][destinations[i]]);
+ ++local_errs;
+ }
+ }
+
+ }
+
+ if (dupcomm != MPI_COMM_NULL)
+ MPI_Comm_free(&dupcomm);
+
+ return local_errs;
+}
+
+#endif /* At least MPI 2.2 */
+
+/* Driver: build NUM_GRAPHS layouts and create distributed-graph
+   communicators from each via MPI_Dist_graph_create_adjacent and the
+   various MPI_Dist_graph_create calling styles (outgoing-only,
+   incoming-only, all-on-rank-0), verifying each with verify_comm();
+   then run layout-independent edge cases (empty graphs, NULL arrays,
+   MPI_UNWEIGHTED, MPI_WEIGHTS_EMPTY). */
+int main(int argc, char *argv[])
+{
+ int errs = 0;
+ int i, j, k, p;
+ int indegree, outdegree, reorder;
+ int check_indegree, check_outdegree, check_weighted;
+ int *sources, *sweights, *destinations, *dweights, *degrees;
+ MPI_Comm comm;
+
+ MTest_Init(&argc, &argv);
+
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
+ layout = (int **) malloc(size * sizeof(int *));
+ assert(layout);
+ for (i = 0; i < size; i++) {
+ layout[i] = (int *) malloc(size * sizeof(int));
+ assert(layout[i]);
+ }
+ /* alloc size*size ints to handle the all-on-one-process case */
+ sources = (int *) malloc(size * size * sizeof(int));
+ sweights = (int *) malloc(size * size * sizeof(int));
+ destinations = (int *) malloc(size * size * sizeof(int));
+ dweights = (int *) malloc(size * size * sizeof(int));
+ degrees = (int *) malloc(size * size * sizeof(int));
+
+ for (i = 0; i < NUM_GRAPHS; i++) {
+ create_graph_layout(i);
+ if (rank == 0) {
+ MTestPrintfMsg( 1, "using graph layout '%s'\n", graph_layout_name );
+ }
+
+ /* MPI_Dist_graph_create_adjacent */
+ if (rank == 0) {
+ MTestPrintfMsg( 1, "testing MPI_Dist_graph_create_adjacent\n" );
+ }
+ indegree = 0;
+ k = 0;
+ for (j = 0; j < size; j++) {
+ if (layout[j][rank]) {
+ indegree++;
+ sources[k] = j;
+ sweights[k++] = layout[j][rank];
+ }
+ }
+
+ outdegree = 0;
+ k = 0;
+ for (j = 0; j < size; j++) {
+ if (layout[rank][j]) {
+ outdegree++;
+ destinations[k] = j;
+ dweights[k++] = layout[rank][j];
+ }
+ }
+
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, indegree, sources, sweights,
+ outdegree, destinations, dweights, MPI_INFO_NULL,
+ reorder, &comm);
+ MPI_Barrier(comm);
+ errs += verify_comm(comm);
+ MPI_Comm_free(&comm);
+ }
+
+ /* a weak check that passing MPI_UNWEIGHTED doesn't cause
+ * create_adjacent to explode */
+ /* NOTE(review): reorder is left at 2 by the loop above; any nonzero
+    value is treated as logical true here */
+ MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, indegree, sources, MPI_UNWEIGHTED,
+ outdegree, destinations, MPI_UNWEIGHTED, MPI_INFO_NULL,
+ reorder, &comm);
+ MPI_Barrier(comm);
+ /* intentionally no verify here, weights won't match */
+ MPI_Comm_free(&comm);
+
+
+ /* MPI_Dist_graph_create() where each process specifies its
+ * outgoing edges */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create w/ outgoing only\n" );
+ }
+ sources[0] = rank;
+ k = 0;
+ for (j = 0; j < size; j++) {
+ if (layout[rank][j]) {
+ destinations[k] = j;
+ dweights[k++] = layout[rank][j];
+ }
+ }
+ degrees[0] = k;
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, 1, sources, degrees, destinations, dweights,
+ MPI_INFO_NULL, reorder, &comm);
+ MPI_Barrier(comm);
+ errs += verify_comm(comm);
+ MPI_Comm_free(&comm);
+ }
+
+
+ /* MPI_Dist_graph_create() where each process specifies its
+ * incoming edges */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create w/ incoming only\n" );
+ }
+ k = 0;
+ for (j = 0; j < size; j++) {
+ if (layout[j][rank]) {
+ sources[k] = j;
+ sweights[k] = layout[j][rank];
+ degrees[k] = 1;
+ destinations[k++] = rank;
+ }
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, k, sources, degrees, destinations, sweights,
+ MPI_INFO_NULL, reorder, &comm);
+ MPI_Barrier(comm);
+ errs += verify_comm(comm);
+ MPI_Comm_free(&comm);
+ }
+
+
+ /* MPI_Dist_graph_create() where rank 0 specifies the entire
+ * graph */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create w/ rank 0 specifies only\n" );
+ }
+ p = 0;
+ for (j = 0; j < size; j++) {
+ for (k = 0; k < size; k++) {
+ if (layout[j][k]) {
+ sources[p] = j;
+ sweights[p] = layout[j][k];
+ degrees[p] = 1;
+ destinations[p++] = k;
+ }
+ }
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, (rank == 0) ? p : 0, sources, degrees,
+ destinations, sweights, MPI_INFO_NULL, reorder, &comm);
+ MPI_Barrier(comm);
+ errs += verify_comm(comm);
+ MPI_Comm_free(&comm);
+ }
+
+ /* MPI_Dist_graph_create() where rank 0 specifies the entire
+ * graph and all other ranks pass NULL. Can catch implementation
+ * problems when MPI_UNWEIGHTED==NULL. */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create w/ rank 0 specifies only -- NULLs\n");
+ }
+ p = 0;
+ for (j = 0; j < size; j++) {
+ for (k = 0; k < size; k++) {
+ if (layout[j][k]) {
+ sources[p] = j;
+ sweights[p] = layout[j][k];
+ degrees[p] = 1;
+ destinations[p++] = k;
+ }
+ }
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ if (rank == 0) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, p, sources, degrees,
+ destinations, sweights, MPI_INFO_NULL, reorder, &comm);
+ }
+ else {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, 0, NULL, NULL,
+ NULL, NULL, MPI_INFO_NULL, reorder, &comm);
+ }
+ MPI_Barrier(comm);
+ errs += verify_comm(comm);
+ MPI_Comm_free(&comm);
+ }
+
+ }
+
+ /* now tests that don't depend on the layout[][] array */
+
+ /* The MPI-2.2 standard recommends implementations set
+ * MPI_UNWEIGHTED==NULL, but this leads to an ambiguity. The draft
+ * MPI-3.0 standard specifically recommends _not_ setting it equal
+ * to NULL. */
+ if (MPI_UNWEIGHTED == NULL) {
+ fprintf(stderr, "MPI_UNWEIGHTED should not be NULL\n");
+ ++errs;
+ }
+
+ /* MPI_Dist_graph_create() with no graph */
+ if (rank == 0) {
+ MTestPrintfMsg( 1, "testing MPI_Dist_graph_create w/ no graph\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, 0, sources, degrees,
+ destinations, sweights, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ if (!check_weighted) {
+ fprintf(stderr, "expected weighted == TRUE for the \"no graph\" case\n");
+ ++errs;
+ }
+ MPI_Comm_free(&comm);
+ }
+
+ /* MPI_Dist_graph_create() with no graph -- passing MPI_WEIGHTS_EMPTY
+ instead */
+ /* NOTE that MPI_WEIGHTS_EMPTY was added in MPI-3 and does not
+ appear before then. This part of the test thus requires a check
+ on the MPI major version */
+#if MPI_VERSION >= 3
+ if (rank == 0) {
+ MTestPrintfMsg( 1, "testing MPI_Dist_graph_create w/ no graph\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, 0, sources, degrees,
+ destinations, MPI_WEIGHTS_EMPTY, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ if (!check_weighted) {
+ fprintf(stderr, "expected weighted == TRUE for the \"no graph -- MPI_WEIGHTS_EMPTY\" case\n");
+ ++errs;
+ }
+ MPI_Comm_free(&comm);
+ }
+#endif
+
+ /* MPI_Dist_graph_create() with no graph -- passing NULLs instead */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create w/ no graph -- NULLs\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, 0, NULL, NULL,
+ NULL, NULL, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ /* ambiguous if they are equal, only check when they are distinct values. */
+ if (MPI_UNWEIGHTED != NULL) {
+ if (!check_weighted) {
+ fprintf(stderr, "expected weighted == TRUE for the \"no graph -- NULLs\" case\n");
+ ++errs;
+ }
+ }
+ MPI_Comm_free(&comm);
+ }
+
+ /* MPI_Dist_graph_create() with no graph -- passing NULLs+MPI_UNWEIGHTED instead */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create w/ no graph -- NULLs+MPI_UNWEIGHTED\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create(MPI_COMM_WORLD, 0, NULL, NULL,
+ NULL, MPI_UNWEIGHTED, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ /* ambiguous if they are equal, only check when they are distinct values. */
+ if (MPI_UNWEIGHTED != NULL) {
+ if (check_weighted) {
+ fprintf(stderr, "expected weighted == FALSE for the \"no graph -- NULLs+MPI_UNWEIGHTED\" case\n");
+ ++errs;
+ }
+ }
+ MPI_Comm_free(&comm);
+ }
+
+ /* MPI_Dist_graph_create_adjacent() with no graph */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create_adjacent w/ no graph\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, 0, sources, sweights,
+ 0, destinations, dweights, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ if (!check_weighted) {
+ fprintf(stderr, "expected weighted == TRUE for the \"no graph\" case\n");
+ ++errs;
+ }
+ MPI_Comm_free(&comm);
+ }
+
+ /* MPI_Dist_graph_create_adjacent() with no graph -- passing MPI_WEIGHTS_EMPTY instead */
+ /* NOTE that MPI_WEIGHTS_EMPTY was added in MPI-3 and does not
+ appear before then. This part of the test thus requires a check
+ on the MPI major version */
+#if MPI_VERSION >= 3
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create_adjacent w/ no graph -- MPI_WEIGHTS_EMPTY\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, 0, sources, MPI_WEIGHTS_EMPTY,
+ 0, destinations, MPI_WEIGHTS_EMPTY, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ if (!check_weighted) {
+ fprintf(stderr, "expected weighted == TRUE for the \"no graph -- MPI_WEIGHTS_EMPTY\" case\n");
+ ++errs;
+ }
+ MPI_Comm_free(&comm);
+ }
+#endif
+
+ /* MPI_Dist_graph_create_adjacent() with no graph -- passing NULLs instead */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+ "testing MPI_Dist_graph_create_adjacent w/ no graph -- NULLs\n" );
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, 0, NULL, NULL,
+ 0, NULL, NULL, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ /* ambiguous if they are equal, only check when they are distinct values. */
+ if (MPI_UNWEIGHTED != NULL) {
+ if (!check_weighted) {
+ fprintf(stderr, "expected weighted == TRUE for the \"no graph -- NULLs\" case\n");
+ ++errs;
+ }
+ }
+ MPI_Comm_free(&comm);
+ }
+
+ /* MPI_Dist_graph_create_adjacent() with no graph -- passing NULLs+MPI_UNWEIGHTED instead */
+ if (rank == 0) {
+ MTestPrintfMsg( 1,
+"testing MPI_Dist_graph_create_adjacent w/ no graph -- NULLs+MPI_UNWEIGHTED\n");
+ }
+ for (reorder = 0; reorder <= 1; reorder++) {
+ MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD, 0, NULL, MPI_UNWEIGHTED,
+ 0, NULL, MPI_UNWEIGHTED, MPI_INFO_NULL, reorder, &comm);
+ MPI_Dist_graph_neighbors_count(comm, &check_indegree, &check_outdegree, &check_weighted);
+ /* ambiguous if they are equal, only check when they are distinct values. */
+ if (MPI_UNWEIGHTED != NULL) {
+ if (check_weighted) {
+ fprintf(stderr, "expected weighted == FALSE for the \"no graph -- NULLs+MPI_UNWEIGHTED\" case\n");
+ ++errs;
+ }
+ }
+ MPI_Comm_free(&comm);
+ }
+
+
+ for (i = 0; i < size; i++)
+ free(layout[i]);
+ free(layout);
+#endif
+
+ MTest_Finalize(errs);
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Create a communicator with a graph that contains no processes";
+*/
+
+/* MPI_Graph_create with nnodes == 0 must succeed and return
+   MPI_COMM_NULL (MPI 2.1, page 246, lines 29-30). */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int *index = 0, *edges = 0;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ /* MPI 2.1, page 246, lines 29-30 make it clear that this is a valid
+ (not erroneous) call that must return MPI_COMM_NULL */
+ MPI_Graph_create( MPI_COMM_WORLD, 0, index, edges, 0, &comm );
+ if (comm != MPI_COMM_NULL) {
+ errs++;
+ fprintf( stderr, "Expected MPI_COMM_NULL from empty graph create\n" );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/*
+static char MTEST_Descrip[] = "Create a communicator with a graph that contains null edges and one that contains duplicate edges";
+*/
+
+/* Create graph communicators containing (a) one self-edge per node and
+   (b) duplicate edges, both legal as of MPI 2.1; check that the
+   resulting communicator keeps the full size in each case. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int *index = 0, *edges = 0;
+ int rank, size, i, j, crank, csize;
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+
+ index = (int *)malloc( size*sizeof(int) );
+ edges = (int *)malloc( size*sizeof(int) );
+ /* index[] is cumulative: node i owns edges[index[i-1]..index[i]-1].
+    Each node has exactly one edge (to itself), so index[i] = i + 1.
+    (A constant index[i] = 1 would declare only one edge in total.) */
+ for (i=0; i<size; i++) {
+ index[i] = i + 1;
+ edges[i] = i;
+ }
+ /* As of MPI 2.1, self edges are permitted */
+ MPI_Graph_create( MPI_COMM_WORLD, size, index, edges, 0, &comm );
+ MPI_Comm_rank( comm, &crank );
+ MPI_Comm_size( comm, &csize );
+ if (csize != size) {
+ errs ++;
+ fprintf( stderr, "Graph create with self links has size %d should be %d", csize, size );
+ }
+ free( index );
+ free( edges );
+ MPI_Comm_free( &comm );
+
+ /* Create a graph with duplicate links */
+ index = (int *)malloc( size * sizeof(int) );
+ edges = (int *)malloc( size * 2 * sizeof(int) );
+ j = 0;
+ /* two identical edges per node; cumulative index steps by 2 */
+ for (i=0; i<size; i++) {
+ index[i] = j + 2;
+ edges[j++] = (i + 1) % size;
+ edges[j++] = (i + 1) % size;
+ }
+ /* As of MPI 2.1, duplicate edges are permitted */
+ MPI_Graph_create( MPI_COMM_WORLD, size, index, edges, 0, &comm );
+ MPI_Comm_rank( comm, &crank );
+ MPI_Comm_size( comm, &csize );
+ if (csize != size) {
+ errs ++;
+ fprintf( stderr, "Graph create with duplicate links has size %d should be %d", csize, size );
+ }
+ free( index );
+ free( edges );
+ MPI_Comm_free( &comm );
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/* MPI_Graph_map on a singleton graph (only the root): every rank other
+   than 0 has no node in the graph and must get MPI_UNDEFINED back. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int newrank, merr, rank;
+ int index[2], edges[2];
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ /* Graph map where there are no nodes for this process */
+ MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
+ /* Here is a singleton graph, containing only the root process */
+ index[0] = 0;
+ edges[0] = 0;
+ merr = MPI_Graph_map( MPI_COMM_WORLD, 1, index, edges, &newrank );
+ if (merr) {
+ errs++;
+ printf( "Graph map returned an error\n" );
+ MTestPrintError( merr );
+ }
+ if (rank != 0 && newrank != MPI_UNDEFINED) {
+ errs++;
+ printf( "Graph map with no local nodes did not return MPI_UNDEFINED\n" );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ * (C) 2012 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <mpi.h>
+#include "mpitest.h"
+
+#if !defined(USE_STRICT_MPI) && defined(MPICH)
+#define TEST_NEIGHB_COLL 1
+#endif
+
+/* assert-like macro that bumps the err count and emits a message */
+#define check(x_) \
+ do { \
+ if (!(x_)) { \
+ ++errs; \
+ if (errs < 10) { \
+ fprintf(stderr, "check failed: (%s), line %d\n", #x_, __LINE__); \
+ } \
+ } \
+ } while (0)
+
+/* Smoke-test the five MPI-3 blocking neighborhood collectives on a
+   1-D non-periodic Cartesian chain (neighbors are rank-1 and rank+1);
+   0xdeadbeef slots must stay untouched at the ends of the chain where
+   a neighbor does not exist.  Errors are summed to rank 0. */
+int main(int argc, char *argv[])
+{
+ int errs = 0;
+ int wrank, wsize;
+ int periods[1] = { 0 };
+ MPI_Comm cart, dgraph, graph;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
+ MPI_Comm_size(MPI_COMM_WORLD, &wsize);
+
+#if defined(TEST_NEIGHB_COLL)
+ /* a basic test for the 10 (5 patterns x {blocking,nonblocking}) MPI-3
+ * neighborhood collective routines */
+
+ /* (wrap)--> 0 <--> 1 <--> ... <--> p-1 <--(wrap) */
+ MPI_Cart_create(MPI_COMM_WORLD, 1, &wsize, periods, /*reorder=*/0, &cart);
+
+ /* allgather */
+ {
+ int sendbuf[1] = { wrank };
+ int recvbuf[2] = { 0xdeadbeef, 0xdeadbeef };
+
+ /* should see one send to each neighbor (rank-1 and rank+1) and one receive
+ * each from same */
+ MPI_Neighbor_allgather(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, cart);
+
+ if (wrank == 0)
+ check(recvbuf[0] == 0xdeadbeef);
+ else
+ check(recvbuf[0] == wrank - 1);
+
+ if (wrank == wsize - 1)
+ check(recvbuf[1] == 0xdeadbeef);
+ else
+ check(recvbuf[1] == wrank + 1);
+ }
+
+ /* allgatherv */
+ {
+ int sendbuf[1] = { wrank };
+ int recvbuf[2] = { 0xdeadbeef, 0xdeadbeef };
+ int recvcounts[2] = { 1, 1 };
+ int displs[2] = { 1, 0};
+
+ /* should see one send to each neighbor (rank-1 and rank+1) and one receive
+ * each from same, but put them in opposite slots in the buffer */
+ MPI_Neighbor_allgatherv(sendbuf, 1, MPI_INT, recvbuf, recvcounts, displs, MPI_INT, cart);
+
+ if (wrank == 0)
+ check(recvbuf[1] == 0xdeadbeef);
+ else
+ check(recvbuf[1] == wrank - 1);
+
+ if (wrank == wsize - 1)
+ check(recvbuf[0] == 0xdeadbeef);
+ else
+ check(recvbuf[0] == wrank + 1);
+ }
+
+ /* alltoall */
+ {
+ /* negative value goes to the left neighbor, positive to the right */
+ int sendbuf[2] = { -(wrank+1), wrank+1 };
+ int recvbuf[2] = { 0xdeadbeef, 0xdeadbeef };
+
+ /* should see one send to each neighbor (rank-1 and rank+1) and one
+ * receive each from same */
+ MPI_Neighbor_alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, cart);
+
+ if (wrank == 0)
+ check(recvbuf[0] == 0xdeadbeef);
+ else
+ check(recvbuf[0] == wrank);
+
+ if (wrank == wsize - 1)
+ check(recvbuf[1] == 0xdeadbeef);
+ else
+ check(recvbuf[1] == -(wrank + 2));
+ }
+
+ /* alltoallv */
+ {
+ int sendbuf[2] = { -(wrank+1), wrank+1 };
+ int recvbuf[2] = { 0xdeadbeef, 0xdeadbeef };
+ int sendcounts[2] = { 1, 1 };
+ int recvcounts[2] = { 1, 1 };
+ int sdispls[2] = { 0, 1 };
+ int rdispls[2] = { 1, 0 };
+
+ /* should see one send to each neighbor (rank-1 and rank+1) and one receive
+ * each from same, but put them in opposite slots in the buffer */
+ MPI_Neighbor_alltoallv(sendbuf, sendcounts, sdispls, MPI_INT,
+ recvbuf, recvcounts, rdispls, MPI_INT,
+ cart);
+
+ if (wrank == 0)
+ check(recvbuf[1] == 0xdeadbeef);
+ else
+ check(recvbuf[1] == wrank);
+
+ if (wrank == wsize - 1)
+ check(recvbuf[0] == 0xdeadbeef);
+ else
+ check(recvbuf[0] == -(wrank + 2));
+ }
+
+ /* alltoallw */
+ {
+ int sendbuf[2] = { -(wrank+1), wrank+1 };
+ int recvbuf[2] = { 0xdeadbeef, 0xdeadbeef };
+ int sendcounts[2] = { 1, 1 };
+ int recvcounts[2] = { 1, 1 };
+ /* alltoallw displacements are in bytes, hence MPI_Aint/sizeof(int) */
+ MPI_Aint sdispls[2] = { 0, sizeof(int) };
+ MPI_Aint rdispls[2] = { sizeof(int), 0 };
+ MPI_Datatype sendtypes[2] = { MPI_INT, MPI_INT };
+ MPI_Datatype recvtypes[2] = { MPI_INT, MPI_INT };
+
+ /* should see one send to each neighbor (rank-1 and rank+1) and one receive
+ * each from same, but put them in opposite slots in the buffer */
+ MPI_Neighbor_alltoallw(sendbuf, sendcounts, sdispls, sendtypes,
+ recvbuf, recvcounts, rdispls, recvtypes,
+ cart);
+
+ if (wrank == 0)
+ check(recvbuf[1] == 0xdeadbeef);
+ else
+ check(recvbuf[1] == wrank);
+
+ if (wrank == wsize - 1)
+ check(recvbuf[0] == 0xdeadbeef);
+ else
+ check(recvbuf[0] == -(wrank + 2));
+ }
+
+
+ MPI_Comm_free(&cart);
+#endif /* defined(TEST_NEIGHB_COLL) */
+
+ MPI_Reduce((wrank == 0 ? MPI_IN_PLACE : &errs), &errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+ if (wrank == 0) {
+ if (errs) {
+ printf("found %d errors\n", errs);
+ }
+ else {
+ printf(" No errors\n");
+ }
+ }
+ MPI_Finalize();
+
+ return 0;
+}
+
--- /dev/null
+#need Cart implem
+#cartmap1 4
+#cartzero 4
+#cartshift1 4
+#cartsuball 4
+#cartcreates 4
+#need MPI_Dims_create
+#dims1 4
+#dims2 1
+#need MPI_Error_class, MPI_Comm_remote_size, MPI_Graph_map
+#graphmap1 4
+#need MPI_Topo_test, MPI_Cart_create
+#topotest 4
+#need MPI_Cart_create, MPI_Cart_get, MPI_Comm_remote_size, MPI_Dims_create ...
+#topodup 4
+#need MPI_Graph*
+#graphcr 4
+#graphcr2 4
+#distgraph1 4 mpiversion=2.2
+#dgraph_unwgt 4 mpiversion=2.2
+#neighb_coll 4 mpiversion=3.0
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "mpitest.h"
+
+/* Check that MPI_Comm_dup preserves topology information: first for a
+   2-D Cartesian communicator (dims and periods), then — when at least
+   3 processes are available — for a ring graph (index and edges). */
+int main( int argc, char *argv[] )
+{
+ int errs = 0, i, k;
+ int dims[2], periods[2], wsize;
+ int outdims[2], outperiods[2], outcoords[2];
+ int topo_type;
+ int *index, *edges, *outindex, *outedges;
+ MPI_Comm comm1, comm2;
+
+ MTest_Init( &argc, &argv );
+
+ MPI_Comm_size( MPI_COMM_WORLD, &wsize );
+
+ /* Create a cartesian topology, get its characteristics, then
+ dup it and check that the new communicator has the same properties */
+ dims[0] = dims[1] = 0;
+ MPI_Dims_create( wsize, 2, dims );
+ periods[0] = periods[1] = 0;
+ MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, &comm1 );
+
+ MPI_Comm_dup( comm1, &comm2 );
+ MPI_Topo_test( comm2, &topo_type );
+ if (topo_type != MPI_CART) {
+ errs++;
+ printf( "Topo type of duped cart was not cart\n" );
+ }
+ else {
+ MPI_Cart_get( comm2, 2, outdims, outperiods, outcoords );
+ for (i=0; i<2; i++) {
+ if (outdims[i] != dims[i]) {
+ errs++;
+ printf( "%d = outdims[%d] != dims[%d] = %d\n", outdims[i],
+ i, i, dims[i] );
+ }
+ if (outperiods[i] != periods[i]) {
+ errs++;
+ printf( "%d = outperiods[%d] != periods[%d] = %d\n",
+ outperiods[i], i, i, periods[i] );
+ }
+ }
+ }
+ MPI_Comm_free( &comm2 );
+ MPI_Comm_free( &comm1 );
+
+ /* Now do the same with a graph topology */
+ if (wsize >= 3) {
+ index = (int*)malloc(wsize * sizeof(int) );
+ edges = (int*)malloc(wsize * 2 * sizeof(int) );
+ if (!index || !edges) {
+ printf( "Unable to allocate %d words for index or edges\n",
+ 3 * wsize );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+ /* ring: every node has two edges, so the cumulative index is 2,4,6,... */
+ index[0] = 2;
+ for (i=1; i<wsize; i++) {
+ index[i] = 2 + index[i-1];
+ }
+ k=0;
+ for (i=0; i<wsize; i++) {
+ edges[k++] = (i-1+wsize) % wsize;
+ edges[k++] = (i+1) % wsize;
+ }
+ MPI_Graph_create( MPI_COMM_WORLD, wsize, index, edges, 0, &comm1 );
+ MPI_Comm_dup( comm1, &comm2 );
+ MPI_Topo_test( comm2, &topo_type );
+ if (topo_type != MPI_GRAPH) {
+ errs++;
+ printf( "Topo type of duped graph was not graph\n" );
+ }
+ else {
+ int nnodes, nedges;
+ MPI_Graphdims_get( comm2, &nnodes, &nedges );
+ if (nnodes != wsize) {
+ errs++;
+ printf( "Nnodes = %d, should be %d\n", nnodes, wsize );
+ }
+ if (nedges != 2*wsize) {
+ errs++;
+ printf( "Nedges = %d, should be %d\n", nedges, 2*wsize );
+ }
+ outindex = (int*)malloc(wsize * sizeof(int) );
+ outedges = (int*)malloc(wsize * 2 * sizeof(int) );
+ if (!outindex || !outedges) {
+ printf( "Unable to allocate %d words for outindex or outedges\n",
+ 3 * wsize );
+ MPI_Abort( MPI_COMM_WORLD, 1 );
+ }
+
+ MPI_Graph_get( comm2, wsize, 2*wsize, outindex, outedges );
+ for (i=0; i<wsize; i++) {
+ if (index[i] != outindex[i]) {
+ printf( "%d = index[%d] != outindex[%d] = %d\n",
+ index[i], i, i, outindex[i] );
+ errs++;
+ }
+ }
+ for (i=0; i<2*wsize; i++) {
+ if (edges[i] != outedges[i]) {
+ printf( "%d = edges[%d] != outedges[%d] = %d\n",
+ edges[i], i, i, outedges[i] );
+ errs++;
+ }
+ }
+ free( outindex );
+ free( outedges );
+ }
+ free( index );
+ free( edges );
+
+ MPI_Comm_free( &comm2 );
+ MPI_Comm_free( &comm1 );
+ }
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}
--- /dev/null
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *
+ * (C) 2003 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+#include "mpi.h"
+#include <stdio.h>
+#include "mpitest.h"
+
+/* MPI_Topo_test must report MPI_UNDEFINED for MPI_COMM_WORLD (which has
+   no topology) and MPI_CART for a Cartesian communicator. */
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int topo_type, size, dims[1], periods[1];
+ MPI_Comm comm;
+
+ MTest_Init( &argc, &argv );
+
+ /* Check that topo test returns the correct type, including
+ MPI_UNDEFINED */
+
+ MPI_Topo_test( MPI_COMM_WORLD, &topo_type );
+ if (topo_type != MPI_UNDEFINED) {
+ errs++;
+ printf( "Topo type of comm world is not UNDEFINED\n" );
+ }
+
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+ dims[0] = size;
+ periods[0] = 0;
+ MPI_Cart_create( MPI_COMM_WORLD, 1, dims, periods, 0, &comm );
+ MPI_Topo_test( comm, &topo_type );
+ if (topo_type != MPI_CART) {
+ errs++;
+ printf( "Topo type of cart comm is not CART\n" );
+ }
+
+ MPI_Comm_free( &comm );
+ /* FIXME: still need graph example */
+
+ MTest_Finalize( errs );
+ MPI_Finalize();
+ return 0;
+
+}