X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/8878b3ba58f37396166bd3d2e45ca308d2a2171e..35fd56f06e4d1d4bc93a80dd0e53afeb20f7e35d:/teshsuite/smpi/mpich3-test/util/mtest_manual.c diff --git a/teshsuite/smpi/mpich3-test/util/mtest_manual.c b/teshsuite/smpi/mpich3-test/util/mtest_manual.c index e0813e7671..7af77c9dbd 100644 --- a/teshsuite/smpi/mpich3-test/util/mtest_manual.c +++ b/teshsuite/smpi/mpich3-test/util/mtest_manual.c @@ -39,11 +39,11 @@ * as the error handler. We do *not* set MPI_ERRORS_RETURN because * the code that makes use of these routines may not check return * codes. - * + * */ -static void MTestRMACleanup( void ); -static void MTestResourceSummary( FILE * ); +static void MTestRMACleanup(void); +static void MTestResourceSummary(FILE *); /* Here is where we could put the includes and definitions to enable memory testing */ @@ -64,1287 +64,723 @@ SMPI_VARINIT_GLOBAL_AND_SET(usageOutput, int, 0); /* */ #define MPI_THREAD_SINGLE 0 #endif -/* +/* * Initialize and Finalize MTest */ /* - Initialize MTest, initializing MPI if necessary. + Initialize MTest, initializing MPI if necessary. Environment Variables: + MPITEST_DEBUG - If set (to any value), turns on debugging output . MPITEST_THREADLEVEL_DEFAULT - If set, use as the default "provided" - level of thread support. Applies to + level of thread support. Applies to MTest_Init but not MTest_Init_thread. - MPITEST_VERBOSE - If set to a numeric value, turns on that level of verbose output. This is used by the routine 'MTestPrintfMsg' */ -void MTest_Init_thread( int *argc, char ***argv, int required, int *provided ) +void MTest_Init_thread(int *argc, char ***argv, int required, int *provided) { int flag; char *envval = 0; - MPI_Initialized( &flag ); + MPI_Initialized(&flag); if (!flag) { - /* Permit an MPI that claims only MPI 1 but includes the - MPI_Init_thread routine (e.g., IBM MPI) */ + /* Permit an MPI that claims only MPI 1 but includes the + * MPI_Init_thread routine (e.g., IBM MPI) */ #if MPI_VERSION >= 2 || defined(HAVE_MPI_INIT_THREAD) - MPI_Init_thread( argc, argv, required, provided ); + MPI_Init_thread(argc, argv, required, provided); #else - MPI_Init( argc, argv ); - *provided = -1; + MPI_Init(argc, argv); + *provided = -1; #endif } /* Check for debugging control */ - if (getenv( "MPITEST_DEBUG" )) { - SMPI_VARGET_GLOBAL(dbgflag) = 1; - MPI_Comm_rank( MPI_COMM_WORLD, &SMPI_VARGET_GLOBAL(wrank) ); + if (getenv("MPITEST_DEBUG")) { + SMPI_VARGET_GLOBAL(dbgflag) = 1; + MPI_Comm_rank(MPI_COMM_WORLD, &SMPI_VARGET_GLOBAL(wrank)); } /* Check for verbose control */ - envval = getenv( "MPITEST_VERBOSE" ); + envval = getenv("MPITEST_VERBOSE"); if (envval) { - char *s; - long val = strtol( envval, &s, 0 ); - if (s == envval) { - /* This is the error case for strtol */ - fprintf( stderr, "Warning: %s not valid for MPITEST_VERBOSE\n", - envval ); - fflush( stderr ); - } - else { - if (val >= 0) { - SMPI_VARGET_GLOBAL(verbose) = val; - } - else { - fprintf( stderr, "Warning: %s not valid for MPITEST_VERBOSE\n", - envval ); - fflush( stderr ); - } - } + char *s; + long val = strtol(envval, &s, 0); + if (s == envval) { + /* This is the error case for strtol */ + fprintf(stderr, "Warning: %s not valid for MPITEST_VERBOSE\n", envval); + fflush(stderr); + } + else { + if (val >= 0) { + SMPI_VARGET_GLOBAL(verbose) = val; + } + else { + fprintf(stderr, "Warning: %s not valid for MPITEST_VERBOSE\n", envval); + fflush(stderr); + } + } } /* Check for option to return success/failure in the return value of 
main */ - envval = getenv( "MPITEST_RETURN_WITH_CODE" ); + envval = getenv("MPITEST_RETURN_WITH_CODE"); if (envval) { - if (strcmp( envval, "yes" ) == 0 || - strcmp( envval, "YES" ) == 0 || - strcmp( envval, "true" ) == 0 || - strcmp( envval, "TRUE" ) == 0) { - SMPI_VARGET_GLOBAL(returnWithVal) = 1; - } - else if (strcmp( envval, "no" ) == 0 || - strcmp( envval, "NO" ) == 0 || - strcmp( envval, "false" ) == 0 || - strcmp( envval, "FALSE" ) == 0) { - SMPI_VARGET_GLOBAL(returnWithVal) = 0; - } - else { - fprintf( stderr, - "Warning: %s not valid for MPITEST_RETURN_WITH_CODE\n", - envval ); - fflush( stderr ); - } + if (strcmp(envval, "yes") == 0 || + strcmp(envval, "YES") == 0 || + strcmp(envval, "true") == 0 || strcmp(envval, "TRUE") == 0) { + SMPI_VARGET_GLOBAL(returnWithVal) = 1; + } + else if (strcmp(envval, "no") == 0 || + strcmp(envval, "NO") == 0 || + strcmp(envval, "false") == 0 || strcmp(envval, "FALSE") == 0) { + SMPI_VARGET_GLOBAL(returnWithVal) = 0; + } + else { + fprintf(stderr, "Warning: %s not valid for MPITEST_RETURN_WITH_CODE\n", envval); + fflush(stderr); + } } - + /* Print rusage data if set */ - if (getenv( "MPITEST_RUSAGE" )) { - SMPI_VARGET_GLOBAL(usageOutput) = 1; + if (getenv("MPITEST_RUSAGE")) { + SMPI_VARGET_GLOBAL(usageOutput) = 1; } } -/* - * Initialize the tests, using an MPI-1 style init. Supports + +/* + * Initialize the tests, using an MPI-1 style init. Supports * MTEST_THREADLEVEL_DEFAULT to test with user-specified thread level */ -void MTest_Init( int *argc, char ***argv ) +void MTest_Init(int *argc, char ***argv) { int provided; #if MPI_VERSION >= 2 || defined(HAVE_MPI_INIT_THREAD) const char *str = 0; - int threadLevel; + int threadLevel; threadLevel = MPI_THREAD_SINGLE; - str = getenv( "MTEST_THREADLEVEL_DEFAULT" ); - if (!str) str = getenv( "MPITEST_THREADLEVEL_DEFAULT" ); + str = getenv("MTEST_THREADLEVEL_DEFAULT"); + if (!str) + str = getenv("MPITEST_THREADLEVEL_DEFAULT"); if (str && *str) { - if (strcmp(str,"MULTIPLE") == 0 || strcmp(str,"multiple") == 0) { - threadLevel = MPI_THREAD_MULTIPLE; - } - else if (strcmp(str,"SERIALIZED") == 0 || - strcmp(str,"serialized") == 0) { - threadLevel = MPI_THREAD_SERIALIZED; - } - else if (strcmp(str,"FUNNELED") == 0 || strcmp(str,"funneled") == 0) { - threadLevel = MPI_THREAD_FUNNELED; - } - else if (strcmp(str,"SINGLE") == 0 || strcmp(str,"single") == 0) { - threadLevel = MPI_THREAD_SINGLE; - } - else { - fprintf( stderr, "Unrecognized thread level %s\n", str ); - /* Use exit since MPI_Init/Init_thread has not been called. */ - exit(1); - } + if (strcmp(str, "MULTIPLE") == 0 || strcmp(str, "multiple") == 0) { + threadLevel = MPI_THREAD_MULTIPLE; + } + else if (strcmp(str, "SERIALIZED") == 0 || strcmp(str, "serialized") == 0) { + threadLevel = MPI_THREAD_SERIALIZED; + } + else if (strcmp(str, "FUNNELED") == 0 || strcmp(str, "funneled") == 0) { + threadLevel = MPI_THREAD_FUNNELED; + } + else if (strcmp(str, "SINGLE") == 0 || strcmp(str, "single") == 0) { + threadLevel = MPI_THREAD_SINGLE; + } + else { + fprintf(stderr, "Unrecognized thread level %s\n", str); + /* Use exit since MPI_Init/Init_thread has not been called. */ + exit(1); + } } - MTest_Init_thread( argc, argv, threadLevel, &provided ); + MTest_Init_thread(argc, argv, threadLevel, &provided); #else /* If the MPI_VERSION is 1, there is no MPI_THREAD_xxx defined */ - MTest_Init_thread( argc, argv, 0, &provided ); -#endif + MTest_Init_thread(argc, argv, 0, &provided); +#endif } /* - Finalize MTest. 
errs is the number of errors on the calling process; + Finalize MTest. errs is the number of errors on the calling process; this routine will write the total number of errors over all of MPI_COMM_WORLD to the process with rank zero, or " No Errors". It does *not* finalize MPI. */ -void MTest_Finalize( int errs ) +void MTest_Finalize(int errs) { int rank, toterrs, merr; - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); - merr = MPI_Reduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, - 0, MPI_COMM_WORLD ); - if (merr) MTestPrintError( merr ); + merr = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); + if (merr) + MTestPrintError(merr); if (rank == 0) { - if (toterrs) { - printf( " Found %d errors\n", toterrs ); - } - else { - printf( " No Errors\n" ); - } - fflush( stdout ); + if (toterrs) { + printf(" Found %d errors\n", toterrs); + } + else { + printf(" No Errors\n"); + } + fflush(stdout); } - + if (SMPI_VARGET_GLOBAL(usageOutput)) - MTestResourceSummary( stdout ); + MTestResourceSummary(stdout); /* Clean up any persistent objects that we allocated */ MTestRMACleanup(); } + /* ------------------------------------------------------------------------ */ -/* This routine may be used instead of "return 0;" at the end of main; - it allows the program to use the return value to signal success or failure. +/* This routine may be used instead of "return 0;" at the end of main; + it allows the program to use the return value to signal success or failure. */ -int MTestReturnValue( int errors ) +int MTestReturnValue(int errors) { - if (SMPI_VARGET_GLOBAL(returnWithVal)) return errors ? 1 : 0; + if (SMPI_VARGET_GLOBAL(returnWithVal)) + return errors ? 1 : 0; return 0; } + /* ------------------------------------------------------------------------ */ /* * Miscellaneous utilities, particularly to eliminate OS dependencies * from the tests. - * MTestSleep( seconds ) + * MTestSleep(seconds) */ #ifdef HAVE_WINDOWS_H #include -void MTestSleep( int sec ) +void MTestSleep(int sec) { - Sleep( 1000 * sec ); + Sleep(1000 * sec); } #else #include -void MTestSleep( int sec ) -{ - sleep( sec ); -} -#endif - -/* - * Datatypes - * - * Eventually, this could read a description of a file. For now, we hard - * code the choices. - * - * Each kind of datatype has the following functions: - * MTestTypeXXXInit - Initialize a send buffer for that type - * MTestTypeXXXInitRecv - Initialize a receive buffer for that type - * MTestTypeXXXFree - Free any buffers associate with that type - * MTestTypeXXXCheckbuf - Check that the buffer contains the expected data - * These routines work with (nearly) any datatype that is of type XXX, - * allowing the test codes to create a variety of contiguous, vector, and - * indexed types, then test them by calling these routines. - * - * Available types (for the XXX) are - * Contig - Simple contiguous buffers - * Vector - Simple strided "vector" type - * Indexed - Indexed datatype. Only for a count of 1 instance of the - * datatype - */ -SMPI_VARINIT_GLOBAL_AND_SET(datatype_index, int, 0); - -/* ------------------------------------------------------------------------ */ -/* Datatype routines for contiguous datatypes */ -/* ------------------------------------------------------------------------ */ -/* - * Setup contiguous buffers of n copies of a datatype. 
- */ -static void *MTestTypeContigInit( MTestDatatype *mtype ) -{ - MPI_Aint size; - int merr; - - if (mtype->count > 0) { - signed char *p; - int i, totsize; - merr = MPI_Type_extent( mtype->datatype, &size ); - if (merr) MTestPrintError( merr ); - totsize = size * mtype->count; - if (!mtype->buf) { - mtype->buf = (void *) malloc( totsize ); - } - p = (signed char *)(mtype->buf); - if (!p) { - /* Error - out of memory */ - MTestError( "Out of memory in type buffer init" ); - } - for (i=0; ibuf) { - free( mtype->buf ); - } - mtype->buf = 0; - } - return mtype->buf; -} - -/* - * Setup contiguous buffers of n copies of a datatype. Initialize for - * reception (e.g., set initial data to detect failure) - */ -static void *MTestTypeContigInitRecv( MTestDatatype *mtype ) -{ - MPI_Aint size; - int merr; - - if (mtype->count > 0) { - signed char *p; - int i, totsize; - merr = MPI_Type_extent( mtype->datatype, &size ); - if (merr) MTestPrintError( merr ); - totsize = size * mtype->count; - if (!mtype->buf) { - mtype->buf = (void *) malloc( totsize ); - } - p = (signed char *)(mtype->buf); - if (!p) { - /* Error - out of memory */ - MTestError( "Out of memory in type buffer init" ); - } - for (i=0; ibuf) { - free( mtype->buf ); - } - mtype->buf = 0; - } - return mtype->buf; -} -static void *MTestTypeContigFree( MTestDatatype *mtype ) -{ - if (mtype->buf) { - free( mtype->buf ); - mtype->buf = 0; - } - return 0; -} -static int MTestTypeContigCheckbuf( MTestDatatype *mtype ) -{ - unsigned char *p; - unsigned char expected; - int i, totsize, err = 0, merr; - MPI_Aint size; - - p = (unsigned char *)mtype->buf; - if (p) { - merr = MPI_Type_extent( mtype->datatype, &size ); - if (merr) MTestPrintError( merr ); - totsize = size * mtype->count; - for (i=0; iprintErrors && err < 10) { - printf( "Data expected = %x but got p[%d] = %x\n", - expected, i, p[i] ); - fflush( stdout ); - } - } - } - } - return err; -} - -/* ------------------------------------------------------------------------ */ -/* Datatype routines for vector datatypes */ -/* ------------------------------------------------------------------------ */ - -static void *MTestTypeVectorInit( MTestDatatype *mtype ) -{ - MPI_Aint size; - int merr; - - if (mtype->count > 0) { - unsigned char *p; - int i, j, k, nc, totsize; - - merr = MPI_Type_extent( mtype->datatype, &size ); - if (merr) MTestPrintError( merr ); - totsize = mtype->count * size; - if (!mtype->buf) { - mtype->buf = (void *) malloc( totsize ); - } - p = (unsigned char *)(mtype->buf); - if (!p) { - /* Error - out of memory */ - MTestError( "Out of memory in type buffer init" ); - } - - /* First, set to -1 */ - for (i=0; icount; k++) { - /* For each element (block) */ - for (i=0; inelm; i++) { - /* For each value */ - for (j=0; jblksize; j++) { - p[j] = (0xff ^ (nc & 0xff)); - nc++; - } - p += mtype->stride; - } - } - } - else { - mtype->buf = 0; - } - return mtype->buf; -} - -static void *MTestTypeVectorFree( MTestDatatype *mtype ) -{ - if (mtype->buf) { - free( mtype->buf ); - mtype->buf = 0; - } - return 0; -} - -/* ------------------------------------------------------------------------ */ -/* Datatype routines for indexed block datatypes */ -/* ------------------------------------------------------------------------ */ - -/* - * Setup a buffer for one copy of an indexed datatype. 
- */ -static void *MTestTypeIndexedInit( MTestDatatype *mtype ) -{ - MPI_Aint totsize; - int merr; - - if (mtype->count > 1) { - MTestError( "This datatype is supported only for a single count" ); - } - if (mtype->count == 1) { - signed char *p; - int i, k, offset, j; - - /* Allocate the send/recv buffer */ - merr = MPI_Type_extent( mtype->datatype, &totsize ); - if (merr) MTestPrintError( merr ); - if (!mtype->buf) { - mtype->buf = (void *) malloc( totsize ); - } - p = (signed char *)(mtype->buf); - if (!p) { - MTestError( "Out of memory in type buffer init\n" ); - } - /* Initialize the elements */ - /* First, set to -1 */ - for (i=0; inelm; i++) { - int b; - /* Compute the offset: */ - offset = mtype->displs[i] * mtype->basesize; - /* For each element in the block */ - for (b=0; bindex[i]; b++) { - for (j=0; jbasesize; j++) { - p[offset+j] = 0xff ^ (k++ & 0xff); - } - offset += mtype->basesize; - } - } - } - else { - /* count == 0 */ - if (mtype->buf) { - free( mtype->buf ); - } - mtype->buf = 0; - } - return mtype->buf; -} - -/* - * Setup indexed buffers for 1 copy of a datatype. Initialize for - * reception (e.g., set initial data to detect failure) - */ -static void *MTestTypeIndexedInitRecv( MTestDatatype *mtype ) +void MTestSleep(int sec) { - MPI_Aint totsize; - int merr; - - if (mtype->count > 1) { - MTestError( "This datatype is supported only for a single count" ); - } - if (mtype->count == 1) { - signed char *p; - int i; - merr = MPI_Type_extent( mtype->datatype, &totsize ); - if (merr) MTestPrintError( merr ); - if (!mtype->buf) { - mtype->buf = (void *) malloc( totsize ); - } - p = (signed char *)(mtype->buf); - if (!p) { - /* Error - out of memory */ - MTestError( "Out of memory in type buffer init\n" ); - } - for (i=0; ibuf) { - free( mtype->buf ); - } - mtype->buf = 0; - } - return mtype->buf; + sleep(sec); } - -static void *MTestTypeIndexedFree( MTestDatatype *mtype ) -{ - if (mtype->buf) { - free( mtype->buf ); - free( mtype->displs ); - free( mtype->index ); - mtype->buf = 0; - mtype->displs = 0; - mtype->index = 0; - } - return 0; -} - -static int MTestTypeIndexedCheckbuf( MTestDatatype *mtype ) -{ - unsigned char *p; - unsigned char expected; - int i, err = 0, merr; - MPI_Aint totsize; - - p = (unsigned char *)mtype->buf; - if (p) { - int j, k, offset; - merr = MPI_Type_extent( mtype->datatype, &totsize ); - if (merr) MTestPrintError( merr ); - - k = 0; - for (i=0; inelm; i++) { - int b; - /* Compute the offset: */ - offset = mtype->displs[i] * mtype->basesize; - for (b=0; bindex[i]; b++) { - for (j=0; jbasesize; j++) { - expected = (0xff ^ (k & 0xff)); - if (p[offset+j] != expected) { - err++; - if (mtype->printErrors && err < 10) { - printf( "Data expected = %x but got p[%d,%d] = %x\n", - expected, i,j, p[offset+j] ); - fflush( stdout ); - } - } - k++; - } - offset += mtype->basesize; - } - } - } - return err; -} - - -/* ------------------------------------------------------------------------ */ -/* Routines to select a datatype and associated buffer create/fill/check */ -/* routines */ -/* ------------------------------------------------------------------------ */ - -/* - Create a range of datatypes with a given count elements. - This uses a selection of types, rather than an exhaustive collection. 
- It allocates both send and receive types so that they can have the same - type signature (collection of basic types) but different type maps (layouts - in memory) - */ -int MTestGetDatatypes( MTestDatatype *sendtype, MTestDatatype *recvtype, - int count ) -{ - int merr; - int i; - - sendtype->InitBuf = 0; - sendtype->FreeBuf = 0; - sendtype->CheckBuf = 0; - sendtype->datatype = 0; - sendtype->isBasic = 0; - sendtype->printErrors = 0; - recvtype->InitBuf = 0; - recvtype->FreeBuf = 0; - - recvtype->CheckBuf = 0; - recvtype->datatype = 0; - recvtype->isBasic = 0; - recvtype->printErrors = 0; - - sendtype->buf = 0; - recvtype->buf = 0; - - /* Set the defaults for the message lengths */ - sendtype->count = count; - recvtype->count = count; - /* Use datatype_index to choose a datatype to use. If at the end of the - list, return 0 */ - switch (SMPI_VARGET_GLOBAL(datatype_index)) { - case 0: - sendtype->datatype = MPI_INT; - sendtype->isBasic = 1; - recvtype->datatype = MPI_INT; - recvtype->isBasic = 1; - break; - case 1: - sendtype->datatype = MPI_DOUBLE; - sendtype->isBasic = 1; - recvtype->datatype = MPI_DOUBLE; - recvtype->isBasic = 1; - break; - case 2: - sendtype->datatype = MPI_FLOAT_INT; - sendtype->isBasic = 1; - recvtype->datatype = MPI_FLOAT_INT; - recvtype->isBasic = 1; - break; - case 3: - merr = MPI_Type_dup( MPI_INT, &sendtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_set_name( sendtype->datatype, - (char*)"dup of MPI_INT" ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_dup( MPI_INT, &recvtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_set_name( recvtype->datatype, - (char*)"dup of MPI_INT" ); - if (merr) MTestPrintError( merr ); - /* dup'ed types are already committed if the original type - was committed (MPI-2, section 8.8) */ - break; - case 4: - /* vector send type and contiguous receive type */ - /* These sizes are in bytes (see the VectorInit code) */ - sendtype->stride = 3 * sizeof(int); - sendtype->blksize = sizeof(int); - sendtype->nelm = recvtype->count; - - merr = MPI_Type_vector( recvtype->count, 1, 3, MPI_INT, - &sendtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_commit( &sendtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_set_name( sendtype->datatype, - (char*)"int-vector" ); - if (merr) MTestPrintError( merr ); - sendtype->count = 1; - recvtype->datatype = MPI_INT; - recvtype->isBasic = 1; - sendtype->InitBuf = MTestTypeVectorInit; - recvtype->InitBuf = MTestTypeContigInitRecv; - sendtype->FreeBuf = MTestTypeVectorFree; - recvtype->FreeBuf = MTestTypeContigFree; - sendtype->CheckBuf = 0; - recvtype->CheckBuf = MTestTypeContigCheckbuf; - break; - - case 5: - /* Indexed send using many small blocks and contig receive */ - sendtype->blksize = sizeof(int); - sendtype->nelm = recvtype->count; - sendtype->basesize = sizeof(int); - sendtype->displs = (int *)malloc( sendtype->nelm * sizeof(int) ); - sendtype->index = (int *)malloc( sendtype->nelm * sizeof(int) ); - if (!sendtype->displs || !sendtype->index) { - MTestError( "Out of memory in type init\n" ); - } - /* Make the sizes larger (4 ints) to help push the total - size to over 256k in some cases, as the MPICH code as of - 10/1/06 used large internal buffers for packing non-contiguous - messages */ - for (i=0; inelm; i++) { - sendtype->index[i] = 4; - sendtype->displs[i] = 5*i; - } - merr = MPI_Type_indexed( sendtype->nelm, - sendtype->index, sendtype->displs, - MPI_INT, &sendtype->datatype ); - if (merr) 
MTestPrintError( merr ); - merr = MPI_Type_commit( &sendtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_set_name( sendtype->datatype, - (char*)"int-indexed(4-int)" ); - if (merr) MTestPrintError( merr ); - sendtype->count = 1; - sendtype->InitBuf = MTestTypeIndexedInit; - sendtype->FreeBuf = MTestTypeIndexedFree; - sendtype->CheckBuf = 0; - - recvtype->datatype = MPI_INT; - recvtype->isBasic = 1; - recvtype->count = count * 4; - recvtype->InitBuf = MTestTypeContigInitRecv; - recvtype->FreeBuf = MTestTypeContigFree; - recvtype->CheckBuf = MTestTypeContigCheckbuf; - break; - - case 6: - /* Indexed send using 2 large blocks and contig receive */ - sendtype->blksize = sizeof(int); - sendtype->nelm = 2; - sendtype->basesize = sizeof(int); - sendtype->displs = (int *)malloc( sendtype->nelm * sizeof(int) ); - sendtype->index = (int *)malloc( sendtype->nelm * sizeof(int) ); - if (!sendtype->displs || !sendtype->index) { - MTestError( "Out of memory in type init\n" ); - } - /* index -> block size */ - sendtype->index[0] = (recvtype->count + 1) / 2; - sendtype->displs[0] = 0; - sendtype->index[1] = recvtype->count - sendtype->index[0]; - sendtype->displs[1] = sendtype->index[0] + 1; - /* There is a deliberate gap here */ - - merr = MPI_Type_indexed( sendtype->nelm, - sendtype->index, sendtype->displs, - MPI_INT, &sendtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_commit( &sendtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_set_name( sendtype->datatype, - (char*)"int-indexed(2 blocks)" ); - if (merr) MTestPrintError( merr ); - sendtype->count = 1; - sendtype->InitBuf = MTestTypeIndexedInit; - sendtype->FreeBuf = MTestTypeIndexedFree; - sendtype->CheckBuf = 0; - - recvtype->datatype = MPI_INT; - recvtype->isBasic = 1; - recvtype->count = sendtype->index[0] + sendtype->index[1]; - recvtype->InitBuf = MTestTypeContigInitRecv; - recvtype->FreeBuf = MTestTypeContigFree; - recvtype->CheckBuf = MTestTypeContigCheckbuf; - break; - - case 7: - /* Indexed receive using many small blocks and contig send */ - recvtype->blksize = sizeof(int); - recvtype->nelm = recvtype->count; - recvtype->basesize = sizeof(int); - recvtype->displs = (int *)malloc( recvtype->nelm * sizeof(int) ); - recvtype->index = (int *)malloc( recvtype->nelm * sizeof(int) ); - if (!recvtype->displs || !recvtype->index) { - MTestError( "Out of memory in type recv init\n" ); - } - /* Make the sizes larger (4 ints) to help push the total - size to over 256k in some cases, as the MPICH code as of - 10/1/06 used large internal buffers for packing non-contiguous - messages */ - /* Note that there are gaps in the indexed type */ - for (i=0; inelm; i++) { - recvtype->index[i] = 4; - recvtype->displs[i] = 5*i; - } - merr = MPI_Type_indexed( recvtype->nelm, - recvtype->index, recvtype->displs, - MPI_INT, &recvtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_commit( &recvtype->datatype ); - if (merr) MTestPrintError( merr ); - merr = MPI_Type_set_name( recvtype->datatype, - (char*)"recv-int-indexed(4-int)" ); - if (merr) MTestPrintError( merr ); - recvtype->count = 1; - recvtype->InitBuf = MTestTypeIndexedInitRecv; - recvtype->FreeBuf = MTestTypeIndexedFree; - recvtype->CheckBuf = MTestTypeIndexedCheckbuf; - - sendtype->datatype = MPI_INT; - sendtype->isBasic = 1; - sendtype->count = count * 4; - sendtype->InitBuf = MTestTypeContigInit; - sendtype->FreeBuf = MTestTypeContigFree; - sendtype->CheckBuf = 0; - break; - - /* Less commonly used but still simple 
types */ - case 8: - sendtype->datatype = MPI_SHORT; - sendtype->isBasic = 1; - recvtype->datatype = MPI_SHORT; - recvtype->isBasic = 1; - break; - case 9: - sendtype->datatype = MPI_LONG; - sendtype->isBasic = 1; - recvtype->datatype = MPI_LONG; - recvtype->isBasic = 1; - break; - case 10: - sendtype->datatype = MPI_CHAR; - sendtype->isBasic = 1; - recvtype->datatype = MPI_CHAR; - recvtype->isBasic = 1; - break; - case 11: - sendtype->datatype = MPI_UINT64_T; - sendtype->isBasic = 1; - recvtype->datatype = MPI_UINT64_T; - recvtype->isBasic = 1; - break; - case 12: - sendtype->datatype = MPI_FLOAT; - sendtype->isBasic = 1; - recvtype->datatype = MPI_FLOAT; - recvtype->isBasic = 1; - break; - -#ifndef USE_STRICT_MPI - /* MPI_BYTE may only be used with MPI_BYTE in strict MPI */ - case 13: - sendtype->datatype = MPI_INT; - sendtype->isBasic = 1; - recvtype->datatype = MPI_BYTE; - recvtype->isBasic = 1; - recvtype->count *= sizeof(int); - break; #endif - default: - SMPI_VARGET_GLOBAL(datatype_index) = -1; - } - - if (!sendtype->InitBuf) { - sendtype->InitBuf = MTestTypeContigInit; - recvtype->InitBuf = MTestTypeContigInitRecv; - sendtype->FreeBuf = MTestTypeContigFree; - recvtype->FreeBuf = MTestTypeContigFree; - sendtype->CheckBuf = MTestTypeContigCheckbuf; - recvtype->CheckBuf = MTestTypeContigCheckbuf; - } - SMPI_VARGET_GLOBAL(datatype_index)++; - - if (SMPI_VARGET_GLOBAL(dbgflag) && SMPI_VARGET_GLOBAL(datatype_index) > 0) { - int typesize; - fprintf( stderr, "%d: sendtype is %s\n", SMPI_VARGET_GLOBAL(wrank), MTestGetDatatypeName( sendtype ) ); - merr = MPI_Type_size( sendtype->datatype, &typesize ); - if (merr) MTestPrintError( merr ); - fprintf( stderr, "%d: sendtype size = %d\n", SMPI_VARGET_GLOBAL(wrank), typesize ); - fprintf( stderr, "%d: recvtype is %s\n", SMPI_VARGET_GLOBAL(wrank), MTestGetDatatypeName( recvtype ) ); - merr = MPI_Type_size( recvtype->datatype, &typesize ); - if (merr) MTestPrintError( merr ); - fprintf( stderr, "%d: recvtype size = %d\n", SMPI_VARGET_GLOBAL(wrank), typesize ); - fflush( stderr ); - - } - else if (SMPI_VARGET_GLOBAL(verbose) && SMPI_VARGET_GLOBAL(datatype_index) > 0) { - printf( "Get new datatypes: send = %s, recv = %s\n", - MTestGetDatatypeName( sendtype ), - MTestGetDatatypeName( recvtype ) ); - fflush( stdout ); - } - - return SMPI_VARGET_GLOBAL(datatype_index); -} - -/* Reset the datatype index (start from the initial data type. - Note: This routine is rarely needed; MTestGetDatatypes automatically - starts over after the last available datatype is used. -*/ -void MTestResetDatatypes( void ) -{ - SMPI_VARGET_GLOBAL(datatype_index) = 0; -} -/* Return the index of the current datatype. This is rarely needed and - is provided mostly to enable debugging of the MTest package itself */ -int MTestGetDatatypeIndex( void ) -{ - return SMPI_VARGET_GLOBAL(datatype_index); -} - -/* Free the storage associated with a datatype */ -void MTestFreeDatatype( MTestDatatype *mtype ) -{ - int merr; - /* Invoke a datatype-specific free function to handle - both the datatype and the send/receive buffers */ - if (mtype->FreeBuf) { - (mtype->FreeBuf)( mtype ); - } - /* Free the datatype itself if it was created */ - if (!mtype->isBasic) { - merr = MPI_Type_free( &mtype->datatype ); - if (merr) MTestPrintError( merr ); - } -} -/* Check that a message was received correctly. Returns the number of - errors detected. 
Status may be NULL or MPI_STATUS_IGNORE */ -int MTestCheckRecv( MPI_Status *status, MTestDatatype *recvtype ) +/* Other mtest subfiles read debug setting using this function. */ +void MTestGetDbgInfo(int *_dbgflag, int *_verbose) { - int count; - int errs = 0, merr; - - if (status && status != MPI_STATUS_IGNORE) { - merr = MPI_Get_count( status, recvtype->datatype, &count ); - if (merr) MTestPrintError( merr ); - - /* Check count against expected count */ - if (count != recvtype->count) { - errs ++; - } - } - - /* Check received data */ - if (!errs && recvtype->CheckBuf( recvtype )) { - errs++; - } - return errs; + *_dbgflag = SMPI_VARGET_GLOBAL(dbgflag); + *_verbose = SMPI_VARGET_GLOBAL(verbose); } -/* This next routine uses a circular buffer of static name arrays just to - simplify the use of the routine */ -const char *MTestGetDatatypeName( MTestDatatype *dtype ) -{ - typedef char name_type[4][MPI_MAX_OBJECT_NAME]; - SMPI_VARINIT_STATIC(name, name_type); - SMPI_VARINIT_STATIC_AND_SET(sp, int, 0); - int rlen, merr; - - if (SMPI_VARGET_STATIC(sp) >= 4) SMPI_VARGET_STATIC(sp) = 0; - merr = MPI_Type_get_name( dtype->datatype, SMPI_VARGET_STATIC(name)[SMPI_VARGET_STATIC(sp)], &rlen ); - if (merr) MTestPrintError( merr ); - return (const char *)SMPI_VARGET_STATIC(name)[SMPI_VARGET_STATIC(sp)++]; -} /* ----------------------------------------------------------------------- */ -/* +/* * Create communicators. Use separate routines for inter and intra * communicators (there is a routine to give both) * Note that the routines may return MPI_COMM_NULL, so code should test for * that return value as well. - * + * */ -SMPI_VARINIT_GLOBAL_AND_SET(interCommIdx, int, 0); -SMPI_VARINIT_GLOBAL_AND_SET(intraCommIdx, int, 0); -SMPI_VARINIT_GLOBAL_AND_SET(intraCommName, const char *, 0); -SMPI_VARINIT_GLOBAL_AND_SET(interCommName, const char *, 0); +static int interCommIdx = 0; +static int intraCommIdx = 0; +static const char *intraCommName = 0; +static const char *interCommName = 0; -/* +/* * Get an intracommunicator with at least min_size members. If "allowSmaller" * is true, allow the communicator to be smaller than MPI_COMM_WORLD and * for this routine to return MPI_COMM_NULL for some values. Returns 0 if * no more communicators are available. */ -int MTestGetIntracommGeneral( MPI_Comm *comm, int min_size, int allowSmaller ) +int MTestGetIntracommGeneral(MPI_Comm * comm, int min_size, int allowSmaller) { int size, rank, merr; - int done2, done=0; + int done = 0; int isBasic = 0; /* The while loop allows us to skip communicators that are too small. 
- MPI_COMM_NULL is always considered large enough */ + * MPI_COMM_NULL is always considered large enough */ while (!done) { - isBasic = 0; - SMPI_VARGET_GLOBAL(intraCommName) = ""; - switch (SMPI_VARGET_GLOBAL(intraCommIdx)) { - case 0: - *comm = MPI_COMM_WORLD; - isBasic = 1; - SMPI_VARGET_GLOBAL(intraCommName) = "MPI_COMM_WORLD"; - break; - case 1: - /* dup of world */ - merr = MPI_Comm_dup(MPI_COMM_WORLD, comm ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(intraCommName) = "Dup of MPI_COMM_WORLD"; - break; - case 2: - /* reverse ranks */ - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_split( MPI_COMM_WORLD, 0, size-rank, comm ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(intraCommName) = "Rank reverse of MPI_COMM_WORLD"; - break; - case 3: - /* subset of world, with reversed ranks */ - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_split( MPI_COMM_WORLD, ((rank < size/2) ? 1 : MPI_UNDEFINED), - size-rank, comm ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(intraCommName) = "Rank reverse of half of MPI_COMM_WORLD"; - break; - case 4: - *comm = MPI_COMM_SELF; - isBasic = 1; - SMPI_VARGET_GLOBAL(intraCommName) = "MPI_COMM_SELF"; - break; - - /* These next cases are communicators that include some - but not all of the processes */ - case 5: - case 6: - case 7: - case 8: - { - int newsize; - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - newsize = size - (SMPI_VARGET_GLOBAL(intraCommIdx) - 4); - - if (allowSmaller && newsize >= min_size) { - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_split( MPI_COMM_WORLD, rank < newsize, rank, - comm ); - if (merr) MTestPrintError( merr ); - if (rank >= newsize) { - merr = MPI_Comm_free( comm ); - if (merr) MTestPrintError( merr ); - *comm = MPI_COMM_NULL; - } - else { - SMPI_VARGET_GLOBAL(intraCommName) = "Split of WORLD"; - } - } - else { - /* Act like default */ - *comm = MPI_COMM_NULL; - SMPI_VARGET_GLOBAL(intraCommIdx) = -1; - } - } - break; - - /* Other ideas: dup of self, cart comm, graph comm */ - default: - *comm = MPI_COMM_NULL; - SMPI_VARGET_GLOBAL(intraCommIdx) = -1; - break; - } - - if (*comm != MPI_COMM_NULL) { - merr = MPI_Comm_size( *comm, &size ); - if (merr) MTestPrintError( merr ); - if (size >= min_size) - done = 1; - } + isBasic = 0; + intraCommName = ""; + switch (intraCommIdx) { + case 0: + *comm = MPI_COMM_WORLD; + isBasic = 1; + intraCommName = "MPI_COMM_WORLD"; + break; + case 1: + /* dup of world */ + merr = MPI_Comm_dup(MPI_COMM_WORLD, comm); + if (merr) + MTestPrintError(merr); + intraCommName = "Dup of MPI_COMM_WORLD"; + break; + case 2: + /* reverse ranks */ + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, comm); + if (merr) + MTestPrintError(merr); + intraCommName = "Rank reverse of MPI_COMM_WORLD"; + break; + case 3: + /* subset of world, with reversed ranks */ + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = 
MPI_Comm_split(MPI_COMM_WORLD, ((rank < size / 2) ? 1 : MPI_UNDEFINED), + size - rank, comm); + if (merr) + MTestPrintError(merr); + intraCommName = "Rank reverse of half of MPI_COMM_WORLD"; + break; + case 4: + *comm = MPI_COMM_SELF; + isBasic = 1; + intraCommName = "MPI_COMM_SELF"; + break; + case 5: + { +#if MTEST_HAVE_MIN_MPI_VERSION(3,0) + /* Dup of the world using MPI_Intercomm_merge */ + int rleader, isLeft; + MPI_Comm local_comm, inter_comm; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (size > 1) { + merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm); + if (merr) + MTestPrintError(merr); + if (rank == 0) { + rleader = size / 2; + } + else if (rank == size / 2) { + rleader = 0; + } + else { + rleader = -1; + } + isLeft = rank < size / 2; + merr = + MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, + &inter_comm); + if (merr) + MTestPrintError(merr); + merr = MPI_Intercomm_merge(inter_comm, isLeft, comm); + if (merr) + MTestPrintError(merr); + MPI_Comm_free(&inter_comm); + MPI_Comm_free(&local_comm); + intraCommName = "Dup of WORLD created by MPI_Intercomm_merge"; + } + else { + *comm = MPI_COMM_NULL; + } + } + break; + case 6: + { + /* Even of the world using MPI_Comm_create_group */ + int i; + MPI_Group world_group, even_group; + int *excl = NULL; + + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (allowSmaller && (size + 1) / 2 >= min_size) { + /* exclude the odd ranks */ + excl = malloc((size / 2) * sizeof(int)); + for (i = 0; i < size / 2; i++) + excl[i] = (2 * i) + 1; + + MPI_Comm_group(MPI_COMM_WORLD, &world_group); + MPI_Group_excl(world_group, size / 2, excl, &even_group); + MPI_Group_free(&world_group); + free(excl); + + if (rank % 2 == 0) { + /* Even processes create a comm. 
for themselves */ + MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, comm); + intraCommName = "Even of WORLD created by MPI_Comm_create_group"; + } + else { + *comm = MPI_COMM_NULL; + } + + MPI_Group_free(&even_group); + } + else { + *comm = MPI_COMM_NULL; + } +#else + *comm = MPI_COMM_NULL; +#endif + } + break; + case 7: + { + /* High half of the world using MPI_Comm_create */ + int ranges[1][3]; + MPI_Group world_group, high_group; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + ranges[0][0] = size / 2; + ranges[0][1] = size - 1; + ranges[0][2] = 1; + + if (allowSmaller && (size + 1) / 2 >= min_size) { + MPI_Comm_group(MPI_COMM_WORLD, &world_group); + merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm); + if (merr) + MTestPrintError(merr); + MPI_Group_free(&world_group); + MPI_Group_free(&high_group); + intraCommName = "High half of WORLD created by MPI_Comm_create"; + } + else { + *comm = MPI_COMM_NULL; + } + } + break; + /* These next cases are communicators that include some + * but not all of the processes */ + case 8: + case 9: + case 10: + case 11: + { + int newsize; + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + newsize = size - (intraCommIdx - 7); + + if (allowSmaller && newsize >= min_size) { + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_split(MPI_COMM_WORLD, rank < newsize, rank, comm); + if (merr) + MTestPrintError(merr); + if (rank >= newsize) { + merr = MPI_Comm_free(comm); + if (merr) + MTestPrintError(merr); + *comm = MPI_COMM_NULL; + } + else { + intraCommName = "Split of WORLD"; + } + } + else { + /* Act like default */ + *comm = MPI_COMM_NULL; + intraCommIdx = -1; + } + } + break; + + /* Other ideas: dup of self, cart comm, graph comm */ + default: + *comm = MPI_COMM_NULL; + intraCommIdx = -1; + break; + } + + if (*comm != MPI_COMM_NULL) { + merr = MPI_Comm_size(*comm, &size); + if (merr) + MTestPrintError(merr); + if (size >= min_size) + done = 1; + } else { - SMPI_VARGET_GLOBAL(intraCommName) = "MPI_COMM_NULL"; + intraCommName = "MPI_COMM_NULL"; isBasic = 1; done = 1; } -done2=done; + /* we are only done if all processes are done */ - MPI_Allreduce(&done2, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); + MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); /* Advance the comm index whether we are done or not, otherwise we could * spin forever trying to allocate a too-small communicator over and * over again. */ - SMPI_VARGET_GLOBAL(intraCommIdx)++; + intraCommIdx++; if (!done && !isBasic && *comm != MPI_COMM_NULL) { /* avoid leaking communicators */ merr = MPI_Comm_free(comm); - if (merr) MTestPrintError(merr); + if (merr) + MTestPrintError(merr); } } - return SMPI_VARGET_GLOBAL(intraCommIdx); + return intraCommIdx; } -/* +/* * Get an intracommunicator with at least min_size members. 
*/ -int MTestGetIntracomm( MPI_Comm *comm, int min_size ) +int MTestGetIntracomm(MPI_Comm * comm, int min_size) { - return MTestGetIntracommGeneral( comm, min_size, 0 ); + return MTestGetIntracommGeneral(comm, min_size, 0); } /* Return the name of an intra communicator */ -const char *MTestGetIntracommName( void ) +const char *MTestGetIntracommName(void) { - return SMPI_VARGET_GLOBAL(intraCommName); + return intraCommName; } -/* - * Return an intercomm; set isLeftGroup to 1 if the calling process is +/* + * Return an intercomm; set isLeftGroup to 1 if the calling process is * a member of the "left" group. */ -int MTestGetIntercomm( MPI_Comm *comm, int *isLeftGroup, int min_size ) +int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) { int size, rank, remsize, merr; - int done=0; - MPI_Comm mcomm = MPI_COMM_NULL; + int done = 0; + MPI_Comm mcomm = MPI_COMM_NULL; MPI_Comm mcomm2 = MPI_COMM_NULL; int rleader; /* The while loop allows us to skip communicators that are too small. - MPI_COMM_NULL is always considered large enough. The size is - the sum of the sizes of the local and remote groups */ + * MPI_COMM_NULL is always considered large enough. The size is + * the sum of the sizes of the local and remote groups */ while (!done) { *comm = MPI_COMM_NULL; *isLeftGroup = 0; - SMPI_VARGET_GLOBAL(interCommName) = "MPI_COMM_NULL"; - - switch (SMPI_VARGET_GLOBAL(interCommIdx)) { - case 0: - /* Split comm world in half */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size > 1) { - merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, - &mcomm ); - if (merr) MTestPrintError( merr ); - if (rank == 0) { - rleader = size/2; - } - else if (rank == size/2) { - rleader = 0; - } - else { - /* Remote leader is signficant only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank < size/2; - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, - 12345, comm ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD"; - } - else - *comm = MPI_COMM_NULL; - break; - case 1: - /* Split comm world in to 1 and the rest */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size > 1) { - merr = MPI_Comm_split( MPI_COMM_WORLD, rank == 0, rank, - &mcomm ); - if (merr) MTestPrintError( merr ); - if (rank == 0) { - rleader = 1; - } - else if (rank == 1) { - rleader = 0; - } - else { - /* Remote leader is signficant only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank == 0; - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, - rleader, 12346, comm ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD into 1, rest"; - } - else - *comm = MPI_COMM_NULL; - break; - - case 2: - /* Split comm world in to 2 and the rest */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size > 3) { - merr = MPI_Comm_split( MPI_COMM_WORLD, rank < 2, rank, - &mcomm ); - if (merr) MTestPrintError( merr ); - if (rank == 0) { - rleader = 2; - } - else if (rank == 2) { - rleader = 0; - } - else { - /* Remote leader is signficant 
only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank < 2; - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, - rleader, 12347, comm ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD into 2, rest"; - } - else - *comm = MPI_COMM_NULL; - break; - - case 3: - /* Split comm world in half, then dup */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size > 1) { - merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, - &mcomm ); - if (merr) MTestPrintError( merr ); - if (rank == 0) { - rleader = size/2; - } - else if (rank == size/2) { - rleader = 0; - } - else { - /* Remote leader is signficant only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank < size/2; - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, - 12345, comm ); - if (merr) MTestPrintError( merr ); + interCommName = "MPI_COMM_NULL"; + + switch (interCommIdx) { + case 0: + /* Split comm world in half */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size > 1) { + merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm); + if (merr) + MTestPrintError(merr); + if (rank == 0) { + rleader = size / 2; + } + else if (rank == size / 2) { + rleader = 0; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; + } + *isLeftGroup = rank < size / 2; + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm); + if (merr) + MTestPrintError(merr); + interCommName = "Intercomm by splitting MPI_COMM_WORLD"; + } + else + *comm = MPI_COMM_NULL; + break; + case 1: + /* Split comm world in to 1 and the rest */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size > 1) { + merr = MPI_Comm_split(MPI_COMM_WORLD, rank == 0, rank, &mcomm); + if (merr) + MTestPrintError(merr); + if (rank == 0) { + rleader = 1; + } + else if (rank == 1) { + rleader = 0; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; + } + *isLeftGroup = rank == 0; + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12346, comm); + if (merr) + MTestPrintError(merr); + interCommName = "Intercomm by splitting MPI_COMM_WORLD into 1, rest"; + } + else + *comm = MPI_COMM_NULL; + break; + + case 2: + /* Split comm world in to 2 and the rest */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size > 3) { + merr = MPI_Comm_split(MPI_COMM_WORLD, rank < 2, rank, &mcomm); + if (merr) + MTestPrintError(merr); + if (rank == 0) { + rleader = 2; + } + else if (rank == 2) { + rleader = 0; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; + } + *isLeftGroup = rank < 2; + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12347, comm); + if (merr) + MTestPrintError(merr); + interCommName = "Intercomm by splitting MPI_COMM_WORLD into 2, rest"; + } + else + *comm = MPI_COMM_NULL; + break; + + 
case 3: + /* Split comm world in half, then dup */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size > 1) { + merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm); + if (merr) + MTestPrintError(merr); + if (rank == 0) { + rleader = size / 2; + } + else if (rank == size / 2) { + rleader = 0; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; + } + *isLeftGroup = rank < size / 2; + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm); + if (merr) + MTestPrintError(merr); /* avoid leaking after assignment below */ - merr = MPI_Comm_free( &mcomm ); - if (merr) MTestPrintError( merr ); - - /* now dup, some bugs only occur for dup's of intercomms */ - mcomm = *comm; - merr = MPI_Comm_dup(mcomm, comm); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD then dup'ing"; - } - else - *comm = MPI_COMM_NULL; - break; - - case 4: - /* Split comm world in half, form intercomm, then split that intercomm */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size > 1) { - merr = MPI_Comm_split( MPI_COMM_WORLD, (rank < size/2), rank, - &mcomm ); - if (merr) MTestPrintError( merr ); - if (rank == 0) { - rleader = size/2; - } - else if (rank == size/2) { - rleader = 0; - } - else { - /* Remote leader is signficant only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank < size/2; - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, - 12345, comm ); - if (merr) MTestPrintError( merr ); + merr = MPI_Comm_free(&mcomm); + if (merr) + MTestPrintError(merr); + + /* now dup, some bugs only occur for dup's of intercomms */ + mcomm = *comm; + merr = MPI_Comm_dup(mcomm, comm); + if (merr) + MTestPrintError(merr); + interCommName = "Intercomm by splitting MPI_COMM_WORLD then dup'ing"; + } + else + *comm = MPI_COMM_NULL; + break; + + case 4: + /* Split comm world in half, form intercomm, then split that intercomm */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size > 1) { + merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm); + if (merr) + MTestPrintError(merr); + if (rank == 0) { + rleader = size / 2; + } + else if (rank == size / 2) { + rleader = 0; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; + } + *isLeftGroup = rank < size / 2; + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm); + if (merr) + MTestPrintError(merr); /* avoid leaking after assignment below */ - merr = MPI_Comm_free( &mcomm ); - if (merr) MTestPrintError( merr ); - - /* now split, some bugs only occur for splits of intercomms */ - mcomm = *comm; - rank = MPI_Comm_rank(mcomm, &rank); - if (merr) MTestPrintError( merr ); - /* this split is effectively a dup but tests the split code paths */ - merr = MPI_Comm_split(mcomm, 0, rank, comm); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD then then splitting again"; - } - else - *comm = MPI_COMM_NULL; - break; - - case 5: + merr = 
MPI_Comm_free(&mcomm); + if (merr) + MTestPrintError(merr); + + /* now split, some bugs only occur for splits of intercomms */ + mcomm = *comm; + merr = MPI_Comm_rank(mcomm, &rank); + if (merr) + MTestPrintError(merr); + /* this split is effectively a dup but tests the split code paths */ + merr = MPI_Comm_split(mcomm, 0, rank, comm); + if (merr) + MTestPrintError(merr); + interCommName = "Intercomm by splitting MPI_COMM_WORLD then then splitting again"; + } + else + *comm = MPI_COMM_NULL; + break; + + case 5: /* split comm world in half discarding rank 0 on the "left" * communicator, then form them into an intercommunicator */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size >= 4) { - int color = (rank < size/2 ? 0 : 1); + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size >= 4) { + int color = (rank < size / 2 ? 0 : 1); if (rank == 0) color = MPI_UNDEFINED; - merr = MPI_Comm_split( MPI_COMM_WORLD, color, rank, &mcomm ); - if (merr) MTestPrintError( merr ); - - if (rank == 1) { - rleader = size/2; - } - else if (rank == (size/2)) { - rleader = 1; - } - else { - /* Remote leader is signficant only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank < size/2; - if (rank != 0) { /* 0's mcomm is MPI_COMM_NULL */ - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm ); - if (merr) MTestPrintError( merr ); + merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm); + if (merr) + MTestPrintError(merr); + + if (rank == 1) { + rleader = size / 2; + } + else if (rank == (size / 2)) { + rleader = 1; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; } - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD (discarding rank 0 in the left group) then MPI_Intercomm_create'ing"; + *isLeftGroup = rank < size / 2; + if (rank != 0) { /* 0's mcomm is MPI_COMM_NULL */ + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm); + if (merr) + MTestPrintError(merr); + } + interCommName = + "Intercomm by splitting MPI_COMM_WORLD (discarding rank 0 in the left group) then MPI_Intercomm_create'ing"; } else { *comm = MPI_COMM_NULL; @@ -1355,71 +791,84 @@ int MTestGetIntercomm( MPI_Comm *comm, int *isLeftGroup, int min_size ) /* Split comm world in half then form them into an * intercommunicator. Then discard rank 0 from each group of the * intercomm via MPI_Comm_create. */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_size( MPI_COMM_WORLD, &size ); - if (merr) MTestPrintError( merr ); - if (size >= 4) { + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (merr) + MTestPrintError(merr); + if (size >= 4) { MPI_Group oldgroup, newgroup; int ranks[1]; - int color = (rank < size/2 ? 
0 : 1); - - merr = MPI_Comm_split( MPI_COMM_WORLD, color, rank, &mcomm ); - if (merr) MTestPrintError( merr ); - - if (rank == 0) { - rleader = size/2; - } - else if (rank == (size/2)) { - rleader = 0; - } - else { - /* Remote leader is signficant only for the processes - designated local leaders */ - rleader = -1; - } - *isLeftGroup = rank < size/2; - merr = MPI_Intercomm_create( mcomm, 0, MPI_COMM_WORLD, rleader, 12345, &mcomm2 ); - if (merr) MTestPrintError( merr ); + int color = (rank < size / 2 ? 0 : 1); + + merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm); + if (merr) + MTestPrintError(merr); + + if (rank == 0) { + rleader = size / 2; + } + else if (rank == (size / 2)) { + rleader = 0; + } + else { + /* Remote leader is signficant only for the processes + * designated local leaders */ + rleader = -1; + } + *isLeftGroup = rank < size / 2; + merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, &mcomm2); + if (merr) + MTestPrintError(merr); /* We have an intercomm between the two halves of comm world. Now create * a new intercomm that removes rank 0 on each side. */ merr = MPI_Comm_group(mcomm2, &oldgroup); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); ranks[0] = 0; merr = MPI_Group_excl(oldgroup, 1, ranks, &newgroup); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); merr = MPI_Comm_create(mcomm2, newgroup, comm); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); merr = MPI_Group_free(&oldgroup); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); merr = MPI_Group_free(&newgroup); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); - SMPI_VARGET_GLOBAL(interCommName) = "Intercomm by splitting MPI_COMM_WORLD then discarding 0 ranks with MPI_Comm_create"; + interCommName = + "Intercomm by splitting MPI_COMM_WORLD then discarding 0 ranks with MPI_Comm_create"; } else { *comm = MPI_COMM_NULL; } break; - default: - *comm = MPI_COMM_NULL; - SMPI_VARGET_GLOBAL(interCommIdx) = -1; - break; - } - - if (*comm != MPI_COMM_NULL) { - merr = MPI_Comm_size( *comm, &size ); - if (merr) MTestPrintError( merr ); - merr = MPI_Comm_remote_size( *comm, &remsize ); - if (merr) MTestPrintError( merr ); - if (size + remsize >= min_size) done = 1; - } - else { - SMPI_VARGET_GLOBAL(interCommName) = "MPI_COMM_NULL"; - done = 1; + default: + *comm = MPI_COMM_NULL; + interCommIdx = -1; + break; + } + + if (*comm != MPI_COMM_NULL) { + merr = MPI_Comm_size(*comm, &size); + if (merr) + MTestPrintError(merr); + merr = MPI_Comm_remote_size(*comm, &remsize); + if (merr) + MTestPrintError(merr); + if (size + remsize >= min_size) + done = 1; + } + else { + interCommName = "MPI_COMM_NULL"; + done = 1; } /* we are only done if all processes are done */ @@ -1428,52 +877,172 @@ int MTestGetIntercomm( MPI_Comm *comm, int *isLeftGroup, int min_size ) /* Advance the comm index whether we are done or not, otherwise we could * spin forever trying to allocate a too-small communicator over and * over again. 
*/ - SMPI_VARGET_GLOBAL(interCommIdx)++; + interCommIdx++; if (!done && *comm != MPI_COMM_NULL) { /* avoid leaking communicators */ merr = MPI_Comm_free(comm); - if (merr) MTestPrintError(merr); + if (merr) + MTestPrintError(merr); } /* cleanup for common temp objects */ if (mcomm != MPI_COMM_NULL) { merr = MPI_Comm_free(&mcomm); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); } if (mcomm2 != MPI_COMM_NULL) { merr = MPI_Comm_free(&mcomm2); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); } } - return SMPI_VARGET_GLOBAL(interCommIdx); + return interCommIdx; } + +int MTestTestIntercomm(MPI_Comm comm) +{ + int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j; + int errs = 0, wrank, nsize; + char commname[MPI_MAX_OBJECT_NAME + 1]; + MPI_Request *reqs; + + MPI_Comm_rank(MPI_COMM_WORLD, &wrank); + MPI_Comm_size(comm, &local_size); + MPI_Comm_remote_size(comm, &remote_size); + MPI_Comm_rank(comm, &rank); + MPI_Comm_get_name(comm, commname, &nsize); + + MTestPrintfMsg(1, "Testing communication on intercomm '%s', remote_size=%d\n", + commname, remote_size); + + reqs = (MPI_Request *) malloc(remote_size * sizeof(MPI_Request)); + if (!reqs) { + printf("[%d] Unable to allocated %d requests for testing intercomm %s\n", + wrank, remote_size, commname); + errs++; + return errs; + } + bufs = (int **) malloc(remote_size * sizeof(int *)); + if (!bufs) { + printf("[%d] Unable to allocated %d int pointers for testing intercomm %s\n", + wrank, remote_size, commname); + errs++; + return errs; + } + bufmem = (int *) malloc(remote_size * 2 * sizeof(int)); + if (!bufmem) { + printf("[%d] Unable to allocated %d int data for testing intercomm %s\n", + wrank, 2 * remote_size, commname); + errs++; + return errs; + } + + /* Each process sends a message containing its own rank and the + * rank of the destination with a nonblocking send. 
Because we're using + * nonblocking sends, we need to use different buffers for each isend */ + /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although + * it doesn't really hurt to keep separate buffers for our purposes */ + for (j = 0; j < remote_size; j++) { + bufs[j] = &bufmem[2 * j]; + bufs[j][0] = rank; + bufs[j][1] = j; + MPI_Isend(bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j]); + } + MTestPrintfMsg(2, "isends posted, about to recv\n"); + + for (j = 0; j < remote_size; j++) { + MPI_Recv(rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE); + if (rbuf[0] != j) { + printf("[%d] Expected rank %d but saw %d in %s\n", wrank, j, rbuf[0], commname); + errs++; + } + if (rbuf[1] != rank) { + printf("[%d] Expected target rank %d but saw %d from %d in %s\n", + wrank, rank, rbuf[1], j, commname); + errs++; + } + } + if (errs) + fflush(stdout); + + MTestPrintfMsg(2, "my recvs completed, about to waitall\n"); + MPI_Waitall(remote_size, reqs, MPI_STATUSES_IGNORE); + + free(reqs); + free(bufs); + free(bufmem); + + return errs; +} + +int MTestTestIntracomm(MPI_Comm comm) +{ + int i, errs = 0; + int size; + int in[16], out[16], sol[16]; + + MPI_Comm_size(comm, &size); + + /* Set input, output and sol-values */ + for (i = 0; i < 16; i++) { + in[i] = i; + out[i] = 0; + sol[i] = i * size; + } + MPI_Allreduce(in, out, 16, MPI_INT, MPI_SUM, comm); + + /* Test results */ + for (i = 0; i < 16; i++) { + if (sol[i] != out[i]) + errs++; + } + + return errs; +} + +int MTestTestComm(MPI_Comm comm) +{ + int is_inter; + + if (comm == MPI_COMM_NULL) + return 0; + + MPI_Comm_test_inter(comm, &is_inter); + + if (is_inter) + return MTestTestIntercomm(comm); + else + return MTestTestIntracomm(comm); +} + /* Return the name of an intercommunicator */ -const char *MTestGetIntercommName( void ) +const char *MTestGetIntercommName(void) { - return SMPI_VARGET_GLOBAL(interCommName); + return interCommName; } -/* Get a communicator of a given minimum size. Both intra and inter +/* Get a communicator of a given minimum size. Both intra and inter communicators are provided */ -int MTestGetComm( MPI_Comm *comm, int min_size ) +int MTestGetComm(MPI_Comm * comm, int min_size) { - int idx=0; - SMPI_VARINIT_STATIC_AND_SET(getinter, int, 0); - - if (!SMPI_VARGET_STATIC(getinter)) { - idx = MTestGetIntracomm( comm, min_size ); - if (idx == 0) { - SMPI_VARGET_STATIC(getinter) = 1; - } + int idx = 0; + static int getinter = 0; + + if (!getinter) { + idx = MTestGetIntracomm(comm, min_size); + if (idx == 0) { + getinter = 1; + } } - if (SMPI_VARGET_STATIC(getinter)) { - int isLeft; - idx = MTestGetIntercomm( comm, &isLeft, min_size ); - if (idx == 0) { - SMPI_VARGET_STATIC(getinter) = 0; - } + if (getinter) { + int isLeft; + idx = MTestGetIntercomm(comm, &isLeft, min_size); + if (idx == 0) { + getinter = 0; + } } return idx; @@ -1481,235 +1050,312 @@ int MTestGetComm( MPI_Comm *comm, int min_size ) /* Free a communicator. 
It may be called with a predefined communicator or MPI_COMM_NULL */ -void MTestFreeComm( MPI_Comm *comm ) +void MTestFreeComm(MPI_Comm * comm) { int merr; - if (*comm != MPI_COMM_WORLD && - *comm != MPI_COMM_SELF && - *comm != MPI_COMM_NULL) { - merr = MPI_Comm_free( comm ); - if (merr) MTestPrintError( merr ); + if (*comm != MPI_COMM_WORLD && *comm != MPI_COMM_SELF && *comm != MPI_COMM_NULL) { + merr = MPI_Comm_free(comm); + if (merr) + MTestPrintError(merr); } } /* ------------------------------------------------------------------------ */ -void MTestPrintError( int errcode ) +void MTestPrintError(int errcode) { int errclass, slen; char string[MPI_MAX_ERROR_STRING]; - - MPI_Error_class( errcode, &errclass ); - MPI_Error_string( errcode, string, &slen ); - printf( "Error class %d (%s)\n", errclass, string ); - fflush( stdout ); + + MPI_Error_class(errcode, &errclass); + MPI_Error_string(errcode, string, &slen); + printf("Error class %d (%s)\n", errclass, string); + fflush(stdout); } -void MTestPrintErrorMsg( const char msg[], int errcode ) + +void MTestPrintErrorMsg(const char msg[], int errcode) { int errclass, slen; char string[MPI_MAX_ERROR_STRING]; - - MPI_Error_class( errcode, &errclass ); - MPI_Error_string( errcode, string, &slen ); - printf( "%s: Error class %d (%s)\n", msg, errclass, string ); - fflush( stdout ); + + MPI_Error_class(errcode, &errclass); + MPI_Error_string(errcode, string, &slen); + printf("%s: Error class %d (%s)\n", msg, errclass, string); + fflush(stdout); } + /* ------------------------------------------------------------------------ */ -/* +/* If verbose output is selected and the level is at least that of the - value of the verbose flag, then perform printf( format, ... ); + value of the verbose flag, then perform printf(format, ...); */ -void MTestPrintfMsg( int level, const char format[], ... ) +void MTestPrintfMsg(int level, const char format[], ...) { va_list list; - if (SMPI_VARGET_GLOBAL(verbose) && level >= SMPI_VARGET_GLOBAL(verbose)) { - va_start(list,format); - vprintf( format, list ); - va_end(list); - fflush(stdout); + if (SMPI_VARGET_GLOBAL(verbose) && level <= SMPI_VARGET_GLOBAL(verbose)) { + va_start(list, format); + vprintf(format, list); + va_end(list); + fflush(stdout); } } + /* Fatal error. Report and exit */ -void MTestError( const char *msg ) +void MTestError(const char *msg) { - fprintf( stderr, "%s\n", msg ); - fflush( stderr ); - MPI_Abort( MPI_COMM_WORLD, 1 ); - exit(1); + fprintf(stderr, "%s\n", msg); + fflush(stderr); + MPI_Abort(MPI_COMM_WORLD, 1); } + /* ------------------------------------------------------------------------ */ -static void MTestResourceSummary( FILE *fp ) +static void MTestResourceSummary(FILE * fp) { #ifdef HAVE_GETRUSAGE struct rusage ru; - SMPI_VARINIT_STATIC_AND_SET(pfThreshold, int, -2); + static int pfThreshold = -2; int doOutput = 1; - if (getrusage( RUSAGE_SELF, &ru ) == 0) { - /* There is an option to generate output only when a resource - exceeds a threshold. To date, only page faults supported. 
*/ - if (SMPI_VARGET_STATIC(pfThreshold) == -2) { - char *p = getenv("MPITEST_RUSAGE_PF"); - SMPI_VARGET_STATIC(pfThreshold) = -1; - if (p) { - SMPI_VARGET_STATIC(pfThreshold) = strtol( p, 0, 0 ); - } - } - if (SMPI_VARGET_STATIC(pfThreshold) > 0) { - doOutput = ru.ru_minflt > SMPI_VARGET_STATIC(pfThreshold); - } - if (doOutput) { - /* Cast values to long in case some system has defined them - as another integer type */ - fprintf( fp, "RUSAGE: max resident set = %ldKB\n", - (long)ru.ru_maxrss ); - fprintf( fp, "RUSAGE: page faults = %ld : %ld\n", - (long)ru.ru_minflt, (long)ru.ru_majflt ); - /* Not every Unix provides useful information for the xxrss fields */ - fprintf( fp, "RUSAGE: memory in text/data/stack = %ld : %ld : %ld\n", - (long)ru.ru_ixrss, (long)ru.ru_idrss, (long)ru.ru_isrss ); - fprintf( fp, "RUSAGE: I/O in and out = %ld : %ld\n", - (long)ru.ru_inblock, (long)ru.ru_oublock ); - fprintf( fp, "RUSAGE: context switch = %ld : %ld\n", - (long)ru.ru_nvcsw, (long)ru.ru_nivcsw ); - } + if (getrusage(RUSAGE_SELF, &ru) == 0) { + /* There is an option to generate output only when a resource + * exceeds a threshold. To date, only page faults supported. */ + if (pfThreshold == -2) { + char *p = getenv("MPITEST_RUSAGE_PF"); + pfThreshold = -1; + if (p) { + pfThreshold = strtol(p, 0, 0); + } + } + if (pfThreshold > 0) { + doOutput = ru.ru_minflt > pfThreshold; + } + if (doOutput) { + /* Cast values to long in case some system has defined them + * as another integer type */ + fprintf(fp, "RUSAGE: max resident set = %ldKB\n", (long) ru.ru_maxrss); + fprintf(fp, "RUSAGE: page faults = %ld : %ld\n", + (long) ru.ru_minflt, (long) ru.ru_majflt); + /* Not every Unix provides useful information for the xxrss fields */ + fprintf(fp, "RUSAGE: memory in text/data/stack = %ld : %ld : %ld\n", + (long) ru.ru_ixrss, (long) ru.ru_idrss, (long) ru.ru_isrss); + fprintf(fp, "RUSAGE: I/O in and out = %ld : %ld\n", + (long) ru.ru_inblock, (long) ru.ru_oublock); + fprintf(fp, "RUSAGE: context switch = %ld : %ld\n", + (long) ru.ru_nvcsw, (long) ru.ru_nivcsw); + } } else { - fprintf( fp, "RUSAGE: return error %d\n", errno ); + fprintf(fp, "RUSAGE: return error %d\n", errno); } #endif } + /* ------------------------------------------------------------------------ */ #ifdef HAVE_MPI_WIN_CREATE /* * Create MPI Windows */ -SMPI_VARINIT_GLOBAL_AND_SET(win_index, int, 0); -SMPI_VARINIT_GLOBAL(winName, const char *); +static int win_index = 0; +static const char *winName; /* Use an attribute to remember the type of memory allocation (static, malloc, or MPI_Alloc_mem) */ -SMPI_VARINIT_GLOBAL_AND_SET(mem_keyval, int, MPI_KEYVAL_INVALID); -int MTestGetWin( MPI_Win *win, int mustBePassive ) +static int mem_keyval = MPI_KEYVAL_INVALID; +int MTestGetWin(MPI_Win * win, int mustBePassive) { - typedef char actbuf_type[1024]; - SMPI_VARINIT_STATIC(actbuf, actbuf_type); - SMPI_VARINIT_STATIC(pasbuf, char *); - char *buf; - int n, rank, merr; - MPI_Info info; - - if (SMPI_VARGET_GLOBAL(mem_keyval) == MPI_KEYVAL_INVALID) { - /* Create the keyval */ - merr = MPI_Win_create_keyval( MPI_WIN_NULL_COPY_FN, - MPI_WIN_NULL_DELETE_FN, - &SMPI_VARGET_GLOBAL(mem_keyval), 0 ); - if (merr) MTestPrintError( merr ); + static char actbuf[1024]; + static char *pasbuf; + char *buf; + int n, rank, merr; + MPI_Info info; + + if (mem_keyval == MPI_KEYVAL_INVALID) { + /* Create the keyval */ + merr = MPI_Win_create_keyval(MPI_WIN_NULL_COPY_FN, MPI_WIN_NULL_DELETE_FN, &mem_keyval, 0); + if (merr) + MTestPrintError(merr); } - switch 
(SMPI_VARGET_GLOBAL(win_index)) { + switch (win_index) { case 0: - /* Active target window */ - merr = MPI_Win_create( SMPI_VARGET_STATIC(actbuf), 1024, 1, MPI_INFO_NULL, MPI_COMM_WORLD, - win ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(winName) = "active-window"; - merr = MPI_Win_set_attr( *win, SMPI_VARGET_GLOBAL(mem_keyval), (void *)0 ); - if (merr) MTestPrintError( merr ); - break; + /* Active target window */ + merr = MPI_Win_create(actbuf, 1024, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win); + if (merr) + MTestPrintError(merr); + winName = "active-window"; + merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 0); + if (merr) + MTestPrintError(merr); + break; case 1: - /* Passive target window */ - merr = MPI_Alloc_mem( 1024, MPI_INFO_NULL, &SMPI_VARGET_STATIC(pasbuf) ); - if (merr) MTestPrintError( merr ); - merr = MPI_Win_create( SMPI_VARGET_STATIC(pasbuf), 1024, 1, MPI_INFO_NULL, MPI_COMM_WORLD, - win ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(winName) = "passive-window"; - merr = MPI_Win_set_attr( *win, SMPI_VARGET_GLOBAL(mem_keyval), (void *)2 ); - if (merr) MTestPrintError( merr ); - break; + /* Passive target window */ + merr = MPI_Alloc_mem(1024, MPI_INFO_NULL, &pasbuf); + if (merr) + MTestPrintError(merr); + merr = MPI_Win_create(pasbuf, 1024, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win); + if (merr) + MTestPrintError(merr); + winName = "passive-window"; + merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 2); + if (merr) + MTestPrintError(merr); + break; case 2: - /* Active target; all windows different sizes */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - n = rank * 64; - if (n) - buf = (char *)malloc( n ); - else - buf = 0; - merr = MPI_Win_create( buf, n, 1, MPI_INFO_NULL, MPI_COMM_WORLD, - win ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(winName) = "active-all-different-win"; - merr = MPI_Win_set_attr( *win, SMPI_VARGET_GLOBAL(mem_keyval), (void *)1 ); - if (merr) MTestPrintError( merr ); - break; + /* Active target; all windows different sizes */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + n = rank * 64; + if (n) + buf = (char *) malloc(n); + else + buf = 0; + merr = MPI_Win_create(buf, n, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win); + if (merr) + MTestPrintError(merr); + winName = "active-all-different-win"; + merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 1); + if (merr) + MTestPrintError(merr); + break; case 3: - /* Active target, no locks set */ - merr = MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if (merr) MTestPrintError( merr ); - n = rank * 64; - if (n) - buf = (char *)malloc( n ); - else - buf = 0; - merr = MPI_Info_create( &info ); - if (merr) MTestPrintError( merr ); - merr = MPI_Info_set( info, (char*)"nolocks", (char*)"true" ); - if (merr) MTestPrintError( merr ); - merr = MPI_Win_create( buf, n, 1, info, MPI_COMM_WORLD, win ); - if (merr) MTestPrintError( merr ); - merr = MPI_Info_free( &info ); - if (merr) MTestPrintError( merr ); - SMPI_VARGET_GLOBAL(winName) = "active-nolocks-all-different-win"; - merr = MPI_Win_set_attr( *win, SMPI_VARGET_GLOBAL(mem_keyval), (void *)1 ); - if (merr) MTestPrintError( merr ); - break; + /* Active target, no locks set */ + merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (merr) + MTestPrintError(merr); + n = rank * 64; + if (n) + buf = (char *) malloc(n); + else + buf = 0; + merr = MPI_Info_create(&info); + if (merr) + MTestPrintError(merr); + merr = MPI_Info_set(info, (char *) "nolocks", (char *) 
"true"); + if (merr) + MTestPrintError(merr); + merr = MPI_Win_create(buf, n, 1, info, MPI_COMM_WORLD, win); + if (merr) + MTestPrintError(merr); + merr = MPI_Info_free(&info); + if (merr) + MTestPrintError(merr); + winName = "active-nolocks-all-different-win"; + merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 1); + if (merr) + MTestPrintError(merr); + break; default: - SMPI_VARGET_GLOBAL(win_index) = -1; + win_index = -1; } - SMPI_VARGET_GLOBAL(win_index)++; - return SMPI_VARGET_GLOBAL(win_index); + win_index++; + return win_index; } + /* Return a pointer to the name associated with a window object */ -const char *MTestGetWinName( void ) +const char *MTestGetWinName(void) { - return SMPI_VARGET_GLOBAL(winName); + return winName; } + /* Free the storage associated with a window object */ -void MTestFreeWin( MPI_Win *win ) +void MTestFreeWin(MPI_Win * win) { void *addr; - int flag, merr; + int flag, merr; - merr = MPI_Win_get_attr( *win, MPI_WIN_BASE, &addr, &flag ); - if (merr) MTestPrintError( merr ); + merr = MPI_Win_get_attr(*win, MPI_WIN_BASE, &addr, &flag); + if (merr) + MTestPrintError(merr); if (!flag) { - MTestError( "Could not get WIN_BASE from window" ); + MTestError("Could not get WIN_BASE from window"); } if (addr) { - void *val; - merr = MPI_Win_get_attr( *win, SMPI_VARGET_GLOBAL(mem_keyval), &val, &flag ); - if (merr) MTestPrintError( merr ); - if (flag) { - if (val == (void *)1) { - free( addr ); - } - else if (val == (void *)2) { - merr = MPI_Free_mem( addr ); - if (merr) MTestPrintError( merr ); - } - /* if val == (void *)0, then static data that must not be freed */ - } + void *val; + merr = MPI_Win_get_attr(*win, mem_keyval, &val, &flag); + if (merr) + MTestPrintError(merr); + if (flag) { + if (val == (void *) 1) { + free(addr); + } + else if (val == (void *) 2) { + merr = MPI_Free_mem(addr); + if (merr) + MTestPrintError(merr); + } + /* if val == (void *)0, then static data that must not be freed */ + } } merr = MPI_Win_free(win); - if (merr) MTestPrintError( merr ); + if (merr) + MTestPrintError(merr); } -static void MTestRMACleanup( void ) + +static void MTestRMACleanup(void) { - if (SMPI_VARGET_GLOBAL(mem_keyval) != MPI_KEYVAL_INVALID) { - MPI_Win_free_keyval( &SMPI_VARGET_GLOBAL(mem_keyval) ); + if (mem_keyval != MPI_KEYVAL_INVALID) { + MPI_Win_free_keyval(&mem_keyval); } } -#else -static void MTestRMACleanup( void ) {} +#else +static void MTestRMACleanup(void) +{ +} #endif + +/* ------------------------------------------------------------------------ */ +/* This function determines if it is possible to spawn addition MPI + * processes using MPI_COMM_SPAWN and MPI_COMM_SPAWN_MULTIPLE. 
+ * + * It sets the can_spawn value to one of the following: + * 1 = yes, additional processes can be spawned + * 0 = no, MPI_UNIVERSE_SIZE <= the size of MPI_COMM_WORLD + * -1 = it is unknown whether or not processes can be spawned + * due to errors in the necessary query functions + * + */ +int MTestSpawnPossible(int *can_spawn) +{ + int errs = 0; + + void *v = NULL; + int flag = -1; + int vval = -1; + int rc; + + rc = MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag); + if (rc != MPI_SUCCESS) { + /* MPI_UNIVERSE_SIZE keyval missing from MPI_COMM_WORLD attributes */ + *can_spawn = -1; + errs++; + } + else { + /* MPI_UNIVERSE_SIZE need not be set */ + if (flag) { + + int size = -1; + rc = MPI_Comm_size(MPI_COMM_WORLD, &size); + if (rc != MPI_SUCCESS) { + /* MPI_Comm_size failed for MPI_COMM_WORLD */ + *can_spawn = -1; + errs++; + } + + vval = *(int *) v; + if (vval <= size) { + /* no additional processes can be spawned */ + *can_spawn = 0; + } + else { + *can_spawn = 1; + } + } + else { + /* No attribute associated with key MPI_UNIVERSE_SIZE of MPI_COMM_WORLD */ + *can_spawn = -1; + } + } + return errs; +} + +/* ------------------------------------------------------------------------ */
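For context, the helpers touched by this patch (MTestGetComm, MTestTestComm, MTestFreeComm, and the intercomm/window variants) are normally driven from a test program's main() in a get/test/free loop. The short C sketch below illustrates that pattern only; it is not part of the patch above, and it assumes the usual mpitest.h declarations plus an MTest_Finalize counterpart in the harness, with a minimum communicator size of 2 chosen purely for illustration.

/* Illustrative usage sketch -- not part of the patch above.
 * Assumes the mpitest.h header and MTest_Finalize from the rest of the
 * MPICH3 test harness; shows the typical get/test/free loop over the
 * communicators produced by MTestGetComm. */
#include "mpi.h"
#include "mpitest.h"

int main(int argc, char *argv[])
{
    MPI_Comm comm;
    int errs = 0;

    MTest_Init(&argc, &argv);

    /* MTestGetComm returns 0 once both the intracommunicator and
     * intercommunicator iterators have wrapped around.  MTestTestComm
     * returns 0 for MPI_COMM_NULL, and MTestFreeComm skips predefined
     * communicators, so no special-casing is needed in the loop body. */
    while (MTestGetComm(&comm, 2)) {
        errs += MTestTestComm(comm);
        MTestFreeComm(&comm);
    }

    /* Finalize sequence assumed to follow the common pattern in this
     * test suite. */
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}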