1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
4 * (C) 2001 by Argonne National Laboratory.
5 * See COPYRIGHT in top-level directory.
8 #include "mpitestconf.h"
10 #if defined(HAVE_STDIO_H) || defined(STDC_HEADERS)
13 #if defined(HAVE_STDLIB_H) || defined(STDC_HEADERS)
16 #if defined(HAVE_STRING_H) || defined(STDC_HEADERS)
22 /* The following two includes permit the collection of resource usage
25 #ifdef HAVE_SYS_TIME_H
28 #ifdef HAVE_SYS_RESOURCE_H
29 #include <sys/resource.h>
35 * Utility routines for writing MPI tests.
37 * We check the return codes on all MPI routines (other than INIT)
38 * to allow the program that uses these routines to select MPI_ERRORS_RETURN
39 * as the error handler. We do *not* set MPI_ERRORS_RETURN because
40 * the code that makes use of these routines may not check return
45 static void MTestRMACleanup(void);
46 static void MTestResourceSummary(FILE *);
48 /* Here is where we could put the includes and definitions to enable
51 static int dbgflag = 0; /* Flag used for debugging */
52 static int wrank = -1; /* World rank */
53 static int verbose = 0; /* Message level (0 is none) */
54 static int returnWithVal = 0; /* Allow programs to return with a non-zero
55 * if there was an error (may cause problems
56 * with some runtime systems) */
57 static int usageOutput = 0; /* */
59 /* Provide backward portability to MPI 1 */
64 #define MPI_THREAD_SINGLE 0
68 * Initialize and Finalize MTest
72 Initialize MTest, initializing MPI if necessary.
74 Environment Variables:
75 + MPITEST_DEBUG - If set (to any value), turns on debugging output
76 . MPITEST_THREADLEVEL_DEFAULT - If set, use as the default "provided"
77 level of thread support. Applies to
78 MTest_Init but not MTest_Init_thread.
79 - MPITEST_VERBOSE - If set to a numeric value, turns on that level of
80 verbose output. This is used by the routine 'MTestPrintfMsg'
/* NOTE(review): interior lines are elided in this listing (gaps in the
 * left-hand numbering); comments are limited to what the visible code shows. */
/* Initialize MTest and, if not already initialized, MPI itself at the
 * requested thread level. Also reads the MPITEST_DEBUG, MPITEST_VERBOSE,
 * MPITEST_RETURN_WITH_CODE and MPITEST_RUSAGE environment variables to
 * configure the harness (see the header comment earlier in this file). */
83 void MTest_Init_thread(int *argc, char ***argv, int required, int *provided)
88     MPI_Initialized(&flag);
90     /* Permit an MPI that claims only MPI 1 but includes the
91      * MPI_Init_thread routine (e.g., IBM MPI) */
92 #if MPI_VERSION >= 2 || defined(HAVE_MPI_INIT_THREAD)
93     MPI_Init_thread(argc, argv, required, provided);
99     /* Check for debugging control */
100     if (getenv("MPITEST_DEBUG")) {
/* wrank (file-scope static) caches this process's world rank for debug output */
102         MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
105     /* Check for verbose control */
106     envval = getenv("MPITEST_VERBOSE");
/* base 0: accepts decimal, octal (0...) and hex (0x...) verbosity values */
109         long val = strtol(envval, &s, 0);
111             /* This is the error case for strtol */
112             fprintf(stderr, "Warning: %s not valid for MPITEST_VERBOSE\n", envval);
120             fprintf(stderr, "Warning: %s not valid for MPITEST_VERBOSE\n", envval);
125     /* Check for option to return success/failure in the return value of main */
126     envval = getenv("MPITEST_RETURN_WITH_CODE");
128         if (strcmp(envval, "yes") == 0 ||
129             strcmp(envval, "YES") == 0 ||
130             strcmp(envval, "true") == 0 || strcmp(envval, "TRUE") == 0) {
133         else if (strcmp(envval, "no") == 0 ||
134                  strcmp(envval, "NO") == 0 ||
135                  strcmp(envval, "false") == 0 || strcmp(envval, "FALSE") == 0) {
139             fprintf(stderr, "Warning: %s not valid for MPITEST_RETURN_WITH_CODE\n", envval);
144     /* Print rusage data if set */
145     if (getenv("MPITEST_RUSAGE")) {
151 * Initialize the tests, using an MPI-1 style init. Supports
152 * MTEST_THREADLEVEL_DEFAULT to test with user-specified thread level
/* NOTE(review): interior lines are elided in this listing. */
/* MPI-1 style initializer: selects a default thread level from the
 * MTEST_THREADLEVEL_DEFAULT (or MPITEST_THREADLEVEL_DEFAULT) environment
 * variable, then delegates to MTest_Init_thread. */
154 void MTest_Init(int *argc, char ***argv)
157 #if MPI_VERSION >= 2 || defined(HAVE_MPI_INIT_THREAD)
161     threadLevel = MPI_THREAD_SINGLE;
162     str = getenv("MTEST_THREADLEVEL_DEFAULT");
/* Fall back to the MPITEST_-prefixed spelling of the variable */
164         str = getenv("MPITEST_THREADLEVEL_DEFAULT");
166         if (strcmp(str, "MULTIPLE") == 0 || strcmp(str, "multiple") == 0) {
167             threadLevel = MPI_THREAD_MULTIPLE;
169         else if (strcmp(str, "SERIALIZED") == 0 || strcmp(str, "serialized") == 0) {
170             threadLevel = MPI_THREAD_SERIALIZED;
172         else if (strcmp(str, "FUNNELED") == 0 || strcmp(str, "funneled") == 0) {
173             threadLevel = MPI_THREAD_FUNNELED;
175         else if (strcmp(str, "SINGLE") == 0 || strcmp(str, "single") == 0) {
176             threadLevel = MPI_THREAD_SINGLE;
179             fprintf(stderr, "Unrecognized thread level %s\n", str);
180             /* Use exit since MPI_Init/Init_thread has not been called. */
184     MTest_Init_thread(argc, argv, threadLevel, &provided);
186     /* If the MPI_VERSION is 1, there is no MPI_THREAD_xxx defined */
187     MTest_Init_thread(argc, argv, 0, &provided);
192 Finalize MTest. errs is the number of errors on the calling process;
193 this routine will write the total number of errors over all of MPI_COMM_WORLD
194 to the process with rank zero, or " No Errors".
195 It does *not* finalize MPI.
/* NOTE(review): interior lines are elided in this listing. */
/* Sum the per-process error counts onto rank 0 of MPI_COMM_WORLD and print
 * either " Found N errors" or " No Errors". Does NOT call MPI_Finalize. */
197 void MTest_Finalize(int errs)
199     int rank, toterrs, merr;
201     merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
203         MTestPrintError(merr);
205     merr = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
207         MTestPrintError(merr);
210         printf(" Found %d errors\n", toterrs);
213         printf(" No Errors\n");
/* Emit resource-usage summary when enabled (see MPITEST_RUSAGE) */
219     MTestResourceSummary(stdout);
222     /* Clean up any persistent objects that we allocated */
226 /* ------------------------------------------------------------------------ */
227 /* This routine may be used instead of "return 0;" at the end of main;
228 it allows the program to use the return value to signal success or failure.
/* Map an error count to a process exit code: 1 if any errors, else 0.
 * NOTE(review): surrounding lines are elided in this listing; presumably the
 * returnWithVal flag gates whether callers actually use this — verify against
 * the full file. */
230 int MTestReturnValue(int errors)
233     return errors ? 1 : 0;
237 /* ------------------------------------------------------------------------ */
240 * Miscellaneous utilities, particularly to eliminate OS dependencies
242 * MTestSleep(seconds)
/* Portable sleep-for-seconds helper; one variant per platform.
 * NOTE(review): both bodies are elided in this listing — cannot document the
 * exact OS call used without the full file. */
244 #ifdef HAVE_WINDOWS_H
246 void MTestSleep(int sec)
252 void MTestSleep(int sec)
258 /* Other mtest subfiles read debug setting using this function. */
/* NOTE(review): body elided in this listing; presumably copies the file-scope
 * dbgflag and verbose statics into the out-parameters — verify. */
259 void MTestGetDbgInfo(int *_dbgflag, int *_verbose)
265 /* ----------------------------------------------------------------------- */
268 * Create communicators. Use separate routines for inter and intra
269 * communicators (there is a routine to give both)
270 * Note that the routines may return MPI_COMM_NULL, so code should test for
271 * that return value as well.
274 static int interCommIdx = 0;
275 static int intraCommIdx = 0;
276 static const char *intraCommName = 0;
277 static const char *interCommName = 0;
280 * Get an intracommunicator with at least min_size members. If "allowSmaller"
281 * is true, allow the communicator to be smaller than MPI_COMM_WORLD and
282 * for this routine to return MPI_COMM_NULL for some values. Returns 0 if
283 * no more communicators are available.
/* NOTE(review): interior lines are elided in this listing (gaps in the
 * left-hand numbering); several switch cases and control-flow lines are
 * missing, so comments below are limited to the visible code. */
/* Iterate over a sequence of intracommunicators (indexed by the file-scope
 * intraCommIdx): WORLD, a dup, rank-reversed splits, SELF, merge-based dups,
 * group-based subsets, and smaller splits. Stores the communicator in *comm
 * and its description in intraCommName. May return MPI_COMM_NULL. */
285 int MTestGetIntracommGeneral(MPI_Comm * comm, int min_size, int allowSmaller)
287     int size, rank, merr;
291     /* The while loop allows us to skip communicators that are too small.
292      * MPI_COMM_NULL is always considered large enough */
296         switch (intraCommIdx) {
298             *comm = MPI_COMM_WORLD;
300             intraCommName = "MPI_COMM_WORLD";
304             merr = MPI_Comm_dup(MPI_COMM_WORLD, comm);
306                 MTestPrintError(merr);
307             intraCommName = "Dup of MPI_COMM_WORLD";
311             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
313                 MTestPrintError(merr);
314             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
316                 MTestPrintError(merr);
/* key = size - rank gives reversed rank order in the new communicator */
317             merr = MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, comm);
319                 MTestPrintError(merr);
320             intraCommName = "Rank reverse of MPI_COMM_WORLD";
323             /* subset of world, with reversed ranks */
324             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
326                 MTestPrintError(merr);
327             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
329                 MTestPrintError(merr);
/* MPI_UNDEFINED color excludes the upper half from the new communicator */
330             merr = MPI_Comm_split(MPI_COMM_WORLD, ((rank < size / 2) ? 1 : MPI_UNDEFINED),
333                 MTestPrintError(merr);
334             intraCommName = "Rank reverse of half of MPI_COMM_WORLD";
337             *comm = MPI_COMM_SELF;
339             intraCommName = "MPI_COMM_SELF";
343 #if MTEST_HAVE_MIN_MPI_VERSION(3,0)
344             /* Dup of the world using MPI_Intercomm_merge */
346                 MPI_Comm local_comm, inter_comm;
347                 MPI_Comm_size(MPI_COMM_WORLD, &size);
348                 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
350                 merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);
352                     MTestPrintError(merr);
356                 else if (rank == size / 2) {
362                 isLeft = rank < size / 2;
364                     MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99,
367                     MTestPrintError(merr);
368                 merr = MPI_Intercomm_merge(inter_comm, isLeft, comm);
370                     MTestPrintError(merr);
371                 MPI_Comm_free(&inter_comm);
372                 MPI_Comm_free(&local_comm);
373                 intraCommName = "Dup of WORLD created by MPI_Intercomm_merge";
376             *comm = MPI_COMM_NULL;
382             /* Even of the world using MPI_Comm_create_group */
384                 MPI_Group world_group, even_group;
387                 MPI_Comm_size(MPI_COMM_WORLD, &size);
388                 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
389                 if (allowSmaller && (size + 1) / 2 >= min_size) {
390                     /* exclude the odd ranks */
391                     excl = malloc((size / 2) * sizeof(int));
392                     for (i = 0; i < size / 2; i++)
393                         excl[i] = (2 * i) + 1;
395                     MPI_Comm_group(MPI_COMM_WORLD, &world_group);
396                     MPI_Group_excl(world_group, size / 2, excl, &even_group);
397                     MPI_Group_free(&world_group);
401                         /* Even processes create a comm. for themselves */
402                         MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, comm);
403                         intraCommName = "Even of WORLD created by MPI_Comm_create_group";
406                         *comm = MPI_COMM_NULL;
409                     MPI_Group_free(&even_group);
412                     *comm = MPI_COMM_NULL;
415             *comm = MPI_COMM_NULL;
421             /* High half of the world using MPI_Comm_create */
423                 MPI_Group world_group, high_group;
424                 MPI_Comm_size(MPI_COMM_WORLD, &size);
425                 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* one rank range covering the top half: [size/2, size-1] */
426                 ranges[0][0] = size / 2;
427                 ranges[0][1] = size - 1;
430                 if (allowSmaller && (size + 1) / 2 >= min_size) {
431                     MPI_Comm_group(MPI_COMM_WORLD, &world_group);
432                     merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group);
434                         MTestPrintError(merr);
/* collective on WORLD; ranks outside high_group receive MPI_COMM_NULL */
435                     merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm);
437                         MTestPrintError(merr);
438                     MPI_Group_free(&world_group);
439                     MPI_Group_free(&high_group);
440                     intraCommName = "High half of WORLD created by MPI_Comm_create";
443                     *comm = MPI_COMM_NULL;
447             /* These next cases are communicators that include some
448              * but not all of the processes */
455             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
457                 MTestPrintError(merr);
/* shrink by one rank per successive case index */
458             newsize = size - (intraCommIdx - 7);
460             if (allowSmaller && newsize >= min_size) {
461                 merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
463                     MTestPrintError(merr);
464                 merr = MPI_Comm_split(MPI_COMM_WORLD, rank < newsize, rank, comm);
466                     MTestPrintError(merr);
467                 if (rank >= newsize) {
/* excluded ranks free their (unused) half of the split and report NULL */
468                     merr = MPI_Comm_free(comm);
470                         MTestPrintError(merr);
471                     *comm = MPI_COMM_NULL;
474                     intraCommName = "Split of WORLD";
478                 /* Act like default */
479                 *comm = MPI_COMM_NULL;
485             /* Other ideas: dup of self, cart comm, graph comm */
487             *comm = MPI_COMM_NULL;
492         if (*comm != MPI_COMM_NULL) {
493             merr = MPI_Comm_size(*comm, &size);
495                 MTestPrintError(merr);
496             if (size >= min_size)
500             intraCommName = "MPI_COMM_NULL";
505         /* we are only done if all processes are done */
506         MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
508         /* Advance the comm index whether we are done or not, otherwise we could
509          * spin forever trying to allocate a too-small communicator over and
513         if (!done && !isBasic && *comm != MPI_COMM_NULL) {
514             /* avoid leaking communicators */
515             merr = MPI_Comm_free(comm);
517                 MTestPrintError(merr);
525 * Get an intracommunicator with at least min_size members.
/* Convenience wrapper: next intracommunicator of at least min_size members,
 * never smaller than MPI_COMM_WORLD (allowSmaller = 0). */
527 int MTestGetIntracomm(MPI_Comm * comm, int min_size)
529     return MTestGetIntracommGeneral(comm, min_size, 0);
532 /* Return the name of an intra communicator */
/* Returns the file-scope description set by the most recent
 * MTestGetIntracommGeneral call. */
533 const char *MTestGetIntracommName(void)
535     return intraCommName;
539 * Return an intercomm; set isLeftGroup to 1 if the calling process is
540 * a member of the "left" group.
/* NOTE(review): interior lines are elided in this listing (gaps in the
 * left-hand numbering); case labels, braces and some assignments are missing,
 * so comments below describe only the visible code. */
/* Iterate over a sequence of intercommunicators (indexed by the file-scope
 * interCommIdx), built by splitting MPI_COMM_WORLD in various ways and
 * calling MPI_Intercomm_create. Sets *isLeftGroup to 1 on members of the
 * "left" group, stores the description in interCommName, and may return
 * MPI_COMM_NULL. min_size bounds the SUM of local and remote group sizes. */
542 int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size)
544     int size, rank, remsize, merr;
546     MPI_Comm mcomm = MPI_COMM_NULL;
547     MPI_Comm mcomm2 = MPI_COMM_NULL;
550     /* The while loop allows us to skip communicators that are too small.
551      * MPI_COMM_NULL is always considered large enough. The size is
552      * the sum of the sizes of the local and remote groups */
554         *comm = MPI_COMM_NULL;
556         interCommName = "MPI_COMM_NULL";
558         switch (interCommIdx) {
560             /* Split comm world in half */
561             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
563                 MTestPrintError(merr);
564             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
566                 MTestPrintError(merr);
568                 merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
570                     MTestPrintError(merr);
574                 else if (rank == size / 2) {
578                 /* Remote leader is significant only for the processes
579                  * designated local leaders */
582                 *isLeftGroup = rank < size / 2;
583                 merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
585                     MTestPrintError(merr);
586                 interCommName = "Intercomm by splitting MPI_COMM_WORLD";
589                 *comm = MPI_COMM_NULL;
592             /* Split comm world in to 1 and the rest */
593             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
595                 MTestPrintError(merr);
596             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
598                 MTestPrintError(merr);
600                 merr = MPI_Comm_split(MPI_COMM_WORLD, rank == 0, rank, &mcomm);
602                     MTestPrintError(merr);
606                 else if (rank == 1) {
610                 /* Remote leader is significant only for the processes
611                  * designated local leaders */
614                 *isLeftGroup = rank == 0;
/* distinct tag (12346) keeps this create from matching other cases' traffic */
615                 merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12346, comm);
617                     MTestPrintError(merr);
618                 interCommName = "Intercomm by splitting MPI_COMM_WORLD into 1, rest";
621                 *comm = MPI_COMM_NULL;
625             /* Split comm world in to 2 and the rest */
626             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
628                 MTestPrintError(merr);
629             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
631                 MTestPrintError(merr);
633                 merr = MPI_Comm_split(MPI_COMM_WORLD, rank < 2, rank, &mcomm);
635                     MTestPrintError(merr);
639                 else if (rank == 2) {
643                 /* Remote leader is significant only for the processes
644                  * designated local leaders */
647                 *isLeftGroup = rank < 2;
648                 merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12347, comm);
650                     MTestPrintError(merr);
651                 interCommName = "Intercomm by splitting MPI_COMM_WORLD into 2, rest";
654                 *comm = MPI_COMM_NULL;
658             /* Split comm world in half, then dup */
659             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
661                 MTestPrintError(merr);
662             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
664                 MTestPrintError(merr);
666                 merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
668                     MTestPrintError(merr);
672                 else if (rank == size / 2) {
676                 /* Remote leader is significant only for the processes
677                  * designated local leaders */
680                 *isLeftGroup = rank < size / 2;
681                 merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
683                     MTestPrintError(merr);
684                 /* avoid leaking after assignment below */
685                 merr = MPI_Comm_free(&mcomm);
687                     MTestPrintError(merr);
689                 /* now dup, some bugs only occur for dup's of intercomms */
/* NOTE(review): as listed, line 691 dups mcomm after line 685 freed it; the
 * elided lines (686, 688-690) presumably reassign mcomm (e.g. from *comm)
 * before the dup — verify against the full file. */
691                 merr = MPI_Comm_dup(mcomm, comm);
693                     MTestPrintError(merr);
694                 interCommName = "Intercomm by splitting MPI_COMM_WORLD then dup'ing";
697                 *comm = MPI_COMM_NULL;
701             /* Split comm world in half, form intercomm, then split that intercomm */
702             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
704                 MTestPrintError(merr);
705             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
707                 MTestPrintError(merr);
709                 merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &mcomm);
711                     MTestPrintError(merr);
715                 else if (rank == size / 2) {
719                 /* Remote leader is significant only for the processes
720                  * designated local leaders */
723                 *isLeftGroup = rank < size / 2;
724                 merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
726                     MTestPrintError(merr);
727                 /* avoid leaking after assignment below */
728                 merr = MPI_Comm_free(&mcomm);
730                     MTestPrintError(merr);
732                 /* now split, some bugs only occur for splits of intercomms */
/* NOTE(review): same free-then-reuse pattern as the dup case above; elided
 * lines presumably reassign mcomm — verify against the full file. */
734                 merr = MPI_Comm_rank(mcomm, &rank);
736                     MTestPrintError(merr);
737                 /* this split is effectively a dup but tests the split code paths */
738                 merr = MPI_Comm_split(mcomm, 0, rank, comm);
740                     MTestPrintError(merr);
741                 interCommName = "Intercomm by splitting MPI_COMM_WORLD then then splitting again";
744                 *comm = MPI_COMM_NULL;
748             /* split comm world in half discarding rank 0 on the "left"
749              * communicator, then form them into an intercommunicator */
750             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
752                 MTestPrintError(merr);
753             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
755                 MTestPrintError(merr);
757                 int color = (rank < size / 2 ? 0 : 1);
/* rank 0 is excluded from the split entirely (its mcomm stays NULL) */
759                     color = MPI_UNDEFINED;
761                 merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm);
763                     MTestPrintError(merr);
768                 else if (rank == (size / 2)) {
772                 /* Remote leader is significant only for the processes
773                  * designated local leaders */
776                 *isLeftGroup = rank < size / 2;
777                 if (rank != 0) {        /* 0's mcomm is MPI_COMM_NULL */
778                     merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, comm);
780                         MTestPrintError(merr);
783                     "Intercomm by splitting MPI_COMM_WORLD (discarding rank 0 in the left group) then MPI_Intercomm_create'ing";
786                 *comm = MPI_COMM_NULL;
791             /* Split comm world in half then form them into an
792              * intercommunicator. Then discard rank 0 from each group of the
793              * intercomm via MPI_Comm_create. */
794             merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
796                 MTestPrintError(merr);
797             merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
799                 MTestPrintError(merr);
801                 MPI_Group oldgroup, newgroup;
803                 int color = (rank < size / 2 ? 0 : 1);
805                 merr = MPI_Comm_split(MPI_COMM_WORLD, color, rank, &mcomm);
807                     MTestPrintError(merr);
812                 else if (rank == (size / 2)) {
816                 /* Remote leader is significant only for the processes
817                  * designated local leaders */
820                 *isLeftGroup = rank < size / 2;
/* intermediate intercomm goes in mcomm2 so it can be pruned below */
821                 merr = MPI_Intercomm_create(mcomm, 0, MPI_COMM_WORLD, rleader, 12345, &mcomm2);
823                     MTestPrintError(merr);
825                 /* We have an intercomm between the two halves of comm world. Now create
826                  * a new intercomm that removes rank 0 on each side. */
827                 merr = MPI_Comm_group(mcomm2, &oldgroup);
829                     MTestPrintError(merr);
831                 merr = MPI_Group_excl(oldgroup, 1, ranks, &newgroup);
833                     MTestPrintError(merr);
834                 merr = MPI_Comm_create(mcomm2, newgroup, comm);
836                     MTestPrintError(merr);
838                 merr = MPI_Group_free(&oldgroup);
840                     MTestPrintError(merr);
841                 merr = MPI_Group_free(&newgroup);
843                     MTestPrintError(merr);
846                     "Intercomm by splitting MPI_COMM_WORLD then discarding 0 ranks with MPI_Comm_create";
849                     *comm = MPI_COMM_NULL;
854             *comm = MPI_COMM_NULL;
859         if (*comm != MPI_COMM_NULL) {
860             merr = MPI_Comm_size(*comm, &size);
862                 MTestPrintError(merr);
863             merr = MPI_Comm_remote_size(*comm, &remsize);
865                 MTestPrintError(merr);
/* the min_size criterion counts both sides of the intercomm */
866             if (size + remsize >= min_size)
870             interCommName = "MPI_COMM_NULL";
874         /* we are only done if all processes are done */
875         MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
877         /* Advance the comm index whether we are done or not, otherwise we could
878          * spin forever trying to allocate a too-small communicator over and
882         if (!done && *comm != MPI_COMM_NULL) {
883             /* avoid leaking communicators */
884             merr = MPI_Comm_free(comm);
886                 MTestPrintError(merr);
889     /* cleanup for common temp objects */
890     if (mcomm != MPI_COMM_NULL) {
891         merr = MPI_Comm_free(&mcomm);
893             MTestPrintError(merr);
895     if (mcomm2 != MPI_COMM_NULL) {
896         merr = MPI_Comm_free(&mcomm2);
898             MTestPrintError(merr);
/* NOTE(review): interior lines are elided in this listing. */
/* Smoke-test an intercommunicator: every process isends {its rank, dest rank}
 * to every remote process, then receives and checks the mirror-image messages.
 * Returns an error count (accumulation lines elided here). */
905 int MTestTestIntercomm(MPI_Comm comm)
907     int local_size, remote_size, rank, **bufs, *bufmem, rbuf[2], j;
908     int errs = 0, wrank, nsize;
909     char commname[MPI_MAX_OBJECT_NAME + 1];
912     MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
913     MPI_Comm_size(comm, &local_size);
914     MPI_Comm_remote_size(comm, &remote_size);
915     MPI_Comm_rank(comm, &rank);
916     MPI_Comm_get_name(comm, commname, &nsize);
918     MTestPrintfMsg(1, "Testing communication on intercomm '%s', remote_size=%d\n",
919                    commname, remote_size);
/* one request per remote peer for the nonblocking sends */
921     reqs = (MPI_Request *) malloc(remote_size * sizeof(MPI_Request));
923         printf("[%d] Unable to allocated %d requests for testing intercomm %s\n",
924                wrank, remote_size, commname);
928     bufs = (int **) malloc(remote_size * sizeof(int *));
930         printf("[%d] Unable to allocated %d int pointers for testing intercomm %s\n",
931                wrank, remote_size, commname);
/* single backing array; bufs[j] points at its two-int slice */
935     bufmem = (int *) malloc(remote_size * 2 * sizeof(int));
937         printf("[%d] Unable to allocated %d int data for testing intercomm %s\n",
938                wrank, 2 * remote_size, commname);
943     /* Each process sends a message containing its own rank and the
944      * rank of the destination with a nonblocking send. Because we're using
945      * nonblocking sends, we need to use different buffers for each isend */
946     /* NOTE: the send buffer access restriction was relaxed in MPI-2.2, although
947      * it doesn't really hurt to keep separate buffers for our purposes */
948     for (j = 0; j < remote_size; j++) {
949         bufs[j] = &bufmem[2 * j];
952         MPI_Isend(bufs[j], 2, MPI_INT, j, 0, comm, &reqs[j]);
954     MTestPrintfMsg(2, "isends posted, about to recv\n");
956     for (j = 0; j < remote_size; j++) {
957         MPI_Recv(rbuf, 2, MPI_INT, j, 0, comm, MPI_STATUS_IGNORE);
/* rbuf[0] must be the sender's rank, rbuf[1] must name this receiver */
959             printf("[%d] Expected rank %d but saw %d in %s\n", wrank, j, rbuf[0], commname);
962         if (rbuf[1] != rank) {
963             printf("[%d] Expected target rank %d but saw %d from %d in %s\n",
964                    wrank, rank, rbuf[1], j, commname);
971     MTestPrintfMsg(2, "my recvs completed, about to waitall\n");
972     MPI_Waitall(remote_size, reqs, MPI_STATUSES_IGNORE);
/* NOTE(review): interior lines (value initialization and error accumulation)
 * are elided in this listing. */
/* Smoke-test an intracommunicator with a 16-element MPI_Allreduce(SUM) and
 * compare against the locally computed solution. */
981 int MTestTestIntracomm(MPI_Comm comm)
985     int in[16], out[16], sol[16];
987     MPI_Comm_size(comm, &size);
989     /* Set input, output and sol-values */
990     for (i = 0; i < 16; i++) {
995     MPI_Allreduce(in, out, 16, MPI_INT, MPI_SUM, comm);
998     for (i = 0; i < 16; i++) {
999         if (sol[i] != out[i])
/* Dispatch a communicator smoke test: no-op for MPI_COMM_NULL, otherwise
 * route to the inter- or intra-communicator tester based on
 * MPI_Comm_test_inter. */
1006 int MTestTestComm(MPI_Comm comm)
1010     if (comm == MPI_COMM_NULL)
1013     MPI_Comm_test_inter(comm, &is_inter);
1016         return MTestTestIntercomm(comm);
1018         return MTestTestIntracomm(comm);
1021 /* Return the name of an intercommunicator */
/* Returns the file-scope description set by the most recent
 * MTestGetIntercomm call. */
1022 const char *MTestGetIntercommName(void)
1024     return interCommName;
1027 /* Get a communicator of a given minimum size. Both intra and inter
1028 communicators are provided */
/* NOTE(review): interior lines are elided in this listing. */
/* Yield intracommunicators first, then intercommunicators (the static
 * getinter flag presumably records the switch-over — verify against the
 * full file). */
1029 int MTestGetComm(MPI_Comm * comm, int min_size)
1032     static int getinter = 0;
1035         idx = MTestGetIntracomm(comm, min_size);
1042         idx = MTestGetIntercomm(comm, &isLeft, min_size);
1051 /* Free a communicator. It may be called with a predefined communicator
/* Free a communicator, safely skipping the predefined communicators
 * (MPI_COMM_WORLD, MPI_COMM_SELF) and MPI_COMM_NULL, which must not be
 * passed to MPI_Comm_free. */
1053 void MTestFreeComm(MPI_Comm * comm)
1056     if (*comm != MPI_COMM_WORLD && *comm != MPI_COMM_SELF && *comm != MPI_COMM_NULL) {
1057         merr = MPI_Comm_free(comm);
1059             MTestPrintError(merr);
1063 /* ------------------------------------------------------------------------ */
/* Decode an MPI error code into its class and message string and print it.
 * Used after every checked MPI call in this file. */
1064 void MTestPrintError(int errcode)
1067     char string[MPI_MAX_ERROR_STRING];
1069     MPI_Error_class(errcode, &errclass);
1070     MPI_Error_string(errcode, string, &slen);
1071     printf("Error class %d (%s)\n", errclass, string);
/* Like MTestPrintError, but prefixes the decoded error with a caller-supplied
 * context message. */
1075 void MTestPrintErrorMsg(const char msg[], int errcode)
1078     char string[MPI_MAX_ERROR_STRING];
1080     MPI_Error_class(errcode, &errclass);
1081     MPI_Error_string(errcode, string, &slen);
1082     printf("%s: Error class %d (%s)\n", msg, errclass, string);
1086 /* ------------------------------------------------------------------------ */
1088 If verbose output is selected and the level is at least that of the
1089 value of the verbose flag, then perform printf(format, ...);
/* Print a formatted message only when the file-scope verbose level (set from
 * MPITEST_VERBOSE) is at least 'level'. NOTE(review): the matching va_end is
 * in lines elided from this listing. */
1091 void MTestPrintfMsg(int level, const char format[], ...)
1095     if (verbose && level <= verbose) {
1096         va_start(list, format);
1097         vprintf(format, list);
1103 /* Fatal error. Report and exit */
/* Fatal error: print the message to stderr and abort the whole job via
 * MPI_Abort on MPI_COMM_WORLD. Does not return. */
1104 void MTestError(const char *msg)
1106     fprintf(stderr, "%s\n", msg);
1108     MPI_Abort(MPI_COMM_WORLD, 1);
1111 /* ------------------------------------------------------------------------ */
/* NOTE(review): interior lines are elided in this listing. */
/* Print a getrusage(RUSAGE_SELF) summary to fp, when available. Output can be
 * gated on a minor-page-fault threshold taken from MPITEST_RUSAGE_PF. */
1112 static void MTestResourceSummary(FILE * fp)
1114 #ifdef HAVE_GETRUSAGE
/* -2 = "threshold not yet read from the environment" sentinel */
1116     static int pfThreshold = -2;
1118     if (getrusage(RUSAGE_SELF, &ru) == 0) {
1119         /* There is an option to generate output only when a resource
1120          * exceeds a threshold. To date, only page faults supported. */
1121         if (pfThreshold == -2) {
1122             char *p = getenv("MPITEST_RUSAGE_PF");
1125                 pfThreshold = strtol(p, 0, 0);
1128         if (pfThreshold > 0) {
1129             doOutput = ru.ru_minflt > pfThreshold;
1132             /* Cast values to long in case some system has defined them
1133              * as another integer type */
1134             fprintf(fp, "RUSAGE: max resident set = %ldKB\n", (long) ru.ru_maxrss);
1135             fprintf(fp, "RUSAGE: page faults = %ld : %ld\n",
1136                     (long) ru.ru_minflt, (long) ru.ru_majflt);
1137             /* Not every Unix provides useful information for the xxrss fields */
1138             fprintf(fp, "RUSAGE: memory in text/data/stack = %ld : %ld : %ld\n",
1139                     (long) ru.ru_ixrss, (long) ru.ru_idrss, (long) ru.ru_isrss);
1140             fprintf(fp, "RUSAGE: I/O in and out = %ld : %ld\n",
1141                     (long) ru.ru_inblock, (long) ru.ru_oublock);
1142             fprintf(fp, "RUSAGE: context switch = %ld : %ld\n",
1143                     (long) ru.ru_nvcsw, (long) ru.ru_nivcsw);
1147         fprintf(fp, "RUSAGE: return error %d\n", errno);
1152 /* ------------------------------------------------------------------------ */
1153 #ifdef HAVE_MPI_WIN_CREATE
1155 * Create MPI Windows
1157 static int win_index = 0;
1158 static const char *winName;
1159 /* Use an attribute to remember the type of memory allocation (static,
1160 malloc, or MPI_Alloc_mem) */
1161 static int mem_keyval = MPI_KEYVAL_INVALID;
/* NOTE(review): interior lines are elided in this listing (case labels,
 * returns, and size computations are missing). */
/* Iterate over a sequence of RMA windows (indexed by the file-scope
 * win_index): static active-target, MPI_Alloc_mem passive-target,
 * per-rank-different-size, and nolocks variants. The mem_keyval attribute
 * records how each window's memory was obtained (0 = static, 1 = malloc,
 * 2 = MPI_Alloc_mem) so MTestFreeWin can release it correctly. */
1162 int MTestGetWin(MPI_Win * win, int mustBePassive)
1164     static char actbuf[1024];
1165     static char *pasbuf;
1170     if (mem_keyval == MPI_KEYVAL_INVALID) {
1171         /* Create the keyval */
1172         merr = MPI_Win_create_keyval(MPI_WIN_NULL_COPY_FN, MPI_WIN_NULL_DELETE_FN, &mem_keyval, 0);
1174             MTestPrintError(merr);
1178     switch (win_index) {
1180         /* Active target window */
1181         merr = MPI_Win_create(actbuf, 1024, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win);
1183             MTestPrintError(merr);
1184         winName = "active-window";
/* attribute 0: static storage, MTestFreeWin must not free it */
1185         merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 0);
1187             MTestPrintError(merr);
1190         /* Passive target window */
1191         merr = MPI_Alloc_mem(1024, MPI_INFO_NULL, &pasbuf);
1193             MTestPrintError(merr);
1194         merr = MPI_Win_create(pasbuf, 1024, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win);
1196             MTestPrintError(merr);
1197         winName = "passive-window";
/* attribute 2: MPI_Alloc_mem storage, freed with MPI_Free_mem */
1198         merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 2);
1200             MTestPrintError(merr);
1203         /* Active target; all windows different sizes */
1204         merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
1206             MTestPrintError(merr);
/* NOTE(review): the computation of n from rank is elided in this listing */
1209         buf = (char *) malloc(n);
1212         merr = MPI_Win_create(buf, n, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win);
1214             MTestPrintError(merr);
1215         winName = "active-all-different-win";
/* attribute 1: malloc'ed storage, freed with free() */
1216         merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 1);
1218             MTestPrintError(merr);
1221         /* Active target, no locks set */
1222         merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
1224             MTestPrintError(merr);
1227         buf = (char *) malloc(n);
1230         merr = MPI_Info_create(&info);
1232             MTestPrintError(merr);
/* "nolocks" info hint: window will never be the target of passive locks */
1233         merr = MPI_Info_set(info, (char *) "nolocks", (char *) "true");
1235             MTestPrintError(merr);
1236         merr = MPI_Win_create(buf, n, 1, info, MPI_COMM_WORLD, win);
1238             MTestPrintError(merr);
1239         merr = MPI_Info_free(&info);
1241             MTestPrintError(merr);
1242         winName = "active-nolocks-all-different-win";
1243         merr = MPI_Win_set_attr(*win, mem_keyval, (void *) 1);
1245             MTestPrintError(merr);
1254 /* Return a pointer to the name associated with a window object */
/* NOTE(review): body elided in this listing; presumably returns the
 * file-scope winName set by MTestGetWin — verify against the full file. */
1255 const char *MTestGetWinName(void)
1260 /* Free the storage associated with a window object */
/* NOTE(review): interior lines are elided in this listing. */
/* Free the window's backing memory according to the mem_keyval attribute set
 * by MTestGetWin (1 = malloc'ed, 2 = MPI_Alloc_mem'ed, 0 = static) and then
 * free the window itself. */
1261 void MTestFreeWin(MPI_Win * win)
1266     merr = MPI_Win_get_attr(*win, MPI_WIN_BASE, &addr, &flag);
1268         MTestPrintError(merr);
1270         MTestError("Could not get WIN_BASE from window");
1274     merr = MPI_Win_get_attr(*win, mem_keyval, &val, &flag);
1276         MTestPrintError(merr);
1278         if (val == (void *) 1) {
1281         else if (val == (void *) 2) {
1282             merr = MPI_Free_mem(addr);
1284                 MTestPrintError(merr);
1286         /* if val == (void *)0, then static data that must not be freed */
1289     merr = MPI_Win_free(win);
1291         MTestPrintError(merr);
/* Release the RMA keyval allocated by MTestGetWin, if any; called from
 * MTest_Finalize's cleanup. The second definition below is the empty stub
 * used when MPI_Win_create is unavailable (its body is elided in this
 * listing). */
1294 static void MTestRMACleanup(void)
1296     if (mem_keyval != MPI_KEYVAL_INVALID) {
1297         MPI_Win_free_keyval(&mem_keyval);
1301 static void MTestRMACleanup(void)
1306 /* ------------------------------------------------------------------------ */
1307 /* This function determines if it is possible to spawn additional MPI
1308 * processes using MPI_COMM_SPAWN and MPI_COMM_SPAWN_MULTIPLE.
1310 * It sets the can_spawn value to one of the following:
1311 * 1 = yes, additional processes can be spawned
1312 * 0 = no, MPI_UNIVERSE_SIZE <= the size of MPI_COMM_WORLD
1313 * -1 = it is unknown whether or not processes can be spawned
1314 * due to errors in the necessary query functions
/* NOTE(review): interior lines are elided in this listing (the assignments
 * to *can_spawn and the return statements are missing). */
/* Query MPI_UNIVERSE_SIZE on MPI_COMM_WORLD to decide whether additional
 * processes can be spawned; see the result encoding (1/0/-1) documented in
 * the comment block above this function. */
1317 int MTestSpawnPossible(int *can_spawn)
1326     rc = MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, &v, &flag);
1327     if (rc != MPI_SUCCESS) {
1328         /* MPI_UNIVERSE_SIZE keyval missing from MPI_COMM_WORLD attributes */
1333         /* MPI_UNIVERSE_SIZE need not be set */
1337         rc = MPI_Comm_size(MPI_COMM_WORLD, &size);
1338         if (rc != MPI_SUCCESS) {
1339             /* MPI_Comm_size failed for MPI_COMM_WORLD */
1346             /* no additional processes can be spawned */
1354         /* No attribute associated with key MPI_UNIVERSE_SIZE of MPI_COMM_WORLD */
1361 /* ------------------------------------------------------------------------ */