From a286e57018d2ef03257affb9fe1e73e0b8d08a3d Mon Sep 17 00:00:00 2001 From: Augustin Degomme Date: Tue, 23 Oct 2012 17:21:53 +0200 Subject: [PATCH] first commit to add the mpich-test suite to smpi tesh suite. Obviously all tests don't complete correctly, so we will only execute a (pretty good) part of it, but all tests should compile with smpi (at least C ones for now, fotran needs more work) Conflicts: CMakeLists.txt buildtools/Cmake/AddTests.cmake --- CMakeLists.txt | 7 +- buildtools/Cmake/AddTests.cmake | 14 +- buildtools/Cmake/DefinePackages.cmake | 5 + buildtools/Cmake/Flags.cmake | 1 + buildtools/Cmake/MakeExe.cmake | 5 + teshsuite/smpi/mpich-test/CMakeLists.txt | 102 + teshsuite/smpi/mpich-test/README | 75 + teshsuite/smpi/mpich-test/coll/CMakeLists.txt | 156 ++ teshsuite/smpi/mpich-test/coll/allgatherf.f | 41 + teshsuite/smpi/mpich-test/coll/allred.c | 2466 +++++++++++++++++ teshsuite/smpi/mpich-test/coll/allred2.c | 50 + teshsuite/smpi/mpich-test/coll/allredf.f | 894 ++++++ teshsuite/smpi/mpich-test/coll/allredf.std | 14 + teshsuite/smpi/mpich-test/coll/allredmany.c | 28 + teshsuite/smpi/mpich-test/coll/allredmany.std | 22 + teshsuite/smpi/mpich-test/coll/alltoallv.c | 97 + teshsuite/smpi/mpich-test/coll/assocf.f | 73 + teshsuite/smpi/mpich-test/coll/barrier.c | 88 + teshsuite/smpi/mpich-test/coll/bcast.c | 53 + teshsuite/smpi/mpich-test/coll/bcast2.f | 37 + teshsuite/smpi/mpich-test/coll/bcastbug.c | 70 + teshsuite/smpi/mpich-test/coll/bcastbug2.c | 29 + teshsuite/smpi/mpich-test/coll/bcastlog.f | 38 + teshsuite/smpi/mpich-test/coll/bcastvec.c | 83 + teshsuite/smpi/mpich-test/coll/coll1.c | 61 + teshsuite/smpi/mpich-test/coll/coll10.c | 60 + teshsuite/smpi/mpich-test/coll/coll11.c | 110 + teshsuite/smpi/mpich-test/coll/coll12.c | 76 + teshsuite/smpi/mpich-test/coll/coll13.c | 86 + teshsuite/smpi/mpich-test/coll/coll2.c | 68 + teshsuite/smpi/mpich-test/coll/coll3.c | 86 + teshsuite/smpi/mpich-test/coll/coll4.c | 46 + teshsuite/smpi/mpich-test/coll/coll5.c 
| 51 + teshsuite/smpi/mpich-test/coll/coll6.c | 82 + teshsuite/smpi/mpich-test/coll/coll7.c | 59 + teshsuite/smpi/mpich-test/coll/coll8.c | 39 + teshsuite/smpi/mpich-test/coll/coll9.c | 44 + teshsuite/smpi/mpich-test/coll/grouptest.c | 61 + teshsuite/smpi/mpich-test/coll/longuser.c | 81 + teshsuite/smpi/mpich-test/coll/nbcoll.c | 75 + teshsuite/smpi/mpich-test/coll/redscat.c | 53 + teshsuite/smpi/mpich-test/coll/redtst.c | 21 + teshsuite/smpi/mpich-test/coll/runtests | 184 ++ teshsuite/smpi/mpich-test/coll/scantst.c | 152 + teshsuite/smpi/mpich-test/coll/scattern.c | 54 + teshsuite/smpi/mpich-test/coll/scatterv.c | 167 ++ teshsuite/smpi/mpich-test/coll/scatterv.std | 4 + teshsuite/smpi/mpich-test/coll/shortint.c | 39 + teshsuite/smpi/mpich-test/coll/temprun | 269 ++ teshsuite/smpi/mpich-test/coll/test.c | 97 + teshsuite/smpi/mpich-test/coll/test.h | 18 + .../smpi/mpich-test/context/CMakeLists.txt | 75 + teshsuite/smpi/mpich-test/context/attrerr.c | 114 + teshsuite/smpi/mpich-test/context/attrerr.std | 4 + teshsuite/smpi/mpich-test/context/attrt.c | 260 ++ teshsuite/smpi/mpich-test/context/attrt.std | 9 + teshsuite/smpi/mpich-test/context/attrtest.f | 105 + teshsuite/smpi/mpich-test/context/commnames.c | 62 + .../smpi/mpich-test/context/commnames.std | 4 + .../smpi/mpich-test/context/commnamesf.f | 75 + teshsuite/smpi/mpich-test/context/context.std | 3 + .../smpi/mpich-test/context/groupcreate.c | 67 + teshsuite/smpi/mpich-test/context/grouptest.c | 192 ++ teshsuite/smpi/mpich-test/context/icdup.c | 71 + teshsuite/smpi/mpich-test/context/ictest.c | 124 + teshsuite/smpi/mpich-test/context/ictest2.c | 209 ++ teshsuite/smpi/mpich-test/context/ictest3.c | 195 ++ teshsuite/smpi/mpich-test/context/runtests | 136 + teshsuite/smpi/mpich-test/context/test.c | 94 + teshsuite/smpi/mpich-test/context/test.h | 24 + teshsuite/smpi/mpich-test/env/CMakeLists.txt | 79 + teshsuite/smpi/mpich-test/env/aborttest.c | 34 + teshsuite/smpi/mpich-test/env/aborttest.out | 6 + 
teshsuite/smpi/mpich-test/env/aborttest.std | 6 + teshsuite/smpi/mpich-test/env/argstest.c | 19 + teshsuite/smpi/mpich-test/env/baseattr.c | 48 + teshsuite/smpi/mpich-test/env/baseattrf.f | 35 + teshsuite/smpi/mpich-test/env/cmdline.c | 54 + teshsuite/smpi/mpich-test/env/env.std | 10 + teshsuite/smpi/mpich-test/env/errhand.c | 242 ++ teshsuite/smpi/mpich-test/env/errhand2.c | 62 + teshsuite/smpi/mpich-test/env/errhandf.f | 56 + teshsuite/smpi/mpich-test/env/errstringsf.f | 48 + teshsuite/smpi/mpich-test/env/errstringsf.std | 4 + teshsuite/smpi/mpich-test/env/getproc.c | 57 + teshsuite/smpi/mpich-test/env/getproc.out | 3 + teshsuite/smpi/mpich-test/env/getproc.stdo | 3 + teshsuite/smpi/mpich-test/env/getprocf.f | 27 + teshsuite/smpi/mpich-test/env/gtime.c | 132 + teshsuite/smpi/mpich-test/env/gtime.out | 3 + teshsuite/smpi/mpich-test/env/gtime.stdo | 3 + teshsuite/smpi/mpich-test/env/hang.c | 15 + teshsuite/smpi/mpich-test/env/init.c | 30 + teshsuite/smpi/mpich-test/env/init.out | 3 + teshsuite/smpi/mpich-test/env/init.stdo | 3 + teshsuite/smpi/mpich-test/env/runtests | 231 ++ teshsuite/smpi/mpich-test/env/sigchk.c | 201 ++ teshsuite/smpi/mpich-test/env/test.c | 130 + teshsuite/smpi/mpich-test/env/test.h | 24 + teshsuite/smpi/mpich-test/env/testerr.c | 170 ++ teshsuite/smpi/mpich-test/env/timers.c | 54 + teshsuite/smpi/mpich-test/env/timertest.c | 35 + .../smpi/mpich-test/profile/CMakeLists.txt | 45 + teshsuite/smpi/mpich-test/profile/colluses.c | 81 + teshsuite/smpi/mpich-test/profile/ptest.c | 21 + teshsuite/smpi/mpich-test/profile/ptest.std | 3 + teshsuite/smpi/mpich-test/profile/runtests | 106 + .../smpi/mpich-test/pt2pt/CMakeLists.txt | 404 +++ teshsuite/smpi/mpich-test/pt2pt/README | 36 + teshsuite/smpi/mpich-test/pt2pt/allpair.f | 767 +++++ teshsuite/smpi/mpich-test/pt2pt/allpair.std | 13 + teshsuite/smpi/mpich-test/pt2pt/allpair2.f | 809 ++++++ teshsuite/smpi/mpich-test/pt2pt/allpair2.std | 13 + teshsuite/smpi/mpich-test/pt2pt/bsendtest.c | 211 ++ 
teshsuite/smpi/mpich-test/pt2pt/cancel.c | 117 + teshsuite/smpi/mpich-test/pt2pt/cancel2.c | 230 ++ teshsuite/smpi/mpich-test/pt2pt/cancel3.c | 217 ++ teshsuite/smpi/mpich-test/pt2pt/cancelibm.c | 146 + .../smpi/mpich-test/pt2pt/cancelissend.c | 160 ++ .../smpi/mpich-test/pt2pt/cancelmessages.c | 171 ++ teshsuite/smpi/mpich-test/pt2pt/commit.c | 86 + teshsuite/smpi/mpich-test/pt2pt/dataalign.c | 105 + teshsuite/smpi/mpich-test/pt2pt/dtypelife.c | 84 + teshsuite/smpi/mpich-test/pt2pt/dtyperecv.c | 61 + teshsuite/smpi/mpich-test/pt2pt/dtypes.c | 343 +++ teshsuite/smpi/mpich-test/pt2pt/dtypes.h | 13 + teshsuite/smpi/mpich-test/pt2pt/exittest.c | 78 + .../smpi/mpich-test/pt2pt/fairness/README | 3 + .../mpich-test/pt2pt/fairness/fairness-euih.c | 80 + .../smpi/mpich-test/pt2pt/fairness/fairness.c | 72 + .../mpich-test/pt2pt/fairness/fairness2.c | 87 + .../mpich-test/pt2pt/fairness/fairness2m.c | 99 + .../mpich-test/pt2pt/fairness/fairnessm.c | 79 + teshsuite/smpi/mpich-test/pt2pt/fifth.c | 54 + teshsuite/smpi/mpich-test/pt2pt/flood.c | 254 ++ teshsuite/smpi/mpich-test/pt2pt/flood2.c | 215 ++ teshsuite/smpi/mpich-test/pt2pt/fourth.c | 64 + teshsuite/smpi/mpich-test/pt2pt/gcomm.c | 78 + teshsuite/smpi/mpich-test/pt2pt/gcomm.h | 6 + teshsuite/smpi/mpich-test/pt2pt/getelm.c | 148 + teshsuite/smpi/mpich-test/pt2pt/hindexed.c | 106 + teshsuite/smpi/mpich-test/pt2pt/hindexed.std | 3 + teshsuite/smpi/mpich-test/pt2pt/htmsg.c | 54 + teshsuite/smpi/mpich-test/pt2pt/hvec.c | 127 + teshsuite/smpi/mpich-test/pt2pt/hvec.std | 17 + teshsuite/smpi/mpich-test/pt2pt/hvectest.c | 315 +++ teshsuite/smpi/mpich-test/pt2pt/hvectest2.c | 366 +++ teshsuite/smpi/mpich-test/pt2pt/irecvtest.c | 152 + teshsuite/smpi/mpich-test/pt2pt/irsend.c | 155 ++ teshsuite/smpi/mpich-test/pt2pt/irsendinit.c | 167 ++ teshsuite/smpi/mpich-test/pt2pt/isendf.f | 54 + teshsuite/smpi/mpich-test/pt2pt/isendtest.c | 48 + teshsuite/smpi/mpich-test/pt2pt/isndrcv.c | 507 ++++ teshsuite/smpi/mpich-test/pt2pt/isndrcv2.c | 
41 + teshsuite/smpi/mpich-test/pt2pt/issend2.c | 102 + teshsuite/smpi/mpich-test/pt2pt/issendinit.c | 104 + teshsuite/smpi/mpich-test/pt2pt/issendtest.c | 131 + teshsuite/smpi/mpich-test/pt2pt/longmsgs.c | 201 ++ teshsuite/smpi/mpich-test/pt2pt/mpitest.h | 11 + teshsuite/smpi/mpich-test/pt2pt/nblock.c | 101 + teshsuite/smpi/mpich-test/pt2pt/nblock.std | 10 + teshsuite/smpi/mpich-test/pt2pt/nbtest.c | 103 + teshsuite/smpi/mpich-test/pt2pt/nbtest.std | 10 + teshsuite/smpi/mpich-test/pt2pt/nullproc.c | 118 + teshsuite/smpi/mpich-test/pt2pt/nullproc.std | 3 + teshsuite/smpi/mpich-test/pt2pt/nullproc2.c | 129 + teshsuite/smpi/mpich-test/pt2pt/nullproc2.std | 3 + teshsuite/smpi/mpich-test/pt2pt/order.c | 71 + teshsuite/smpi/mpich-test/pt2pt/overtake.c | 290 ++ teshsuite/smpi/mpich-test/pt2pt/pack.c | 77 + teshsuite/smpi/mpich-test/pt2pt/pack.std | 3 + teshsuite/smpi/mpich-test/pt2pt/persist.c | 54 + teshsuite/smpi/mpich-test/pt2pt/persist.std | 3 + teshsuite/smpi/mpich-test/pt2pt/persist2.c | 80 + teshsuite/smpi/mpich-test/pt2pt/persist2.std | 3 + teshsuite/smpi/mpich-test/pt2pt/persistent.c | 56 + .../smpi/mpich-test/pt2pt/persistent.std | 9 + teshsuite/smpi/mpich-test/pt2pt/pingpong.f | 274 ++ teshsuite/smpi/mpich-test/pt2pt/probe.c | 56 + teshsuite/smpi/mpich-test/pt2pt/probe1.c | 78 + teshsuite/smpi/mpich-test/pt2pt/relrank.c | 58 + teshsuite/smpi/mpich-test/pt2pt/reqcreate.c | 85 + teshsuite/smpi/mpich-test/pt2pt/reqcreate.std | 4 + teshsuite/smpi/mpich-test/pt2pt/reqfree.c | 152 + teshsuite/smpi/mpich-test/pt2pt/runtests | 386 +++ teshsuite/smpi/mpich-test/pt2pt/secondf.f | 59 + teshsuite/smpi/mpich-test/pt2pt/self.c | 63 + teshsuite/smpi/mpich-test/pt2pt/self.std | 32 + teshsuite/smpi/mpich-test/pt2pt/selfvsworld.c | 104 + .../smpi/mpich-test/pt2pt/selfvsworld.std | 3 + teshsuite/smpi/mpich-test/pt2pt/send1.f | 113 + teshsuite/smpi/mpich-test/pt2pt/sendcplx.f | 33 + teshsuite/smpi/mpich-test/pt2pt/sendfort.f | 47 + teshsuite/smpi/mpich-test/pt2pt/sendmany.c | 84 + 
teshsuite/smpi/mpich-test/pt2pt/sendmany.std | 17 + teshsuite/smpi/mpich-test/pt2pt/sendorder.c | 173 ++ teshsuite/smpi/mpich-test/pt2pt/sendorder.std | 3 + teshsuite/smpi/mpich-test/pt2pt/sendrecv.c | 634 +++++ teshsuite/smpi/mpich-test/pt2pt/sendrecv2.c | 123 + teshsuite/smpi/mpich-test/pt2pt/sendrecv3.c | 158 ++ teshsuite/smpi/mpich-test/pt2pt/sendrecv4.c | 175 ++ teshsuite/smpi/mpich-test/pt2pt/sixth.c | 114 + teshsuite/smpi/mpich-test/pt2pt/sndrcv.c | 138 + teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.c | 68 + teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.std | 3 + teshsuite/smpi/mpich-test/pt2pt/sndrcvrpl2.c | 64 + teshsuite/smpi/mpich-test/pt2pt/ssendtest.c | 145 + teshsuite/smpi/mpich-test/pt2pt/ssendtest2.c | 78 + .../smpi/mpich-test/pt2pt/ssendtest2.std | 12 + teshsuite/smpi/mpich-test/pt2pt/structf.f | 87 + teshsuite/smpi/mpich-test/pt2pt/structf.std | 3 + teshsuite/smpi/mpich-test/pt2pt/structlb.c | 55 + teshsuite/smpi/mpich-test/pt2pt/systest.c | 433 +++ teshsuite/smpi/mpich-test/pt2pt/systest1.c | 115 + teshsuite/smpi/mpich-test/pt2pt/test.c | 162 ++ teshsuite/smpi/mpich-test/pt2pt/test.h | 29 + teshsuite/smpi/mpich-test/pt2pt/testall.c | 175 ++ teshsuite/smpi/mpich-test/pt2pt/testhetero | 92 + teshsuite/smpi/mpich-test/pt2pt/testsome.c | 173 ++ teshsuite/smpi/mpich-test/pt2pt/testtest1.c | 83 + teshsuite/smpi/mpich-test/pt2pt/testtypes.c | 37 + teshsuite/smpi/mpich-test/pt2pt/third.c | 121 + teshsuite/smpi/mpich-test/pt2pt/trunc.c | 163 ++ teshsuite/smpi/mpich-test/pt2pt/truncmult.c | 258 ++ teshsuite/smpi/mpich-test/pt2pt/typebase.c | 126 + teshsuite/smpi/mpich-test/pt2pt/typebase.std | 3 + teshsuite/smpi/mpich-test/pt2pt/typebasef.f | 71 + teshsuite/smpi/mpich-test/pt2pt/typecreate.c | 66 + .../smpi/mpich-test/pt2pt/typecreate.std | 3 + teshsuite/smpi/mpich-test/pt2pt/typelb.c | 45 + teshsuite/smpi/mpich-test/pt2pt/typetest.c | 310 +++ teshsuite/smpi/mpich-test/pt2pt/typeub.c | 88 + teshsuite/smpi/mpich-test/pt2pt/typeub.std | 3 + 
teshsuite/smpi/mpich-test/pt2pt/typeub2.c | 75 + teshsuite/smpi/mpich-test/pt2pt/typeub2.std | 5 + teshsuite/smpi/mpich-test/pt2pt/typeub3.c | 104 + teshsuite/smpi/mpich-test/pt2pt/typeub3.std | 6 + teshsuite/smpi/mpich-test/pt2pt/waitall.c | 120 + teshsuite/smpi/mpich-test/pt2pt/waitall.std | 3 + teshsuite/smpi/mpich-test/pt2pt/waitall2.c | 109 + teshsuite/smpi/mpich-test/pt2pt/waitall2.std | 3 + teshsuite/smpi/mpich-test/pt2pt/waitall3.c | 124 + teshsuite/smpi/mpich-test/pt2pt/waitall4.c | 127 + teshsuite/smpi/mpich-test/pt2pt/waitany.c | 104 + teshsuite/smpi/mpich-test/pt2pt/waitany.std | 4 + teshsuite/smpi/mpich-test/runbase | 252 ++ teshsuite/smpi/mpich-test/topol/cart.c | 134 + teshsuite/smpi/mpich-test/topol/cart1f.f | 192 ++ teshsuite/smpi/mpich-test/topol/cart2.c | 61 + teshsuite/smpi/mpich-test/topol/cart2f.f | 54 + teshsuite/smpi/mpich-test/topol/cartc.f90 | 21 + teshsuite/smpi/mpich-test/topol/cartf.f | 278 ++ teshsuite/smpi/mpich-test/topol/cartf.std | 4 + teshsuite/smpi/mpich-test/topol/cartmap.c | 55 + teshsuite/smpi/mpich-test/topol/cartmap.std | 3 + teshsuite/smpi/mpich-test/topol/cartorder.c | 148 + teshsuite/smpi/mpich-test/topol/dims.c | 117 + teshsuite/smpi/mpich-test/topol/graphtest.c | 234 ++ teshsuite/smpi/mpich-test/topol/graphtest.std | 5 + teshsuite/smpi/mpich-test/topol/test.c | 102 + teshsuite/smpi/mpich-test/topol/test.h | 24 + teshsuite/smpi/mpich-test/topol/twod.f | 291 ++ teshsuite/smpi/mpich-test/topol/twod2.f | 289 ++ 263 files changed, 28864 insertions(+), 5 deletions(-) create mode 100644 teshsuite/smpi/mpich-test/CMakeLists.txt create mode 100644 teshsuite/smpi/mpich-test/README create mode 100644 teshsuite/smpi/mpich-test/coll/CMakeLists.txt create mode 100644 teshsuite/smpi/mpich-test/coll/allgatherf.f create mode 100644 teshsuite/smpi/mpich-test/coll/allred.c create mode 100644 teshsuite/smpi/mpich-test/coll/allred2.c create mode 100644 teshsuite/smpi/mpich-test/coll/allredf.f create mode 100644 
teshsuite/smpi/mpich-test/coll/allredf.std create mode 100644 teshsuite/smpi/mpich-test/coll/allredmany.c create mode 100644 teshsuite/smpi/mpich-test/coll/allredmany.std create mode 100644 teshsuite/smpi/mpich-test/coll/alltoallv.c create mode 100644 teshsuite/smpi/mpich-test/coll/assocf.f create mode 100644 teshsuite/smpi/mpich-test/coll/barrier.c create mode 100644 teshsuite/smpi/mpich-test/coll/bcast.c create mode 100644 teshsuite/smpi/mpich-test/coll/bcast2.f create mode 100644 teshsuite/smpi/mpich-test/coll/bcastbug.c create mode 100644 teshsuite/smpi/mpich-test/coll/bcastbug2.c create mode 100644 teshsuite/smpi/mpich-test/coll/bcastlog.f create mode 100644 teshsuite/smpi/mpich-test/coll/bcastvec.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll1.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll10.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll11.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll12.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll13.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll2.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll3.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll4.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll5.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll6.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll7.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll8.c create mode 100644 teshsuite/smpi/mpich-test/coll/coll9.c create mode 100644 teshsuite/smpi/mpich-test/coll/grouptest.c create mode 100644 teshsuite/smpi/mpich-test/coll/longuser.c create mode 100644 teshsuite/smpi/mpich-test/coll/nbcoll.c create mode 100644 teshsuite/smpi/mpich-test/coll/redscat.c create mode 100644 teshsuite/smpi/mpich-test/coll/redtst.c create mode 100755 teshsuite/smpi/mpich-test/coll/runtests create mode 100644 teshsuite/smpi/mpich-test/coll/scantst.c create mode 100644 teshsuite/smpi/mpich-test/coll/scattern.c create mode 100644 
teshsuite/smpi/mpich-test/coll/scatterv.c create mode 100644 teshsuite/smpi/mpich-test/coll/scatterv.std create mode 100644 teshsuite/smpi/mpich-test/coll/shortint.c create mode 100755 teshsuite/smpi/mpich-test/coll/temprun create mode 100644 teshsuite/smpi/mpich-test/coll/test.c create mode 100644 teshsuite/smpi/mpich-test/coll/test.h create mode 100644 teshsuite/smpi/mpich-test/context/CMakeLists.txt create mode 100644 teshsuite/smpi/mpich-test/context/attrerr.c create mode 100644 teshsuite/smpi/mpich-test/context/attrerr.std create mode 100644 teshsuite/smpi/mpich-test/context/attrt.c create mode 100644 teshsuite/smpi/mpich-test/context/attrt.std create mode 100644 teshsuite/smpi/mpich-test/context/attrtest.f create mode 100644 teshsuite/smpi/mpich-test/context/commnames.c create mode 100644 teshsuite/smpi/mpich-test/context/commnames.std create mode 100644 teshsuite/smpi/mpich-test/context/commnamesf.f create mode 100644 teshsuite/smpi/mpich-test/context/context.std create mode 100644 teshsuite/smpi/mpich-test/context/groupcreate.c create mode 100644 teshsuite/smpi/mpich-test/context/grouptest.c create mode 100644 teshsuite/smpi/mpich-test/context/icdup.c create mode 100644 teshsuite/smpi/mpich-test/context/ictest.c create mode 100644 teshsuite/smpi/mpich-test/context/ictest2.c create mode 100644 teshsuite/smpi/mpich-test/context/ictest3.c create mode 100755 teshsuite/smpi/mpich-test/context/runtests create mode 100644 teshsuite/smpi/mpich-test/context/test.c create mode 100644 teshsuite/smpi/mpich-test/context/test.h create mode 100644 teshsuite/smpi/mpich-test/env/CMakeLists.txt create mode 100644 teshsuite/smpi/mpich-test/env/aborttest.c create mode 100644 teshsuite/smpi/mpich-test/env/aborttest.out create mode 100644 teshsuite/smpi/mpich-test/env/aborttest.std create mode 100644 teshsuite/smpi/mpich-test/env/argstest.c create mode 100644 teshsuite/smpi/mpich-test/env/baseattr.c create mode 100644 teshsuite/smpi/mpich-test/env/baseattrf.f create mode 100644 
teshsuite/smpi/mpich-test/env/cmdline.c create mode 100644 teshsuite/smpi/mpich-test/env/env.std create mode 100644 teshsuite/smpi/mpich-test/env/errhand.c create mode 100644 teshsuite/smpi/mpich-test/env/errhand2.c create mode 100644 teshsuite/smpi/mpich-test/env/errhandf.f create mode 100644 teshsuite/smpi/mpich-test/env/errstringsf.f create mode 100644 teshsuite/smpi/mpich-test/env/errstringsf.std create mode 100644 teshsuite/smpi/mpich-test/env/getproc.c create mode 100644 teshsuite/smpi/mpich-test/env/getproc.out create mode 100644 teshsuite/smpi/mpich-test/env/getproc.stdo create mode 100644 teshsuite/smpi/mpich-test/env/getprocf.f create mode 100644 teshsuite/smpi/mpich-test/env/gtime.c create mode 100644 teshsuite/smpi/mpich-test/env/gtime.out create mode 100644 teshsuite/smpi/mpich-test/env/gtime.stdo create mode 100644 teshsuite/smpi/mpich-test/env/hang.c create mode 100644 teshsuite/smpi/mpich-test/env/init.c create mode 100644 teshsuite/smpi/mpich-test/env/init.out create mode 100644 teshsuite/smpi/mpich-test/env/init.stdo create mode 100755 teshsuite/smpi/mpich-test/env/runtests create mode 100644 teshsuite/smpi/mpich-test/env/sigchk.c create mode 100644 teshsuite/smpi/mpich-test/env/test.c create mode 100644 teshsuite/smpi/mpich-test/env/test.h create mode 100644 teshsuite/smpi/mpich-test/env/testerr.c create mode 100644 teshsuite/smpi/mpich-test/env/timers.c create mode 100644 teshsuite/smpi/mpich-test/env/timertest.c create mode 100644 teshsuite/smpi/mpich-test/profile/CMakeLists.txt create mode 100644 teshsuite/smpi/mpich-test/profile/colluses.c create mode 100644 teshsuite/smpi/mpich-test/profile/ptest.c create mode 100644 teshsuite/smpi/mpich-test/profile/ptest.std create mode 100755 teshsuite/smpi/mpich-test/profile/runtests create mode 100644 teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt create mode 100644 teshsuite/smpi/mpich-test/pt2pt/README create mode 100644 teshsuite/smpi/mpich-test/pt2pt/allpair.f create mode 100644 
teshsuite/smpi/mpich-test/pt2pt/allpair.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/allpair2.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/allpair2.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/bsendtest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/cancel.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/cancel2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/cancel3.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/cancelibm.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/cancelissend.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/cancelmessages.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/commit.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/dataalign.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/dtypelife.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/dtyperecv.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/dtypes.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/dtypes.h create mode 100644 teshsuite/smpi/mpich-test/pt2pt/exittest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fairness/README create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fairness/fairness-euih.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fairness/fairness.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2m.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fairness/fairnessm.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fifth.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/flood.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/flood2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/fourth.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/gcomm.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/gcomm.h create mode 100644 teshsuite/smpi/mpich-test/pt2pt/getelm.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/hindexed.c create mode 100644 
teshsuite/smpi/mpich-test/pt2pt/hindexed.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/htmsg.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/hvec.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/hvec.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/hvectest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/hvectest2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/irecvtest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/irsend.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/irsendinit.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/isendf.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/isendtest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/isndrcv.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/isndrcv2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/issend2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/issendinit.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/issendtest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/longmsgs.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/mpitest.h create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nblock.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nblock.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nbtest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nbtest.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nullproc.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nullproc.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nullproc2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/nullproc2.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/order.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/overtake.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/pack.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/pack.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/persist.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/persist.std create mode 100644 
teshsuite/smpi/mpich-test/pt2pt/persist2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/persist2.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/persistent.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/persistent.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/pingpong.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/probe.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/probe1.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/relrank.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/reqcreate.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/reqcreate.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/reqfree.c create mode 100755 teshsuite/smpi/mpich-test/pt2pt/runtests create mode 100644 teshsuite/smpi/mpich-test/pt2pt/secondf.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/self.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/self.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/selfvsworld.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/selfvsworld.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/send1.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendcplx.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendfort.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendmany.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendmany.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendorder.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendorder.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendrecv.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendrecv2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendrecv3.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sendrecv4.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sixth.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sndrcv.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.std create mode 100644 
teshsuite/smpi/mpich-test/pt2pt/sndrcvrpl2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/ssendtest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/ssendtest2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/ssendtest2.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/structf.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/structf.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/structlb.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/systest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/systest1.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/test.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/test.h create mode 100644 teshsuite/smpi/mpich-test/pt2pt/testall.c create mode 100755 teshsuite/smpi/mpich-test/pt2pt/testhetero create mode 100644 teshsuite/smpi/mpich-test/pt2pt/testsome.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/testtest1.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/testtypes.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/third.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/trunc.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/truncmult.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typebase.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typebase.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typebasef.f create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typecreate.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typecreate.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typelb.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typetest.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typeub.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typeub.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typeub2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typeub2.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typeub3.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/typeub3.std create mode 100644 
teshsuite/smpi/mpich-test/pt2pt/waitall.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitall.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitall2.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitall2.std create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitall3.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitall4.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitany.c create mode 100644 teshsuite/smpi/mpich-test/pt2pt/waitany.std create mode 100644 teshsuite/smpi/mpich-test/runbase create mode 100644 teshsuite/smpi/mpich-test/topol/cart.c create mode 100644 teshsuite/smpi/mpich-test/topol/cart1f.f create mode 100644 teshsuite/smpi/mpich-test/topol/cart2.c create mode 100644 teshsuite/smpi/mpich-test/topol/cart2f.f create mode 100644 teshsuite/smpi/mpich-test/topol/cartc.f90 create mode 100644 teshsuite/smpi/mpich-test/topol/cartf.f create mode 100644 teshsuite/smpi/mpich-test/topol/cartf.std create mode 100644 teshsuite/smpi/mpich-test/topol/cartmap.c create mode 100644 teshsuite/smpi/mpich-test/topol/cartmap.std create mode 100644 teshsuite/smpi/mpich-test/topol/cartorder.c create mode 100644 teshsuite/smpi/mpich-test/topol/dims.c create mode 100644 teshsuite/smpi/mpich-test/topol/graphtest.c create mode 100644 teshsuite/smpi/mpich-test/topol/graphtest.std create mode 100644 teshsuite/smpi/mpich-test/topol/test.c create mode 100644 teshsuite/smpi/mpich-test/topol/test.h create mode 100644 teshsuite/smpi/mpich-test/topol/twod.f create mode 100644 teshsuite/smpi/mpich-test/topol/twod2.f diff --git a/CMakeLists.txt b/CMakeLists.txt index 3da52d4d31..05a1a3abab 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,14 +2,15 @@ cmake_minimum_required(VERSION 2.6) ### Need to set rc ccompiler before enable language if(WIN32) SET(CMAKE_RC_COMPILER "windres") -endif() -project(SimGrid CXX C) +endif(WIN32) +project(SimGrid CXX C Fortran) set(CMAKE_C_FLAGS "" CACHE TYPE INTERNAL FORCE) set(CMAKE_CXX_FLAGS "" CACHE TYPE INTERNAL 
FORCE) set(CMAKE_EXE_LINKER_FLAGS "" CACHE TYPE INTERNAL FORCE) set(CMAKE_C_LINK_FLAGS "" CACHE TYPE INTERNAL FORCE) - +set(CMAKE_Fortran_FLAGS "" CACHE TYPE INTERNAL FORCE) +set(CMAKE_Fortran_LINK_FLAGS "" CACHE TYPE INTERNAL FORCE) ## Mapping version number -> version name # 3.5.99 -> alpha1 (oops) # 3.5.9{1,2} -> beta{1,2} diff --git a/buildtools/Cmake/AddTests.cmake b/buildtools/Cmake/AddTests.cmake index b70333d7c9..aecf6d3362 100644 --- a/buildtools/Cmake/AddTests.cmake +++ b/buildtools/Cmake/AddTests.cmake @@ -477,6 +477,7 @@ if(NOT enable_memcheck) ADD_TEST(smpi-struct-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/struct.tesh) ADD_TEST(smpi-pt2pt-thread ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:thread --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt.tesh) + if(HAVE_RAWCTX) ADD_TEST(smpi-bcast-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/bcast.tesh) ADD_TEST(smpi-reduce-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce.tesh) @@ -484,8 +485,17 @@ if(NOT enable_memcheck) ADD_TEST(smpi-indexed-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/indexed.tesh) ADD_TEST(smpi-struct-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/struct.tesh) ADD_TEST(smpi-pt2pt-raw ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:raw --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt.tesh) - - endif() + ADD_TEST(NAME smpi-mpich-env-raw 
COMMAND ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/env/runtests + -srcdir=${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/env -basedir=${CMAKE_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/env) + ADD_TEST(NAME smpi-mpich-pt2pt-raw COMMAND ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/pt2pt/runtests + -srcdir=${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/pt2pt -basedir=${CMAKE_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/pt2pt) + ADD_TEST(NAME smpi-mpich-context-raw COMMAND ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/context/runtests + -srcdir=${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/context -basedir=${CMAKE_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/context) + ADD_TEST(NAME smpi-mpich-profile-raw COMMAND ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/profile/runtests + -srcdir=${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/profile -basedir=${CMAKE_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/profile) + ADD_TEST(NAME smpi-mpich-coll-raw COMMAND ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll/runtests + -srcdir=${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll -basedir=${CMAKE_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich-test/coll) + endif(HAVE_RAWCTX) if(CONTEXT_UCONTEXT) ADD_TEST(smpi-bcast-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/bcast.tesh) ADD_TEST(smpi-reduce-ucontext ${CMAKE_BINARY_DIR}/bin/tesh ${TESH_OPTION} --cfg contexts/factory:ucontext --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/reduce.tesh) diff --git a/buildtools/Cmake/DefinePackages.cmake b/buildtools/Cmake/DefinePackages.cmake index ead196181d..4c72f6909f 100644 --- a/buildtools/Cmake/DefinePackages.cmake +++ b/buildtools/Cmake/DefinePackages.cmake @@ -916,6 +916,11 @@ 
set(TESHSUITE_CMAKEFILES_TXT teshsuite/simdag/platforms/CMakeLists.txt teshsuite/xbt/CMakeLists.txt teshsuite/smpi/CMakeLists.txt + teshsuite/smpi/mpich-test/env/CMakeLists.txt + teshsuite/smpi/mpich-test/coll/CMakeLists.txt + teshsuite/smpi/mpich-test/context/CMakeLists.txt + teshsuite/smpi/mpich-test/profile/CMakeLists.txt + teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt ) set(TOOLS_CMAKEFILES_TXT diff --git a/buildtools/Cmake/Flags.cmake b/buildtools/Cmake/Flags.cmake index 4b627253a8..81828fd89b 100644 --- a/buildtools/Cmake/Flags.cmake +++ b/buildtools/Cmake/Flags.cmake @@ -54,6 +54,7 @@ if(enable_coverage) SET(COVERAGE_COMMAND "${GCOV_PATH}" CACHE TYPE FILEPATH FORCE) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DCOVERAGE") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage") + set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fprofile-arcs -ftest-coverage") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage") set(TESH_OPTION --enable-coverage) add_definitions(-fprofile-arcs -ftest-coverage) diff --git a/buildtools/Cmake/MakeExe.cmake b/buildtools/Cmake/MakeExe.cmake index 87d8221c01..cde9e1623b 100644 --- a/buildtools/Cmake/MakeExe.cmake +++ b/buildtools/Cmake/MakeExe.cmake @@ -30,6 +30,11 @@ add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/network/mxn) add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/partask) add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms) add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/env) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/coll) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/context) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/profile) +add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich-test/pt2pt) add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/msg) 
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/msg/trace) diff --git a/teshsuite/smpi/mpich-test/CMakeLists.txt b/teshsuite/smpi/mpich-test/CMakeLists.txt new file mode 100644 index 0000000000..a743421b15 --- /dev/null +++ b/teshsuite/smpi/mpich-test/CMakeLists.txt @@ -0,0 +1,102 @@ +cmake_minimum_required(VERSION 2.6) + +if(enable_smpi) + set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/bin/smpicc") + + set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") + + include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi") + + add_executable(alltoall2 alltoall2.c ) + add_executable(alltoall_basic alltoall_basic.c) + add_executable(alltoallv alltoallv.c) + add_executable(allreduce allreduce.c) + add_executable(bcast bcast.c) + add_executable(compute compute.c) + add_executable(compute2 compute2.c) + add_executable(compute3 compute3.c) + add_executable(pingpong pingpong.c) + add_executable(scatter scatter.c) + add_executable(reduce reduce.c) + add_executable(split split.c) + add_executable(dsend dsend.c) + add_executable(smpi_sendrecv sendrecv.c) + add_executable(ttest01 ttest01.c) + add_executable(vector_test vector_test.c) + add_executable(hvector_test hvector_test.c) + add_executable(indexed_test indexed_test.c) + add_executable(struct_test struct_test.c) + + target_link_libraries(alltoall2 m simgrid smpi ) + target_link_libraries(alltoall_basic m simgrid smpi ) + target_link_libraries(alltoallv m simgrid smpi ) + target_link_libraries(allreduce m simgrid smpi ) + target_link_libraries(bcast m simgrid smpi ) + target_link_libraries(compute m simgrid smpi ) + target_link_libraries(compute2 m simgrid smpi ) + target_link_libraries(compute3 m simgrid smpi ) + target_link_libraries(pingpong m simgrid smpi ) + target_link_libraries(scatter m simgrid smpi ) + target_link_libraries(reduce m simgrid smpi ) + target_link_libraries(split m simgrid smpi ) + target_link_libraries(dsend m simgrid smpi ) + target_link_libraries(smpi_sendrecv m simgrid smpi ) + 
target_link_libraries(ttest01 m simgrid smpi ) + target_link_libraries(vector_test m simgrid smpi ) + target_link_libraries(hvector_test m simgrid smpi ) + target_link_libraries(indexed_test m simgrid smpi ) + target_link_libraries(struct_test m simgrid smpi ) + + set_target_properties(smpi_sendrecv PROPERTIES OUTPUT_NAME sendrecv) +endif(enable_smpi) + +set(tesh_files + ${tesh_files} + ${CMAKE_CURRENT_SOURCE_DIR}/bcast.tesh + ${CMAKE_CURRENT_SOURCE_DIR}/hvector.tesh + ${CMAKE_CURRENT_SOURCE_DIR}/indexed.tesh + ${CMAKE_CURRENT_SOURCE_DIR}/pt2pt.tesh + ${CMAKE_CURRENT_SOURCE_DIR}/reduce.tesh + ${CMAKE_CURRENT_SOURCE_DIR}/struct.tesh + ${CMAKE_CURRENT_SOURCE_DIR}/vector.tesh + PARENT_SCOPE + ) +set(xml_files + ${xml_files} + PARENT_SCOPE + ) +set(examples_src + ${examples_src} + ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv.c + ${CMAKE_CURRENT_SOURCE_DIR}/get_processor_name.c + ${CMAKE_CURRENT_SOURCE_DIR}/pingpong.c + ${CMAKE_CURRENT_SOURCE_DIR}/bcast.c + ${CMAKE_CURRENT_SOURCE_DIR}/allreduce.c + ${CMAKE_CURRENT_SOURCE_DIR}/alltoall_basic.c + ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv.c + ${CMAKE_CURRENT_SOURCE_DIR}/reduce.c + ${CMAKE_CURRENT_SOURCE_DIR}/compute2.c + ${CMAKE_CURRENT_SOURCE_DIR}/split.c + ${CMAKE_CURRENT_SOURCE_DIR}/dsend.c + ${CMAKE_CURRENT_SOURCE_DIR}/ttest01.c + ${CMAKE_CURRENT_SOURCE_DIR}/compute.c + ${CMAKE_CURRENT_SOURCE_DIR}/compute3.c + ${CMAKE_CURRENT_SOURCE_DIR}/alltoall2.c + ${CMAKE_CURRENT_SOURCE_DIR}/scatter.c + ${CMAKE_CURRENT_SOURCE_DIR}/sendtest.c + ${CMAKE_CURRENT_SOURCE_DIR}/barrier.c + ${CMAKE_CURRENT_SOURCE_DIR}/vector_test.c + ${CMAKE_CURRENT_SOURCE_DIR}/hvector_test.c + ${CMAKE_CURRENT_SOURCE_DIR}/indexed_test.c + ${CMAKE_CURRENT_SOURCE_DIR}/struct_test.c + PARENT_SCOPE + ) +set(bin_files + ${bin_files} + ${CMAKE_CURRENT_SOURCE_DIR}/hostfile + PARENT_SCOPE + ) +set(txt_files + ${txt_files} + PARENT_SCOPE + ) diff --git a/teshsuite/smpi/mpich-test/README b/teshsuite/smpi/mpich-test/README new file mode 100644 index 0000000000..dcb011acea ---
/dev/null +++ b/teshsuite/smpi/mpich-test/README @@ -0,0 +1,75 @@ +This directory contains a variety of different test codes +of different types. This directory and many of the files in +it are still undergoing active development and change. Please +forgive (and report) any problems you have with these programs. + +These tests may be used with any MPI implementation. In a few cases, +differences in error messages may be reported; these are not errors, of +course. However, the accuracy and detail of the messages should be evaluated. + +To build and run the tests, execute configure followed by make testing. +The options to configure for some MPI implementations follow: + +SGI: + ./configure -cc=cc -fc=f77 +IBM: + ./configure -cc=mpcc -fc=mpxlf + (You also need a script called "mpirun" that takes a -np number-of-procs + argument, since there are so many different ways to run parallel + programs on IBM systems) +MPICH: + ./configure -mpichpath= + +The directories are as follows: + +pt2pt - Test cases that generally cover various point to point + routines, such as send, isend, probe, etc... The + README in this directory contains some additional + useful information about running the tests. The tests + in this directory are most complete. + + +coll - Test programs for various collective operations + +context - Test programs for context operations + +env - Test programs for the environment routines + +profile - Test program(s) for MPI_Pcontrol + +topol - Test programs for the topology routines + +lederman- A series of tests of various types written by Steve + Lederman + + +Other directories and additional tests will be added in the future. 
+ +To run the test, but not leave the executables around, do (in this directory) + + make TESTARGS=-small testing >& testing.out + +If your MPI requires a boot step before running programs, use the target +testing-boot instead of testing: + + make TESTARGS=-small testing-boot >& testing.out + +This will boot any MPI startup daemons, and it will stop them at the end of the +test. + +If you are NOT using the MPICH implementation, then you can run the configure +script in this directory. If you need to view the configure options, then type + configure -help +and a list of configure options will be provided. + +You will probably also have to provide an "mpirun" program or script. +This has roughly the form + + mpirun -mvhome -np n -mvback "string" programname programargs + +The options -mvhome and -mvback "string" can be ignored; they are needed only +on systems that do not share file systems with the system running the runtests +script (yes, there is one such system). The option "-np n" specifies that +"n" processes are needed. Note that this is not always a power of 2; systems +must be prepared to accept any (small) value of n. 
+ diff --git a/teshsuite/smpi/mpich-test/coll/CMakeLists.txt b/teshsuite/smpi/mpich-test/coll/CMakeLists.txt new file mode 100644 index 0000000000..f01537eedc --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/CMakeLists.txt @@ -0,0 +1,156 @@ +cmake_minimum_required(VERSION 2.6) + +if(enable_smpi) + set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/bin/smpicc") + + set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") + set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1") + + include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi") + + add_executable(coll1 coll1.c test.c) + add_executable(coll2 coll2.c test.c) + add_executable(coll3 coll3.c test.c) + add_executable(coll4 coll4.c test.c) + add_executable(coll5 coll5.c test.c) + add_executable(coll6 coll6.c test.c) + add_executable(coll7 coll7.c test.c) + add_executable(coll8 coll8.c test.c) + add_executable(coll9 coll9.c test.c) + add_executable(coll10 coll10.c test.c) + add_executable(coll11 coll11.c test.c) + add_executable(coll12 coll12.c test.c) + add_executable(coll13 coll13.c) + add_executable(allredmany allredmany.c) + add_executable(bcastbug2 bcastbug2.c test.c) + add_executable(bcastbug bcastbug.c test.c) + add_executable(bcastvec bcastvec.c test.c ) + add_executable(grouptest grouptest.c test.c) + add_executable(redtst redtst.c test.c) + add_executable(barrier barrier.c test.c) + add_executable(bcast_mpich bcast.c test.c) + add_executable(allred allred.c ../pt2pt/gcomm.c) + add_executable(allred2 allred2.c ../pt2pt/gcomm.c) + add_executable(scatterv scatterv.c) + add_executable(scattern scattern.c) + add_executable(redscat redscat.c) + add_executable(alltoallv_mpich alltoallv.c) + add_executable(scantst scantst.c test.c) + add_executable(longuser longuser.c test.c) + + + target_link_libraries(coll1 m simgrid smpi ) + target_link_libraries(coll2 m simgrid smpi ) + 
target_link_libraries(coll3 m simgrid smpi ) + target_link_libraries(coll4 m simgrid smpi ) + target_link_libraries(coll5 m simgrid smpi ) + target_link_libraries(coll6 m simgrid smpi ) + target_link_libraries(coll7 m simgrid smpi ) + target_link_libraries(coll8 m simgrid smpi ) + target_link_libraries(coll9 m simgrid smpi ) + target_link_libraries(coll10 m simgrid smpi ) + target_link_libraries(coll11 m simgrid smpi ) + target_link_libraries(coll12 m simgrid smpi ) + target_link_libraries(coll13 m simgrid smpi ) + target_link_libraries(allredmany m simgrid smpi ) + target_link_libraries(bcastbug m simgrid smpi ) + target_link_libraries(bcastbug2 m simgrid smpi ) + target_link_libraries(bcastvec m simgrid smpi ) + target_link_libraries(grouptest m simgrid smpi ) + target_link_libraries(redtst m simgrid smpi ) + target_link_libraries(barrier m simgrid smpi ) + target_link_libraries(bcast_mpich m simgrid smpi ) + target_link_libraries(allred m simgrid smpi ) + target_link_libraries(allred2 m simgrid smpi ) + target_link_libraries(scatterv m simgrid smpi ) + target_link_libraries(scattern m simgrid smpi ) + target_link_libraries(redscat m simgrid smpi ) + target_link_libraries(longuser m simgrid smpi ) + target_link_libraries(alltoallv_mpich m simgrid smpi ) + target_link_libraries(scantst m simgrid smpi ) + + + set_target_properties(coll1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll5 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll6 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll7 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll8 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll9 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + 
set_target_properties(coll10 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll11 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll12 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(coll13 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(allredmany PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(bcastbug PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(bcastbug2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(bcastvec PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(grouptest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(redtst PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(barrier PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(bcast_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(allred PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(allred2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(scatterv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(scattern PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(redscat PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(longuser PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(alltoallv_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(scantst PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") +endif(enable_smpi) + +set(tesh_files + ${tesh_files} + ${CMAKE_CURRENT_SOURCE_DIR}/coll.tesh + PARENT_SCOPE + ) +set(xml_files + ${xml_files} + PARENT_SCOPE + ) +set(examples_src + ${examples_src} + ${CMAKE_CURRENT_SOURCE_DIR}/coll1.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll2.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll3.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll4.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll5.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll6.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll7.c + 
${CMAKE_CURRENT_SOURCE_DIR}/coll8.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll9.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll10.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll11.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll12.c + ${CMAKE_CURRENT_SOURCE_DIR}/coll13.c + ${CMAKE_CURRENT_SOURCE_DIR}/allredmany.c + ${CMAKE_CURRENT_SOURCE_DIR}/bcastbug.c + ${CMAKE_CURRENT_SOURCE_DIR}/bcastbug2.c + ${CMAKE_CURRENT_SOURCE_DIR}/bcastvec.c + ${CMAKE_CURRENT_SOURCE_DIR}/grouptest.c + ${CMAKE_CURRENT_SOURCE_DIR}/redtst.c + ${CMAKE_CURRENT_SOURCE_DIR}/barrier.c + ${CMAKE_CURRENT_SOURCE_DIR}/bcast.c + ${CMAKE_CURRENT_SOURCE_DIR}/allred.c + ${CMAKE_CURRENT_SOURCE_DIR}/allred2.c + ${CMAKE_CURRENT_SOURCE_DIR}/scatterv.c + ${CMAKE_CURRENT_SOURCE_DIR}/scattern.c + ${CMAKE_CURRENT_SOURCE_DIR}/redscat.c + ${CMAKE_CURRENT_SOURCE_DIR}/longuser.c + ${CMAKE_CURRENT_SOURCE_DIR}/alltoallv.c + ${CMAKE_CURRENT_SOURCE_DIR}/scantst.c + ${CMAKE_CURRENT_SOURCE_DIR}/test.c + ${CMAKE_CURRENT_SOURCE_DIR}/test.h + PARENT_SCOPE + ) +set(bin_files + ${bin_files} + ${CMAKE_CURRENT_SOURCE_DIR}/../hostfile + PARENT_SCOPE + ) +set(txt_files + ${txt_files} + PARENT_SCOPE + ) diff --git a/teshsuite/smpi/mpich-test/coll/allgatherf.f b/teshsuite/smpi/mpich-test/coll/allgatherf.f new file mode 100644 index 0000000000..462d3e0f73 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/allgatherf.f @@ -0,0 +1,41 @@ +c +c This test looks at sending some data with a count of zero. 
+c + program testmpi + integer mnprocs, lcwk1 + parameter ( mnprocs = 2, lcwk1 = 6 ) + integer comm, rc, myid, nprocs, ierr, i, + & recvts(0:mnprocs-1), displs(0:mnprocs-1) + double precision wrkbuf(3), cwk1(lcwk1) + include 'mpif.h' +c + call MPI_INIT( ierr ) + comm = MPI_COMM_WORLD + call MPI_COMM_RANK( comm, myid, ierr ) + call MPI_COMM_SIZE( comm, nprocs, ierr ) +c + do i = 1, lcwk1 + cwk1(i) = -10 + end do + do i=1,3 + wrkbuf(i) = myid + end do + do i = 0, mnprocs-1 + recvts(i) = 3 + displs(i) = 3 * i + end do + recvts(mnprocs-1) = 0 + displs(mnprocs-1) = 0 +c + call MPI_ALLGATHERV( wrkbuf, recvts(myid), + & MPI_DOUBLE_PRECISION, cwk1, recvts, + & displs, MPI_DOUBLE_PRECISION, comm, ierr ) +c + do i = 1, lcwk1 + print *, myid, i, cwk1(i) + end do +c + call MPI_FINALIZE(rc) +c + end +c diff --git a/teshsuite/smpi/mpich-test/coll/allred.c b/teshsuite/smpi/mpich-test/coll/allred.c new file mode 100644 index 0000000000..740ceb361d --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/allred.c @@ -0,0 +1,2466 @@ + +#include +#include "mpi.h" +#include +#include +#include "test.h" +#include "../pt2pt/gcomm.h" + +int verbose = 1; +int main( int argc, char **argv ) +{ +int count, errcnt = 0, gerr = 0, toterr, size, rank; +MPI_Comm comm; + +MPI_Comm comms[10]; +int ncomm, ii, world_rank; + +MPI_Init( &argc, &argv ); +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + +/* First tests */ +MakeComms( comms, 10, &ncomm, 0 ); +for (ii=0; ii 0) + printf( "Found %d errors on %d for MPI_SUM\n", errcnt, rank ); +errcnt = 0; + +/* Test product */ +if (world_rank == 0 && verbose) printf( "Testing MPI_PROD...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_INT, MPI_PROD, comm ); +for (i=0; i 0) ? 
(int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_LONG, MPI_PROD, comm ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_SHORT, MPI_PROD, comm ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_SHORT, MPI_PROD, comm ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED, MPI_PROD, comm ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_LONG, MPI_PROD, comm ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_FLOAT, MPI_PROD, comm ); +for (i=0; i 0) ? (int)(pow((double)(i),(double)size)+0.1) : 0; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_DOUBLE, MPI_PROD, comm ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_PROD\n", errcnt, rank ); +errcnt = 0; + +/* Test max */ +if (world_rank == 0 && verbose) printf( "Testing MPI_MAX...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_MAX\n", errcnt, rank ); +errcnt = 0; + +/* Test min */ +if (world_rank == 0 && verbose) printf( "Testing MPI_MIN...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_MIN\n", errcnt, rank ); +errcnt = 0; + +/* Test LOR */ +if (world_rank == 0 && verbose) printf( "Testing MPI_LOR...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) 
); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_INT, MPI_LOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_LONG, MPI_LOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_SHORT, MPI_LOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_SHORT, MPI_LOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED, MPI_LOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_LONG, MPI_LOR, comm ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_LOR(1)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_LOR(0)\n", errcnt, rank ); +errcnt = 0; + +/* Test LXOR */ +if (world_rank == 0 && verbose) printf( "Testing MPI_LXOR...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_INT, MPI_LXOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_LONG, MPI_LXOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_SHORT, MPI_LXOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_SHORT, MPI_LXOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED, MPI_LXOR, comm ); +for (i=0; i 1); + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_LONG, MPI_LXOR, comm ); +for (i=0; i 0) + printf( "Found %d errors on %d for 
MPI_LXOR(1)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_LXOR(0)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_LXOR(1-0)\n", errcnt, rank ); +errcnt = 0; + +/* Test LAND */ +if (world_rank == 0 && verbose) printf( "Testing MPI_LAND...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_LAND(0)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_LAND(1)\n", errcnt, rank ); +errcnt = 0; + +/* Test BOR */ +if (world_rank == 0 && verbose) printf( "Testing MPI_BOR...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_BOR(1)\n", errcnt, rank ); +errcnt = 0; + +/* Test BAND */ +if (world_rank == 0 && verbose) printf( "Testing MPI_BAND...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_BAND(1)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, 
*sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_BAND(0)\n", errcnt, rank ); +errcnt = 0; + +/* Test BXOR */ +if (world_rank == 0 && verbose) printf( "Testing MPI_BXOR...\n" ); + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 1)*0xf0 ; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_INT, MPI_BXOR, comm ); +for (i=0; i 1)*0xf0 ; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_LONG, MPI_BXOR, comm ); +for (i=0; i 1)*0xf0 ; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_SHORT, MPI_BXOR, comm ); +for (i=0; i 1)*0xf0 ; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_SHORT, MPI_BXOR, comm ); +for (i=0; i 1)*0xf0 ; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED, MPI_BXOR, comm ); +for (i=0; i 1)*0xf0 ; + *(out + i) = 0; } +MPI_Allreduce( in, out, count, MPI_UNSIGNED_LONG, MPI_BXOR, comm ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_BXOR(1)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_BXOR(0)\n", errcnt, rank ); +errcnt = 0; + + +{ +int *in, *out, *sol; +int i, fnderr=0; +in = (int *)malloc( count * sizeof(int) ); +out = (int *)malloc( count * sizeof(int) ); +sol = (int *)malloc( count * sizeof(int) ); +for (i=0; i 0) + printf( "Found %d errors on %d for MPI_BXOR(1-0)\n", errcnt, rank ); +errcnt = 0; + +/* Test Maxloc */ +if (world_rank == 0 && verbose) printf( "Testing MPI_MAXLOC...\n" ); + +{ +struct int_test { int a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct 
int_test *)malloc( count * sizeof(struct int_test) ); +out = (struct int_test *)malloc( count * sizeof(struct int_test) ); +sol = (struct int_test *)malloc( count * sizeof(struct int_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = (size - 1 + i); (sol + i)->b = (size-1); + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_2INT, MPI_MAXLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_2INT and op MPI_MAXLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct long_test { long a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct long_test *)malloc( count * sizeof(struct long_test) ); +out = (struct long_test *)malloc( count * sizeof(struct long_test) ); +sol = (struct long_test *)malloc( count * sizeof(struct long_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = (size - 1 + i); (sol + i)->b = (size-1); + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_LONG_INT, MPI_MAXLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_LONG_INT and op MPI_MAXLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct short_test { short a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct short_test *)malloc( count * sizeof(struct short_test) ); +out = (struct short_test *)malloc( count * sizeof(struct short_test) ); +sol = (struct short_test *)malloc( count * 
sizeof(struct short_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = (size - 1 + i); (sol + i)->b = (size-1); + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_SHORT_INT, MPI_MAXLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_SHORT_INT and op MPI_MAXLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct float_test { float a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct float_test *)malloc( count * sizeof(struct float_test) ); +out = (struct float_test *)malloc( count * sizeof(struct float_test) ); +sol = (struct float_test *)malloc( count * sizeof(struct float_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = (size - 1 + i); (sol + i)->b = (size-1); + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_FLOAT_INT, MPI_MAXLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_FLOAT_INT and op MPI_MAXLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct double_test { double a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct double_test *)malloc( count * sizeof(struct double_test) ); +out = (struct double_test *)malloc( count * sizeof(struct double_test) ); +sol = (struct double_test *)malloc( count * sizeof(struct double_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = (size - 1 + i); (sol + i)->b = (size-1); + 
(out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_DOUBLE_INT, MPI_MAXLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_DOUBLE_INT and op MPI_MAXLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +gerr += errcnt; +if (errcnt > 0) + printf( "Found %d errors on %d for MPI_MAXLOC\n", errcnt, rank ); +errcnt = 0; + +/* Test minloc */ +if (world_rank == 0 && verbose) printf( "Testing MPI_MINLOC...\n" ); + + +{ +struct int_test { int a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct int_test *)malloc( count * sizeof(struct int_test) ); +out = (struct int_test *)malloc( count * sizeof(struct int_test) ); +sol = (struct int_test *)malloc( count * sizeof(struct int_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = i; (sol + i)->b = 0; + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_2INT, MPI_MINLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_2INT and op MPI_MINLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct long_test { long a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct long_test *)malloc( count * sizeof(struct long_test) ); +out = (struct long_test *)malloc( count * sizeof(struct long_test) ); +sol = (struct long_test *)malloc( count * sizeof(struct long_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = i; (sol + i)->b = 0; + (out + 
i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_LONG_INT, MPI_MINLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_LONG_INT and op MPI_MINLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct short_test { short a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct short_test *)malloc( count * sizeof(struct short_test) ); +out = (struct short_test *)malloc( count * sizeof(struct short_test) ); +sol = (struct short_test *)malloc( count * sizeof(struct short_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = i; (sol + i)->b = 0; + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_SHORT_INT, MPI_MINLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_SHORT_INT and op MPI_MINLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct float_test { float a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct float_test *)malloc( count * sizeof(struct float_test) ); +out = (struct float_test *)malloc( count * sizeof(struct float_test) ); +sol = (struct float_test *)malloc( count * sizeof(struct float_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = i; (sol + i)->b = 0; + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_FLOAT_INT, MPI_MINLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + 
fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_FLOAT_INT and op MPI_MINLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +{ +struct double_test { double a; int b; } *in, *out, *sol; +int i,fnderr=0; +in = (struct double_test *)malloc( count * sizeof(struct double_test) ); +out = (struct double_test *)malloc( count * sizeof(struct double_test) ); +sol = (struct double_test *)malloc( count * sizeof(struct double_test) ); +for (i=0; ia = (rank + i); (in + i)->b = rank; + (sol + i)->a = i; (sol + i)->b = 0; + (out + i)->a = 0; (out + i)->b = -1; } +MPI_Allreduce( in, out, count, MPI_DOUBLE_INT, MPI_MINLOC, comm ); +for (i=0; ia != (sol + i)->a || + (out + i)->b != (sol + i)->b) { + errcnt++; fnderr++; + fprintf( stderr, "(%d) Expected (%d,%d) got (%d,%d)\n", world_rank, + (int)((sol + i)->a), + (sol+i)->b, (int)((out+i)->a), (out+i)->b ); +}} +if (fnderr) fprintf( stderr, + "(%d) Error for type MPI_DOUBLE_INT and op MPI_MINLOC (%d of %d wrong)\n", + world_rank, fnderr, count ); +free( in ); +free( out ); +free( sol ); +} + + +gerr += errcnt; +if (errcnt > 0) + printf( "Found %d errors on %d for MPI_MINLOC\n", errcnt, rank ); +errcnt = 0; + +} +if (gerr > 0) { + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + printf( "Found %d errors overall on %d\n", gerr, rank ); + } +MPI_Allreduce( &gerr, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } +FreeComms( comms, ncomm ); +MPI_Finalize( ); +return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/allred2.c b/teshsuite/smpi/mpich-test/coll/allred2.c new file mode 100644 index 0000000000..a9dc98a214 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/allred2.c @@ -0,0 +1,50 @@ +/* + This test 
checks for possible interference between + successive calls to MPI_Allreduce. Some users, on some MPI implementations + and platforms, have had to add MPI_Barrier before MPI_Allreduce calls. + */ +#include "mpi.h" +#include + +#define MAX_LOOP 1000 + +int main( int argc, char *argv[] ) +{ + int i, in_val, out_val; + int rank, size; + int errs = 0, toterrs; + + MPI_Init( &argc, &argv ); + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + for (i=0; i +#include "mpi.h" +#include "test.h" + +/* + * This example should be run with 2 processes and tests the ability of the + * implementation to handle a flood of one-way messages. + */ + +int main( int argc, char **argv ) +{ + double wscale = 10.0, scale; + int numprocs, myid,i,namelen; + char processor_name[MPI_MAX_PROCESSOR_NAME]; + + MPI_Init(&argc,&argv); + MPI_Comm_size(MPI_COMM_WORLD,&numprocs); + MPI_Comm_rank(MPI_COMM_WORLD,&myid); + MPI_Get_processor_name(processor_name,&namelen); + + /* fprintf(stderr,"Process %d on %s\n", + myid, processor_name); */ + for ( i=0; i<10000; i++) { + MPI_Allreduce(&wscale,&scale,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + } + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/allredmany.std b/teshsuite/smpi/mpich-test/coll/allredmany.std new file mode 100644 index 0000000000..e7c3f62796 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/allredmany.std @@ -0,0 +1,22 @@ +*** allredmany *** +*** allredmany run 0 *** +*** allredmany run 1 *** +*** allredmany run 2 *** +*** allredmany run 3 *** +*** allredmany run 4 *** +*** allredmany run 5 *** +*** allredmany run 6 *** +*** allredmany run 7 *** +*** allredmany run 8 *** +*** allredmany run 9 *** +*** allredmany run 10 *** +*** allredmany run 11 *** +*** allredmany run 12 *** +*** allredmany run 13 *** +*** allredmany run 14 *** +*** allredmany run 15 *** +*** allredmany run 16 *** +*** allredmany run 17 *** +*** allredmany run 18 *** +*** allredmany run 19 *** +*** allredmany *** 
diff --git a/teshsuite/smpi/mpich-test/coll/alltoallv.c b/teshsuite/smpi/mpich-test/coll/alltoallv.c new file mode 100644 index 0000000000..b08979bbca --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/alltoallv.c @@ -0,0 +1,97 @@ +#include "mpi.h" +#include +#include +#include "test.h" + +/* + This program tests MPI_Alltoallv by having processor i send different + amounts of data to each processor. + + Because there are separate send and receive types to alltoallv, + there need to be tests to rearrange data on the fly. Not done yet. + + The first test sends i items to processor i from all processors. + + Currently, the test uses only MPI_INT; this is adequate for testing systems + that use point-to-point operations + */ + +int main( int argc, char **argv ) +{ + + MPI_Comm comm; + int *sbuf, *rbuf; + int rank, size; + int *sendcounts, *recvcounts, *rdispls, *sdispls; + int i, j, *p, err, toterr; + + MPI_Init( &argc, &argv ); + err = 0; + + comm = MPI_COMM_WORLD; + + /* Create the buffer */ + MPI_Comm_size( comm, &size ); + MPI_Comm_rank( comm, &rank ); + sbuf = (int *)malloc( size * size * sizeof(int) ); + rbuf = (int *)malloc( size * size * sizeof(int) ); + if (!sbuf || !rbuf) { + fprintf( stderr, "Could not allocated buffers!\n" ); + MPI_Abort( comm, 1 ); + } + + /* Load up the buffers */ + for (i=0; i 0) + fprintf( stderr, "Test FAILED with %d errors\n", toterr ); + else + fprintf( stderr, " No Errors\n" ); + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/assocf.f b/teshsuite/smpi/mpich-test/coll/assocf.f new file mode 100644 index 0000000000..f39747c39e --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/assocf.f @@ -0,0 +1,73 @@ +C +C Thanks to zollweg@tc.cornell.edu (John A. Zollweg) for this test +C which detected a problem in one version of the IBM product +C implementation of MPI. 
The source of the problem in that implementation +C was assuming that floating point arithmetic was associative (it isn't +C even commutative on IBM hardware). +C +C This program was designed for IEEE and may be uninteresting on other +C systems. Note that since it is testing that the same VALUE is +C delivered at each system, it will run correctly on all systems. +C + PROGRAM ALLREDUCE + include 'mpif.h' + real*8 myval(4), sum, recvbuf(4) + integer ier, me, size, tsize, dtype, i, errors, toterr + data myval /-12830196119319614d0,9154042893114674d0, + &2371516219785616d0,1304637006419324.8d0/ + call MPI_INIT(ier) + call MPI_COMM_SIZE(MPI_COMM_WORLD,size,ier) + if (size.ne.4) then + print *,"This test case must be run as a four-way job" + call MPI_FINALIZE(ier) + stop + end if + call MPI_TYPE_SIZE( MPI_REAL, tsize, ier ) + if (tsize .eq. 8) then + dtype = MPI_REAL + else + call MPI_TYPE_SIZE( MPI_DOUBLE_PRECISION, tsize, ier ) + if (tsize .ne. 8) then + print *, " Can not test allreduce without an 8 byte" + print *, " floating double type." + call MPI_FINALIZE(ier) + stop + endif + dtype = MPI_DOUBLE_PRECISION + endif + call MPI_COMM_RANK(MPI_COMM_WORLD,me,ier) + call MPI_ALLREDUCE(myval(me+1),sum,1,dtype,MPI_SUM, + &MPI_COMM_WORLD,ier) +C +C collect the values and make sure that they are all the same BITWISE +C We could use Gather, but this gives us an added test. +C + do 5 i=1,4 + recvbuf(i) = i + 5 continue + call MPI_ALLGATHER( sum, 1, dtype, recvbuf, 1, dtype, + & MPI_COMM_WORLD, ier ) + errors = 0 + do 10 i=2,4 +C print *, "recvbuf(",i,") = ", recvbuf(i), " on ", me + if (recvbuf(1) .ne. recvbuf(i)) then + errors = errors + 1 + print *, "Inconsistent values for ", i, "th entry on ", + & me + print *, recvbuf(1), " not equal to ", recvbuf(i) + endif + 10 continue + call MPI_ALLREDUCE( errors, toterr, 1, MPI_INTEGER, MPI_SUM, + & MPI_COMM_WORLD, ier ) + if (me .eq. 0) then + if (toterr .gt. 0) then + print *, " FAILED with ", toterr, " errors." 
+ else + print *, " No Errors" + endif + endif +C print *," The value of the sum on node ",me,"is",sum + call MPI_FINALIZE(ier) +C Calling stop can generate unwanted noise on some systems, and is not +C required. + end diff --git a/teshsuite/smpi/mpich-test/coll/barrier.c b/teshsuite/smpi/mpich-test/coll/barrier.c new file mode 100644 index 0000000000..94fd3621d1 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/barrier.c @@ -0,0 +1,88 @@ +/* This program provides some simple verification of the MPI_Barrier + * program. All of the clients send a message to indicate that they + * are alive (a simple character string) and then the all of the + * clients enter an MPI_Barrier. The server then Iprobes for a while + * to make sure that none of the "through barrier" messages that the + * clients send after leaving the barrier arive before the server enters + * the barrier. The server then enters the barrier, and upon leaving, + * waits for a message from each client. + */ + +#include "test.h" +#include "mpi.h" + +#define WAIT_TIMES 500 + +int +main( int argc, char **argv) +{ + int rank, size, i, recv_flag, ret, passed; + MPI_Status Status; + char message[17]; + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + if (rank == 0) { + Test_Init("barrier", rank); + /* Receive the startup messages from each of the + other clients */ + for (i = 0; i < size - 1; i++) { + MPI_Recv(message, 17, MPI_CHAR, MPI_ANY_SOURCE, 2000, + MPI_COMM_WORLD, &Status); + } + + /* Now use Iprobe to make sure no more messages arive for a + while */ + passed = 1; + for (i = 0; i < WAIT_TIMES; i++){ + recv_flag = 0; + MPI_Iprobe(MPI_ANY_SOURCE, 2000, MPI_COMM_WORLD, + &recv_flag, &Status); + if (recv_flag) + passed = 0; + } + + if (passed) + Test_Passed("Barrier Test 1"); + else + Test_Failed("Barrier Test 1"); + + /* Now go into the barrier myself */ + MPI_Barrier(MPI_COMM_WORLD); + + /* And get everyones message who came out */ + for (i = 0; i 
< size - 1; i++) { + MPI_Recv(message, 13, MPI_CHAR, MPI_ANY_SOURCE, 2000, + MPI_COMM_WORLD, &Status); + } + + /* Now use Iprobe to make sure no more messages arive for a + while */ + passed = 1; + for (i = 0; i < WAIT_TIMES; i++){ + recv_flag = 0; + MPI_Iprobe(MPI_ANY_SOURCE, 2000, MPI_COMM_WORLD, + &recv_flag, &Status); + if (recv_flag) + passed = 0; + } + if (passed) + Test_Passed("Barrier Test 2"); + else + Test_Failed("Barrier Test 2"); + + Test_Waitforall( ); + ret = Summarize_Test_Results(); + Test_Finalize(); + MPI_Finalize(); + return ret; + } else { + MPI_Send((char*)"Entering Barrier", 17, MPI_CHAR, 0, 2000, MPI_COMM_WORLD); + MPI_Barrier(MPI_COMM_WORLD); + MPI_Send((char*)"Past Barrier", 13, MPI_CHAR, 0, 2000, MPI_COMM_WORLD); + Test_Waitforall( ); + MPI_Finalize(); + return 0; + } +} diff --git a/teshsuite/smpi/mpich-test/coll/bcast.c b/teshsuite/smpi/mpich-test/coll/bcast.c new file mode 100644 index 0000000000..f8c983a5ac --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/bcast.c @@ -0,0 +1,53 @@ +/* + * This program performs some simple tests of the MPI_Bcast broadcast + * functionality. + */ + +#include "test.h" +#include "mpi.h" +#include + +int +main( int argc, char **argv) +{ + int rank, size, ret, passed, i, *test_array; + + /* Set up MPI */ + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + /* Setup the tests */ + Test_Init("bcast", rank); + test_array = (int *)malloc(size*sizeof(int)); + + /* Perform the test - this operation should really be done + with an allgather, but it makes a good test... 
*/ + passed = 1; + for (i=0; i < size; i++) { + if (i == rank) + test_array[i] = i; + MPI_Bcast(test_array, size, MPI_INT, i, MPI_COMM_WORLD); + if (test_array[i] != i) + passed = 0; + } + if (!passed) + Test_Failed("Simple Broadcast test"); + else { + if (rank == 0) + Test_Passed("Simple Broadcast test"); + } + + /* Close down the tests */ + free(test_array); + if (rank == 0) + ret = Summarize_Test_Results(); + else + ret = 0; + Test_Finalize(); + + /* Close down MPI */ + Test_Waitforall( ); + MPI_Finalize(); + return ret; +} diff --git a/teshsuite/smpi/mpich-test/coll/bcast2.f b/teshsuite/smpi/mpich-test/coll/bcast2.f new file mode 100644 index 0000000000..e62d39a25c --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/bcast2.f @@ -0,0 +1,37 @@ + program test +C +C This program hangs when run with the version of MPICH (1.1.2) distributed +C by Myricom using their ch_gm device. I've added it to our collection +C on general principle; note that it hasn't been put into a form usable +C by our tests yet +C + include 'mpif.h' + integer comm_size,comm_rank,status(mpi_status_size) + integer at_number,chunk + double precision T0,D + at_number=0 + chunk=0 + T0=3D3048.48883 + D=3D3877.4888 + call mpi_init(ierror) + call mpi_comm_size(mpi_comm_world,comm_size,ierror) + call mpi_comm_rank(mpi_comm_world,comm_rank,ierror) + CALL MPI_BCAST(at_number,1,mpi_integer,0,mpi_comm_world,ierr) + CALL MPI_BCAST(chunk,1,mpi_integer,0,mpi_comm_world,ierr) + CALL MPI_BCAST(T0,1,mpi_double_precision,0,mpi_comm_world,ierr) + CALL MPI_BCAST(D,1,mpi_double_precision,0,mpi_comm_world,ierr) + + write(6,*) 'Rank=3D',comm_rank,' finished bcast' + do i=3D1,99999 + T0=3Di*1.0d0 + d=3Dt0**.987 + do j=3D1,100 + a=3Dj**.2 + enddo + enddo + write(6,*) 'Rank=3D',comm_rank,' finished calculations' + call mpi_finalize(ierror) + stop + en +C +C Run with mpirun -np 16 test diff --git a/teshsuite/smpi/mpich-test/coll/bcastbug.c b/teshsuite/smpi/mpich-test/coll/bcastbug.c new file mode 100644 index 
0000000000..dc2d81af99 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/bcastbug.c @@ -0,0 +1,70 @@ +#include "mpi.h" +#include +#include +#include "test.h" + +int main( int argc, char **argv ) +{ + char *buf; + int rank, size, i; + MPI_Request req[10]; + MPI_Status stat[10]; + MPI_Status status; + + buf = (char *)malloc(32*1024); + MPI_Init(&argc, &argv); + MPI_Comm_rank ( MPI_COMM_WORLD, &rank ); + MPI_Comm_size ( MPI_COMM_WORLD, &size ); + + if (size > 10) return 1; + + if (rank == 0) { + for ( i = 1; i < size; i++ ) + MPI_Isend(buf,1024,MPI_BYTE,i,0,MPI_COMM_WORLD,&req[i]); + MPI_Waitall(size-1, &req[1], &stat[1]); /* Core dumps here! */ + } + else + MPI_Recv(buf,1024,MPI_BYTE,0,0,MPI_COMM_WORLD,&status); + + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} + +#if 0 +int MPIND_Waitall(count, array_of_requests, array_of_statuses ) +int count; +MPI_Request array_of_requests[]; +MPI_Status array_of_statuses[]; +{ + int i; + MPIR_BOOL completed; + + for (i = 0; i < count; i++) { + if (!array_of_requests[i]) continue; + MPID_complete_send(&array_of_requests[i]->shandle, + &(array_of_statuses[i]) ); + + MPIND_Request_free( &array_of_requests[i] ); /* Core dumps here! 
*/ + array_of_requests[i] = NULL; + } + return MPI_SUCCESS; +} + + +#define MPID_ND_free_send_handle( a ) if ((a)->buffer) {FREE((a)->buffer);} + +int MPIND_Request_free( request ) +MPI_Request *request; +{ + int errno = MPI_SUCCESS; + + printf("Should be core dumping here (buffer = %d)...\n", + (&((*request)->shandle.dev_shandle))->buffer); + MPID_ND_free_send_handle(&((*request)->shandle.dev_shandle)); + printf("and not reaching here!\n"); + SBfree( MPIR_shandles, *request ); + + return MPI_SUCCESS; +} +#endif diff --git a/teshsuite/smpi/mpich-test/coll/bcastbug2.c b/teshsuite/smpi/mpich-test/coll/bcastbug2.c new file mode 100644 index 0000000000..4870195388 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/bcastbug2.c @@ -0,0 +1,29 @@ +#include "mpi.h" +#include +#include +#include "test.h" + +int main( int argc, char **argv) +{ + char *buf; + int i, iam; + MPI_Init(&argc, &argv); + MPI_Barrier(MPI_COMM_WORLD); + buf = (char *)malloc(32*1024); + MPI_Comm_rank(MPI_COMM_WORLD, &iam); + for(i=1; i<=32; i++){ + if (iam == 0){ + *buf=i; + printf("Broadcasting %d bytes\n", i*64); + } + MPI_Bcast(buf, i*64, MPI_BYTE, 0, MPI_COMM_WORLD); + if (*buf != i) printf("Sanity check error on node %d\n", iam); +/* gsync(); +*/ + MPI_Barrier(MPI_COMM_WORLD); + } + Test_Waitforall( ); + MPI_Finalize(); + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/bcastlog.f b/teshsuite/smpi/mpich-test/coll/bcastlog.f new file mode 100644 index 0000000000..71a2d355f1 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/bcastlog.f @@ -0,0 +1,38 @@ + program main +c test bcast of logical +c works on suns, needs mpich fix and heterogeneous test on alpha with PC + include 'mpif.h' + integer myid, numprocs, rc, ierr + integer errs, toterrs + logical boo + + call MPI_INIT( ierr ) + call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr ) + call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr ) +C + errs = 0 + boo = .true. + call MPI_BCAST(boo,1,MPI_LOGICAL,0,MPI_COMM_WORLD,ierr) + if (boo .neqv. 
.true.) then + print *, 'Did not broadcast Fortran logical (true)' + errs = errs + 1 + endif +C + boo = .false. + call MPI_BCAST(boo,1,MPI_LOGICAL,0,MPI_COMM_WORLD,ierr) + if (boo .neqv. .false.) then + print *, 'Did not broadcast Fortran logical (false)' + errs = errs + 1 + endif + call MPI_Reduce( errs, toterrs, 1, MPI_INTEGER, MPI_SUM, + $ 0, MPI_COMM_WORLD, ierr ) + if (myid .eq. 0) then + if (toterrs .eq. 0) then + print *, ' No Errors' + else + print *, ' Found ', toterrs, ' errors' + endif + endif + call MPI_FINALIZE(rc) + stop + end diff --git a/teshsuite/smpi/mpich-test/coll/bcastvec.c b/teshsuite/smpi/mpich-test/coll/bcastvec.c new file mode 100644 index 0000000000..b587174a86 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/bcastvec.c @@ -0,0 +1,83 @@ +/* + * This program performs some simple tests of the MPI_Bcast broadcast + * functionality. + * + * It checks the handling of different datatypes by different participants + * (with matching type signatures, of course), as well as different + * roots and communicators. + */ + +#include "test.h" +#include "mpi.h" +#include + +int main( int argc, char **argv ) +{ + int rank, size, ret, passed, i, *test_array; + int stride, count, root; + MPI_Datatype newtype; + MPI_Comm comm = MPI_COMM_WORLD; + + /* Set up MPI */ + MPI_Init(&argc, &argv); + MPI_Comm_rank(comm, &rank); + + /* Setup the tests */ + Test_Init("bcastvec", rank); + + /* Allow for additional communicators */ + MPI_Comm_size(comm, &size); + /* MPI_Comm_rank(comm, &rank); */ + stride = (rank + 1); + test_array = (int *)malloc(size*stride*sizeof(int)); + + /* Create the vector datatype EXCEPT for process 0 (vector of + stride 1 is contiguous) */ + if (rank > 0) { + count = 1; + MPI_Type_vector( size, 1, stride, MPI_INT, &newtype); + MPI_Type_commit( &newtype ); + } + else { + count = size; + newtype = MPI_INT; + } + + /* Perform the test. Each process in turn becomes the root. 
+ After each operation, check that nothing has gone wrong */ + passed = 1; + for (root = 0; root < size; root++) { + /* Fill the array with -1 for unset, rank + i * size for set */ + for (i=0; i +#include + +int main( int argc, char **argv ) +{ + int rank, size, i; + int *table; + int errors=0; + MPI_Aint address; + MPI_Datatype type, newtype; + int lens; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* Make data table */ + table = (int *) calloc (size, sizeof(int)); + table[rank] = rank + 1; + + MPI_Barrier ( MPI_COMM_WORLD ); + /* Broadcast the data */ + for ( i=0; i +#include "test.h" +#define BAD_ANSWER 100000 + +int assoc ( int *, int *, int *, MPI_Datatype * ); + +/* + The operation is inoutvec[i] = invec[i] op inoutvec[i] + (see 4.9.4). The order is important. + + Note that the computation is in process rank (in the communicator) + order, independant of the root. + */ +int assoc(invec, inoutvec, len, dtype) +int *invec, *inoutvec, *len; +MPI_Datatype *dtype; +{ + int i; + for ( i=0; i<*len; i++ ) { + if (inoutvec[i] <= invec[i] ) { + int rank; + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n", + rank, inoutvec[0], invec[0] ); + inoutvec[i] = BAD_ANSWER; + } + else + inoutvec[i] = invec[i]; + } + return (1); +} + +int main( int argc, char **argv ) +{ + int rank, size; + int data; + int errors=0; + int result = -100; + MPI_Op op; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + data = rank; + + MPI_Op_create( (MPI_User_function*)assoc, 0, &op ); + MPI_Reduce ( &data, &result, 1, MPI_INT, op, size-1, MPI_COMM_WORLD ); + MPI_Bcast ( &result, 1, MPI_INT, size-1, MPI_COMM_WORLD ); + MPI_Op_free( &op ); + if (result == BAD_ANSWER) errors++; + + if (errors) + printf( "[%d] done with ERRORS(%d)!\n", rank, errors ); + Test_Waitforall( ); + MPI_Finalize(); + + return errors; +} diff 
--git a/teshsuite/smpi/mpich-test/coll/coll11.c b/teshsuite/smpi/mpich-test/coll/coll11.c new file mode 100644 index 0000000000..e3ce6c8525 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/coll11.c @@ -0,0 +1,110 @@ +#include "mpi.h" +#include +#include "test.h" + +void addem ( int *, int *, int *, MPI_Datatype * ); +void assoc ( int *, int *, int *, MPI_Datatype * ); + +void addem(invec, inoutvec, len, dtype) +int *invec, *inoutvec, *len; +MPI_Datatype *dtype; +{ + int i; + for ( i=0; i<*len; i++ ) + inoutvec[i] += invec[i]; +} + +#define BAD_ANSWER 100000 + +/* + The operation is inoutvec[i] = invec[i] op inoutvec[i] + (see 4.9.4). The order is important. + + Note that the computation is in process rank (in the communicator) + order, independant of the root. + */ +void assoc(invec, inoutvec, len, dtype) +int *invec, *inoutvec, *len; +MPI_Datatype *dtype; +{ + int i; + for ( i=0; i<*len; i++ ) { + if (inoutvec[i] <= invec[i] ) { + int rank; + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n", + rank, inoutvec[0], invec[0] ); + inoutvec[i] = BAD_ANSWER; + } + else + inoutvec[i] = invec[i]; + } +} + +int main( int argc, char **argv ) +{ + int rank, size, i; + int data; + int errors=0; + int result = -100; + int correct_result; + MPI_Op op_assoc, op_addem; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + data = rank; + + correct_result = 0; + for (i=0;i<=rank;i++) + correct_result += i; + + MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error suming ints with scan\n", rank ); + errors++; + } + + MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank ); + errors++; + } + + data = rank; + result = -100; + MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc ); + 
MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem ); + MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error summing ints with scan (userop)\n", + rank ); + errors++; + } + + MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, MPI_COMM_WORLD ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n", + rank ); + errors++; + } + /*result = -100; + data = rank; + MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, MPI_COMM_WORLD ); + if (result == BAD_ANSWER) { + fprintf( stderr, "[%d] Error scanning with non-commutative op\n", + rank ); + errors++; + }*/ + + MPI_Op_free( &op_assoc ); + MPI_Op_free( &op_addem ); + + if (errors) + printf( "[%d] done with ERRORS(%d)!\n", rank, errors ); + + Test_Waitforall( ); + MPI_Finalize(); + return errors; +} diff --git a/teshsuite/smpi/mpich-test/coll/coll12.c b/teshsuite/smpi/mpich-test/coll/coll12.c new file mode 100644 index 0000000000..b25b52c8fe --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/coll12.c @@ -0,0 +1,76 @@ + +#include +#include "mpi.h" +#include "test.h" + +#define TABLE_SIZE 2 + +int main( int argc, char **argv ) +{ + int rank, size; + double a[TABLE_SIZE]; + struct { double a; int b; } in[TABLE_SIZE], out[TABLE_SIZE]; + int i; + int errors = 0, toterrors; + + /* Initialize the environment and some variables */ + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* Initialize the maxloc data */ + for ( i=0; i +#include + +#include +#include +#ifndef EXIT_SUCCESS +#define EXIT_SUCCESS 0 +#define EXIT_FAILURE 1 +#endif + +int main( int argc, char *argv[] ) +{ + int rank, size; + int chunk = 4096; + int i; + int *sb; + int *rb; + int status, gstatus; + + MPI_Init(&argc,&argv); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + MPI_Comm_size(MPI_COMM_WORLD,&size); + + for ( i=1 ; i < argc ; ++i ) { + if ( argv[i][0] != '-' ) + continue; + 
switch(argv[i][1]) { + case 'm': + chunk = atoi(argv[++i]); + break; + default: + fprintf(stderr,"Unrecognized argument %s\n", + argv[i]); + MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE); + } + } + + sb = (int *)malloc(size*chunk*sizeof(int)); + if ( !sb ) { + perror( "can't allocate send buffer" ); + MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE); + } + rb = (int *)malloc(size*chunk*sizeof(int)); + if ( !rb ) { + perror( "can't allocate recv buffer"); + free(sb); + MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE); + } + for ( i=0 ; i < size*chunk ; ++i ) { + sb[i] = rank + 1; + rb[i] = 0; + } + + /* fputs("Before MPI_Alltoall\n",stdout); */ + + /* This should really send MPI_CHAR, but since sb and rb were allocated + as chunk*size*sizeof(int), the buffers are large enough */ + status = MPI_Alltoall(sb,chunk,MPI_INT,rb,chunk,MPI_INT, + MPI_COMM_WORLD); + + /* fputs("Before MPI_Allreduce\n",stdout); */ + MPI_Allreduce( &status, &gstatus, 1, MPI_INT, MPI_SUM, + MPI_COMM_WORLD ); + + /* fputs("After MPI_Allreduce\n",stdout); */ + if (rank == 0) { + if (gstatus == 0) printf( " No Errors\n" ); + else + printf("all_to_all returned %d\n",gstatus); + } + + free(sb); + free(rb); + + MPI_Finalize(); + + return(EXIT_SUCCESS); +} + diff --git a/teshsuite/smpi/mpich-test/coll/coll2.c b/teshsuite/smpi/mpich-test/coll/coll2.c new file mode 100644 index 0000000000..d5871712f4 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/coll2.c @@ -0,0 +1,68 @@ +#include "mpi.h" +#include +#include "test.h" + +#define MAX_PROCESSES 10 + +int main( int argc, char **argv ) +{ + int rank, size, i,j; + int table[MAX_PROCESSES][MAX_PROCESSES]; + int errors=0; + int participants; + MPI_Comm testcomm; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* A maximum of MAX_PROCESSES processes can participate */ + if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES; + else participants = size; + /* Set the particpants so that it divides the MAX_PROCESSES 
*/ + while (MAX_PROCESSES % participants) participants--; + /* Create the communicator */ + MPI_Comm_split( MPI_COMM_WORLD, rank < participants, rank, &testcomm ); + + if (MAX_PROCESSES % participants) { + fprintf( stderr, "Number of processors must divide %d\n", + MAX_PROCESSES ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if ( (rank < participants) ) { + + /* Determine what rows are my responsibility */ + int block_size = MAX_PROCESSES / participants; + int begin_row = rank * block_size; + int end_row = (rank+1) * block_size; + int send_count = block_size * MAX_PROCESSES; + int recv_count = send_count; + + /* Paint my rows my color */ + for (i=begin_row; i +#include "test.h" + +#define MAX_PROCESSES 10 + +int main( int argc, char **argv ) +{ + int rank, size, i,j; + int table[MAX_PROCESSES][MAX_PROCESSES]; + int errors=0; + int participants; + int displs[MAX_PROCESSES]; + int recv_counts[MAX_PROCESSES]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* A maximum of MAX_PROCESSES processes can participate */ + if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES; + else participants = size; + /* while (MAX_PROCESSES % participants) participants--; */ + if (MAX_PROCESSES % participants) { + fprintf( stderr, "Number of processors must divide %d\n", + MAX_PROCESSES ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if ( (rank < participants) ) { + + /* Determine what rows are my responsibility */ + int block_size = MAX_PROCESSES / participants; + int begin_row = rank * block_size; + int end_row = (rank+1) * block_size; + int send_count = block_size * MAX_PROCESSES; + + /* Fill in the displacements and recv_counts */ + for (i=0; i +#include "test.h" + +#define MAX_PROCESSES 10 + +int main( int argc, char **argv ) +{ + int rank, size, i,j; + int table[MAX_PROCESSES][MAX_PROCESSES]; + int row[MAX_PROCESSES]; + int errors=0; + int participants; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, 
&rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* A maximum of MAX_PROCESSES processes can participate */ + if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES; + else participants = size; + if ( (rank < participants) ) { + int send_count = MAX_PROCESSES; + int recv_count = MAX_PROCESSES; + + /* If I'm the root (process 0), then fill out the big table */ + if (rank == 0) + for ( i=0; i +#include "test.h" + +#define MAX_PROCESSES 10 + +int main( int argc, char **argv ) +{ + int rank, size, i,j; + int table[MAX_PROCESSES][MAX_PROCESSES]; + int row[MAX_PROCESSES]; + int errors=0; + int participants; + int displs[MAX_PROCESSES]; + int send_counts[MAX_PROCESSES]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* A maximum of MAX_PROCESSES processes can participate */ + if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES; + else participants = size; + if ( (rank < participants) ) { + int recv_count = MAX_PROCESSES; + + /* If I'm the root (process 0), then fill out the big table */ + /* and setup send_counts and displs arrays */ + if (rank == 0) + for ( i=0; i +#include "test.h" + +#define MAX_PROCESSES 10 + +int main( int argc, char **argv ) +{ + int rank, size, i,j; + int table[MAX_PROCESSES][MAX_PROCESSES]; + int errors=0; + int participants; + int displs[MAX_PROCESSES]; + int recv_counts[MAX_PROCESSES]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* A maximum of MAX_PROCESSES processes can participate */ + if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES; + else participants = size; + /* while (MAX_PROCESSES % participants) participants--; */ + if (MAX_PROCESSES % participants) { + fprintf( stderr, "Number of processors must divide %d\n", + MAX_PROCESSES ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if ( (rank < participants) ) { + + /* Determine what rows are my responsibility */ + int block_size = 
MAX_PROCESSES / participants; + int begin_row = rank * block_size; + int end_row = (rank+1) * block_size; + int send_count = block_size * MAX_PROCESSES; + + /* Fill in the displacements and recv_counts */ + for (i=0; i +#include "test.h" + +#define MAX_PROCESSES 10 + +int main( int argc, char **argv ) +{ + int rank, size, i,j; + int table[MAX_PROCESSES][MAX_PROCESSES]; + int errors=0; + int participants; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* A maximum of MAX_PROCESSES processes can participate */ + if ( size > MAX_PROCESSES ) participants = MAX_PROCESSES; + else participants = size; + if (MAX_PROCESSES % participants) { + fprintf( stderr, "Number of processors must divide %d\n", + MAX_PROCESSES ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + /* while (MAX_PROCESSES % participants) participants--; */ + if ( (rank < participants) ) { + + /* Determine what rows are my responsibility */ + int block_size = MAX_PROCESSES / participants; + int begin_row = rank * block_size; + int end_row = (rank+1) * block_size; + int send_count = block_size * MAX_PROCESSES; + int recv_count = send_count; + + /* Paint my rows my color */ + for (i=begin_row; i +#include "test.h" + +int main( int argc, char **argv ) +{ + int rank, size, i; + int data; + int errors=0; + int result = -100; + int correct_result; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + data = rank; + + MPI_Reduce ( &data, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); + MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD ); + correct_result = 0; + for(i=0;i +#include "test.h" + +void addem ( int *, int *, int *, MPI_Datatype * ); + +void addem(invec, inoutvec, len, dtype) +int *invec, *inoutvec, *len; +MPI_Datatype *dtype; +{ + int i; + for ( i=0; i<*len; i++ ) + inoutvec[i] += invec[i]; +} + +int main( int argc, char **argv ) +{ + int rank, size, i; + int data; + int 
errors=0; + int result = -100; + int correct_result; + MPI_Op op; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + data = rank; + MPI_Op_create( (MPI_User_function *)addem, 1, &op ); + MPI_Reduce ( &data, &result, 1, MPI_INT, op, 0, MPI_COMM_WORLD ); + MPI_Bcast ( &result, 1, MPI_INT, 0, MPI_COMM_WORLD ); + MPI_Op_free( &op ); + correct_result = 0; + for(i=0;i +#include "test.h" + +int main( int argc, char **argv ) +{ + int rank, size, i; + MPI_Group group1, group2, group3, groupall, groupunion, newgroup; + MPI_Comm newcomm; + int ranks1[100], ranks2[100], ranks3[100]; + int nranks1=0, nranks2=0, nranks3=0; + + MPI_Init( &argc, &argv ); + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_group( MPI_COMM_WORLD, &groupall ); + + /* Divide groups */ + for (i=0; i +#include +#include "test.h" + +int add ( double *, double *, int *, MPI_Datatype * ); +/* + * User-defined operation on a long value (tests proper handling of + * possible pipelining in the implementation of reductions with user-defined + * operations). 
+ */ +int add( invec, inoutvec, len, dtype ) +double *invec, *inoutvec; +int *len; +MPI_Datatype *dtype; +{ + int i, n = *len; + for (i=0; i + +int main( int argc, char *argv[] ) +{ + int rank, size; + MPI_Comm local_comm; + MPI_Request r; + MPI_Status status; + double t0; + + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + if (size < 3) { + fprintf( stderr, "Need at least 3 processors\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_split( MPI_COMM_WORLD, rank < 2, rank, &local_comm ); + + MPI_Barrier( MPI_COMM_WORLD ); + if (rank == 0) { + /* First, ensure ssend works */ + t0 = MPI_Wtime(); + MPI_Ssend( MPI_BOTTOM, 0, MPI_INT, 1, 1, MPI_COMM_WORLD ); + t0 = MPI_Wtime() - t0; + if (t0 < 1.0) { + fprintf( stderr, "Ssend does not wait for recv!\n" ); + fflush( stderr ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + MPI_Barrier( MPI_COMM_WORLD ); + /* Start the ssend after process 1 is well into its barrier */ + t0 = MPI_Wtime(); + while (MPI_Wtime() - t0 < 1.0) ; + MPI_Ssend( MPI_BOTTOM, 0, MPI_INT, 1, 0, MPI_COMM_WORLD ); + MPI_Barrier( local_comm ); + /* Send process 2 an alls well */ + MPI_Send( MPI_BOTTOM, 0, MPI_INT, 2, 0, MPI_COMM_WORLD ); + } + else if (rank == 1) { + t0 = MPI_Wtime(); + while (MPI_Wtime() - t0 < 2.0) ; + MPI_Recv( MPI_BOTTOM, 0, MPI_INT, 0, 1, MPI_COMM_WORLD, &status ); + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Irecv( MPI_BOTTOM, 0, MPI_INT, 0, 0, MPI_COMM_WORLD, &r ); + MPI_Barrier( local_comm ); + MPI_Wait( &r, &status ); + } + else if (rank == 2) { + int flag; + + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Irecv( MPI_BOTTOM, 0, MPI_INT, 0, 0, MPI_COMM_WORLD, &r ); + t0 = MPI_Wtime(); + while (MPI_Wtime() - t0 < 3.0) ; + MPI_Test( &r, &flag, &status ); + if (!flag) { + fprintf( stderr, "Test failed!\n" ); + fflush( stderr ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + else + fprintf( stderr, "Test succeeded\n" ); + } + else { + MPI_Barrier( MPI_COMM_WORLD ); + } + + MPI_Comm_free( 
&local_comm ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/redscat.c b/teshsuite/smpi/mpich-test/coll/redscat.c new file mode 100644 index 0000000000..3cb057da76 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/redscat.c @@ -0,0 +1,53 @@ +/* + * Test of reduce scatter. + * + * Each processor contributes its rank + the index to the reduction, + * then receives the ith sum + * + * Can be called with any number of processors. + */ + +#include "mpi.h" +#include +#include +#include "test.h" + +int main( int argc, char **argv ) +{ + int err = 0, toterr; + int *sendbuf, *recvbuf, *recvcounts; + int size, rank, i, sumval; + MPI_Comm comm; + + + MPI_Init( &argc, &argv ); + comm = MPI_COMM_WORLD; + + MPI_Comm_size( comm, &size ); + MPI_Comm_rank( comm, &rank ); + sendbuf = (int *) malloc( size * sizeof(int) ); + for (i=0; i +#include "test.h" + +int main( int argc, char **argv ) +{ + int rank, value, result; + + MPI_Init (&argc, &argv); + MPI_Comm_rank (MPI_COMM_WORLD, &rank); + + value = (rank == 0) ? 3 : 6; + MPI_Allreduce (&value, &result, 1, MPI_INT, MPI_BOR, MPI_COMM_WORLD); + if (rank == 0) printf ("Result of 3 BOR 6 is %d, result of 3|6 is %d\n", + result, 3|6); + + Test_Waitforall( ); + MPI_Finalize (); + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/runtests b/teshsuite/smpi/mpich-test/coll/runtests new file mode 100755 index 0000000000..7ed8a7b59a --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/runtests @@ -0,0 +1,184 @@ +#! /bin/sh +# This version puts the output from each program into a separate file. 
+# -mvhome is needed for the ANL SP, and is ignored by others +args= +device= +#top_srcdir=/home/degomme/Downloads/mpich-test +#srcdir=/home/degomme/Downloads/mpich-test/coll +MPICH_VERSION= +STOPFILE=${MPITEST_STOPTEST:-"$HOME/.stopmpichtests"} + +MAKE="make --no-print-directory" +MPIRUNMVBACK='' +# + +# Set mpirun to the name/path of the mpirun program +#FindMPIRUN +# +runtests=1 +makeeach=0 +writesummaryfile=no +quiet=0 +MAKE="make --no-print-directory" +for arg in "$@" ; do + case $arg in + -basedir=* ) + basedir=`echo $arg | sed 's/-basedir=//'` + ;; + -srcdir=* ) + srcdir=`echo $arg | sed 's/-srcdir=//'` + ;; + -checkonly ) + runtests=0 + ;; + -margs=*) + margs=`echo $arg | sed 's/-margs=//'` + args="$args $margs" + ;; + -small) + shift + makeeach=1 + ;; + -summaryfile=*) + writesummaryfile=yes + summaryfile=`echo A$arg | sed 's/A-summaryfile=//'` + ;; + -quiet) + shift + quiet=1 + ;; + -help|-u) + echo "runtests [-checkonly] [-margs='...']" + echo "run tests in this directory. If -checkonly set, just run" + echo "the differences check (do NO rerun the test programs)." + echo "If -margs is used, these options are passed to mpirun." + echo "If -small is used, the examples are built, run, and deleted." + exit 1 + ;; + *) + if test -n "$arg" ; then + echo "runtests: Unknown argument ($arg)" + exit 1 + fi + ;; + esac +done +# Load basic procedures +. ${srcdir}/../runbase +# +# If the programs are not available, run make. +if [ ! 
-x coll1 -a $makeeach = 0 -a $runtests = 1 ] ; then + $MAKE +fi + +mpirun=" ${basedir}/bin/smpirun -platform ${srcdir}/../../../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir}/../../hostfile --log=root.thres:critical" +testfiles="" +if [ $runtests = 1 ] ; then +echo '**** Testing MPI Collective routines ****' + +RunTest barrier 4 "*** Barrier Test ***" "" "barrier-0.out" + +RunTest bcast_mpich 4 "*** Broadcast Test ***" "" "bcast-0.out bcast-1.out bcast-2.out bcast-3.out" + +RunTest bcastvec 4 "*** Broadcast Datatype Test ***" "" "bcastvec-0.out bcastvec-1.out bcastvec-2.out bcastvec-3.out" + + +#TODO : handle MPI_BOTTOM to allow som operations to use absolute addresses +RunTest coll1 4 + +RunTest coll2 5 + +RunTest coll3 5 + +RunTest coll4 4 + +RunTest coll5 4 + +RunTest coll6 5 + +RunTest coll7 5 + +RunTest coll8 4 + +RunTest coll9 4 + +#smpi does not handle non commutative operations, removed +#RunTest coll10 4 + +#smpi does not handle non commutative operations, removed +RunTest coll11 4 + +#weird manipulations of ranks in split, and comms -> deadlock, removed +#RunTest scantst 4 + +RunTest coll12 4 + +# coll13 is very picky about arguments +RunTest coll13 4 + +RunTest longuser 4 + +# Some implementations (e.g., IBM's) forget to handle the np = 1 case. +#RunTest longuser 1 "*** longuser (np == 1) ***" +MakeExe longuser +cp longuser longuser1 +RunTest longuser1 1 '*** longuser (np == 1) ***' +rm -f longuser1 + +#OutTime +#testfiles="$testfiles allredmany.out" +#rm -f allredmany.out +#MakeExe allredmany +#echo '**** allredmany ****' +#echo '*** allredmany ***' >> allredmany.out +#cnt=0 +## Run several times to try and catch timing/race conditions in managing +## the flood of one-way messages. 
+#while [ $cnt -lt 20 ] ; do +# echo "*** allredmany run $cnt ***" >> allredmany.out +# $mpirun -np 2 $args allredmany >> allredmany.out 2>&1 +# cnt=`expr $cnt + 1` +#done +#echo '*** allredmany ***' >> allredmany.out +#CleanExe allredmany + +RunTest grouptest 4 +#uses MPI_Dims_create, MPI_Cart_create ... removed +#RunTest allred 4 "*** Allred ***" + +RunTest allred2 4 "*** Allred2 ***" +#uses MPI_Dims_create, MPI_Cart_create ... removed +#RunTest scatterv 4 "*** Scatterv ***" + +RunTest scattern 4 "*** Scattern ***" + +#fails, more debug needed to understand +#RunTest redscat 4 "*** Reduce_scatter ***" + +RunTest alltoallv_mpich 4 "*** Alltoallv ***" + +# +# Run Fortran tests ONLY if Fortran available +if [ 0 = 1 ] ; then + echo "FORTRAN TESTS" + + RunTest allredf 4 "*** Testing allreduce from Fortran ***" + + RunTest assocf 4 "*** Testing allreduce from Fortran (2) ***" + + RunTest bcastlog 4 "*** Testing logical datatype in BCAST ***" + echo "END OF FORTRAN TESTS" +fi + +else + # Just run checks + testfiles=`echo *.out` + if test "$testfiles" = "*.out" ; then + echo "No output files remain from previous test!" + exit 1 + fi +fi + +echo '*** Checking for differences from expected output ***' +CheckAllOutput coll.diff +exit 0 diff --git a/teshsuite/smpi/mpich-test/coll/scantst.c b/teshsuite/smpi/mpich-test/coll/scantst.c new file mode 100644 index 0000000000..63561be65b --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/scantst.c @@ -0,0 +1,152 @@ +#include "mpi.h" +#include +#include "test.h" + +MPI_Comm GetNextComm( void ); +void addem ( int *, int *, int *, MPI_Datatype * ); +void assoc ( int *, int *, int *, MPI_Datatype * ); + +void addem( int *invec, int *inoutvec, int *len, MPI_Datatype *dtype) +{ + int i; + for ( i=0; i<*len; i++ ) + inoutvec[i] += invec[i]; +} + +#define BAD_ANSWER 100000 + +/* + The operation is inoutvec[i] = invec[i] op inoutvec[i] + (see 4.9.4). The order is important. 
+ + Note that the computation is in process rank (in the communicator) + order, independant of the root. + */ +void assoc( int *invec, int *inoutvec, int *len, MPI_Datatype *dtype) +{ + int i; + for ( i=0; i<*len; i++ ) { + if (inoutvec[i] <= invec[i] ) { + int rank; + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + fprintf( stderr, "[%d] inout[0] = %d, in[0] = %d\n", + rank, inoutvec[0], invec[0] ); + inoutvec[i] = BAD_ANSWER; + } + else + inoutvec[i] = invec[i]; + } +} + +MPI_Comm GetNextComm( void ) +{ + MPI_Comm comm = MPI_COMM_NULL; + static int idx = 0; + int size, rank; + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + switch (idx) { + case 0: + MPI_Comm_dup( MPI_COMM_WORLD, &comm ); + break; + case 1: + /* invert the rank order */ + MPI_Comm_split( MPI_COMM_WORLD, 0, size - rank, &comm ); + break; + case 2: + /* Divide into subsets */ + MPI_Comm_split( MPI_COMM_WORLD, rank < (size/2), rank, &comm ); + break; + case 3: + /* Another division */ + MPI_Comm_split( MPI_COMM_WORLD, rank < (size/3), size-rank, &comm ); + break; + case 4: + /* odd and even */ + MPI_Comm_split( MPI_COMM_WORLD, (rank % 2) == 0, rank, &comm ); + break; + case 5: + /* Last case: startover */ + idx = -1; + break; + } + idx++; + return comm; +} + +int main( int argc, char **argv ) +{ + int rank, size, i; + int data; + int errors=0; + int result = -100; + int correct_result; + MPI_Op op_assoc, op_addem; + MPI_Comm comm; + + MPI_Init( &argc, &argv ); + MPI_Op_create( (MPI_User_function *)assoc, 0, &op_assoc ); + MPI_Op_create( (MPI_User_function *)addem, 1, &op_addem ); + + /* Run this for a variety of communicator sizes */ + while ((comm = GetNextComm()) != MPI_COMM_NULL) { + MPI_Comm_rank( comm, &rank ); + MPI_Comm_size( comm, &size ); + + data = rank; + + correct_result = 0; + for (i=0;i<=rank;i++) + correct_result += i; + + MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error suming ints 
with scan\n", rank ); + errors++; + } + + MPI_Scan ( &data, &result, 1, MPI_INT, MPI_SUM, comm ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error summing ints with scan (2)\n", rank ); + errors++; + } + + data = rank; + result = -100; + MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error summing ints with scan (userop)\n", + rank ); + errors++; + } + + MPI_Scan ( &data, &result, 1, MPI_INT, op_addem, comm ); + if (result != correct_result) { + fprintf( stderr, "[%d] Error summing ints with scan (userop2)\n", + rank ); + errors++; + } +/* result = -100;*/ +/* data = rank;*/ +/* MPI_Scan ( &data, &result, 1, MPI_INT, op_assoc, comm );*/ +/* if (result == BAD_ANSWER) {*/ +/* fprintf( stderr, "[%d] Error scanning with non-commutative op\n",*/ +/* rank );*/ +/* errors++;*/ +/* }*/ + MPI_Comm_free( &comm ); + } + + MPI_Op_free( &op_assoc ); + MPI_Op_free( &op_addem ); + + if (errors) { + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + printf( "[%d] done with ERRORS(%d)!\n", rank, errors ); + } + + Test_Waitforall( ); + MPI_Finalize(); + return errors; +} diff --git a/teshsuite/smpi/mpich-test/coll/scattern.c b/teshsuite/smpi/mpich-test/coll/scattern.c new file mode 100644 index 0000000000..082fe6a15f --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/scattern.c @@ -0,0 +1,54 @@ +#include "mpi.h" +#include +#include +#include "test.h" + +/* This example sends a vector and receives individual elements */ + +int main( int argc, char **argv ) +{ + MPI_Datatype vec; + double *vecin, *vecout, ivalue; + int root, i, n, stride, err = 0; + int rank, size; + + MPI_Init( &argc, &argv ); + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + n = 12; + stride = 10; + vecin = (double *)malloc( n * stride * size * sizeof(double) ); + vecout = (double *)malloc( n * sizeof(double) ); + + MPI_Type_vector( n, 1, stride, MPI_DOUBLE, &vec ); + MPI_Type_commit( &vec ); + + for 
(i=0; i 0) printf( "Found %d errors!\n", err ); + else printf( " No Errors\n" ); + } + MPI_Type_free( &vec ); + MPI_Finalize(); + return 0; + +} + diff --git a/teshsuite/smpi/mpich-test/coll/scatterv.c b/teshsuite/smpi/mpich-test/coll/scatterv.c new file mode 100644 index 0000000000..aefcb2e1ac --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/scatterv.c @@ -0,0 +1,167 @@ +#include "mpi.h" +#include +#include +#include "test.h" + +/* Prototypes for picky compilers */ +void SetData ( double *, double *, int, int, int, int, int, int ); +int CheckData ( double *, int, int, int, int, int ); +/* + This is an example of using scatterv to send a matrix from one + process to all others, with the matrix stored in Fortran order. + Note the use of an explicit UB to enable the sources to overlap. + + This tests scatterv to make sure that it uses the datatype size + and extent correctly. It requires number of processors that + can be split with MPI_Dims_create. + + */ + +void SetData( sendbuf, recvbuf, nx, ny, myrow, mycol, nrow, ncol ) +double *sendbuf, *recvbuf; +int nx, ny, myrow, mycol, nrow, ncol; +{ +int coldim, i, j, m, k; +double *p; + +if (myrow == 0 && mycol == 0) { + coldim = nx * nrow; + for (j=0; j +typedef struct { short a; int b } s1; + +main( int argc, char **argv ) +{ +s1 s[10], sout[10]; +int i, rank; +MPI_Status status; + +MPI_Init( &argc, &argv ); +MPI_Comm_rank( MPI_COMM_WORLD, &rank ); +for (i=0; i<10; i++) { + s[i].a = rank + i; + s[i].b = rank; + sout[i].a = -1; + sout[i].b = -1; + } +/* MPI_Allreduce( s, sout, 10, MPI_SHORT_INT, MPI_MINLOC, MPI_COMM_WORLD ); */ +/* if (rank == 1) + for (i=0; i<10; i++) + sout[i] = s[i]; + */ +MPI_Reduce( s, sout, 10, MPI_SHORT_INT, MPI_MINLOC, 1, MPI_COMM_WORLD ); +if (rank == 1) +for (i=0; i<10; i++) { + printf( "[%d] (%x,%x)\n", rank, (int)sout[i].a, sout[i].b ); + } +if (rank == 1) + MPI_Send( sout, 10, MPI_SHORT_INT, 0, 0, MPI_COMM_WORLD ); +else if (rank == 0) + MPI_Recv( sout, 10, MPI_SHORT_INT, 1, 0, 
MPI_COMM_WORLD, &status ); +/* MPI_Bcast( sout, 10, MPI_SHORT_INT, 1, MPI_COMM_WORLD ); */ +for (i=0; i<10; i++) { + printf( "[%d] (%x,%x)\n", rank, (int)sout[i].a, sout[i].b ); + } +MPI_Finalize(); +return 0; +} diff --git a/teshsuite/smpi/mpich-test/coll/temprun b/teshsuite/smpi/mpich-test/coll/temprun new file mode 100755 index 0000000000..4bcd93c309 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/temprun @@ -0,0 +1,269 @@ +#!/bin/sh +# This version puts the output from each program into a separate file. +# -mvhome is needed for the ANL SP, and is ignored by others +args="-pg -mvhome" +# +runtests=1 +makeeach=0 +for arg in "$@" ; do + case $arg in + -checkonly ) + runtests=0 + ;; + -margs=*) + margs=`echo $arg | sed 's/-margs=//'` + args="$args $margs" + ;; + -small) + makeeach=1 + ;; + -help|-u) + echo "runtests [-checkonly] [-margs='...']" + echo "run tests in this directory. If -checkonly set, just run" + echo "the differences check (do NO rerun the test programs)." + echo "If -margs is used, these options are passed to mpirun." + echo "If -small is used, the examples are built, run, and deleted." + exit 1 + ;; + *) + if test -n "$arg" ; then + echo "runtests: Unknown argument ($arg)" + exit 1 + fi + ;; + esac +done + +MakeExe() { + if [ $makeeach = 1 -o ! -x $1 ] ; then + make $1 + fi +} +CleanExe() { + if [ $makeeach = 1 ] ; then + /bin/rm -f $1 $1.o + fi +} + +# If the programs are not available, run make. +if [ ! 
-x coll1 -a $makeeach = 0 ] ; then + make +fi + +testfiles="" +if [ $runtests = 1 ] ; then +echo '**** Testing MPI Collective routines ****' + +testfiles="$testfiles barrier.out" +/bin/rm -f barrier.out barrier-0.out +MakeExe barrier +echo '*** Barrier Test ***' >> barrier.out +echo '**** Barrier Test ****' +mpirun $args -np 4 -mvback "barrier-0.out" barrier $* >> barrier.out +cat barrier-0.out >> barrier.out +/bin/rm -f barrier-[01234].out +echo '*** Barrier Test ***' >> barrier.out +CleanExe barrier + +testfiles="$testfiles bcast.out" +/bin/rm -f bcast.out bcast-[0-3].out +MakeExe bcast +echo '**** Broadcast Test ****' +echo '*** Broadcast Test ***' >> bcast.out +mpirun $args -np 4 \ + -mvback "bcast-0.out bcast-1.out bcast-2.out bcast-3.out" \ + bcast $* >> bcast.out +cat bcast-[0123].out >> bcast.out +/bin/rm -f bcast-[0123].out +echo '*** Broadcast Test ***' >> bcast.out +CleanExe bcast + +testfiles="$testfiles coll1.out" +/bin/rm -f coll1.out +MakeExe coll1 +echo '**** coll1 ****' +echo '*** coll1 ***' >> coll1.out +mpirun $args -np 4 coll1 $* >> coll1.out +echo '*** coll1 ***' >> coll1.out +CleanExe coll1 + +testfiles="$testfiles coll2.out" +/bin/rm -f coll2.out +MakeExe coll2 +echo '**** coll2 ****' +echo '*** coll2 ***' >> coll2.out +# mpirun $args -np 5 coll2 $* >> coll2.out +echo '*** coll2 ***' >> coll2.out +CleanExe coll2 + +testfiles="$testfiles coll3.out" +/bin/rm -f coll3.out +MakeExe coll3 +echo '**** coll3 ****' +echo '*** coll3 ***' >> coll3.out +mpirun $args -np 5 coll3 $* >> coll3.out +echo '*** coll3 ***' >> coll3.out +CleanExe coll3 + +testfiles="$testfiles coll4.out" +/bin/rm -f coll4.out +MakeExe coll4 +echo '**** coll4 ****' +echo '*** coll4 ***' >> coll4.out +mpirun $args -np 4 coll4 $* >> coll4.out +echo '*** coll4 ***' >> coll4.out +CleanExe coll4 + +testfiles="$testfiles coll5.out" +/bin/rm -f coll5.out +MakeExe coll5 +echo '**** coll5 ****' +echo '*** coll5 ***' >> coll5.out +mpirun $args -np 4 coll5 $* >> coll5.out +echo '*** coll5 
***' >> coll5.out +CleanExe coll5 + +testfiles="$testfiles coll6.out" +/bin/rm -f coll6.out +MakeExe coll6 +echo '**** coll6 ****' +echo '*** coll6 ***' >> coll6.out +mpirun $args -np 5 coll6 $* >> coll6.out +echo '*** coll6 ***' >> coll6.out +CleanExe coll6 + +testfiles="$testfiles coll7.out" +/bin/rm -f coll7.out +MakeExe coll7 +echo '**** coll7 ****' +echo '*** coll7 ***' >> coll7.out +mpirun $args -np 5 coll7 $* >> coll7.out +echo '*** coll7 ***' >> coll7.out +CleanExe coll7 + +testfiles="$testfiles coll8.out" +/bin/rm -f coll8.out +MakeExe coll8 +echo '**** coll8 ****' +echo '*** coll8 ***' >> coll8.out +mpirun $args -np 4 coll8 $* >> coll8.out +echo '*** coll8 ***' >> coll8.out +CleanExe coll8 + +testfiles="$testfiles coll9.out" +/bin/rm -f coll9.out +MakeExe coll9 +echo '**** coll9 ****' +echo '*** coll9 ***' >> coll9.out +mpirun $args -np 4 coll9 $* >> coll9.out +echo '*** coll9 ***' >> coll9.out +CleanExe coll9 + +testfiles="$testfiles coll10.out" +/bin/rm -f coll10.out +MakeExe coll10 +echo '**** coll10 ****' +echo '*** coll10 ***' >> coll10.out +mpirun -np 4 $args coll10 $* >> coll10.out +echo '*** coll10 ***' >> coll10.out +CleanExe coll10 + +testfiles="$testfiles coll11.out" +/bin/rm -f coll11.out +MakeExe coll11 +echo '**** coll11 ****' +echo '*** coll11 ***' >> coll11.out +mpirun -np 4 $args coll11 $* >> coll11.out +echo '*** coll11 ***' >> coll11.out +CleanExe coll11 + +testfiles="$testfiles coll12.out" +/bin/rm -f coll12.out +MakeExe coll12 +echo '**** coll12 ****' +echo '*** coll12 ***' >> coll12.out +mpirun -np 4 $args coll12 $* >> coll12.out +echo '*** coll12 ***' >> coll12.out +CleanExe coll12 + +testfiles="$testfiles coll13.out" +/bin/rm -f coll13.out +MakeExe coll13 +echo '**** coll13 ****' +echo '*** coll13 ***' >> coll13.out +mpirun -np 4 $args coll13 $* >> coll13.out +echo '*** coll13 ***' >> coll13.out +CleanExe coll13 + +testfiles="$testfiles grouptest.out" +/bin/rm -f grouptest.out +MakeExe grouptest +echo '*** Grouptest ***' +echo '*** 
grouptest ***' >> grouptest.out +mpirun $args -np 4 grouptest $* >> grouptest.out +echo '*** grouptest ***' >> grouptest.out +CleanExe grouptest + +testfiles="$testfiles allred.out" +/bin/rm -f allred.out +MakeExe allred +echo '*** Allred ***' +echo '*** Allred ***' >> allred.out +mpirun $args -np 4 allred $* >> allred.out +echo '*** Allred ***' >> allred.out +CleanExe allred + +testfiles="$testfiles scatterv.out" +/bin/rm -f scatterv.out +MakeExe scatterv +echo '*** Scatterv ***' +echo '*** Scatterv ***' >> scatterv.out +mpirun $args -np 4 scatterv $* >> scatterv.out +echo '*** Scatterv ***' >> scatterv.out +CleanExe scatterv + +# +# Run Fortran tests ONLY if Fortran available +if [ 1 = 1 ] ; then + echo "FORTRAN TESTS" + # + testfiles="$testfiles allredf.out" + /bin/rm -f allredf.out + MakeExe allredf + echo '*** Testing allreduce from Fortran ***' + echo '*** Testing allreduce from Fortran ***' >> allredf.out + mpirun $args -np 4 allredf "$@" >> allredf.out + echo '*** Testing allreduce from Fortran ***' >> allredf.out + CleanExe allredf + # + echo "END OF FORTRAN TESTS" +fi + +else + # Just run checks + testfiles=`echo *.out` + if test "$testfiles" = "*.out" ; then + echo "No output files remain from previous test!" 
+ exit 1 + fi +fi + +echo '*** Differences from expected output ***' +/bin/rm -f coll.diff +for file in $testfiles ; do + stdfile=`basename $file .out`.std + if [ -s $stdfile ] ; then + if diff -b $file `basename $file .out`.std > /dev/null ; then + true + else + echo "Differences in `basename $file .out`" >> coll.diff + diff -b $file `basename $file .out`.std >> coll.diff + fi + else + echo "Can not find file $stdfile to compare against for test `basename $file .out`" + fi +done +if [ -s coll.diff ] ; then + cat coll.diff +fi +exit 0 diff --git a/teshsuite/smpi/mpich-test/coll/test.c b/teshsuite/smpi/mpich-test/coll/test.c new file mode 100644 index 0000000000..5a8d6f2114 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/test.c @@ -0,0 +1,97 @@ +/* Procedures for recording and printing test results */ + +#include +#include +#include "test.h" +#include "mpi.h" + +static int tests_passed = 0; +static int tests_failed = 0; +static char failed_tests[255][81]; +static char suite_name[255]; +FILE *fileout = NULL; + +void Test_Init( const char *suite, int rank) +{ + char filename[512]; + + sprintf(filename, "%s-%d.out", suite, rank); + strncpy(suite_name, suite, 255); + fileout = fopen(filename, "w"); + if (!fileout) { + fprintf( stderr, "Could not open %s on node %d\n", filename, rank ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } +} + +void Test_Message( const char *mess) +{ + fprintf(fileout, "[%s]: %s\n", suite_name, mess); + if(fileout)fflush(fileout); +} + +void Test_Failed(const char *test) +{ + fprintf(fileout, "[%s]: *** Test '%s' Failed! 
***\n", suite_name, test); + strncpy(failed_tests[tests_failed], test, 81); + if(fileout)fflush(fileout); + tests_failed++; +} + +void Test_Passed(const char *test) +{ +#ifdef VERBOSE + fprintf(fileout, "[%s]: Test '%s' Passed.\n", suite_name, test); + if(fileout)fflush(fileout); +#endif + tests_passed++; +} + +int Summarize_Test_Results(void) +{ +#ifdef VERBOSE + fprintf(fileout, "For test suite '%s':\n", suite_name); +#else + if (tests_failed > 0) +#endif + { + fprintf(fileout, "Of %d attempted tests, %d passed, %d failed.\n", + tests_passed + tests_failed, tests_passed, tests_failed); + } + if (tests_failed > 0) { + int i; + + fprintf(fileout, "*** Tests Failed:\n"); + for (i = 0; i < tests_failed; i++) + fprintf(fileout, "*** %s\n", failed_tests[i]); + } + return tests_failed; +} + +void Test_Finalize(void) +{ + if(fileout)fflush(fileout); + //fclose(fileout); +} + +#include "mpi.h" +/* Wait for every process to pass through this point. This test is used + to make sure that all processes complete, and that a test "passes" because + it executed, not because some process failed. 
+ */ +void Test_Waitforall(void) +{ + int m, one, myrank, n; + + MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); + MPI_Comm_size( MPI_COMM_WORLD, &n ); + one = 1; + MPI_Allreduce( &one, &m, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + + if (m != n) { + printf( "[%d] Expected %d processes to wait at end, got %d\n", myrank, + n, m ); + } + if (myrank == 0) + printf( " No Errors\n" ); +} diff --git a/teshsuite/smpi/mpich-test/coll/test.h b/teshsuite/smpi/mpich-test/coll/test.h new file mode 100644 index 0000000000..7360323a60 --- /dev/null +++ b/teshsuite/smpi/mpich-test/coll/test.h @@ -0,0 +1,18 @@ +/* Header for testing procedures */ + +#ifndef _INCLUDED_TEST_H_ +#define _INCLUDED_TEST_H_ + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Test_Init (const char *, int); +void Test_Message (const char *); +void Test_Failed (const char *); +void Test_Passed (const char *); +int Summarize_Test_Results (void); +void Test_Finalize (void); +void Test_Waitforall (void); + +#endif diff --git a/teshsuite/smpi/mpich-test/context/CMakeLists.txt b/teshsuite/smpi/mpich-test/context/CMakeLists.txt new file mode 100644 index 0000000000..76e3474a0d --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/CMakeLists.txt @@ -0,0 +1,75 @@ +cmake_minimum_required(VERSION 2.6) + +if(enable_smpi) + set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/bin/smpicc") + + set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") + set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1") + + include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi") + + add_executable(attrerr attrerr.c test.c) + # add_executable(attrt attrt.c test.c) + # add_executable(commnames commnames.c) + add_executable(groupcreate groupcreate.c) + add_executable(grouptest_mpich grouptest.c) + add_executable(icdup icdup.c) + add_executable(ictest ictest.c ) + 
add_executable(ictest2 ictest2.c) + add_executable(ictest3 ictest3.c) + + target_link_libraries(attrerr m simgrid smpi ) + # target_link_libraries(attrt m simgrid smpi ) + # target_link_libraries(commnames m simgrid smpi ) + target_link_libraries(groupcreate m simgrid smpi ) + target_link_libraries(grouptest_mpich m simgrid smpi ) + target_link_libraries(icdup m simgrid smpi ) + target_link_libraries(ictest m simgrid smpi ) + target_link_libraries(ictest2 m simgrid smpi ) + target_link_libraries(ictest3 m simgrid smpi ) + + set_target_properties(attrerr PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + # set_target_properties(attrt PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + # set_target_properties(commnames PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(groupcreate PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(grouptest_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(icdup PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(ictest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(ictest2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(ictest3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + +endif(enable_smpi) + +set(tesh_files + ${tesh_files} + ${CMAKE_CURRENT_SOURCE_DIR}/context.tesh + PARENT_SCOPE + ) +set(xml_files + ${xml_files} + PARENT_SCOPE + ) +set(examples_src + ${examples_src} + ${CMAKE_CURRENT_SOURCE_DIR}/test.c + ${CMAKE_CURRENT_SOURCE_DIR}/attrerr.c + # ${CMAKE_CURRENT_SOURCE_DIR}/attrt.c + # ${CMAKE_CURRENT_SOURCE_DIR}/commnames.c + ${CMAKE_CURRENT_SOURCE_DIR}/groupcreate.c + ${CMAKE_CURRENT_SOURCE_DIR}/grouptest.c + ${CMAKE_CURRENT_SOURCE_DIR}/icdup.c + ${CMAKE_CURRENT_SOURCE_DIR}/ictest.c + ${CMAKE_CURRENT_SOURCE_DIR}/ictest2.c + ${CMAKE_CURRENT_SOURCE_DIR}/ictest3.c + ${CMAKE_CURRENT_SOURCE_DIR}/test.h + PARENT_SCOPE + ) +set(bin_files + ${bin_files} + ${CMAKE_CURRENT_SOURCE_DIR}/../hostfile + PARENT_SCOPE + ) +set(txt_files + ${txt_files} 
+ PARENT_SCOPE + ) diff --git a/teshsuite/smpi/mpich-test/context/attrerr.c b/teshsuite/smpi/mpich-test/context/attrerr.c new file mode 100644 index 0000000000..a73d0f0d97 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/attrerr.c @@ -0,0 +1,114 @@ +/* + + Exercise attribute routines. + This version checks for correct behavior of the copy and delete functions + on an attribute, particularly the correct behavior when the routine returns + failure. + + */ +#include +#include "mpi.h" +#include "test.h" + +int test_communicators ( void ); +void abort_msg ( const char *, int ); +int copybomb_fn ( MPI_Comm, int, void *, void *, void *, int * ); +int deletebomb_fn ( MPI_Comm, int, void *, void * ); + +int main( int argc, char **argv ) +{ + MPI_Init( &argc, &argv ); + test_communicators(); + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} + +/* + * MPI 1.2 Clarification: Clarification of Error Behavior of + * Attribute Callback Functions + * Any return value other than MPI_SUCCESS is erroneous. The specific value + * returned to the user is undefined (other than it can't be MPI_SUCCESS). + * Proposals to specify particular values (e.g., user's value) failed. 
+ */ +/* Return an error as the value */ +int copybomb_fn( MPI_Comm oldcomm, int keyval, void *extra_state, + void *attribute_val_in, void *attribute_val_out, int *flag) +{ +/* Note that if (sizeof(int) < sizeof(void *), just setting the int + part of attribute_val_out may leave some dirty bits + */ + *flag = 1; + return MPI_ERR_OTHER; +} + +/* Set delete flag to 1 to allow the attribute to be deleted */ +static int delete_flag = 0; +int deletebomb_fn( MPI_Comm comm, int keyval, void *attribute_val, + void *extra_state) +{ + if (delete_flag) return MPI_SUCCESS; + return MPI_ERR_OTHER; +} + +void abort_msg( const char *str, int code ) +{ + fprintf( stderr, "%s, err = %d\n", str, code ); + MPI_Abort( MPI_COMM_WORLD, code ); +} + +int test_communicators( void ) +{ + MPI_Comm dup_comm_world, d2; + ptrdiff_t world_rank; + int world_size, key_1; + int err; + MPI_Aint value; + int rank; + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + world_rank=rank; + MPI_Comm_size( MPI_COMM_WORLD, &world_size ); + if (world_rank == 0) { + printf( "*** Attribute copy/delete return codes ***\n" ); + } + + MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world ); + MPI_Barrier( dup_comm_world ); + + MPI_Errhandler_set( dup_comm_world, MPI_ERRORS_RETURN ); + + value = - 11; + if ((err=MPI_Keyval_create( copybomb_fn, deletebomb_fn, &key_1, &value ))) + abort_msg( "Keyval_create", err ); + + err = MPI_Attr_put( dup_comm_world, key_1, (void *)world_rank ); + if (err) { + printf( "Error with first put\n" ); + } + + err = MPI_Attr_put( dup_comm_world, key_1, (void *)(2*world_rank) ); + if (err == MPI_SUCCESS) { + printf( "delete function return code was MPI_SUCCESS in put\n" ); + } + + /* Because the attribute delete function should fail, the attribute + should *not be removed* */ + err = MPI_Attr_delete( dup_comm_world, key_1 ); + if (err == MPI_SUCCESS) { + printf( "delete function return code was MPI_SUCCESS in delete\n" ); + } + + err = MPI_Comm_dup( dup_comm_world, &d2 ); + if (err == MPI_SUCCESS) { + 
printf( "copy function return code was MPI_SUCCESS in dup\n" ); + } + if (err && d2 != MPI_COMM_NULL) { + printf( "dup did not return MPI_COMM_NULL on error\n" ); + } + + delete_flag = 1; + MPI_Comm_free( &dup_comm_world ); + + return 0; +} + diff --git a/teshsuite/smpi/mpich-test/context/attrerr.std b/teshsuite/smpi/mpich-test/context/attrerr.std new file mode 100644 index 0000000000..9db5ec977b --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/attrerr.std @@ -0,0 +1,4 @@ +*** Testing attributes (2) *** +*** Attribute copy/delete return codes *** +All processes completed test +*** Testing attributes (2) *** diff --git a/teshsuite/smpi/mpich-test/context/attrt.c b/teshsuite/smpi/mpich-test/context/attrt.c new file mode 100644 index 0000000000..858db14728 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/attrt.c @@ -0,0 +1,260 @@ +/* + + Exercise communicator routines. + + This C version derived from a Fortran test program from .... + + */ +#include +#include "mpi.h" +#include "test.h" + +int test_communicators ( void ); +int copy_fn ( MPI_Comm, int, void *, void *, void *, int * ); +int delete_fn ( MPI_Comm, int, void *, void * ); + +int main( int argc, char **argv ) +{ + MPI_Init( &argc, &argv ); + test_communicators(); + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} + +int copy_fn( MPI_Comm oldcomm, int keyval, void *extra_state, + void *attribute_val_in, void *attribute_val_out, + int *flag) +{ +/* Note that if (sizeof(int) < sizeof(void *), just setting the int + part of attribute_val_out may leave some dirty bits + */ +*(MPI_Aint *)attribute_val_out = (MPI_Aint)attribute_val_in; +*flag = 1; +return MPI_SUCCESS; +} + +int delete_fn( MPI_Comm comm, int keyval, void *attribute_val, + void *extra_state) +{ +int world_rank; +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +if ((MPI_Aint)attribute_val != (MPI_Aint)world_rank) { + printf( "incorrect attribute value %d\n", *(int*)attribute_val ); + MPI_Abort(MPI_COMM_WORLD, 1005 ); + } +return 
MPI_SUCCESS; +} + +int test_communicators( void ) +{ +MPI_Comm dup_comm_world, lo_comm, rev_comm, dup_comm, split_comm, world_comm; +MPI_Group world_group, lo_group, rev_group; +void *vvalue; +int ranges[1][3]; +int flag, world_rank, world_size, rank, size, n, key_1, key_3; +int color, key, result; +/* integer n, , + . key_2 + + */ +MPI_Aint value; + +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +MPI_Comm_size( MPI_COMM_WORLD, &world_size ); +if (world_rank == 0) { + printf( "*** Communicators ***\n" ); + } + +MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world ); + +/* + Exercise Comm_create by creating an equivalent to dup_comm_world + (sans attributes) and a half-world communicator. + */ + +if (world_rank == 0) + printf( " Comm_create\n" ); + +MPI_Comm_group( dup_comm_world, &world_group ); +MPI_Comm_create( dup_comm_world, world_group, &world_comm ); +MPI_Comm_rank( world_comm, &rank ); +if (rank != world_rank) { + printf( "incorrect rank in world comm: %d\n", rank ); + MPI_Abort(MPI_COMM_WORLD, 3001 ); + } + +n = world_size / 2; + +ranges[0][0] = 0; +ranges[0][1] = (world_size - n) - 1; +ranges[0][2] = 1; + +MPI_Group_range_incl(world_group, 1, ranges, &lo_group ); +MPI_Comm_create(world_comm, lo_group, &lo_comm ); +MPI_Group_free( &lo_group ); + +if (world_rank < (world_size - n)) { + MPI_Comm_rank(lo_comm, &rank ); + if (rank == MPI_UNDEFINED) { + printf( "incorrect lo group rank: %d\n", rank ); + MPI_Abort(MPI_COMM_WORLD, 3002 ); + } + else { + MPI_Barrier(lo_comm ); + } + } +else { + if (lo_comm != MPI_COMM_NULL) { + printf( "incorrect lo comm:\n" ); + MPI_Abort(MPI_COMM_WORLD, 3003 ); + } + } + +MPI_Barrier(world_comm); +/* + Check Comm_dup by adding attributes to lo_comm & duplicating + */ +if (world_rank == 0) + printf( " Comm_dup\n" ); + +if (lo_comm != MPI_COMM_NULL) { + value = 9; + MPI_Keyval_create(copy_fn, delete_fn, &key_1, &value ); + value = 8; +/* MPI_Keyval_create(MPI_DUP_FN, MPI_NULL_DELETE_FN, + &key_2, &value ); */ + value = 7; + 
MPI_Keyval_create(MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN, + &key_3, &value ); + + /* This may generate a compilation warning; it is, however, an + easy way to cache a value instead of a pointer */ + MPI_Attr_put(lo_comm, key_1, (void *)world_rank ); +/* MPI_Attr_put(lo_comm, key_2, world_size ) */ + MPI_Attr_put(lo_comm, key_3, (void *)0 ); + + MPI_Comm_dup(lo_comm, &dup_comm ); + + /* Note that if sizeof(int) < sizeof(void *), we can't use + (void **)&value to get the value we passed into Attr_put. To avoid + problems (e.g., alignment errors), we recover the value into + a (void *) and cast to int. Note that this may generate warning + messages from the compiler. */ + MPI_Attr_get(dup_comm, key_1, (void **)&vvalue, &flag ); + value = (MPI_Aint)vvalue; + + if (! flag) { + printf( "dup_comm key_1 not found on %d\n", world_rank ); + MPI_Abort(MPI_COMM_WORLD, 3004 ); + } + + if (value != world_rank) { + printf( "dup_comm key_1 value incorrect: %ld\n", (long)value ); + MPI_Abort(MPI_COMM_WORLD, 3005 ); + } + +/* MPI_Attr_get(dup_comm, key_2, (int *)&value, &flag ); */ +/* + if (! flag) { + printf( "dup_comm key_2 not found\n" ); + MPI_Abort(MPI_COMM_WORLD, 3006 ); + } + + if (value != world_size) { + printf( "dup_comm key_2 value incorrect: %d\n", value ); + MPI_Abort(MPI_COMM_WORLD, 3007 ); + } + */ + MPI_Attr_get(dup_comm, key_3, (void **)&vvalue, &flag ); + value = (int)vvalue; + if (flag) { + printf( "dup_comm key_3 found!\n" ); + MPI_Abort(MPI_COMM_WORLD, 3008 ); + } + MPI_Keyval_free(&key_1 ); +/* +c MPI_Keyval_free(&key_2 ) + */ + MPI_Keyval_free(&key_3 ); + } +/* + Split the world into even & odd communicators with reversed ranks. 
+ */ + if (world_rank == 0) + printf( " Comm_split\n" ); + + color = world_rank % 2; + key = world_size - world_rank; + + MPI_Comm_split(dup_comm_world, color, key, &split_comm ); + MPI_Comm_size(split_comm, &size ); + MPI_Comm_rank(split_comm, &rank ); + if (rank != ((size - world_rank/2) - 1)) { + printf( "incorrect split rank: %d\n", rank ); + MPI_Abort(MPI_COMM_WORLD, 3009 ); + } + + MPI_Barrier(split_comm ); +/* + Test each possible Comm_compare result + */ + if (world_rank == 0) + printf( " Comm_compare\n" ); + + MPI_Comm_compare(world_comm, world_comm, &result ); + if (result != MPI_IDENT) { + printf( "incorrect ident result: %d\n", result ); + MPI_Abort(MPI_COMM_WORLD, 3010 ); + } + + if (lo_comm != MPI_COMM_NULL) { + MPI_Comm_compare(lo_comm, dup_comm, &result ); + if (result != MPI_CONGRUENT) { + printf( "incorrect congruent result: %d\n", result ); + MPI_Abort(MPI_COMM_WORLD, 3011 ); + } + } + + ranges[0][0] = world_size - 1; + ranges[0][1] = 0; + ranges[0][2] = -1; + + MPI_Group_range_incl(world_group, 1, ranges, &rev_group ); + MPI_Comm_create(world_comm, rev_group, &rev_comm ); + MPI_Comm_compare(world_comm, rev_comm, &result ); + if (result != MPI_SIMILAR) { + printf( "incorrect similar result: %d\n", result ); + MPI_Abort(MPI_COMM_WORLD, 3012 ); + } + + if (lo_comm != MPI_COMM_NULL) { + MPI_Comm_compare(world_comm, lo_comm, &result ); + if (result != MPI_UNEQUAL) { + printf( "incorrect unequal result: %d\n", result ); + MPI_Abort(MPI_COMM_WORLD, 3013 ); + } + } +/* + Free all communicators created + */ + if (world_rank == 0) + printf( " Comm_free\n" ); + + MPI_Comm_free( &world_comm ); + MPI_Comm_free( &dup_comm_world ); + + MPI_Comm_free( &rev_comm ); + MPI_Comm_free( &split_comm ); + + MPI_Group_free( &world_group ); + MPI_Group_free( &rev_group ); + + if (lo_comm != MPI_COMM_NULL) { + MPI_Comm_free( &lo_comm ); + MPI_Comm_free( &dup_comm ); + } + + return 0; +} + diff --git a/teshsuite/smpi/mpich-test/context/attrt.std 
b/teshsuite/smpi/mpich-test/context/attrt.std new file mode 100644 index 0000000000..4693c037cf --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/attrt.std @@ -0,0 +1,9 @@ +*** Testing attributes *** +*** Communicators *** + Comm_create + Comm_dup + Comm_split + Comm_compare + Comm_free +All processes completed test +*** Testing attributes *** diff --git a/teshsuite/smpi/mpich-test/context/attrtest.f b/teshsuite/smpi/mpich-test/context/attrtest.f new file mode 100644 index 0000000000..6e63bca19e --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/attrtest.f @@ -0,0 +1,105 @@ + PROGRAM MAIN + + include 'mpif.h' + +C. Data layout +C. Number of tests + integer PM_GLOBAL_ERROR, PM_NUM_NODES + integer PM_MAX_TESTS + parameter (PM_MAX_TESTS=3) +C. Test data + integer PM_TEST_INTEGER, fuzzy, Error, FazAttr + integer PM_RANK_SELF + integer Faz_World, FazTag + integer errs + parameter (PM_TEST_INTEGER=12345) + logical FazFlag + external FazCreate, FazDelete +C +C. Initialize MPI + errs = 0 + call MPI_INIT(PM_GLOBAL_ERROR) + + PM_GLOBAL_ERROR = MPI_SUCCESS +C. Find out the number of processes + call MPI_COMM_SIZE (MPI_COMM_WORLD,PM_NUM_NODES,PM_GLOBAL_ERROR) + call MPI_COMM_RANK (MPI_COMM_WORLD,PM_RANK_SELF,PM_GLOBAL_ERROR) + + + call MPI_keyval_create ( FazCreate, FazDelete, FazTag, + & fuzzy, Error ) + +C. Make sure that we can get an attribute that hasn't been set yet (flag +C. is false) + call MPI_attr_get (MPI_COMM_WORLD, FazTag, FazAttr, + & FazFlag, Error) + + if (FazFlag) then + errs = errs + 1 + print *, 'Did not get flag==false when attr_get of key that' + print *, 'had not had a value set with attr_put' + endif + + FazAttr = 120 + call MPI_attr_put (MPI_COMM_WORLD, FazTag, FazAttr, Error) + +C. Check that the put worked + call MPI_attr_get (MPI_COMM_WORLD, FazTag, FazAttr, + & FazFlag, Error) + + if (FazAttr .ne. 120) then + errs = errs + 1 + print 1, ' Proc=',PM_Rank_self, ' ATTR=', FazAttr + endif +C. 
Duplicate the Communicator and it's cached attributes + + call MPI_Comm_Dup (MPI_COMM_WORLD, Faz_WORLD, Error) + + + call MPI_Attr_Get ( Faz_WORLD, FazTag, FazAttr, + & FazFlag, Error) + + if (FazFlag) then + if (FazAttr .ne. 121) then + errs = errs + 1 + print 1, ' T-Flag, Proc=',PM_Rank_self,' ATTR=', FazAttr + endif + else + errs = errs + 1 + print 1, ' F-Flag, Proc=',PM_Rank_self,' ATTR=',FazAttr + end if + 1 format( a, i5, a, i5 ) + +C. Clean up MPI + if (PM_Rank_self .eq. 0) then + if (errs .eq. 0) then + print *, ' No Errors' + else + print *, ' Found ', errs, ' errors' + endif + endif + call MPI_Comm_free( Faz_WORLD, Error ) + call MPI_FINALIZE (PM_GLOBAL_ERROR) + + end +C +C MPI 1.1 changed these from functions to subroutines. +C + SUBROUTINE FazCreate (comm, keyval, fuzzy, + & attr_in, attr_out, flag, ierr ) + INTEGER comm, keyval, fuzzy, attr_in, attr_out + LOGICAL flag + include 'mpif.h' + attr_out = attr_in + 1 + flag = .true. + ierr = MPI_SUCCESS + END + + SUBROUTINE FazDelete (comm, keyval, attr, extra, ierr ) + INTEGER comm, keyval, attr, extra, ierr + include 'mpif.h' + ierr = MPI_SUCCESS + if (keyval .ne. MPI_KEYVAL_INVALID)then + attr = attr - 1 + end if + END diff --git a/teshsuite/smpi/mpich-test/context/commnames.c b/teshsuite/smpi/mpich-test/context/commnames.c new file mode 100644 index 0000000000..e552c238cf --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/commnames.c @@ -0,0 +1,62 @@ +/* + * Check that we can put names on communicators and get them back. 
+ */ + +#include + +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + char commName [MPI_MAX_NAME_STRING+1]; + int namelen; + + MPI_Init( &argc, &argv ); + + if (MPI_Comm_get_name(MPI_COMM_WORLD, commName, &namelen) != MPI_SUCCESS) + { + printf("Failed to get a name from COMM_WORLD\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + if (strcmp("MPI_COMM_WORLD", commName)) + { + printf("Name on MPI_COMM_WORLD is \"%s\" should be \"MPI_COMM_WORLD\"\n", commName); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + if (namelen != strlen (commName)) + { + printf("Length of name on MPI_COMM_WORLD is %d should be %d\n", + namelen, (int) strlen(commName)); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + /* Check that we can replace it */ + if (MPI_Comm_set_name(MPI_COMM_WORLD,"foobar") != MPI_SUCCESS) + { + printf("Failed to put a name onto COMM_WORLD\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + if (MPI_Comm_get_name(MPI_COMM_WORLD, commName, &namelen) != MPI_SUCCESS) + { + printf("Failed to get a name from COMM_WORLD after changing it\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + if (strcmp("foobar", commName)) + { + printf("Name on MPI_COMM_WORLD is \"%s\" should be \"foobar\"\n", + commName ); + MPI_Abort(MPI_COMM_WORLD, -1); + } + + printf("Name tests OK\n"); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/context/commnames.std b/teshsuite/smpi/mpich-test/context/commnames.std new file mode 100644 index 0000000000..819a2a6eda --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/commnames.std @@ -0,0 +1,4 @@ +*** Testing Communicator Names *** +Name tests OK +Name tests OK +*** Testing Communicator Names *** diff --git a/teshsuite/smpi/mpich-test/context/commnamesf.f b/teshsuite/smpi/mpich-test/context/commnamesf.f new file mode 100644 index 0000000000..e816a8c862 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/commnamesf.f @@ -0,0 +1,75 @@ +C +C Check the communicator naming 
functions from Fortran +C + + include 'mpif.h' + + integer error, namelen + integer errcnt, rank + character*40 the_name + character*40 other_name + + call mpi_init (error) + + errcnt = 0 + call xify(the_name) + + call mpi_comm_get_name (MPI_COMM_WORLD, the_name, namelen, error) + if (error .ne. mpi_success) then + errcnt = errcnt + 1 + print *,'Failed to get the name from MPI_COMM_WORLD' + call MPI_Abort( MPI_COMM_WORLD, 1, error ) + end if + + if (the_name .ne. 'MPI_COMM_WORLD') then + errcnt = errcnt + 1 + print *,'The name on MPI_COMM_WORLD is not "MPI_COMM_WORLD"' + call MPI_Abort( MPI_COMM_WORLD, 1, error ) + end if + + other_name = 'foobarH' + call mpi_comm_set_name(MPI_COMM_WORLD, other_name(1:6), error) + + if (error .ne. mpi_success) then + errcnt = errcnt + 1 + print *,'Failed to put a name onto MPI_COMM_WORLD' + call MPI_Abort( MPI_COMM_WORLD, 1, error ) + end if + + call xify(the_name) + + call mpi_comm_get_name (MPI_COMM_WORLD, the_name, namelen, error) + if (error .ne. mpi_success) then + errcnt = errcnt + 1 + print *,'Failed to get the name from MPI_COMM_WORLD ', + $ 'after setting it' + call MPI_Abort( MPI_COMM_WORLD, 1, error ) + end if + + if (the_name .ne. 'foobar') then + errcnt = errcnt + 1 + print *,'The name on MPI_COMM_WORLD is not "foobar"' + print *, 'Got ', the_name + call MPI_Abort( MPI_COMM_WORLD, 1, error ) + end if + + call mpi_comm_rank( MPI_COMM_WORLD, rank, error ) + if (errcnt .eq. 0 .and. rank .eq. 
0) then + print *, ' No Errors' + endif + call mpi_finalize(error) + end + + + subroutine xify( string ) + character*(*) string + + integer i + + do i = 1,len(string) + string(i:i) = 'X' + end do + + end + + diff --git a/teshsuite/smpi/mpich-test/context/context.std b/teshsuite/smpi/mpich-test/context/context.std new file mode 100644 index 0000000000..6ab43a20bb --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/context.std @@ -0,0 +1,3 @@ +FORTRAN TESTS +*** attrtest *** +END OF FORTRAN TESTS diff --git a/teshsuite/smpi/mpich-test/context/groupcreate.c b/teshsuite/smpi/mpich-test/context/groupcreate.c new file mode 100644 index 0000000000..686fa9b936 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/groupcreate.c @@ -0,0 +1,67 @@ +#include "mpi.h" +#include +/* stdlib.h Needed for malloc declaration */ +#include +#include "test.h" + +int main( int argc, char **argv ) +{ + int i, n, n_goal = 2048, n_all, rc, n_ranks, *ranks, rank, size, len; + MPI_Group *group_array, world_group; + char msg[MPI_MAX_ERROR_STRING]; + + MPI_Init( &argc, &argv ); + MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + n = n_goal; + + group_array = (MPI_Group *)malloc( n * sizeof(MPI_Group) ); + + MPI_Comm_group( MPI_COMM_WORLD, &world_group ); + + n_ranks = size; + ranks = (int *)malloc( size * sizeof(int) ); + for (i=0; i +/* stdlib.h Needed for malloc declaration */ +#include +#include "test.h" + +int main( int argc, char **argv ) +{ + int errs=0, toterr; + MPI_Group basegroup; + MPI_Group g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12; + MPI_Comm comm, newcomm, splitcomm, dupcomm; + int i, grp_rank, rank, grp_size, size, result; + int nranks, *ranks, *ranks_out; + int range[2][3]; + int worldrank; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &worldrank ); + + comm = MPI_COMM_WORLD; + + MPI_Comm_group( comm, &basegroup ); + +/* Get the basic information on this group 
*/ + MPI_Group_rank( basegroup, &grp_rank ); + MPI_Comm_rank( comm, &rank ); + if (grp_rank != rank) { + errs++; + fprintf( stdout, "group rank %d != comm rank %d\n", grp_rank, rank ); + } + + MPI_Group_size( basegroup, &grp_size ); + MPI_Comm_size( comm, &size ); + if (grp_size != size) { + errs++; + fprintf( stdout, "group size %d != comm size %d\n", grp_size, size ); + } + + +/* Form a new communicator with inverted ranking */ + MPI_Comm_split( comm, 0, size - rank, &newcomm ); + MPI_Comm_group( newcomm, &g1 ); + ranks = (int *)malloc( size * sizeof(int) ); + ranks_out = (int *)malloc( size * sizeof(int) ); + for (i=0; i + +/* + * intended to be run with at least 3 procs + */ +int main(int argc, char ** argv) +{ + MPI_Comm new_intercomm; + MPI_Comm new_comm; + int my_rank, my_size; + int rrank; + int procA, procB; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); + MPI_Comm_size( MPI_COMM_WORLD, &my_size ); + + if (my_size < 3) { + printf( "This test requires at least 3 processes: only %d provided\n", + my_size ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } +#ifdef DBG + printf("%d: Entering main()\n", my_rank); fflush(stdout); +#endif + /* pick one of the following two settings for procA,procB */ + + /* uncomment these and program will work */ + /* procA = 0; procB = 2; */ + + /* uncomment these and program will hang */ + procA = 1; procB = 2; + /* The SGI implementation of MPI fails this test */ + if (my_rank == procA || my_rank == procB) + { + if (my_rank == procA) + { + rrank = procB; + } + else + { + rrank = procA; + } +#ifdef DBG + printf("%d: Calling MPI_Intercomm_create()\n", my_rank); fflush(stdout); +#endif + MPI_Intercomm_create(MPI_COMM_SELF, 0, + MPI_COMM_WORLD, rrank, + 0, &new_intercomm); + +#ifdef DBG + printf("%d: Calling MPI_Comm_dup()\n", my_rank); fflush(stdout); +#endif + MPI_Comm_dup(new_intercomm, &new_comm); + + /* Free these new communicators */ + MPI_Comm_free( &new_comm ); + MPI_Comm_free( &new_intercomm ); + } + + 
MPI_Barrier( MPI_COMM_WORLD ); + if (my_rank == 0) { + printf( " No Errors\n" ); + } +#ifdef DBG + printf("%d: Calling MPI_Finalize()\n", my_rank); fflush(stdout); +#endif + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/context/ictest.c b/teshsuite/smpi/mpich-test/context/ictest.c new file mode 100644 index 0000000000..cba25223f8 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/ictest.c @@ -0,0 +1,124 @@ +/* -*- Mode: C; c-basic-offset:4 ; -*- */ +/* ictest.c */ +#include +#include "mpi.h" +#include "test.h" + +int verbose = 0; + +int main( int argc, char **argv ) +{ + int size, rank, key, his_key, lrank, result; + MPI_Comm myComm; + MPI_Comm myFirstComm; + MPI_Comm mySecondComm; + int errors = 0, sum_errors; + MPI_Status status; + + /* Initialization */ + MPI_Init ( &argc, &argv ); + MPI_Comm_rank ( MPI_COMM_WORLD, &rank); + MPI_Comm_size ( MPI_COMM_WORLD, &size); + + /* Only works for 2 or more processes */ + if (size >= 2) { + MPI_Comm merge1, merge2, merge3, merge4; + + /* Generate membership key in the range [0,1] */ + key = rank % 2; + + MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &myComm ); + /* This creates an intercomm that is the size of comm world + but has processes grouped by even and odd */ + MPI_Intercomm_create (myComm, 0, MPI_COMM_WORLD, (key+1)%2, 1, + &myFirstComm ); + /* Dup an intercomm */ + MPI_Comm_dup ( myFirstComm, &mySecondComm ); + MPI_Comm_rank( mySecondComm, &lrank ); + his_key = -1; + + /* Leaders communicate with each other */ + if (lrank == 0) { + MPI_Sendrecv (&key, 1, MPI_INT, 0, 0, + &his_key, 1, MPI_INT, 0, 0, mySecondComm, &status); + if (key != (his_key+1)%2) { + printf( "Received %d but expected %d\n", his_key, (his_key+1)%2 ); + errors++; + } + } + + if (errors) + printf("[%d] Failed!\n",rank); + + if (verbose) printf( "About to merge intercommunicators\n" ); + MPI_Intercomm_merge ( mySecondComm, key, &merge1 ); + MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 ); + MPI_Intercomm_merge ( 
mySecondComm, 0, &merge3 ); + MPI_Intercomm_merge ( mySecondComm, 1, &merge4 ); + + /* We should check that these are correct! An easy test is that + the merged comms are all MPI_SIMILAR (unless 2 processes used, + in which case MPI_CONGRUENT is ok */ + MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result ); + if ((size > 2 && result != MPI_SIMILAR) || + (size == 2 && result != MPI_CONGRUENT)) { + errors ++; + printf( "merge1 is not the same size as comm world\n" ); + } + /* merge 2 isn't ordered the same way as the others, even for 2 processes */ + MPI_Comm_compare( merge2, MPI_COMM_WORLD, &result ); + if (result != MPI_SIMILAR) { + errors ++; + printf( "merge2 is not the same size as comm world\n" ); + } + MPI_Comm_compare( merge3, MPI_COMM_WORLD, &result ); + if ((size > 2 && result != MPI_SIMILAR) || + (size == 2 && result != MPI_CONGRUENT)) { + errors ++; + printf( "merge3 is not the same size as comm world\n" ); + } + MPI_Comm_compare( merge4, MPI_COMM_WORLD, &result ); + if ((size > 2 && result != MPI_SIMILAR) || + (size == 2 && result != MPI_CONGRUENT)) { + errors ++; + printf( "merge4 is not the same size as comm world\n" ); + } + + /* Free communicators */ + if (verbose) printf( "About to free communicators\n" ); + MPI_Comm_free( &myComm ); + MPI_Comm_free( &myFirstComm ); + MPI_Comm_free( &mySecondComm ); + MPI_Comm_free( &merge1 ); + MPI_Comm_free( &merge2 ); + MPI_Comm_free( &merge3 ); + MPI_Comm_free( &merge4 ); + } + else { + errors ++; + printf("[%d] Failed - at least 2 nodes must be used\n",rank); + } + + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Allreduce( &errors, &sum_errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (sum_errors > 0) { + printf( "%d errors on process %d\n", errors, rank ); + } + else if (rank == 0) { + printf( " No Errors\n" ); + } + /* Finalize and end! 
*/ + + MPI_Finalize(); + return 0; +} + + + + + + + + + + diff --git a/teshsuite/smpi/mpich-test/context/ictest2.c b/teshsuite/smpi/mpich-test/context/ictest2.c new file mode 100644 index 0000000000..4b615bcc0c --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/ictest2.c @@ -0,0 +1,209 @@ +/* -*- Mode: C; c-basic-offset:4 ; -*- */ +/* ictest2.c + This is like ictest.c, but it creates communictors that are valid only + at the "leaders"; other members of the local communicator are NOT + in the remote communicator. This is done by creating two communicators: + 0, + odd rank and even rank. Only 0 is in in both communicators. + + This test originally tested the part of the standard that allowed the + leader to be in both groups. This has been disallowed. This test was + recently changed to operate correctly under the new definition. + + Note that it generates unordered printf output, and is not suitable for + automated testing. + */ +#include "mpi.h" +#include +#include "test.h" + +int verbose = 0; + +int main( int argc, char **argv ) +{ + int size, rank, key, lrank, rsize, result, remLeader = 0; + MPI_Comm myComm; + MPI_Comm myFirstComm; + MPI_Comm mySecondComm; + MPI_Comm evenComm, oddComm, remComm; + int errors = 0, sum_errors; + MPI_Status status; + + /* Initialization */ + MPI_Init ( &argc, &argv ); + MPI_Comm_rank ( MPI_COMM_WORLD, &rank); + MPI_Comm_size ( MPI_COMM_WORLD, &size); + + /* Only works for 2 or more processes */ + if (size >= 2) { + MPI_Comm merge1, merge2, merge3, merge4; + + /* Generate membership key in the range [0,1] */ + key = rank % 2; + /* Create the even communicator */ + MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &evenComm ); + if (key == 1) { + /* Odd rank communicator discarded */ + MPI_Comm_free( &evenComm ); + } + + /* Create the odd communicator */ + MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &oddComm ); + if (key == 0) { + /* Even rank communicator discarded */ + MPI_Comm_free( &oddComm ); + } + + /* Create the odd + 0 
communicator */ + if (rank == 0) key = 1; + MPI_Comm_split( MPI_COMM_WORLD, key, rank, &remComm ); + if (key == 0) { + /* Even rank communicator discarded */ + MPI_Comm_free( &remComm ); + } + else { + MPI_Comm_rank( remComm, &lrank ); + if (verbose) { + printf( "[%d] lrank in remComm is %d (color = %d, key=%d)\n", + rank, lrank, rank, key ); + } + remLeader = (lrank == 0) ? 1 : 0; + } + /* Now, choose the local and remote communicators */ + if (rank % 2) { + /* Odd */ + myComm = oddComm; + } + else { + myComm = evenComm; + } + + /* Check that the leader is who we think he is */ + MPI_Comm_rank( myComm, &lrank ); + if (verbose) { + printf( "[%d] local rank is %d\n", rank, lrank ); + } + if (rank == 0) { + int trank; + MPI_Comm_rank( myComm, &trank ); + if (trank != 0) { + printf( "[%d] Comm split improperly ordered group (myComm)\n", + rank ); + fflush(stdout); + errors++; + } + MPI_Comm_rank( remComm, &trank ); + if (trank != 0) { + printf( "[%d] Comm split improperly ordered group (remComm)\n", + rank ); + fflush(stdout); + errors++; + } + } + /* Perform the intercomm create and test it */ + /* local leader is first process in local_comm, i.e., has rank 0 */ + /* remote leader is process 0 (if odd) or 1 (if even) in remComm */ + MPI_Intercomm_create (myComm, 0, remComm, remLeader, 1, &myFirstComm ); +/* temp */ + if (verbose) { + printf( "[%d] through intercom create\n", rank ); + fflush( stdout ); + } + MPI_Barrier( MPI_COMM_WORLD ); + if (verbose) { + printf( "[%d] through barrier at end of intercom create\n", rank ); + fflush( stdout ); + } +/* temp */ + + /* Try to dup this communicator */ + MPI_Comm_dup ( myFirstComm, &mySecondComm ); + +/* temp */ + if (verbose) { + printf( "[%d] through comm dup\n", rank ); + fflush( stdout ); + } + MPI_Barrier( MPI_COMM_WORLD ); + if (verbose) { + printf( "[%d] through barrier at end of comm dup\n", rank ); + fflush( stdout ); + } +/* temp */ + + /* Each member shares data with his "partner". 
Note that process 0 in + MPI_COMM_WORLD is sending to itself, since it is process 0 in both + remote groups */ + MPI_Comm_rank( mySecondComm, &lrank ); + MPI_Comm_remote_size( mySecondComm, &rsize ); + + if (verbose) { + printf( "[%d] lrank in secondcomm is %d and remote size is %d\n", + rank, lrank, rsize ); + fflush( stdout ); + } + + /* Send key * size + rank in communicator */ + if (lrank < rsize) { + int myval, hisval; + key = rank % 2; + myval = key * size + lrank; + hisval = -1; + if (verbose) { + printf( "[%d] exchanging %d with %d in intercomm\n", + rank, myval, lrank ); + fflush( stdout ); + } + MPI_Sendrecv (&myval, 1, MPI_INT, lrank, 0, + &hisval, 1, MPI_INT, lrank, 0, mySecondComm, &status); + if (hisval != (lrank + (!key)*size)) { + printf( "[%d] expected %d but got %d\n", rank, lrank + (!key)*size, + hisval ); + errors++; + } + } + + if (errors) { + printf("[%d] Failed!\n",rank); + fflush(stdout); + } + + /* Key is 1 for oddComm, 0 for evenComm (note both contain 0 in WORLD) */ + MPI_Intercomm_merge ( mySecondComm, key, &merge1 ); + MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 ); + MPI_Intercomm_merge ( mySecondComm, 0, &merge3 ); + MPI_Intercomm_merge ( mySecondComm, 1, &merge4 ); + + MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result ); + if (result != MPI_SIMILAR && size > 2) { + printf( "[%d] comparision with merge1 failed\n", rank ); + errors++; + } + + /* Free communicators */ + MPI_Comm_free( &myComm ); + /* remComm may have been freed above */ + if (remComm != MPI_COMM_NULL) + MPI_Comm_free( &remComm ); + MPI_Comm_free( &myFirstComm ); + MPI_Comm_free( &mySecondComm ); + MPI_Comm_free( &merge1 ); + MPI_Comm_free( &merge2 ); + MPI_Comm_free( &merge3 ); + MPI_Comm_free( &merge4 ); + } + else { + printf("[%d] Failed - at least 2 nodes must be used\n",rank); + } + + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Allreduce( &errors, &sum_errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (sum_errors > 0) { + printf( "%d errors on process %d\n", 
errors, rank ); + } + else if (rank == 0) { + printf( " No Errors\n" ); + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/context/ictest3.c b/teshsuite/smpi/mpich-test/context/ictest3.c new file mode 100644 index 0000000000..26b2d970da --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/ictest3.c @@ -0,0 +1,195 @@ +/* ictest3.c + This is like ictest2.c, but it creates communictors that are valid only + at the "leaders"; other members of the local communicator are NOT + in the remote communicator. A peer communicator is constructed that + contains both leaders. + + + */ +#include "mpi.h" +#include +#include "test.h" + +/* #define DEBUG */ + +int verbose = 0; + +int main( int argc, char **argv ) +{ + int size, rank, key, lrank, rsize, result; + MPI_Comm myFirstComm; + MPI_Comm mySecondComm; + MPI_Comm newComm, peerComm; + MPI_Group rgroup, lgroup, igroup; + int errors = 0, sum_errors; + int flag; + MPI_Status status; + + /* Initialization */ + MPI_Init ( &argc, &argv ); + MPI_Comm_rank ( MPI_COMM_WORLD, &rank); + MPI_Comm_size ( MPI_COMM_WORLD, &size); + + /* Only works for 2 or more processes */ + /* + We create an even and odd communicator, then create an + intercommunicator out of them. For this purpose, we use a + "peer" communicator valid only at one member of each of the odd and + even communicators. 
+ */ + if (size >= 2) { + MPI_Comm merge1, merge2, merge3, merge4; + + /* Generate membership key in the range [0,1] */ + key = rank % 2; + /* Create the even communicator and odd communicators */ + MPI_Comm_split ( MPI_COMM_WORLD, key, rank, &newComm ); + + MPI_Comm_test_inter( newComm, &flag ); + if (flag) { + errors++; + printf( "[%d] got test_inter gave true for intra comm\n", rank ); + } + + /* Create the "peer" communicator */ + key = 0; + if (rank < 2) key = 1; + MPI_Comm_split( MPI_COMM_WORLD, key, rank, &peerComm ); + if (key == 0) { + MPI_Comm_free( &peerComm ); + } +#ifdef DEBUG + else { + MPI_Comm_rank( peerComm, &lrank ); + printf( "[%d] lrank in peerComm is %d (color = %d, key=%d)\n", + rank, lrank, key, rank ); + } +#endif + + /* Check that the leader is who we think he is */ + MPI_Comm_rank( newComm, &lrank ); + /* printf( "[%d] local rank is %d\n", rank, lrank ); + fflush(stdout); */ + /* Perform the intercomm create and test it */ + /* Local leader is always the one at rank 0. 
*/ + /* If even, the remote leader is rank 1, if odd, the remote leader + is rank 0 in the peercomm */ + MPI_Intercomm_create (newComm, 0, peerComm, !(rank % 2), 1, &myFirstComm ); +#ifdef DEBUG + printf( "[%d] through intercom create\n", rank ); + fflush( stdout ); + MPI_Barrier( MPI_COMM_WORLD ); + printf( "[%d] through barrier at end of intercom create\n", rank ); +#endif + MPI_Comm_test_inter( myFirstComm, &flag ); + if (!flag) { + errors++; + printf( "[%d] got test_inter gave false for inter comm\n", rank ); + } + + /* Try to dup this communicator */ + MPI_Comm_dup ( myFirstComm, &mySecondComm ); + MPI_Comm_test_inter( mySecondComm, &flag ); + if (!flag) { + errors++; + printf( "[%d] got test_inter gave false for dup of inter comm\n", + rank ); + } + +#ifdef DEBUG + printf( "[%d] through comm dup\n", rank ); + fflush( stdout ); + MPI_Barrier( MPI_COMM_WORLD ); + printf( "[%d] through barrier at end of comm dup\n", rank ); +#endif + + /* Each member shares data with his "partner". */ + MPI_Comm_rank( mySecondComm, &lrank ); + MPI_Comm_remote_size( mySecondComm, &rsize ); + +#ifdef DEBUG + printf( "[%d] lrank in secondcomm is %d and remote size is %d\n", + rank, lrank, rsize ); + fflush( stdout ); +#endif + + /* Check that the remote group is what we think */ + MPI_Comm_remote_group( mySecondComm, &rgroup ); + MPI_Comm_group( newComm, &lgroup ); + MPI_Group_intersection( rgroup, lgroup, &igroup ); + MPI_Group_compare( igroup, MPI_GROUP_EMPTY, &flag ); + if (flag != MPI_IDENT) { + errors++; + printf( "[%d] intersection of remote and local group is not empty\n", + rank ); + } + MPI_Group_free( &rgroup ); + MPI_Group_free( &lgroup ); + MPI_Group_free( &igroup ); + + /* Send key * size + rank in communicator */ + if (lrank < rsize) { + int myval, hisval; + key = rank % 2; + myval = key * size + lrank; + hisval = -1; +#ifdef DEBUG + printf( "[%d] exchanging %d with %d in intercomm\n", + rank, myval, lrank ); + fflush( stdout ); +#endif + MPI_Sendrecv (&myval, 1, 
MPI_INT, lrank, 0, + &hisval, 1, MPI_INT, lrank, 0, mySecondComm, &status); + if (hisval != (lrank + (!key)*size)) { + printf( "[%d] expected %d but got %d\n", rank, lrank + (!key)*size, + hisval ); + errors++; + } + } + + if (errors) + printf("[%d] Failed!\n",rank); + + /* Key is 1 for oddComm, 0 for evenComm (note both contain 0 in WORLD) */ +#ifdef DEBUG + printf( "[%d] starting intercom merge\n", rank ); + fflush( stdout ); +#endif + MPI_Intercomm_merge ( mySecondComm, key, &merge1 ); + MPI_Intercomm_merge ( mySecondComm, (key+1)%2, &merge2 ); + MPI_Intercomm_merge ( mySecondComm, 0, &merge3 ); + MPI_Intercomm_merge ( mySecondComm, 1, &merge4 ); + + MPI_Comm_compare( merge1, MPI_COMM_WORLD, &result ); + if (result != MPI_SIMILAR && size > 2) { + printf( "[%d] comparision with merge1 failed\n", rank ); + errors++; + } + + /* Free communicators */ + if (verbose) printf( "about to free communicators\n" ); + MPI_Comm_free( &newComm ); + if (peerComm != MPI_COMM_NULL) MPI_Comm_free( &peerComm ); + MPI_Comm_free( &myFirstComm ); + MPI_Comm_free( &mySecondComm ); + MPI_Comm_free( &merge1 ); + MPI_Comm_free( &merge2 ); + MPI_Comm_free( &merge3 ); + MPI_Comm_free( &merge4 ); + } + else + printf("[%d] Failed - at least 2 nodes must be used\n",rank); + + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Allreduce( &errors, &sum_errors, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (sum_errors > 0) { + printf( "%d errors on process %d\n", errors, rank ); + } + else if (rank == 0) { + printf( " No Errors\n" ); + } + /* Finalize and end! */ + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/context/runtests b/teshsuite/smpi/mpich-test/context/runtests new file mode 100755 index 0000000000..477e990e45 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/runtests @@ -0,0 +1,136 @@ +#! /bin/sh +# +# Run some of the tests. If any arguments are provided, pass them to the +# test programs. 
+# +# -mvhome is needed for the ANL SP, and is ignored by others +args= +device= +MPICH_VERSION= +STOPFILE=${MPITEST_STOPTEST:-"$HOME/.stopmpichtests"} +MAKE="make --no-print-directory" + +# +# Set mpirun to the name/path of the mpirun program +#FindMPIRUN +# +# +test_mpi2=1 +runtests=1 +quiet=0 +makeeach=0 +writesummaryfile=no +MAKE="make --no-print-directory" +for arg in "$@" ; do + case $arg in + -basedir=* ) + basedir=`echo $arg | sed 's/-basedir=//'` + ;; + -srcdir=* ) + srcdir=`echo $arg | sed 's/-srcdir=//'` + ;; + -checkonly ) + runtests=0 + ;; + -margs=*) + margs=`echo $arg | sed 's/-margs=//'` + args="$args $margs" + ;; + -summaryfile=*) + writesummaryfile=yes + summaryfile=`echo A$arg | sed 's/A-summaryfile=//'` + ;; + -small) + makeeach=1 + ;; + -quiet) + shift + quiet=1 + ;; + -help|-u) + echo "runtests [-checkonly] [-margs='...']" + echo "run tests in this directory. If -checkonly set, just run" + echo "the differences check (do NO rerun the test programs)." + echo "If -margs is used, these options are passed to mpirun." + echo "If -small is used, the examples are built, run, and deleted." + exit 1 + ;; + *) + if test -n "$arg" ; then + echo "runtests: Unknown argument ($arg)" + exit 1 + fi + ;; + esac +done + +# +# Load basic procedures +. ${srcdir}/../runbase + +# If the programs are not available, run make. +if [ ! 
-x attrerr -a $makeeach = 0 -a $runtests = 1 ] ; then + $MAKE +fi +mpirun=" ${basedir}/bin/smpirun -platform ${srcdir}/../../../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir}/../../hostfile --log=root.thres:critical" +testfiles="" +if [ $runtests = 1 ] ; then + +# +# Run Fortran tests ONLY if Fortran available +if [ 0 = 1 ] ; then + RunTest attrtest 2 "*** Testing attributes from Fortran ***" + + if [ $test_mpi2 = 1 ] ; then + RunTest commnamesf 1 "*** Testing Communicator Names from Fortran ***" + fi +fi +#uses attr, not implemented +#RunTest attrt 2 "*** Testing attributes ***" +#fails +RunTest attrerr 1 "*** Testing attributes (2) ***" + +#fails with unions, excludes or intersections, need debug in smpi to work -> left, but wrong +RunTest grouptest_mpich 4 "*** Testing Groups ***" + +RunTest groupcreate 4 "*** Testing Group creation ***" + +#uses MPI_Intercomm_create +#RunTest ictest 4 "*** Testing Intercommunicators ***" + +RunTest icdup 3 "*** Testing dup of an intercommunicator ***" + +# +# ictest2 relies on a inconsistency in the standard, to wit, that the +# leader in both groups can be the same process. This seems to be +# essential in a dynamic setting, since the only process both groups can +# access may be the single parent process (other than using client/server +# intercommunicator creating routines, with the parent providing the common +# information). 
+# +#testfiles="$testfiles ictest2.out" +#rm -f ictest2.out +#MakeExe ictest2 +#echo '*** Testing Intercommunicators (2) ***' +#echo '*** Testing Intercommunicators (2) ***' >> ictest2.out +#$mpirun $args -np 4 ictest2 $* >> ictest2.out 2>&1 +#echo '*** Testing Intercommunicators (2) ***' >> ictest2.out +#CleanExe ictest2 +#uses MPI_Comm_test_inter and MPI_Intercomm_create +#RunTest ictest3 4 "*** Testing Intercommunicators (3) ***" + +if [ 0 = 1 ] ; then + + RunTest commnames 2 "*** Testing Communicator Names ***" +fi +else + # Just run checks + testfiles=`echo *.out` +fi + +echo '*** Checking for differences from expected output ***' +CheckAllOutput context.diff +exit 0 + + + diff --git a/teshsuite/smpi/mpich-test/context/test.c b/teshsuite/smpi/mpich-test/context/test.c new file mode 100644 index 0000000000..f276bb6ce9 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/test.c @@ -0,0 +1,94 @@ +/* Procedures for recording and printing test results */ + +#include +#include +#include "test.h" +#include "mpi.h" + +static int tests_passed = 0; +static int tests_failed = 0; +static char failed_tests[255][81]; +static char suite_name[255]; +FILE *fileout = NULL; + +void Test_Init(suite, rank) +const char *suite; +int rank; +{ + char filename[512]; + + sprintf(filename, "%s-%d.out", suite, rank); + strncpy(suite_name, suite, 255); + fileout = fopen(filename, "w"); + if (!fileout) { + fprintf( stderr, "Could not open %s on node %d\n", filename, rank ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } +} + +void Test_Message(mess) +const char *mess; +{ + fprintf(fileout, "[%s]: %s\n", suite_name, mess); + fflush(fileout); +} + +void Test_Failed(test) +const char *test; +{ + fprintf(fileout, "[%s]: *** Test '%s' Failed! 
***\n", suite_name, test); + strncpy(failed_tests[tests_failed], test, 81); + fflush(fileout); + tests_failed++; +} + +void Test_Passed(test) +const char *test; +{ + fprintf(fileout, "[%s]: Test '%s' Passed.\n", suite_name, test); + fflush(fileout); + tests_passed++; +} + +int Summarize_Test_Results() +{ + fprintf(fileout, "For test suite '%s':\n", suite_name); + fprintf(fileout, "Of %d attempted tests, %d passed, %d failed.\n", + tests_passed + tests_failed, tests_passed, tests_failed); + if (tests_failed > 0) { + int i; + + fprintf(fileout, "*** Tests Failed:\n"); + for (i = 0; i < tests_failed; i++) + fprintf(fileout, "*** %s\n", failed_tests[i]); + } + return tests_failed; +} + +void Test_Finalize() +{ + fflush(fileout); + fclose(fileout); +} + +#include "mpi.h" +/* Wait for every process to pass through this point. This test is used + to make sure that all processes complete, and that a test "passes" because + it executed, not because it some process failed. + */ +void Test_Waitforall( ) +{ +int m, one, myrank, n; + +MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); +MPI_Comm_size( MPI_COMM_WORLD, &n ); +one = 1; +MPI_Allreduce( &one, &m, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + +if (m != n) { + printf( "[%d] Expected %d processes to wait at end, got %d\n", myrank, + n, m ); + } +if (myrank == 0) + printf( "All processes completed test\n" ); +} diff --git a/teshsuite/smpi/mpich-test/context/test.h b/teshsuite/smpi/mpich-test/context/test.h new file mode 100644 index 0000000000..1eaf6fc0c6 --- /dev/null +++ b/teshsuite/smpi/mpich-test/context/test.h @@ -0,0 +1,24 @@ +/* Header for testing procedures */ + +#ifndef _INCLUDED_TEST_H_ +#define _INCLUDED_TEST_H_ + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Test_Init (const char *, int); +#ifdef USE_STDARG +void Test_Printf (const char *, ...); +#else +/* No prototype */ +void Test_Printf(); +#endif +void Test_Message (const char *); +void Test_Failed (const char *); +void Test_Passed (const 
char *); +int Summarize_Test_Results (void); +void Test_Finalize (void); +void Test_Waitforall (void); + +#endif diff --git a/teshsuite/smpi/mpich-test/env/CMakeLists.txt b/teshsuite/smpi/mpich-test/env/CMakeLists.txt new file mode 100644 index 0000000000..12224c8713 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/CMakeLists.txt @@ -0,0 +1,79 @@ +cmake_minimum_required(VERSION 2.6) + +if(enable_smpi) + set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/bin/smpicc") + + set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") + set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1") + + include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi") + + add_executable(init init.c ) + add_executable(timers timers.c test.c) + add_executable(timertest timertest.c test.c) + add_executable(baseattr baseattr.c test.c) + add_executable(gtime gtime.c test.c) + add_executable(errhand errhand.c test.c) + add_executable(sigchk sigchk.c test.c) + add_executable(aborttest aborttest.c) + add_executable(testerr testerr.c) + add_executable(getproc getproc.c) + + target_link_libraries(init m simgrid smpi ) + target_link_libraries(timers m simgrid smpi ) + target_link_libraries(timertest m simgrid smpi ) + target_link_libraries(baseattr m simgrid smpi ) + target_link_libraries(gtime m simgrid smpi ) + target_link_libraries(errhand m simgrid smpi ) + target_link_libraries(sigchk m simgrid smpi ) + target_link_libraries(aborttest m simgrid smpi ) + target_link_libraries(testerr m simgrid smpi ) + target_link_libraries(getproc m simgrid smpi ) + + + set_target_properties(timers PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(timers PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(timertest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(baseattr PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + 
set_target_properties(gtime PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(errhand PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sigchk PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(aborttest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(testerr PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(getproc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + +endif(enable_smpi) + +set(tesh_files + ${tesh_files} + ${CMAKE_CURRENT_SOURCE_DIR}/env.tesh + PARENT_SCOPE + ) +set(xml_files + ${xml_files} + PARENT_SCOPE + ) +set(examples_src + ${examples_src} + ${CMAKE_CURRENT_SOURCE_DIR}/test.c + ${CMAKE_CURRENT_SOURCE_DIR}/init.c + ${CMAKE_CURRENT_SOURCE_DIR}/timers.c + ${CMAKE_CURRENT_SOURCE_DIR}/baseattr.c + ${CMAKE_CURRENT_SOURCE_DIR}/gtime.c + ${CMAKE_CURRENT_SOURCE_DIR}/errhand.c + ${CMAKE_CURRENT_SOURCE_DIR}/sigchk.c + ${CMAKE_CURRENT_SOURCE_DIR}/aborttest.c + ${CMAKE_CURRENT_SOURCE_DIR}/testerr.c + ${CMAKE_CURRENT_SOURCE_DIR}/getproc.c + ${CMAKE_CURRENT_SOURCE_DIR}/test.h + PARENT_SCOPE + ) +set(bin_files + ${bin_files} + ${CMAKE_CURRENT_SOURCE_DIR}/../hostfile + PARENT_SCOPE + ) +set(txt_files + ${txt_files} + PARENT_SCOPE + ) diff --git a/teshsuite/smpi/mpich-test/env/aborttest.c b/teshsuite/smpi/mpich-test/env/aborttest.c new file mode 100644 index 0000000000..244cb9e733 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/aborttest.c @@ -0,0 +1,34 @@ +#include "mpi.h" +/* This simple test checks that MPI_Abort kills all processes + * There are two interesting cases: + * masternode == 0 + * masternode != 0 + */ +int main( int argc, char **argv ) +{ + int node, size, i; + int masternode = 0; + + MPI_Init(&argc, &argv); + + MPI_Comm_rank(MPI_COMM_WORLD, &node); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + /* Check for -altmaster */ + for (i=1; i +#include "mpi.h" + +int main( int argc, char **argv ) +{ + int i; + + fprintf(stdout,"Before MPI_Init\n"); + for (i = 0; i < argc; i++) 
+ fprintf(stdout,"arg %d is %s\n", i, argv[i]); + + MPI_Init( &argc, &argv ); + + fprintf(stdout,"After MPI_Init\n"); + for (i = 0; i < argc; i++) + fprintf(stdout,"arg %d is %s\n", i, argv[i]); + + MPI_Finalize( ); +} diff --git a/teshsuite/smpi/mpich-test/env/baseattr.c b/teshsuite/smpi/mpich-test/env/baseattr.c new file mode 100644 index 0000000000..9555fb7792 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/baseattr.c @@ -0,0 +1,48 @@ +#include +#include "mpi.h" +#include "test.h" + +int main( int argc, char **argv) +{ + int err = 0; + void *v; + int flag; + int vval; + int rank, size; + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, &v, &flag ); + if (!flag || (vval = *(int*)v)< 32767) { + err++; + fprintf( stderr, "Could not get TAG_UB or got too-small value\n" ); + } + MPI_Attr_get( MPI_COMM_WORLD, MPI_HOST, &v, &flag ); + vval = *(int*)v; + if (!flag || ((vval < 0 || vval >= size) && vval != MPI_PROC_NULL)) { + err++; + fprintf( stderr, "Could not get HOST or got invalid value\n" ); + } + MPI_Attr_get( MPI_COMM_WORLD, MPI_IO, &v, &flag ); + vval = *(int*)v; + if (!flag || ((vval < 0 || vval >= size) && vval != MPI_ANY_SOURCE && + vval != MPI_PROC_NULL)) { + err++; + fprintf( stderr, "Could not get IO or got invalid value\n" ); + } + MPI_Attr_get( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL, &v, &flag ); + if (flag) { + /* Wtime need not be set */ + vval = *(int*)v; + if (vval < 0 || vval > 1) { + err++; + fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n", + vval ); + } + } + Test_Waitforall( ); + MPI_Finalize( ); + + return err; +} diff --git a/teshsuite/smpi/mpich-test/env/baseattrf.f b/teshsuite/smpi/mpich-test/env/baseattrf.f new file mode 100644 index 0000000000..b07935c84f --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/baseattrf.f @@ -0,0 +1,35 @@ + + program main + integer err, ierr + integer v + logical flag + integer rank, size + 
include 'mpif.h' + + err = 0 + call MPI_Init( ierr ) + call MPI_Comm_size( MPI_COMM_WORLD, size, ierr ) + call MPI_Comm_rank( MPI_COMM_WORLD, rank, ierr ) + call MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, v, flag, ierr ) + if (.not. flag .or. v .lt. 32767) then + err = err + 1 + print *, 'Could not get TAG_UB or got too-small value', v + endif +c + call MPI_Attr_get( MPI_COMM_WORLD, MPI_HOST, v, flag, ierr ) + if (.not. flag .or. ((v .lt. 0 .or. v .ge. size) .and. + * v .ne. MPI_PROC_NULL)) then + err = err + 1 + print *, 'Could not get HOST or got invalid value', v + endif +c + call MPI_Attr_get( MPI_COMM_WORLD, MPI_IO, v, flag, ierr ) + if (.not. flag .or. (( v .lt. 0 .or. v .gt. size) .and. + * v .ne. MPI_PROC_NULL .and. + * v .ne. MPI_ANY_SOURCE)) then + err = err + 1 + print *, 'Could not get IO or got invalid value', v + endif + call MPI_Finalize( ierr ) + + end diff --git a/teshsuite/smpi/mpich-test/env/cmdline.c b/teshsuite/smpi/mpich-test/env/cmdline.c new file mode 100644 index 0000000000..87f62edc84 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/cmdline.c @@ -0,0 +1,54 @@ +#include +#include +#include "mpi.h" + +/* + This is a test program to see if command line arguments are handled + well. 
Note that MPI doesn't *require* anything here, so this is + simply used to acess "quality of implementation" + + run with arguments + a "b c" "d'e" 'f"g" h' + */ +int main( int argc, char *argv[] ) +{ + int i, rank, toterr, err = 0; + static char *eargv[5]; + + eargv[1] = "a"; + eargv[2] = "b c"; + eargv[3] = "d'e"; + eargv[4] = "f\"g\" h"; + + MPI_Init( &argc, &argv ); + + for (i=1; i<=4; i++) { + if (!argv[i]) { + printf( "Argument %d is null!\n", i ); + err++; + } + } + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (toterr) { + MPI_Abort( 1, MPI_COMM_WORLD ); + return 0; + } + + /* a "b c" "d'e" 'f"g" h' */ + for (i=1; i<=4; i++) { + if (strcmp( argv[i], eargv[i] ) != 0) { + err++; + printf( "Found %s but expected %s\n", argv[i], eargv[i] ); + } + } + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + if (rank == 0) { + if (toterr) printf( "Found %d errors\n", toterr ); + else printf( " No Errors\n" ); + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/env/env.std b/teshsuite/smpi/mpich-test/env/env.std new file mode 100644 index 0000000000..f421d7c9bb --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/env.std @@ -0,0 +1,10 @@ +Timer tests +Basic attributes test +Error handling test +*** Error Handling *** + Three error messages (from two errors) are expected +which should both show an error class of 13 +(first) 13 : Invalid group passed to function +(errhandler) 13 : Invalid group passed to function +(second) 13 : Invalid group passed to function +Signal test diff --git a/teshsuite/smpi/mpich-test/env/errhand.c b/teshsuite/smpi/mpich-test/env/errhand.c new file mode 100644 index 0000000000..fe7766cb30 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/errhand.c @@ -0,0 +1,242 @@ +#include +#include "mpi.h" +#include "test.h" + +int verbose = 0; +int global_errors = 0; +int Test_errorhandling (void); +/* This is complicated by the fact that not 
all systems correctly + implement stdargs (for the ...) declarations). MPICH uses USE_STDARG + as the choice here, instead of the cases + if defined(__STDC__) || defined(__cplusplus) || defined(HAVE_PROTOTYPES) + */ +#if defined(USE_STDARG) +void handler_a( MPI_Comm *, int *, ...); +void handler_b( MPI_Comm *, int *, ...); +void error_handler(MPI_Comm *, int *, ...); +#else +void handler_a ( MPI_Comm *, int * ); +void handler_b ( MPI_Comm *, int * ); +void error_handler ( MPI_Comm *, int * ); +#endif + +/* + Test the error handers (based on a Fortran test program) + */ +int main( int argc, char **argv ) +{ + MPI_Init( &argc, &argv ); + + Test_errorhandling(); + + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} + +static int a_errors, b_errors; + +int Test_errorhandling( void ) +{ + char errstring[MPI_MAX_ERROR_STRING]; + MPI_Comm dup_comm_world, dummy; + MPI_Comm tempcomm; + MPI_Errhandler errhandler_a, errhandler_b, errhandler, old_handler; + int err, world_rank, class, resultlen; + +#ifdef FOO + logical test_default, test_abort +#endif + + MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + MPI_Comm_dup( MPI_COMM_WORLD, &dup_comm_world ); + + if (world_rank == 0 && verbose) { + printf( "*** Error Handling ***\n" ); + } + +/* + Exercise save/restore of user error handlers. 
+ */ + a_errors = 0; + MPI_Errhandler_create(handler_a, &errhandler_a); + + MPI_Errhandler_set(dup_comm_world, errhandler_a); + MPI_Errhandler_free(&errhandler_a); + + if (verbose) printf( "create with null group 1\n" ); + MPI_Comm_create(dup_comm_world, MPI_GROUP_NULL, &dummy); + if (a_errors != 1) { + Test_Failed( " error handler A not invoked\n" ); + global_errors ++; + } + b_errors = 0; + MPI_Errhandler_create(handler_b, &errhandler_b); + MPI_Errhandler_get(dup_comm_world, &old_handler); + /* The following is needed to preserve an old handler */ + MPI_Comm_dup( MPI_COMM_SELF, &tempcomm ); + MPI_Errhandler_set( tempcomm, old_handler ); + MPI_Errhandler_set(dup_comm_world, errhandler_b); + MPI_Errhandler_free(&errhandler_b); + if (verbose) printf( "create with null group 2\n" ); + MPI_Comm_create(dup_comm_world, MPI_GROUP_NULL, &dummy); + if (b_errors != 1) { + Test_Failed( " error handler B not invoked\n" ); + global_errors++; + } + + MPI_Errhandler_set(dup_comm_world, old_handler); + MPI_Comm_free( &tempcomm ); + /* MPI_Errhandler_free(&old_handler); */ + if (verbose) printf( "create with null group 3\n" ); + MPI_Comm_create(dup_comm_world, MPI_GROUP_NULL, &dummy); + if (a_errors != 2) { + Test_Failed( " error handler A not re-invoked\n" ); + global_errors++; + } +/* + Exercise class & string interrogation. 
+ */ + MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_ARE_FATAL); + + if (verbose) + printf( " Three error messages (from two errors) are expected\n\ +which should both show an error class of %d\n", MPI_ERR_GROUP ); + + MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_RETURN); + if (verbose) printf( "create with null group 4\n" ); + err = MPI_Comm_create(dup_comm_world, MPI_GROUP_NULL, &dummy); + if (err != MPI_SUCCESS) { + MPI_Error_class(err, &class); + MPI_Error_string(err, errstring, &resultlen); + if (verbose) + printf( "(first) %d : %s\n", class, errstring ); + else if (class != MPI_ERR_GROUP) { + Test_Failed( "(first) Class is not MPI_ERR_GROUP\n" ); + global_errors++; + } + } + else { + MPI_Comm_free( &dummy ); + Test_Failed( "Did not detect error when building communicator\n" ); + global_errors++; + } + MPI_Errhandler_create(error_handler, &errhandler); + MPI_Errhandler_set(dup_comm_world, errhandler); + MPI_Errhandler_free(&errhandler); + if (verbose) printf( "create with null group 5\n" ); + err = MPI_Comm_create(dup_comm_world, MPI_GROUP_NULL, &dummy); + if (err != MPI_SUCCESS) { + MPI_Error_class(err, &class); + MPI_Error_string(err, errstring, &resultlen); + if (verbose) + printf( "(second) %d : %s\n", class, errstring ); + else if (class != MPI_ERR_GROUP) { + Test_Failed( "(second) class was not MPI_ERR_GROUP" ); + global_errors++; + } + } + else { + MPI_Comm_free( &dummy ); + Test_Failed( "Did not detect error in building communicator\n" ); + global_errors++; + } + MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_ARE_FATAL); + +#ifdef FOO + if (test_default) { + printf("Forcing error for default handler...\n"); + MPI_Comm_create(dup_comm_world, MPI_GROUP_NULL, &dummy); + } + if (test_abort) { + printf( "Calling MPI_Abort...\n" ); + MPI_Abort(MPI_COMM_WORLD, 123456768); + } +#endif + + MPI_Comm_free( &dup_comm_world ); + +#if 0 + errs = global_errors; + MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if 
(toterrs == 0) + printf( " No Errors\n" ); + else + printf( " Found %d errors\n", toterrs ); + } +#endif + return 0; +} + + +/* + + Trivial error handler. Note that FORTRAN error handlers can't + deal with the varargs stuff the C handlers can. + + */ +#if defined(USE_STDARG) +void error_handler(MPI_Comm *comm, int *err, ...) +#else +void error_handler(MPI_Comm *comm, int *err) +#endif +{ + int class; + int resultlen; + char string[MPI_MAX_ERROR_STRING]; + + MPI_Error_class(*err, &class); + MPI_Error_string(*err, string, &resultlen); + if (verbose) + printf( "(errhandler) %d : %s\n", class, string ); + else { + if (class != MPI_ERR_GROUP) { + printf( "(errhandler) class = %d, expected %d (MPI_ERR_GROUP)\n", + class, MPI_ERR_GROUP ); + printf( " message %s\n", string ); + global_errors++; + } + } +} +/* + Error handler A, used for save/restore testing. + */ + +#if defined(USE_STDARG) +void handler_a( MPI_Comm *comm, int *err, ...) +#else +void handler_a(MPI_Comm *comm, int err) +#endif +{ + int class; + + MPI_Error_class(*err, &class); + if (class != MPI_ERR_GROUP) { + printf( "handler_a: incorrect error class %d\n", class ); + } + *err = MPI_SUCCESS; + a_errors++; +} + +/* + Error handler B, used for save/restore testing. + */ + +#if defined(USE_STDARG) +void handler_b(MPI_Comm *comm, int *err, ...) +#else +void handler_b(comm, err) +MPI_Comm *comm; +int *err; +#endif +{ + int class; + + MPI_Error_class(*err, &class); + if (class != MPI_ERR_GROUP) { + printf( "handler_b: incorrect error class %d\n", class ); + } + *err = MPI_SUCCESS; + b_errors++; +} diff --git a/teshsuite/smpi/mpich-test/env/errhand2.c b/teshsuite/smpi/mpich-test/env/errhand2.c new file mode 100644 index 0000000000..7a9f4b91e6 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/errhand2.c @@ -0,0 +1,62 @@ +#include +#include "mpi.h" +#include "test.h" + +#ifdef USE_STDARG +void errfunc( MPI_Comm *, int *, ... 
); +#else +void errfunc( MPI_Comm *, int * ); +#endif + +/* + * Test the reference count semantics of error handlers. + */ +int main( int argc, char *argv[] ) +{ + MPI_Errhandler errhandler, olderrhandler; + MPI_Comm newcomm; + int rc, errcnt = 0; + + MPI_Init( &argc, &argv ); + + MPI_Comm_dup( MPI_COMM_WORLD, &newcomm ); + MPI_Errhandler_create( errfunc, &errhandler ); + MPI_Errhandler_set( newcomm, errhandler ); + /* Once you set it, you should be able to free it */ + MPI_Errhandler_free( &errhandler ); + if (errhandler != MPI_ERRHANDLER_NULL) { + printf( "Freed errhandler is not set to NULL\n" ); + errcnt++; + } + MPI_Errhandler_get( newcomm, &olderrhandler ); + MPI_Comm_free( &newcomm ); + + /* olderrhandler should now be invalid. Is it? */ + /* This test is based on an interpretation of the MPI standard that + was subsequently overturned. See the MPI-1.1 errata. + An Errhandler_get is similar to an MPI_Comm_group (having the + effect of creating a copy to the object). */ + MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ); + rc = MPI_Errhandler_set( MPI_COMM_WORLD, olderrhandler ); + /* In the old interpretation, the test is !rc */ + if (rc) { + printf( "Olderrhandler invalid after get and comm freed!\n" ); + errcnt ++; + } + + if (errcnt) + printf( "Found %d errors!\n", errcnt ); + else + printf( " No Errors\n" ); + + MPI_Finalize( ); + return 0; +} + +#if defined(USE_STDARG) +void errfunc( MPI_Comm *comm, int *err, ...) 
+#else +void errfunc( MPI_Comm *comm, int *err) +#endif +{ +} diff --git a/teshsuite/smpi/mpich-test/env/errhandf.f b/teshsuite/smpi/mpich-test/env/errhandf.f new file mode 100644 index 0000000000..ad82413fbf --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/errhandf.f @@ -0,0 +1,56 @@ +C +C Test that error handlers can be applied and used through Fortran +C + program main + + include 'mpif.h' + integer ierr, errorclass + integer buf, errors, request +C + call mpi_init(ierr) +C +C Try to set the errors-return handler +C + call mpi_errhandler_set(mpi_comm_world, mpi_errors_return, ierr) + errors = 0 +C +C Activate the handler with a simple case +C + call mpi_send( buf, 1, MPI_INTEGER, -99, 0, MPI_COMM_WORLD, ierr ) + if (IERR .eq. MPI_SUCCESS) then + errors = errors + 1 + print *, 'MPI_Send of negative rank did not return error' + endif +C +C Check for a reasonable error message + call mpi_error_class(ierr, errorclass, err) + if (errorclass .ne. MPI_ERR_RANK) then + errors = errors + 1 + print *, 'Error class was not MPI_ERR_RANK, was ', errorclass + endif +C +C Activate the handler with a simple case +C + call mpi_irecv( buf, 1, MPI_INTEGER, -100, 2, MPI_COMM_WORLD, + * request, ierr ) + if (IERR .eq. MPI_SUCCESS) then + errors = errors + 1 + print *, 'MPI_Irecv of negative rank did not return error' + endif +C +C Check for a reasonable error message + call mpi_error_class(ierr, errorclass, err) + if (errorclass .ne. MPI_ERR_RANK) then + errors = errors + 1 + print *, 'Error class was not MPI_ERR_RANK, was ', errorclass + endif + + if (errors .eq. 
0) then + print *, ' No Errors' + else + print *, ' Found ', errors, ' errors' + endif +C + call mpi_finalize(ierr) +C + end diff --git a/teshsuite/smpi/mpich-test/env/errstringsf.f b/teshsuite/smpi/mpich-test/env/errstringsf.f new file mode 100644 index 0000000000..2f2167c680 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/errstringsf.f @@ -0,0 +1,48 @@ +c +c Check the handling of error strings from Fortran +c + + program errstringsf + + include 'mpif.h' + + character*(MPI_MAX_ERROR_STRING) errs + integer i, reslen, reserr, ierr + +c +c Fill the string with 'x' to check that +c blank padding happens correctly. +c + call MPI_Init( ierr ) + do i = 1,MPI_MAX_ERROR_STRING + errs(i:i) = 'x' + end do + + call mpi_error_string(mpi_err_buffer, errs, reslen, reserr) + + if (errs(reslen+1:) .ne. ' ') then + print *,' Fortran strings are not correctly blank padded' + if (errs(reslen+1:reslen+1) .eq. char(0)) then +c +c Very strictly interpreted, an since an error string must be +c MPI_MAX_ERROR_STRING characters long, and the Fortran rules +c for such assignements is to blank pad them, there should not +c be a null character (C-like) in them. However, the standard +c is ambiguous on this. +c + print *, ' Fortran strings have bogus null character' + end if + else + print *,' Fortran strings are assigned ok' + end if + +c Check that the length was right + if (errs(reslen:reslen) .eq. 
' ') then + print *,' Length of result is wrong' + else + print *,' Length of result is correct' + end if + + call MPI_Finalize( ierr ) + + end diff --git a/teshsuite/smpi/mpich-test/env/errstringsf.std b/teshsuite/smpi/mpich-test/env/errstringsf.std new file mode 100644 index 0000000000..0b9177d028 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/errstringsf.std @@ -0,0 +1,4 @@ +*** Tests of Fortran error strings *** + Fortran strings are assigned ok + Length of result is correct +*** Tests of Fortran error strings *** diff --git a/teshsuite/smpi/mpich-test/env/getproc.c b/teshsuite/smpi/mpich-test/env/getproc.c new file mode 100644 index 0000000000..0553323abc --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/getproc.c @@ -0,0 +1,57 @@ +/* + * Test get processor name + * + */ +#include "mpi.h" +#include +#include +#include + +int main( int argc, char *argv[] ) +{ + char name[MPI_MAX_PROCESSOR_NAME+10]; + int resultlen; + int err = 0; + + MPI_Init( &argc, &argv ); + + memset( name, 0xFF, MPI_MAX_PROCESSOR_NAME+10 ); + resultlen = 0; + + MPI_Get_processor_name( name, &resultlen ); + /* Test that name has only printing characters */ + if (resultlen > MPI_MAX_PROCESSOR_NAME || resultlen <= 0) { + fprintf( stderr, "resultlen (%d) invalid\n", resultlen ); + err++; + } + if (!err) { + int i; + for (i=0; i +#include "mpi.h" +#include "test.h" +#include + +/* # define MPI_Wtime PMPI_Wtime */ + +/* + * This program tests that if MPI_WTIME_IS_GLOBAL is set, the timer + * IS in fact global. 
We have some suspicions about certain vendor systems + */ + +int CheckTime( void ); + +/* + * Check time tests that the timers are synchronized + */ +int CheckTime( void ) +{ + int rank, size, i; + double wtick, t1, t2, t3, delta_t; + int ntest=20; + MPI_Status status; + int err = 0; + double max_diff = 0.0; + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + if (rank == 0) { + wtick = MPI_Wtick(); +#ifdef DEBUG + printf( "Wtick is %lf\n", wtick ); +#endif + while (ntest--) { + for (i=1; i (t3 - t1 + wtick)) { + err++; + printf( "Process %d has %f; Process 0 has %f\n", + i, t2, 0.5 * (t1 + t3) ); + } + if (delta_t > max_diff) max_diff = delta_t; + } +#ifdef DEBUG + printf( "delta_t = %lf\n", delta_t ); +#endif + /* Release all process for the next pass */ + for (i=1; i 1) { + err++; + fprintf( stderr, "Invalid value for WTIME_IS_GLOBAL (got %d)\n", + vval ); + } + } + if (flag && vval) { + /* Wtime is global is true. Check it */ +#ifdef DEBUG + printf( "WTIME_IS_GLOBAL\n" ); +#endif + err += CheckTime(); + + /* Wait for 10 seconds */ + t1 = MPI_Wtime(); + while (MPI_Wtime() - t1 < 10.0) ; + + err += CheckTime(); + } + if (rank == 0) { + if (err > 0) { + printf( "Errors in MPI_WTIME_IS_GLOBAL\n" ); + } + else { + printf( " No Errors\n" ); + } + } + /* The SGI implementation of MPI sometimes fails to flush stdout + properly. This fflush will work around that bug. 
*/ + /* fflush( stdout ); */ + MPI_Finalize( ); + + return err; +} diff --git a/teshsuite/smpi/mpich-test/env/gtime.out b/teshsuite/smpi/mpich-test/env/gtime.out new file mode 100644 index 0000000000..58b9daf69f --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/gtime.out @@ -0,0 +1,3 @@ +*** WTIME_IS_GLOBAL *** + No Errors +*** WTIME_IS_GLOBAL *** diff --git a/teshsuite/smpi/mpich-test/env/gtime.stdo b/teshsuite/smpi/mpich-test/env/gtime.stdo new file mode 100644 index 0000000000..58b9daf69f --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/gtime.stdo @@ -0,0 +1,3 @@ +*** WTIME_IS_GLOBAL *** + No Errors +*** WTIME_IS_GLOBAL *** diff --git a/teshsuite/smpi/mpich-test/env/hang.c b/teshsuite/smpi/mpich-test/env/hang.c new file mode 100644 index 0000000000..36adc16f9f --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/hang.c @@ -0,0 +1,15 @@ + +#include +#include "mpi.h" + +int main( int argc, char **args ) +{ + int mytid; + printf("doing mpi_init\n"); + MPI_Init(&argc,&args); + + MPI_Comm_rank(MPI_COMM_WORLD,&mytid); + if (mytid < 2) MPI_Abort( MPI_COMM_WORLD, 1 ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/env/init.c b/teshsuite/smpi/mpich-test/env/init.c new file mode 100644 index 0000000000..477494cdd5 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/init.c @@ -0,0 +1,30 @@ +#include "mpi.h" +#include +#include "test.h" + +int main( int argc, char **argv ) +{ + int flag; + MPI_Status status; + int size, rank, partner, i; + + for (i=0; i<2; i++ ) { + MPI_Initialized(&flag); + if(flag == 0) + MPI_Init(&argc,&argv); + } + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + if (size != 2) { + printf( "Test must be run with 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + partner = (rank + 1) % size; + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_COMM_WORLD, &status ); + if (rank == 0) printf( " No Errors\n" ); + MPI_Finalize(); + return 0; +} diff 
--git a/teshsuite/smpi/mpich-test/env/init.out b/teshsuite/smpi/mpich-test/env/init.out new file mode 100644 index 0000000000..724f7fe28c --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/init.out @@ -0,0 +1,3 @@ +*** MPI_Initialized tests *** + No Errors +*** MPI_Initialized tests *** diff --git a/teshsuite/smpi/mpich-test/env/init.stdo b/teshsuite/smpi/mpich-test/env/init.stdo new file mode 100644 index 0000000000..724f7fe28c --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/init.stdo @@ -0,0 +1,3 @@ +*** MPI_Initialized tests *** + No Errors +*** MPI_Initialized tests *** diff --git a/teshsuite/smpi/mpich-test/env/runtests b/teshsuite/smpi/mpich-test/env/runtests new file mode 100755 index 0000000000..21bfe39e45 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/runtests @@ -0,0 +1,231 @@ +#! /bin/sh +# +# Run some of the tests. If any arguments are provided, pass them to the +# test programs. +# +# -mvhome is needed for the ANL SP, and is ignored by others + +device= +MPICH_VERSION= +STOPFILE=${MPITEST_STOPTEST:-"$HOME/.stopmpichtests"} +MAKE="make --no-print-directory" + +# +# Set mpirun to the name/path of the mpirun program +#FindMPIRUN +# +quiet=0 +runtests=1 +makeeach=0 +writesummaryfile=no +MAKE="make --no-print-directory" +for arg in "$@" ; do + case $arg in + -basedir=* ) + basedir=`echo $arg | sed 's/-basedir=//'` + ;; + -srcdir=* ) + srcdir=`echo $arg | sed 's/-srcdir=//'` + ;; + -checkonly ) + runtests=0 + ;; + -margs=*) + margs=`echo $arg | sed 's/-margs=//'` + args="$args $margs" + ;; + -summaryfile=*) + writesummaryfile=yes + summaryfile=`echo A$arg | sed 's/A-summaryfile=//'` + ;; + -small) + makeeach=1 + shift + ;; + -quiet) + shift + quiet=1 + ;; + -help|-u) + echo "runtests [-checkonly] [-margs='...']" + echo "run tests in this directory. If -checkonly set, just run" + echo "the differences check (do NO rerun the test programs)." + echo "If -margs is used, these options are passed to mpirun." 
+ echo "If -small is used, the examples are built, run, and deleted." + exit 1 + ;; + *) + if test -n "$arg" ; then + echo "runtests: Unknown argument ($arg)" + exit 1 + fi + ;; + esac +done + +mpirun=" ${basedir}/bin/smpirun -platform ${srcdir}/../../../../examples/msg/small_platform_with_routers.xml -hostfile ${srcdir}/../../hostfile --log=root.thres:critical" +# +# Load basic procedures +. ${srcdir}/../runbase + +# If the programs are not available, run make. +if [ ! -x timers -a $makeeach = 0 -a $runtests = 1 ] ; then + $MAKE +fi +# +testfiles="" +if [ $runtests = 1 ] ; then +#replaced sleep by smpi_sleep to avoid problems with real/simulation powers +#RunTest timers 1 "*** Timer tests ***" + +RunTest init 2 "*** MPI_Initialized tests ***" +# uses MPI_Attr_get +#RunTest baseattr 1 "*** Basic attributes ***" + +RunTest gtime 1 "*** WTIME_IS_GLOBAL ***" +#uses errhandlers +#RunTest errhand 1 "*** Tests of error handling ***" + +#RunTest errhand2 1 "*** Tests of error handling reference counting ***" + +# Do not run sigchk by default because it generates warnings that are not +# errors, and the basic test should be clean +if [ "$MPICH_TESTS_SIGCHK" = yes ] ; then + RunTest sigchk 1 "*** Tests of signals used ***" +fi + +RunTest getproc 1 "*** Test Get Processor Name ***" + +#OutTime +#testfiles="$testfiles cmdline.out" +#rm -f cmdline.out +#MakeExe cmdline +#echo "*** Tests of command line handling ***" +#echo "*** Tests of command line handling ***" >> cmdline.out +#$mpirun $args -np 2 ./cmdline a "b c" "d'e" 'f"g" h' $* > cmdline.out 2>&1 +#echo "*** Tests of command line handling ***" >> cmdline.out +#CleanExe cmdline +#if [ ! -s cmdline.stdo ] ; then +# cat >cmdline.stdo < /dev/null 2>&1 ; then + # This is a better choice than ps aux because it restricts the list of + # processes to those of the running user. 
The w is needed on some + # systems to get a long output for the command + PSPGM="ps -fwu $LOGNAME" +elif ps -fu $LOGNAME > /dev/null 2>&1 ; then + # This is a better choice than ps aux because it restricts the list of + # processes to those of the running user. + PSPGM="ps -fu $LOGNAME" +else + PSPGM="ps auxww" +fi +OutTime +testfiles="$testfiles aborttest.out" +rm -f aborttest.out aborttest.p1 aborttest.p2 aborttest.out2 +MakeExe aborttest +echo "*** Tests of MPI_Abort ***" +echo "*** Tests of MPI_Abort ***" >> aborttest.out +# We also remove lines that look like build lines in case this system +# is being used for other tests. This is incomplete, but +# it may help reduce false positives +$PSPGM | grep $LOGNAME | grep -v grep | grep -v runtests | \ + grep -v 'make ' | grep -v ' /ld' | grep -v 'gcc' | \ + grep -v 'collect2' > aborttest.p1 +# Send the output of aborttest to a file in case there is a problem +$mpirun $args -np 2 ./aborttest $* < /dev/null >> aborttest.out2 2>&1 +# allow some time for processes to exit +sleep 5 +$PSPGM | grep $LOGNAME | grep -v grep | grep -v runtests | \ + grep -v 'make ' | grep -v ' /ld' | grep -v 'gcc' | \ + grep -v 'collect2' > aborttest.p2 +# If there was a consistant format, we could process it ... 
+ndiff="`cat aborttest.p1 | wc -l` - `cat aborttest.p2 | wc -l`" +ndiff=`expr $ndiff` +if test "$ndiff" = 0 ; then + echo "All processes aborted" >> aborttest.out +else + echo "Suspicious processes remain" >> aborttest.out + #echo "Processes before" >> aborttest.out + #cat aborttest.p1 >> aborttest.out + #echo "Processes after" >> aborttest.out + #cat aborttest.p2 >> aborttest.out + echo "Differences are" >> aborttest.out + diff -b aborttest.p1 aborttest.p2 >> aborttest.out + if [ -s aborttest.out2 ] ; then + echo "Output from mpirun was" + cat aborttest.out2 >> aborttest.out + fi + # Try to kill them + $PSPGM | grep $LOGNAME | grep aborttest | awk '{ print "kill ", $2 }' | sh +fi +echo "*** Tests of MPI_Abort ***" >> aborttest.out +rm -f aborttest.p1 aborttest.p2 aborttest.out2 + +OutTime +rm -f aborttest.p1 aborttest.p2 aborttest.out2 +echo "*** Tests of MPI_Abort (alt) ***" +echo "*** Tests of MPI_Abort (alt) ***" >> aborttest.out +$PSPGM | grep $LOGNAME | grep -v grep | grep -v runtests | \ + grep -v 'make ' | grep -v ' /ld' | grep -v 'gcc' | \ + grep -v 'collect2' > aborttest.p1 +$mpirun $args -np 2 ./aborttest -altmaster $* >aborttest.out2 2>&1 +sleep 5 +$PSPGM | grep $LOGNAME | grep -v grep | grep -v runtests | \ + grep -v 'make ' | grep -v ' /ld' | grep -v 'gcc' | \ + grep -v 'collect2' > aborttest.p2 +ndiff="`cat aborttest.p1 | wc -l` - `cat aborttest.p2 | wc -l`" +ndiff=`expr $ndiff` +if test "$ndiff" = 0 ; then + echo "All processes aborted" >> aborttest.out +else + echo "Suspicious processes remain" >> aborttest.out + #echo "Processes before" >> aborttest.out + #cat aborttest.p1 >> aborttest.out + #echo "Processes after" >> aborttest.out + #cat aborttest.p2 >> aborttest.out + echo "Differences are" >> aborttest.out + diff -b aborttest.p1 aborttest.p2 >> aborttest.out + if [ -s aborttest.out2 ] ; then + echo "Output from mpirun was" + cat aborttest.out2 >> aborttest.out + fi + # Try to kill them + $PSPGM | grep $LOGNAME | grep aborttest | awk '{ print 
"kill ", $2 }' | sh +fi +echo "*** Tests of MPI_Abort (alt) ***" >> aborttest.out +rm -f aborttest.p1 aborttest.p2 aborttest.out2 +CleanExe aborttest + +# +# Run Fortran tests ONLY if Fortran available +if [ 0 = 1 ] ; then + + RunTest errstringsf 1 "*** Tests of Fortran error strings ***" + + RunTest getprocf 1 "*** Test MPI_Get_processor_name in Fortran ***" + + RunTest errhandf 1 "*** Tests of error handling in Fortran ***" +fi + +else + # Just run checks + testfiles=*.out + if test "$testfiles" eq "*.out" ; then + echo "No output files remain from previous test!" + exit 1 + fi +fi + +# +echo '*** Checking for differences from expected output ***' +CheckAllOutput env.diff +exit 0 diff --git a/teshsuite/smpi/mpich-test/env/sigchk.c b/teshsuite/smpi/mpich-test/env/sigchk.c new file mode 100644 index 0000000000..9294a05925 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/sigchk.c @@ -0,0 +1,201 @@ +/* This file provides routines to check for the use of signals by software */ + +#include +#include +#include "test.h" +#include "mpi.h" + +/* In order to quiet noisy C compilers, we provide ANSI-style prototypes + where possible */ +int SYiCheckSig ( FILE *, int, const char * ); +int SYCheckSignals ( FILE * ); + +#ifdef HAVE_SIGACTION +int SYiCheckSig( fp, sig, signame ) +FILE *fp; +int sig; +const char *signame; +{ +static int firstmsg = 1; +struct sigaction libsig; + +sigaction( sig, NULL, &libsig); +if (libsig.sa_handler != SIG_IGN && libsig.sa_handler != SIG_DFL) { + if (firstmsg) { + firstmsg = 0; + fprintf( fp, "Some signals have been changed. 
This is not an error\n\ +but rather is a warning that user programs should not redefine the signals\n\ +listed here\n" ); + } + fprintf( fp, "Signal %s has been changed\n", signame ); + return 1; + } +return 0; +} +#else +int SYiCheckSig( fp, sig, signame ) +FILE *fp; +int sig; +const char *signame; +{ +void (*oldsig)(); +static int firstmsg = 1; + +oldsig = signal(sig,SIG_IGN); +if (oldsig != SIG_IGN && oldsig != SIG_DFL) { + if (firstmsg) { + firstmsg = 0; + fprintf( fp, "Some signals have been changed. This is not an error\n\ +but rather is a warning that user programs should not redefine the signals\n\ +listed here\n" ); + } + fprintf( fp, "Signal %s has been changed\n", signame ); + return 1; + } +signal(sig,oldsig); +return 0; +} +#endif + +int SYCheckSignals( fp ) +FILE *fp; +{ +int ndiff = 0; + +#ifdef SIGHUP +ndiff += SYiCheckSig( fp, SIGHUP, "SIGHUP" ); +#endif + +#ifdef SIGINT +ndiff += SYiCheckSig( fp, SIGINT, "SIGINT" ); +#endif + +#ifdef SIGQUIT +ndiff += SYiCheckSig( fp, SIGQUIT, "SIGQUIT" ); +#endif + +#ifdef SIGILL +ndiff += SYiCheckSig( fp, SIGILL, "SIGILL" ); +#endif + +#ifdef SIGTRAP +ndiff += SYiCheckSig( fp, SIGTRAP, "SIGTRAP" ); +#endif + +#ifdef SIGIOT +ndiff += SYiCheckSig( fp, SIGIOT, "SIGIOT" ); +#endif + +#ifdef SIGABRT +ndiff += SYiCheckSig( fp, SIGABRT, "SIGABRT" ); +#endif + +#ifdef SIGEMT +ndiff += SYiCheckSig( fp, SIGEMT, "SIGEMT" ); +#endif + +#ifdef SIGFPE +ndiff += SYiCheckSig( fp, SIGFPE, "SIGFPE" ); +#endif + +#ifdef SIGBUS +ndiff += SYiCheckSig( fp, SIGBUS, "SIGBUS" ); +#endif + +#ifdef SIGSEGV +ndiff += SYiCheckSig( fp, SIGSEGV, "SIGSEGV" ); +#endif + +#ifdef SIGSYS +ndiff += SYiCheckSig( fp, SIGSYS, "SIGSYS" ); +#endif + +#ifdef SIGPIPE +ndiff += SYiCheckSig( fp, SIGPIPE, "SIGPIPE" ); +#endif + +#ifdef SIGALRM +ndiff += SYiCheckSig( fp, SIGALRM, "SIGALRM" ); +#endif + +#ifdef SIGTERM +ndiff += SYiCheckSig( fp, SIGTERM, "SIGTERM" ); +#endif + +#ifdef SIGURG +ndiff += SYiCheckSig( fp, SIGURG, "SIGURG" ); +#endif + +#ifdef 
SIGTSTP +ndiff += SYiCheckSig( fp, SIGTSTP, "SIGTSTP" ); +#endif + +#ifdef SIGCONT +ndiff += SYiCheckSig( fp, SIGCONT, "SIGCONT" ); +#endif + +#ifdef SIGCHLD +ndiff += SYiCheckSig( fp, SIGCHLD, "SIGCHLD" ); +#endif + +#ifdef SIGTTIN +ndiff += SYiCheckSig( fp, SIGTTIN, "SIGTTIN" ); +#endif + +#ifdef SIGTTOU +ndiff += SYiCheckSig( fp, SIGTTOU, "SIGTTOU" ); +#endif + +#ifdef SIGIO +ndiff += SYiCheckSig( fp, SIGIO, "SIGIO" ); +#endif + +#ifdef SIGPOLL +ndiff += SYiCheckSig( fp, SIGPOLL, "SIGPOLL" ); +#endif + +#ifdef SIGXCPU +ndiff += SYiCheckSig( fp, SIGXCPU, "SIGXCPU" ); +#endif + +#ifdef SIGXFSZ +ndiff += SYiCheckSig( fp, SIGXFSZ, "SIGXFSZ" ); +#endif + +#ifdef SIGVTALRM +ndiff += SYiCheckSig( fp, SIGVTALRM, "SIGVTALRM" ); +#endif + +#ifdef SIGPROF +ndiff += SYiCheckSig( fp, SIGPROF, "SIGPROF" ); +#endif + +#ifdef SIGWINCH +ndiff += SYiCheckSig( fp, SIGWINCH, "SIGWINCH" ); +#endif + +#ifdef SIGLOST +ndiff += SYiCheckSig( fp, SIGLOST, "SIGLOST" ); +#endif + +#ifdef SIGUSR1 +ndiff += SYiCheckSig( fp, SIGUSR1, "SIGUSR1" ); +#endif + +#ifdef SIGUSR2 +ndiff += SYiCheckSig( fp, SIGUSR2, "SIGUSR2" ); +#endif + +return ndiff; +} + + +int main( int argc, char **argv ) +{ + int err; + MPI_Init( &argc, &argv ); + err = SYCheckSignals( stdout ); + Test_Waitforall( ); + MPI_Finalize(); + return err; +} diff --git a/teshsuite/smpi/mpich-test/env/test.c b/teshsuite/smpi/mpich-test/env/test.c new file mode 100644 index 0000000000..e1b892598f --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/test.c @@ -0,0 +1,130 @@ +/* Procedures for recording and printing test results */ + +#include +#include +#include "test.h" +#include "mpi.h" + +#if defined(USE_STDARG) +#include +#endif + +static int tests_passed = 0; +static int tests_failed = 0; +static char failed_tests[255][81]; +static char suite_name[255]; +FILE *fileout = NULL; + +void Test_Init(suite, rank) +char *suite; +int rank; +{ + char filename[512]; + + sprintf(filename, "%s-%d.out", suite, rank); + strncpy(suite_name, suite, 
255); + fileout = fopen(filename, "w"); + if (!fileout) { + fprintf( stderr, "Could not open %s on node %d\n", filename, rank ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } +} + +#ifdef USE_STDARG +void Test_Printf(char *format, ...) +{ + va_list arglist; + + va_start(arglist, format); + (void)vfprintf(fileout, format, arglist); + va_end(arglist); +} +#else +void Test_Printf(va_alist) +va_dcl +{ + char *format; + va_list arglist; + + va_start(arglist); + format = va_arg(arglist, char *); + (void)vfprintf(fileout, format, arglist); + fflush(fileout); + va_end(arglist); +} +#endif + +void Test_Message(mess) +const char *mess; +{ + fprintf(fileout, "[%s]: %s\n", suite_name, mess); + fflush(fileout); +} + +void Test_Failed(test) +const char *test; +{ + fprintf(fileout, "[%s]: *** Test '%s' Failed! ***\n", suite_name, test); + strncpy(failed_tests[tests_failed], test, 81); + fflush(fileout); + tests_failed++; +} + +void Test_Passed(test) +const char *test; +{ +#ifdef VERBOSE + fprintf(fileout, "[%s]: Test '%s' Passed.\n", suite_name, test); + fflush(fileout); +#endif + tests_passed++; +} + +int Summarize_Test_Results() +{ +#ifdef VERBOSE + fprintf(fileout, "For test suite '%s':\n", suite_name); +#else + if (tests_failed > 0) +#endif + { + fprintf(fileout, "Of %d attempted tests, %d passed, %d failed.\n", + tests_passed + tests_failed, tests_passed, tests_failed); + } + if (tests_failed > 0) { + int i; + + fprintf(fileout, "*** Tests Failed:\n"); + for (i = 0; i < tests_failed; i++) + fprintf(fileout, "*** %s\n", failed_tests[i]); + } + return tests_failed; +} + +void Test_Finalize() +{ + fflush(fileout); + fclose(fileout); +} + +#include "mpi.h" +/* Wait for every process to pass through this point. This test is used + to make sure that all processes complete, and that a test "passes" because + it executed, not because it some process failed. 
+ */ +void Test_Waitforall( ) +{ +int m, one, myrank, n; + +MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); +MPI_Comm_size( MPI_COMM_WORLD, &n ); +one = 1; +MPI_Allreduce( &one, &m, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + +if (m != n) { + printf( "[%d] Expected %d processes to wait at end, got %d\n", myrank, + n, m ); + } +if (myrank == 0) + printf( " No Errors\n" ); +} diff --git a/teshsuite/smpi/mpich-test/env/test.h b/teshsuite/smpi/mpich-test/env/test.h new file mode 100644 index 0000000000..87bcde36ca --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/test.h @@ -0,0 +1,24 @@ +/* Header for testing procedures */ + +#ifndef _INCLUDED_TEST_H_ +#define _INCLUDED_TEST_H_ + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Test_Init (char *, int); +#ifdef USE_STDARG +void Test_Printf (char *, ...); +#else +/* No prototype */ +void Test_Printf(); +#endif +void Test_Message (const char *); +void Test_Failed (const char *); +void Test_Passed (const char *); +int Summarize_Test_Results (void); +void Test_Finalize (void); +void Test_Waitforall (void); + +#endif diff --git a/teshsuite/smpi/mpich-test/env/testerr.c b/teshsuite/smpi/mpich-test/env/testerr.c new file mode 100644 index 0000000000..fa6ead6830 --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/testerr.c @@ -0,0 +1,170 @@ +#include +#include "mpi.h" +/* Test error handling. This is MPICH specific */ +void Test_Send( void ); +void Test_Recv( void ); +void Test_Datatype( void ); +void Test_Errors_warn( MPI_Comm *comm, int *code, ... ); +void Test_Failed( const char * msg ); +void Test_Passed(const char * msg ); + +void Test_Errors_warn( MPI_Comm *comm, int *code, ... 
) +{ + char buf[MPI_MAX_ERROR_STRING+1]; + int result_len; + static int in_handler = 0; + + if (in_handler) return; + in_handler = 1; + /* Convert code to message and print */ + MPI_Error_string( *code, buf, &result_len ); + printf( "%s\n", buf ); + in_handler = 0; +} + +static int errcount = 0; +void Test_Failed( const char * msg ) +{ + printf( "FAILED: %s\n", msg ); + errcount++; +} +void Test_Passed(const char * msg ) +{ + printf( "Passed: %s\n", msg ); +} + +int main( int argc, char *argv[] ) +{ + MPI_Errhandler TEST_ERRORS_WARN; + + MPI_Init( &argc, &argv ); + + MPI_Errhandler_create( Test_Errors_warn, &TEST_ERRORS_WARN ); + MPI_Errhandler_set(MPI_COMM_WORLD, TEST_ERRORS_WARN); + + Test_Send(); + + Test_Recv(); + + Test_Datatype(); + + MPI_Finalize(); + + return 0; +} + +void Test_Send( void ) +{ + int buffer[100]; + int dest; + MPI_Datatype bogus_type = MPI_DATATYPE_NULL; + int myrank, size; + int large_tag, flag, small_tag; + int *tag_ubp; + + MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + dest = size - 1; + + if (MPI_Send(buffer, 20, MPI_INT, dest, + 1, MPI_COMM_NULL) == MPI_SUCCESS){ + Test_Failed("NULL Communicator Test"); + } + else + Test_Passed("NULL Communicator Test"); + + if (MPI_Send(buffer, -1, MPI_INT, dest, + 1, MPI_COMM_WORLD) == MPI_SUCCESS){ + Test_Failed("Invalid Count Test"); + } + else + Test_Passed("Invalid Count Test"); + + if (MPI_Send(buffer, 20, bogus_type, dest, + 1, MPI_COMM_WORLD) == MPI_SUCCESS){ + Test_Failed("Invalid Type Test"); + } + else + Test_Passed("Invalid Type Test"); + + small_tag = -1; + if (small_tag == MPI_ANY_TAG) small_tag = -2; + if (MPI_Send(buffer, 20, MPI_INT, dest, + small_tag, MPI_COMM_WORLD) == MPI_SUCCESS) { + Test_Failed("Invalid Tag Test"); + } + else + Test_Passed("Invalid Tag Test"); + + /* Form a tag that is too large */ + MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, (void **)&tag_ubp, &flag ); + if (!flag) Test_Failed("Could not get tag ub!" 
); + large_tag = *tag_ubp + 1; + if (large_tag > *tag_ubp) { + if (MPI_Send(buffer, 20, MPI_INT, dest, + -1, MPI_COMM_WORLD) == MPI_SUCCESS) { + Test_Failed("Invalid Tag Test"); + } + else + Test_Passed("Invalid Tag Test"); + } + + if (MPI_Send(buffer, 20, MPI_INT, 300, + 1, MPI_COMM_WORLD) == MPI_SUCCESS) { + Test_Failed("Invalid Destination Test"); + } + else + Test_Passed("Invalid Destination Test"); + + if (MPI_Send((void *)0, 10, MPI_INT, dest, + 1, MPI_COMM_WORLD) == MPI_SUCCESS){ + Test_Failed("Invalid Buffer Test (send)"); + } + else + Test_Passed("Invalid Buffer Test (send)"); +} + +void Test_Recv( void ) +{ +} + +void Test_Datatype( void ) +{ +} + +#ifdef FOO +void +ReceiverTest3() +{ + int buffer[20]; + MPI_Datatype bogus_type = MPI_DATATYPE_NULL; + MPI_Status status; + int myrank; + int *tag_ubp; + int large_tag, flag, small_tag; + + MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); + + if (myrank == 0) { + fprintf( stderr, +"There should be eight error messages about invalid communicator\n\ +count argument, datatype argument, tag, rank, buffer send and buffer recv\n" ); + } + + /* A receive test might not fail until it is triggered... 
*/ + if (MPI_Recv((void *)0, 10, MPI_INT, src, + 15, MPI_COMM_WORLD, &status) == MPI_SUCCESS){ + Test_Failed("Invalid Buffer Test (recv)"); + } + else + Test_Passed("Invalid Buffer Test (recv)"); + + /* Just to keep things happy, see if there is a message to receive */ + { int flag, ibuf[10]; + + MPI_Iprobe( src, 15, MPI_COMM_WORLD, &flag, &status ); + if (flag) + MPI_Recv( ibuf, 10, MPI_INT, src, 15, MPI_COMM_WORLD, &status ); + } + return; +#endif diff --git a/teshsuite/smpi/mpich-test/env/timers.c b/teshsuite/smpi/mpich-test/env/timers.c new file mode 100644 index 0000000000..810cbfe80d --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/timers.c @@ -0,0 +1,54 @@ +#include +#include +#include "mpi.h" +#include "test.h" +#ifdef HAVE_WINDOWS_H +#define sleep(a_) Sleep((a_)*1000) +#include +#endif + +int main( int argc, char **argv ) +{ + int err = 0; + double t1, t2; + double tick; + int i; + + MPI_Init( &argc, &argv ); + t1 = MPI_Wtime(); + t2 = MPI_Wtime(); + if (t2 - t1 > 0.1 || t2 - t1 < 0.0) { + err++; + fprintf( stderr, + "Two successive calls to MPI_Wtime gave strange results: (%f) (%f)\n", + t1, t2 ); + } +/* Try several times to get a 1 second sleep */ + for (i = 0; i<10; i++) { + t1 = MPI_Wtime(); + sleep(1); + t2 = MPI_Wtime(); + if (t2 - t1 >= (1.0 - 0.01) && t2 - t1 <= 5.0) break; + if (t2 - t1 > 5.0) i = 9; + } + if (i == 10) { + fprintf( stderr, + "Timer around sleep(1) did not give 1 second; gave %f\n", + t2 - t1 ); + fprintf( stderr, "If the sigchk check shows that SIGALRM is in use, \n\ +this indicates only that user programs must NOT use any system call or\n\ +library that uses SIGALRM. 
SIGALRM is not used by MPICH but may be used\n\ +by the software the MPICH uses to implement communication to other \n\ +processes\n" ); + err++; + } + tick = MPI_Wtick(); + if (tick > 1.0 || tick <= 0.0) { + err++; + fprintf( stderr, "MPI_Wtick gave a strange result: (%f)\n", tick ); + } + Test_Waitforall( ); + MPI_Finalize( ); + + return err; +} diff --git a/teshsuite/smpi/mpich-test/env/timertest.c b/teshsuite/smpi/mpich-test/env/timertest.c new file mode 100644 index 0000000000..a77d29005b --- /dev/null +++ b/teshsuite/smpi/mpich-test/env/timertest.c @@ -0,0 +1,35 @@ +#include +#include +#include "mpi.h" +#include "test.h" +#ifdef HAVE_WINDOWS_H +#define sleep(a_) Sleep((a_)*1000) +#include +#endif + +int main( int argc, char **argv ) +{ + double t1, t2; + double tick; + int i; + + MPI_Init( &argc, &argv ); + t1 = MPI_Wtime(); + t2 = MPI_Wtime(); + fprintf( stdout, "Two successive calls to MPI_Wtime gave: (%f) (%f)\n", + t1, t2 ); + fprintf( stdout, "Five approximations to one second:\n"); + for (i = 0; i < 5; i++) + { + t1 = MPI_Wtime(); + smpi_sleep(1); + t2 = MPI_Wtime(); + fprintf( stdout, "%f seconds\n", t2 - t1 ); + } + tick = MPI_Wtick(); + fprintf( stdout, "MPI_Wtick gave: (%10.8f)\n", tick ); + + MPI_Finalize( ); + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/profile/CMakeLists.txt b/teshsuite/smpi/mpich-test/profile/CMakeLists.txt new file mode 100644 index 0000000000..a36d7f1dca --- /dev/null +++ b/teshsuite/smpi/mpich-test/profile/CMakeLists.txt @@ -0,0 +1,45 @@ +cmake_minimum_required(VERSION 2.6) + +if(enable_smpi) + set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/bin/smpicc") + + set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") + set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1") + + include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi") + + add_executable(ptest ptest.c ) + 
add_executable(colluses colluses.c ) + + target_link_libraries(ptest m simgrid smpi ) + target_link_libraries(colluses m simgrid smpi ) + + set_target_properties(ptest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(colluses PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + +endif(enable_smpi) + +set(tesh_files + ${tesh_files} + ${CMAKE_CURRENT_SOURCE_DIR}/profile.tesh + PARENT_SCOPE + ) +set(xml_files + ${xml_files} + PARENT_SCOPE + ) +set(examples_src + ${examples_src} + ${CMAKE_CURRENT_SOURCE_DIR}/ptest.c + ${CMAKE_CURRENT_SOURCE_DIR}/colluses.c + PARENT_SCOPE + ) +set(bin_files + ${bin_files} + ${CMAKE_CURRENT_SOURCE_DIR}/../hostfile + PARENT_SCOPE + ) +set(txt_files + ${txt_files} + PARENT_SCOPE + ) diff --git a/teshsuite/smpi/mpich-test/profile/colluses.c b/teshsuite/smpi/mpich-test/profile/colluses.c new file mode 100644 index 0000000000..a76604e732 --- /dev/null +++ b/teshsuite/smpi/mpich-test/profile/colluses.c @@ -0,0 +1,81 @@ +/* + * This file checks to see if the collective routine MPI_Allreduce uses + * MPI_Send or MPI_Isend to implement the operation. It should use either + * a PMPI routine or a non-MPI routine. 
+ */ + +#include "mpi.h" +#include + +static int used_send = 0, + used_isend = 0, + used_sendrecv = 0; +int main( int argc, char *argv[] ) +{ + int in, out; + int rank; + int in_sends[3], out_sends[3]; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + in = 1; + MPI_Allreduce( &in, &out, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + + /* Now, see whether MPI routines were used */ + in_sends[0] = used_send; + in_sends[1] = used_isend; + in_sends[2] = used_sendrecv; + MPI_Reduce( in_sends, out_sends, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); + if (rank == 0) { + int errs = 0; + if (in_sends[0] > 0) { + printf( " Allreduce used MPI_SEND (%d)\n", in_sends[0] ); + errs++; + } + if (in_sends[1] > 0) { + printf( " Allreduce used MPI_ISEND (%d)\n", in_sends[1] ); + errs++; + } + if (in_sends[2] > 0) { + printf( " Allreduce used MPI_SENDRECV (%d)\n", in_sends[2] ); + errs++; + } + if (!errs) { + printf( " No Errors\n" ); + } + } + + MPI_Finalize( ); + return 0; +} + +/* + * Replacements for MPI_Send, Isend, and Sendrecv that detect their use + */ + +int MPI_Send( void *buf, int count, MPI_Datatype datatype, int dest, + int tag, MPI_Comm comm ) +{ + used_send++; + return PMPI_Send( buf, count, datatype, dest, tag, comm ); +} + +int MPI_Sendrecv( void *sendbuf, int sendcount, MPI_Datatype sendtype, + int dest, int sendtag, + void *recvbuf, int recvcount, MPI_Datatype recvtype, + int source, int recvtag, MPI_Comm comm, MPI_Status *status ) +{ + used_sendrecv++; + return PMPI_Sendrecv( sendbuf, sendcount, sendtype, dest, sendtag, + recvbuf, recvcount, recvtype, source, recvtag, + comm, status ); +} + +int MPI_Isend( void *buf, int count, MPI_Datatype datatype, int dest, int tag, + MPI_Comm comm, MPI_Request *request ) +{ + used_isend++; + return PMPI_Isend( buf, count, datatype, dest, tag, comm, request ); +} + diff --git a/teshsuite/smpi/mpich-test/profile/ptest.c b/teshsuite/smpi/mpich-test/profile/ptest.c new file mode 100644 index 0000000000..672742ce5f 
--- /dev/null +++ b/teshsuite/smpi/mpich-test/profile/ptest.c @@ -0,0 +1,21 @@ +#include +#include "mpi.h" + +/* Header for testing procedures */ + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + * This tests for the existence of MPI_Pcontrol; nothing more. + */ +int main( int argc, char **argv ) +{ + MPI_Init( &argc, &argv ); + + MPI_Pcontrol( 0 ); + printf( "Pcontrol test passed\n" ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/profile/ptest.std b/teshsuite/smpi/mpich-test/profile/ptest.std new file mode 100644 index 0000000000..70c49f8e69 --- /dev/null +++ b/teshsuite/smpi/mpich-test/profile/ptest.std @@ -0,0 +1,3 @@ +**** Testing MPI_Pcontrol **** +Pcontrol test passed +**** Testing MPI_Pcontrol **** diff --git a/teshsuite/smpi/mpich-test/profile/runtests b/teshsuite/smpi/mpich-test/profile/runtests new file mode 100755 index 0000000000..21dbc8a7be --- /dev/null +++ b/teshsuite/smpi/mpich-test/profile/runtests @@ -0,0 +1,106 @@ +#! /bin/sh +# +# Run some of the tests. If any arguments are provided, pass them to the +# test programs. +# +# -mvhome is needed for the ANL SP, and is ignored by others +args= +device= +top_srcdir=/home/degomme/Downloads/mpich-test +srcdir=/home/degomme/Downloads/mpich-test/profile +MPICH_VERSION= +STOPFILE=${MPITEST_STOPTEST:-"$HOME/.stopmpichtests"} +mpirun="smpirun -hostfile /home/degomme/Documents/hostfile_griffon -platform /home/degomme/Documents/griffon.xml --log=root.thres:critical" +MAKE="make --no-print-directory" +MPIRUNMVBACK="" +# +# Load basic procedures +. ${top_srcdir}/runbase +# +# Set mpirun to the name/path of the mpirun program +#FindMPIRUN +if [ -z "$mpirun" ] ; then + echo "No mpirun in path. Testing cannot proceed." + exit 1 +fi +# +# If the programs are not available, run make. 
+runtests=1 +makeeach=0 +writesummaryfile=no +quiet=0 +check_at_once=1 +MAKE="make --no-print-directory" +for arg in "$@" ; do + case $arg in + -checkonly) + runtests=0 + ;; + -margs=*) + margs=`echo $arg | sed 's/-margs=//'` + args="$args $margs" + ;; + -summaryfile=*) + writesummaryfile=yes + summaryfile=`echo A$arg | sed 's/A-summaryfile=//'` + ;; + -echo) + set -x + ;; + -small) + makeeach=1 + ;; + -quiet) + shift + quiet=1 + ;; + -atend) + check_at_once=0 + ;; + -help|-u) + echo "runtests [-checkonly] [-margs='...'] [-atend]" + echo "run tests in this directory. If -checkonly set, just run" + echo "the differences check (do NO rerun the test programs)." + echo "If -margs is used, these options are passed to mpirun." + echo "If -small is used, the examples are built, run, and deleted." + echo "If -atend is used, the success of the tests is checked only" + echo "at the end of the test rather than also after each test." + exit 1 + ;; + *) + if test -n "$arg" ; then + echo "Passing remaining arguments to programs ($*)" + break + fi + ;; + esac +done + +# If the programs are not available, run make. +if [ ! -x sendrecv -a $makeeach = 0 -a $runtests = 1 ] ; then + $MAKE default +fi + +testfiles="" +if [ $runtests = 1 ] ; then +echo '**** Testing MPI Profiling routines ****' +#just a test for MPI_Pcontrol, which is not implemented +RunTest ptest 1 "**** Testing MPI_Pcontrol ****" + +RunTest colluses 4 "**** Testing for PMPI in Allreduce ****" + +else + # Just run checks + testfiles=`echo *.out` + if test "$testfiles" = "*.out" ; then + echo "No output files remain from previous test!" 
+ exit 1 + fi +fi +# +echo '*** Checking for differences from expected output ***' +CheckAllOutput profile.diff +exit 0 + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt b/teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt new file mode 100644 index 0000000000..2ec4b5217d --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/CMakeLists.txt @@ -0,0 +1,404 @@ +cmake_minimum_required(VERSION 2.6) + +if(enable_smpi) + set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/bin/smpicc") + set(CMAKE_Fortran_COMPILER "${CMAKE_BINARY_DIR}/bin/smpiff") + set(CMAKE_Fortran_LINKER "${CMAKE_BINARY_DIR}/bin/smpicc") + set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}") + set(MPICH_FLAGS "-DHAVE_STDLIB_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STRING_H=1 -DUSE_STDARG=1 -DHAVE_LONG_DOUBLE=1 -DHAVE_PROTOTYPES=1 -DHAVE_SIGNAL_H=1 -DHAVE_SIGACTION=1 -DHAVE_SLEEP=1 -DHAVE_SYSCONF=1") + + include_directories("${CMAKE_HOME_DIRECTORY}/include/smpi") + add_executable(overtake overtake.c test.c) + add_executable(bsendtest bsendtest.c test.c) + add_executable(relrank relrank.c test.c) + add_executable(sendrecv_mpich sendrecv.c test.c) + add_executable(persistent persistent.c) + add_executable(flood flood.c test.c) + add_executable(flood2 flood2.c test.c) + add_executable(persist persist.c) + add_executable(persist2 persist2.c) + add_executable(sendrecv2 sendrecv2.c dtypes.c gcomm.c) + add_executable(sendrecv3 sendrecv3.c dtypes.c gcomm.c) + add_executable(sendrecv4 sendrecv4.c dtypes.c gcomm.c) + add_executable(irsend irsend.c dtypes.c gcomm.c) + add_executable(irsendinit irsendinit.c dtypes.c gcomm.c) + add_executable(longmsgs longmsgs.c) + add_executable(testsome testsome.c dtypes.c gcomm.c) + add_executable(testall_mpich testall.c dtypes.c gcomm.c) + add_executable(sndrcv sndrcv.c dtypes.c gcomm.c) + add_executable(sndrcvrep sndrcvrep.c) + add_executable(sndrcvrpl2 sndrcvrpl2.c) + add_executable(getelm getelm.c ) + add_executable(self self.c test.c) + add_executable(typelb typelb.c test.c) + 
add_executable(structlb structlb.c test.c) + add_executable(typeub typeub.c test.c) + add_executable(typeub2 typeub2.c) + add_executable(typeub3 typeub3.c) + add_executable(typecreate typecreate.c) + add_executable(reqcreate reqcreate.c) + add_executable(reqfree reqfree.c) + add_executable(isndrcv isndrcv.c test.c) + add_executable(nullproc nullproc.c) + add_executable(nullproc2 nullproc2.c) + add_executable(trunc trunc.c) + add_executable(truncmult truncmult.c) + add_executable(typetest typetest.c test.c) + add_executable(typebase typebase.c) + add_executable(ssendtest ssendtest.c test.c) + add_executable(ssendtest2 ssendtest2.c) + add_executable(issendtest issendtest.c test.c) + add_executable(issend2 issend2.c) + add_executable(issendinit issendinit.c test.c) + add_executable(testtypes testtypes.c test.c) + add_executable(dataalign dataalign.c) + add_executable(dtyperecv dtyperecv.c) + add_executable(dtypelife dtypelife.c test.c) + add_executable(htmsg htmsg.c) + add_executable(isendtest isendtest.c) + add_executable(third third.c test.c) + add_executable(fourth fourth.c test.c) + add_executable(fifth fifth.c test.c) + add_executable(sixth sixth.c test.c) + add_executable(probe probe.c test.c) + add_executable(nblock nblock.c ) + add_executable(sendmany sendmany.c) + add_executable(order order.c) + add_executable(pack pack.c) + add_executable(probe1 probe1.c test.c) + add_executable(testtest1 testtest1.c test.c) + add_executable(hvectest hvectest.c test.c) + add_executable(hvectest2 hvectest2.c test.c) + add_executable(hvec hvec.c) + add_executable(hindexed hindexed.c) + add_executable(irecvtest irecvtest.c test.c) + add_executable(nbtest nbtest.c) + add_executable(waitany waitany.c) + add_executable(waitall waitall.c) + add_executable(waitall2 waitall2.c) + add_executable(waitall3 waitall3.c) + add_executable(waitall4 waitall4.c) + add_executable(commit commit.c) + add_executable(cancel cancel.c) + add_executable(cancel2 cancel2.c) + add_executable(cancel3 
cancel3.c) + add_executable(cancelmessages cancelmessages.c) + add_executable(cancelibm cancelibm.c) + add_executable(cancelissend cancelissend.c) + add_executable(sendorder sendorder.c) + add_executable(exittest exittest.c) + add_executable(selfvsworld selfvsworld.c) + if(SMPI_F2C) +# add_executable(secondf secondf.f) +# add_executable(allpair2 allpair2.f) +# add_executable(allpair allpair.f) +# add_executable(isendf isendf.f) +# add_executable(pingpong_f pingpong.f) +# add_executable(send1 send1.f) +# add_executable(sendfort sendfort.f) +# add_executable(structf structf.f) +# add_executable(typebasef typebasef.f) + add_executable(sendcplx sendcplx.f) + endif(SMPI_F2C) + + target_link_libraries(overtake m simgrid smpi ) + target_link_libraries(bsendtest m simgrid smpi ) + target_link_libraries(relrank m simgrid smpi ) + target_link_libraries(sendrecv_mpich m simgrid smpi ) + target_link_libraries(persistent m simgrid smpi ) + target_link_libraries(flood m simgrid smpi ) + target_link_libraries(flood2 m simgrid smpi ) + target_link_libraries(persist m simgrid smpi ) + target_link_libraries(persist2 m simgrid smpi ) + target_link_libraries(sendrecv2 m simgrid smpi ) + target_link_libraries(sendrecv3 m simgrid smpi ) + target_link_libraries(sendrecv4 m simgrid smpi ) + target_link_libraries(irsend m simgrid smpi ) + target_link_libraries(irsendinit m simgrid smpi ) + target_link_libraries(longmsgs m simgrid smpi ) + target_link_libraries(testsome m simgrid smpi ) + target_link_libraries(testall_mpich m simgrid smpi ) + target_link_libraries(sndrcv m simgrid smpi ) + target_link_libraries(sndrcvrep m simgrid smpi ) + target_link_libraries(sndrcvrpl2 m simgrid smpi ) + target_link_libraries(getelm m simgrid smpi ) + target_link_libraries(self m simgrid smpi ) + target_link_libraries(typelb m simgrid smpi ) + target_link_libraries(structlb m simgrid smpi ) + target_link_libraries(typeub m simgrid smpi ) + target_link_libraries(typeub2 m simgrid smpi ) + 
target_link_libraries(typeub3 m simgrid smpi ) + target_link_libraries(typecreate m simgrid smpi ) + target_link_libraries(reqcreate m simgrid smpi ) + target_link_libraries(reqfree m simgrid smpi ) + target_link_libraries(isndrcv m simgrid smpi ) + target_link_libraries(nullproc m simgrid smpi ) + target_link_libraries(nullproc2 m simgrid smpi ) + target_link_libraries(trunc m simgrid smpi ) + target_link_libraries(truncmult m simgrid smpi ) + target_link_libraries(typetest m simgrid smpi ) + target_link_libraries(typebase m simgrid smpi ) + target_link_libraries(ssendtest m simgrid smpi ) + target_link_libraries(ssendtest2 m simgrid smpi ) + target_link_libraries(issendtest m simgrid smpi ) + target_link_libraries(issend2 m simgrid smpi ) + target_link_libraries(issendinit m simgrid smpi ) + target_link_libraries(testtypes m simgrid smpi ) + target_link_libraries(dataalign m simgrid smpi ) + target_link_libraries(dtyperecv m simgrid smpi ) + target_link_libraries(dtypelife m simgrid smpi ) + target_link_libraries(htmsg m simgrid smpi ) + target_link_libraries(isendtest m simgrid smpi ) + target_link_libraries(third m simgrid smpi ) + target_link_libraries(fourth m simgrid smpi ) + target_link_libraries(fifth m simgrid smpi ) + target_link_libraries(sixth m simgrid smpi ) + target_link_libraries(probe m simgrid smpi ) + target_link_libraries(nblock m simgrid smpi ) + target_link_libraries(sendmany m simgrid smpi ) + target_link_libraries(order m simgrid smpi ) + target_link_libraries(pack m simgrid smpi ) + target_link_libraries(probe1 m simgrid smpi ) + target_link_libraries(testtest1 m simgrid smpi ) + target_link_libraries(hvectest m simgrid smpi ) + target_link_libraries(hvectest2 m simgrid smpi ) + target_link_libraries(hvec m simgrid smpi ) + target_link_libraries(hindexed m simgrid smpi ) + target_link_libraries(irecvtest m simgrid smpi ) + target_link_libraries(nbtest m simgrid smpi ) + target_link_libraries(waitany m simgrid smpi ) + 
target_link_libraries(waitall m simgrid smpi ) + target_link_libraries(waitall2 m simgrid smpi ) + target_link_libraries(waitall3 m simgrid smpi ) + target_link_libraries(waitall4 m simgrid smpi ) + target_link_libraries(commit m simgrid smpi ) + target_link_libraries(cancel m simgrid smpi ) + target_link_libraries(cancel2 m simgrid smpi ) + target_link_libraries(cancel3 m simgrid smpi ) + target_link_libraries(cancelmessages m simgrid smpi ) + target_link_libraries(cancelibm m simgrid smpi ) + target_link_libraries(cancelissend m simgrid smpi ) + target_link_libraries(sendorder m simgrid smpi ) + target_link_libraries(exittest m simgrid smpi ) + target_link_libraries(selfvsworld m simgrid smpi ) + if(SMPI_F2C) +# target_link_libraries(secondf m simgrid smpi f2c) +# target_link_libraries(allpair2 m simgrid smpi f2c) +# target_link_libraries(allpair m simgrid smpi f2c) +# target_link_libraries(isendf m simgrid smpi f2c) +# target_link_libraries(pingpong_f m simgrid smpi f2c) +# target_link_libraries(send1 m simgrid smpi f2c) + target_link_libraries(sendcplx m simgrid smpi f2c) +# target_link_libraries(sendfort m simgrid smpi f2c) +# target_link_libraries(structf m simgrid smpi f2c) +# target_link_libraries(typebasef m simgrid smpi f2c) + endif(SMPI_F2C) + + set_target_properties(overtake PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(bsendtest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(relrank PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendrecv_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(persistent PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(flood PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(flood2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(persist PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(persist2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendrecv2 
PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendrecv3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendrecv4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(irsend PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(irsendinit PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(longmsgs PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(testsome PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(testall_mpich PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sndrcv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sndrcvrep PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sndrcvrpl2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(getelm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(self PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(typelb PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(structlb PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(typeub PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(typeub2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(typeub3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(typecreate PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(reqcreate PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(reqfree PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(isndrcv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(nullproc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(nullproc2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(trunc PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(truncmult PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(typetest PROPERTIES COMPILE_FLAGS 
"${MPICH_FLAGS}") + set_target_properties(typebase PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(ssendtest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(ssendtest2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(issendtest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(issend2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(issendinit PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(testtypes PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(dataalign PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(dtyperecv PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(dtypelife PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(htmsg PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(isendtest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(third PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(fourth PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(fifth PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sixth PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(probe PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(nblock PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendmany PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(order PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(pack PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(probe1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(testtest1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(hvectest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(hvectest2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(hvec PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + 
set_target_properties(hindexed PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(irecvtest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(nbtest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(waitany PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(waitall PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(waitall2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(waitall3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(waitall4 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(commit PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(cancel PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(cancel2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(cancel3 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(cancelmessages PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(cancelibm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(cancelissend PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendorder PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(exittest PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(selfvsworld PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + if(SMPI_F2C) +# set_target_properties(secondf PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}" ) +# set_target_properties(allpair2 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}" ) +# set_target_properties(allpair PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}" ) +# set_target_properties(isendf PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}" ) +# set_target_properties(pingpong_f PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") +# set_target_properties(send1 PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + set_target_properties(sendcplx PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") +# set_target_properties(sendfort PROPERTIES COMPILE_FLAGS 
"${MPICH_FLAGS}") +# set_target_properties(structf PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") +# set_target_properties(typebasef PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}") + endif(SMPI_F2C) +endif(enable_smpi) + +set(tesh_files + ${tesh_files} + ${CMAKE_CURRENT_SOURCE_DIR}/coll.tesh + PARENT_SCOPE + ) +set(xml_files + ${xml_files} + PARENT_SCOPE + ) +set(examples_src + ${examples_src} + ${CMAKE_CURRENT_SOURCE_DIR}/overtake.c + ${CMAKE_CURRENT_SOURCE_DIR}/bsendtest.c + ${CMAKE_CURRENT_SOURCE_DIR}/relrank.c + ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv.c + ${CMAKE_CURRENT_SOURCE_DIR}/persistent.c + ${CMAKE_CURRENT_SOURCE_DIR}/flood.c + ${CMAKE_CURRENT_SOURCE_DIR}/flood2.c + ${CMAKE_CURRENT_SOURCE_DIR}/persist.c + ${CMAKE_CURRENT_SOURCE_DIR}/persist2.c + ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv2.c + ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv3.c + ${CMAKE_CURRENT_SOURCE_DIR}/sendrecv4.c + ${CMAKE_CURRENT_SOURCE_DIR}/irsend.c + ${CMAKE_CURRENT_SOURCE_DIR}/irsendinit.c + ${CMAKE_CURRENT_SOURCE_DIR}/longmsgs.c + ${CMAKE_CURRENT_SOURCE_DIR}/testsome.c + ${CMAKE_CURRENT_SOURCE_DIR}/testall.c + ${CMAKE_CURRENT_SOURCE_DIR}/sndrcv.c + ${CMAKE_CURRENT_SOURCE_DIR}/sndrcvrep.c + ${CMAKE_CURRENT_SOURCE_DIR}/sndrcvrpl2.c + ${CMAKE_CURRENT_SOURCE_DIR}/getelm.c + ${CMAKE_CURRENT_SOURCE_DIR}/self.c + ${CMAKE_CURRENT_SOURCE_DIR}/typelb.c + ${CMAKE_CURRENT_SOURCE_DIR}/structlb.c + ${CMAKE_CURRENT_SOURCE_DIR}/typeub.c + ${CMAKE_CURRENT_SOURCE_DIR}/typeub2.c + ${CMAKE_CURRENT_SOURCE_DIR}/typeub3.c + ${CMAKE_CURRENT_SOURCE_DIR}/typecreate.c + ${CMAKE_CURRENT_SOURCE_DIR}/reqcreate.c + ${CMAKE_CURRENT_SOURCE_DIR}/reqfree.c + ${CMAKE_CURRENT_SOURCE_DIR}/isndrcv.c + ${CMAKE_CURRENT_SOURCE_DIR}/nullproc.c + ${CMAKE_CURRENT_SOURCE_DIR}/nullproc2.c + ${CMAKE_CURRENT_SOURCE_DIR}/trunc.c + ${CMAKE_CURRENT_SOURCE_DIR}/truncmult.c + ${CMAKE_CURRENT_SOURCE_DIR}/typetest.c + ${CMAKE_CURRENT_SOURCE_DIR}/typebase.c + ${CMAKE_CURRENT_SOURCE_DIR}/ssendtest.c + ${CMAKE_CURRENT_SOURCE_DIR}/ssendtest2.c + 
+  ${CMAKE_CURRENT_SOURCE_DIR}/issendtest.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/issend2.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/issendinit.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/testtypes.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/dataalign.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/dtyperecv.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/dtypelife.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/htmsg.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/isendtest.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/third.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/fourth.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/fifth.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/sixth.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/probe.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/nblock.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/sendmany.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/order.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/pack.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/probe1.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/testtest1.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/hvectest.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/hvectest2.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/hvec.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/hindexed.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/irecvtest.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/nbtest.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/waitany.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/waitall.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/waitall2.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/waitall3.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/waitall4.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/commit.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/cancel.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/cancel2.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/cancel3.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/cancelmessages.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/cancelibm.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/cancelissend.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/sendorder.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/exittest.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/selfvsworld.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/test.c
+  ${CMAKE_CURRENT_SOURCE_DIR}/test.h
+  ${CMAKE_CURRENT_SOURCE_DIR}/secondf.f
+  ${CMAKE_CURRENT_SOURCE_DIR}/allpair2.f
+  ${CMAKE_CURRENT_SOURCE_DIR}/allpair.f
+  ${CMAKE_CURRENT_SOURCE_DIR}/isendf.f
+  ${CMAKE_CURRENT_SOURCE_DIR}/pingpong.f
+  ${CMAKE_CURRENT_SOURCE_DIR}/send1.f
${CMAKE_CURRENT_SOURCE_DIR}/sendcplx.f + ${CMAKE_CURRENT_SOURCE_DIR}/sendfort.f + ${CMAKE_CURRENT_SOURCE_DIR}/structf.f + ${CMAKE_CURRENT_SOURCE_DIR}/typebasef.f + PARENT_SCOPE + ) +set(bin_files + ${bin_files} + ${CMAKE_CURRENT_SOURCE_DIR}/../hostfile + PARENT_SCOPE + ) +set(txt_files + ${txt_files} + PARENT_SCOPE + ) diff --git a/teshsuite/smpi/mpich-test/pt2pt/README b/teshsuite/smpi/mpich-test/pt2pt/README new file mode 100644 index 0000000000..9251e04a78 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/README @@ -0,0 +1,36 @@ +These files are meant to test the point-to-point layer of MPI. +The runtests script is now setup to use mpirun (from the toplevel util +directory. MPIRUN is installed in $PREFIX/bin if you make install). Of couse, +mpirun is still being ported to various different machines. + +All of the following programs use 2 nodes : + +Their output is generally in -.out + +sendrecv - exercises sends and receives of all basic types with + large variations in sizes of the messages sent. + +isndrcv - Same as sndrcv, but uses nonblocking pt2pt calls. + +overtake - tests that messages sent with send and isend are not + overtaking (the standard requires that they are not) + by sending a large message followed immediately by several small + ones. + +testtypes - Tests the datatypes codes by making several hairy types + that should be compatible and trying them out. + +Other test programs (which may use more than 2 nodes...) - +probe and probe1 do minor tests on MPI_probe. +hvectest tests vector sends and receives (with derived vector datatypes) +etc... + +If you need any help with these programs or find any bugs or make any +improvements, let me know. I have some improvements in mind for the testing +harness (test.[ch]), because I'm not quite perfectly happy +with it yet... :-) Enjoy. 
+ + + Patrick Bridges + bridges@mcs.anl.gov + patrick@CS.MsState.Edu diff --git a/teshsuite/smpi/mpich-test/pt2pt/allpair.f b/teshsuite/smpi/mpich-test/pt2pt/allpair.f new file mode 100644 index 0000000000..dfd2df684a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/allpair.f @@ -0,0 +1,767 @@ +c +c This program was inspired by a bug report from +c fsset@corelli.lerc.nasa.gov (Scott Townsend) +c The original version of this program was submitted by email to +c mpi-bugs and is in the directory mpich/bugs/ssend (not distributed +c with the distribution). This program was modified by William +c Gropp (to correct a few errors and make more consistent with the +c structure of the test programs in the examples/test/pt2pt directory. + +c A C version of this program is in allpairc.c +c + program allpair + include 'mpif.h' + integer ierr + + call MPI_Init(ierr) + + call test_pair + + call MPI_Finalize(ierr) + + end + +c------------------------------------------------------------------------------ +c +c Simple pair communication exercises. +c +c------------------------------------------------------------------------------ + subroutine test_pair + include 'mpif.h' + integer TEST_SIZE + parameter (TEST_SIZE=2000) + + integer ierr, prev, next, count, tag, index, i, outcount, + . requests(2), indices(2), rank, size, + . status(MPI_STATUS_SIZE), statuses(MPI_STATUS_SIZE,2) + integer dupcom + logical flag + real send_buf( TEST_SIZE ), recv_buf ( TEST_SIZE ) + + call MPI_Comm_rank( MPI_COMM_WORLD, rank, ierr ) + call MPI_Comm_size( MPI_COMM_WORLD, size, ierr ) + if (size .ne. 2) then + print *, 'Allpair test requires exactly 2 processes' + call MPI_Abort( MPI_COMM_WORLD, 1, ierr ) + endif +C print *, ' about to do dup' + call MPI_Comm_dup( MPI_COMM_WORLD, dupcom, ierr ) +C print *, ' did dup' + next = rank + 1 + if (next .ge. size) next = 0 + + prev = rank - 1 + if (prev .lt. 0) prev = size - 1 +c +c Normal sends +c + if (rank .eq. 
0) then + print *, ' Send' + end if + + tag = 1123 + count = TEST_SIZE / 5 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Send(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'send and recv' ) + else + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'send and recv' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if +c +c Ready sends. Note that we must insure that the receive is posted +c before the rsend; this requires using Irecv. +c + if (rank .eq. 0) then + print *, ' Rsend' + end if + + tag = 1456 + count = TEST_SIZE / 3 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Recv( MPI_BOTTOM, 0, MPI_INTEGER, next, tag, + . MPI_COMM_WORLD, status, ierr ) + + call MPI_Rsend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + + call MPI_Probe(MPI_ANY_SOURCE, tag, + . MPI_COMM_WORLD, status, ierr) + + if (status(MPI_SOURCE) .ne. prev) then + print *, 'Incorrect source, expected', prev, + . ', got', status(MPI_SOURCE) + end if + + if (status(MPI_TAG) .ne. tag) then + print *, 'Incorrect tag, expected', tag, + . ', got', status(MPI_TAG) + end if + + call MPI_Get_count(status, MPI_REAL, i, ierr) + + if (i .ne. count) then + print *, 'Incorrect count, expected', count, + . ', got', i + end if + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 
'rsend and recv' ) + + else + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + call MPI_Send( MPI_BOTTOM, 0, MPI_INTEGER, next, tag, + . MPI_COMM_WORLD, ierr ) + call MPI_Wait( requests(1), status, ierr ) + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'rsend and recv' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if +c +c Synchronous sends +c + if (rank .eq. 0) then + print *, ' Ssend' + end if + + tag = 1789 + count = TEST_SIZE / 3 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Iprobe(MPI_ANY_SOURCE, tag, + . MPI_COMM_WORLD, flag, status, ierr) + + if (flag) then + print *, 'Iprobe succeeded! source', status(MPI_SOURCE), + . ', tag', status(MPI_TAG) + end if + + call MPI_Ssend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + + do while (.not. flag) + call MPI_Iprobe(MPI_ANY_SOURCE, tag, + . MPI_COMM_WORLD, flag, status, ierr) + end do + + if (status(MPI_SOURCE) .ne. prev) then + print *, 'Incorrect source, expected', prev, + . ', got', status(MPI_SOURCE) + end if + + if (status(MPI_TAG) .ne. tag) then + print *, 'Incorrect tag, expected', tag, + . ', got', status(MPI_TAG) + end if + + call MPI_Get_count(status, MPI_REAL, i, ierr) + + if (i .ne. count) then + print *, 'Incorrect count, expected', count, + . ', got', i + end if + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, + $ TEST_SIZE, 'ssend and recv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'ssend and recv' ) + + call MPI_Ssend(recv_buf, count, MPI_REAL, next, tag, + . 
MPI_COMM_WORLD, ierr) + end if +c +c Nonblocking normal sends +c + if (rank .eq. 0) then + print *, ' Isend' + end if + + tag = 2123 + count = TEST_SIZE / 5 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Isend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + call MPI_Waitall(2, requests, statuses, ierr) + + call rq_check( requests, 2, 'isend and irecv' ) + + call msg_check( recv_buf, prev, tag, count, statuses(1,1), + $ TEST_SIZE, 'isend and irecv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'isend and irecv' ) + + call MPI_Isend(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Wait(requests(1), status, ierr) + + call rq_check( requests(1), 1, 'isend and irecv' ) + + end if +c +c Nonblocking ready sends +c + if (rank .eq. 0) then + print *, ' Irsend' + end if + + tag = 2456 + count = TEST_SIZE / 3 + + call clear_test_data(recv_buf,TEST_SIZE) + +c +c This test needs work for comm_size > 2 +c + if (rank .eq. 0) then + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . dupcom, status, ierr ) + + call MPI_Irsend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + index = -1 + do while (index .ne. 
1) + call MPI_Waitany(2, requests, index, statuses, ierr) + end do + + call rq_check( requests(1), 1, 'irsend and irecv' ) + + call msg_check( recv_buf, prev, tag, count, statuses, + $ TEST_SIZE, 'irsend and irecv' ) + + else + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . dupcom, status, ierr ) + + flag = .FALSE. + do while (.not. flag) + call MPI_Test(requests(1), flag, status, ierr) + end do + + call rq_check( requests, 1, 'irsend and irecv (test)' ) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'irsend and irecv' ) + + call MPI_Irsend(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Waitall(1, requests, statuses, ierr) + + call rq_check( requests, 1, 'irsend and irecv' ) + + end if + +c +c Nonblocking synchronous sends +c + if (rank .eq. 0) then + print *, ' Issend' + end if + + tag = 2789 + count = TEST_SIZE / 3 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Issend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + flag = .FALSE. + do while (.not. flag) + call MPI_Testall(2, requests, flag, statuses, ierr) +C print *, 'flag = ', flag + end do + + call rq_check( requests, 2, 'issend and irecv (testall)' ) + + call msg_check( recv_buf, prev, tag, count, statuses(1,1), + $ TEST_SIZE, 'issend and recv (testall)' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'issend and recv' ) + + call MPI_Issend(recv_buf, count, MPI_REAL, next, tag, + . 
MPI_COMM_WORLD, requests(1), ierr) + + flag = .FALSE. + do while (.not. flag) + call MPI_Testany(1, requests(1), index, flag, + . statuses(1,1), ierr) +c print *, 'flag = ', flag + end do + + call rq_check( requests, 1, 'issend and recv (testany)' ) + + end if +c +c Persistent normal sends +c + if (rank .eq. 0) then + print *, ' Send_init' + end if + + tag = 3123 + count = TEST_SIZE / 5 + + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Send_init(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Recv_init(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(2), ierr) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Startall(2, requests, ierr) + call MPI_Waitall(2, requests, statuses, ierr) + + call msg_check( recv_buf, prev, tag, count, statuses(1,2), + $ TEST_SIZE, 'persistent send/recv' ) + + else + + call MPI_Start(requests(2), ierr) + call MPI_Wait(requests(2), status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + * 'persistent send/recv') + + do i = 1,count + send_buf(i) = recv_buf(i) + end do + + call MPI_Start(requests(1), ierr) + call MPI_Wait(requests(1), status, ierr) + + end if + + call MPI_Request_free(requests(1), ierr) + call MPI_Request_free(requests(2), ierr) +c +c Persistent ready sends +c + if (rank .eq. 0) then + print *, ' Rsend_init' + end if + + tag = 3456 + count = TEST_SIZE / 3 + + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Rsend_init(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Recv_init(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(2), ierr) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Recv( MPI_BOTTOM, 0, MPI_INTEGER, next, tag, + . MPI_COMM_WORLD, status, ierr ) + + call MPI_Startall(2, requests, ierr) + + index = -1 + + do while (index .ne. 
2) + call MPI_Waitsome(2, requests, outcount, + . indices, statuses, ierr) + do i = 1,outcount + if (indices(i) .eq. 2) then + call msg_check( recv_buf, prev, tag, count, + $ statuses(1,i), TEST_SIZE, 'waitsome' ) + index = 2 + end if + end do + end do + + else + + call MPI_Start(requests(2), ierr) + + call MPI_Send( MPI_BOTTOM, 0, MPI_INTEGER, next, tag, + . MPI_COMM_WORLD, ierr ) + + flag = .FALSE. + do while (.not. flag) + call MPI_Test(requests(2), flag, status, ierr) + end do + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + * 'test' ) + + do i = 1,count + send_buf(i) = recv_buf(i) + end do + + call MPI_Start(requests(1), ierr) + call MPI_Wait(requests(1), status, ierr) + + end if + + call MPI_Request_free(requests(1), ierr) + call MPI_Request_free(requests(2), ierr) +c +c Persistent synchronous sends +c + if (rank .eq. 0) then + print *, ' Ssend_init' + end if + + tag = 3789 + count = TEST_SIZE / 3 + + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Ssend_init(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + call MPI_Recv_init(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Startall(2, requests, ierr) + + index = -1 + do while (index .ne. 1) + call MPI_Testsome(2, requests, outcount, + . indices, statuses, ierr) + do i = 1,outcount + if (indices(i) .eq. 1) then + call msg_check( recv_buf, prev, tag, count, + $ statuses(1,i), TEST_SIZE, 'testsome' ) + index = 1 + end if + end do + end do + else + + call MPI_Start(requests(1), ierr) + + flag = .FALSE. + do while (.not. flag) + call MPI_Testany(1, requests(1), index, flag, + . 
statuses(1,1), ierr) + end do + call msg_check( recv_buf, prev, tag, count, statuses(1,1), + $ TEST_SIZE, 'testany' ) + + do i = 1,count + send_buf(i) = recv_buf(i) + end do + + call MPI_Start(requests(2), ierr) + call MPI_Wait(requests(2), status, ierr) + + end if + + call MPI_Request_free(requests(1), ierr) + call MPI_Request_free(requests(2), ierr) +c +c Send/receive. +c + if (rank .eq. 0) then + print *, ' Sendrecv' + end if + + tag = 4123 + count = TEST_SIZE / 5 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Sendrecv(send_buf, count, MPI_REAL, next, tag, + . recv_buf, count, MPI_REAL, prev, tag, + . MPI_COMM_WORLD, status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'sendrecv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'recv/send' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if +c +c Send/receive replace. +c + if (rank .eq. 0) then + print *, ' Sendrecv_replace' + end if + + tag = 4456 + count = TEST_SIZE / 3 + + if (rank .eq. 0) then + + call init_test_data(recv_buf, TEST_SIZE) + + do 11 i = count+1,TEST_SIZE + recv_buf(i) = 0.0 + 11 continue + + call MPI_Sendrecv_replace(recv_buf, count, MPI_REAL, + . next, tag, prev, tag, + . MPI_COMM_WORLD, status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'sendrecvreplace' ) + + else + + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'recv/send for replace' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . 
MPI_COMM_WORLD, ierr) + end if + + call MPI_Comm_free( dupcom, ierr ) + return + end + +c------------------------------------------------------------------------------ +c +c Check for correct source, tag, count, and data in test message. +c +c------------------------------------------------------------------------------ + subroutine msg_check( recv_buf, source, tag, count, status, n, + * name ) + include 'mpif.h' + integer n + real recv_buf(n) + integer source, tag, count, rank, status(MPI_STATUS_SIZE) + character*(*) name + + integer ierr, recv_src, recv_tag, recv_count + + recv_src = status(MPI_SOURCE) + recv_tag = status(MPI_TAG) + call MPI_Comm_rank( MPI_COMM_WORLD, rank, ierr ) + call MPI_Get_count(status, MPI_REAL, recv_count, ierr) + + if (recv_src .ne. source) then + print *, '[', rank, '] Unexpected source:', recv_src, + * ' in ', name + call MPI_Abort(MPI_COMM_WORLD, 101, ierr) + end if + + if (recv_tag .ne. tag) then + print *, '[', rank, '] Unexpected tag:', recv_tag, ' in ', name + call MPI_Abort(MPI_COMM_WORLD, 102, ierr) + end if + + if (recv_count .ne. count) then + print *, '[', rank, '] Unexpected count:', recv_count, + * ' in ', name + call MPI_Abort(MPI_COMM_WORLD, 103, ierr) + end if + + call verify_test_data(recv_buf, count, n, name ) + + end +c------------------------------------------------------------------------------ +c +c Check that requests have been set to null +c +c------------------------------------------------------------------------------ + subroutine rq_check( requests, n, msg ) + include 'mpif.h' + integer n, requests(n) + character*(*) msg + integer i +c + do 10 i=1, n + if (requests(i) .ne. MPI_REQUEST_NULL) then + print *, 'Nonnull request in ', msg + endif + 10 continue +c + end +c------------------------------------------------------------------------------ +c +c Initialize test data buffer with integral sequence. 
+c +c------------------------------------------------------------------------------ + subroutine init_test_data(buf,n) + integer n + real buf(n) + integer i + + do 10 i = 1, n + buf(i) = REAL(i) + 10 continue + end + +c------------------------------------------------------------------------------ +c +c Clear test data buffer +c +c------------------------------------------------------------------------------ + subroutine clear_test_data(buf, n) + integer n + real buf(n) + integer i + + do 10 i = 1, n + buf(i) = 0. + 10 continue + + end + +c------------------------------------------------------------------------------ +c +c Verify test data buffer +c +c------------------------------------------------------------------------------ + subroutine verify_test_data(buf, count, n, name) + include 'mpif.h' + integer n + real buf(n) + character *(*) name + + integer count, ierr, i + + do 10 i = 1, count + if (buf(i) .ne. REAL(i)) then + print 100, buf(i), i, count, name + call MPI_Abort(MPI_COMM_WORLD, 108, ierr) + endif + 10 continue + + do 20 i = count + 1, n + if (buf(i) .ne. 0.) 
then + print 100, buf(i), i, n, name + call MPI_Abort(MPI_COMM_WORLD, 109, ierr) + endif + 20 continue + +100 format('Invalid data', f6.1, ' at ', i4, ' of ', i4, ' in ', a) + + end diff --git a/teshsuite/smpi/mpich-test/pt2pt/allpair.std b/teshsuite/smpi/mpich-test/pt2pt/allpair.std new file mode 100644 index 0000000000..31a1d0704c --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/allpair.std @@ -0,0 +1,13 @@ +*** Testing pt-2-pt from Fortran *** + Send + Rsend + Ssend + Isend + Irsend + Issend + Send_init + Rsend_init + Ssend_init + Sendrecv + Sendrecv_replace +*** Testing pt-2-pt from Fortran *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/allpair2.f b/teshsuite/smpi/mpich-test/pt2pt/allpair2.f new file mode 100644 index 0000000000..12b91bad08 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/allpair2.f @@ -0,0 +1,809 @@ +c +c This program was inspired by a bug report from +c fsset@corelli.lerc.nasa.gov (Scott Townsend) +c The original version of this program was submitted by email to +c mpi-bugs and is in the directory mpich/bugs/ssend (not distributed +c with the distribution). This program was modified by William +c Gropp (to correct a few errors and make more consistent with the +c structure of the test programs in the examples/test/pt2pt directory. + +c A C version of this program is in allpairc.c +c +c This version is intended to test for memory leaks; it runs each test +c a number of times (TEST_COUNT + some in test_pair). +c + program allpair2 + include 'mpif.h' + integer ierr + + call MPI_Init(ierr) + + call test_pair + + call MPI_Finalize(ierr) + + end + +c------------------------------------------------------------------------------ +c +c Simple pair communication exercises. 
+c +c------------------------------------------------------------------------------ + subroutine test_pair + include 'mpif.h' + integer TEST_SIZE, TEST_COUNT + parameter (TEST_SIZE=2000) + parameter (TEST_COUNT=100) + + integer ierr, prev, next, count, tag, index, i, outcount, + . requests(2), indices(2), rank, size, + . status(MPI_STATUS_SIZE), statuses(MPI_STATUS_SIZE,2) + integer dupcom + integer c + logical flag + real send_buf( TEST_SIZE ), recv_buf ( TEST_SIZE ) + + call MPI_Comm_rank( MPI_COMM_WORLD, rank, ierr ) + call MPI_Comm_size( MPI_COMM_WORLD, size, ierr ) + call MPI_Comm_dup( MPI_COMM_WORLD, dupcom, ierr ) + next = rank + 1 + if (next .ge. size) next = 0 + + prev = rank - 1 + if (prev .lt. 0) prev = size - 1 +c +c Normal sends +c + if (rank .eq. 0) then + print *, ' Send' + end if + + tag = 1123 + count = TEST_SIZE / 5 + + do 111 c=1, TEST_COUNT+1 + + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Send(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'send and recv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'send and recv' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if + 111 continue +c +c Ready sends. Note that we must ensure that the receive is posted +c before the rsend; this requires using Irecv. +c + if (rank .eq. 0) then + print *, ' Rsend' + end if + + tag = 1456 + count = TEST_SIZE / 3 + + do 112 c = 1, TEST_COUNT+2 + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 
0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Recv( MPI_BOTTOM, 0, MPI_INTEGER, next, tag, + . MPI_COMM_WORLD, status, ierr ) + + call MPI_Rsend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + + call MPI_Probe(MPI_ANY_SOURCE, tag, + . MPI_COMM_WORLD, status, ierr) + + if (status(MPI_SOURCE) .ne. prev) then + print *, 'Incorrect source, expected', prev, + . ', got', status(MPI_SOURCE) + end if + + if (status(MPI_TAG) .ne. tag) then + print *, 'Incorrect tag, expected', tag, + . ', got', status(MPI_TAG) + end if + + call MPI_Get_count(status, MPI_REAL, i, ierr) + + if (i .ne. count) then + print *, 'Incorrect count, expected', count, + . ', got', i + end if + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'rsend and recv' ) + + else + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + call MPI_Send( MPI_BOTTOM, 0, MPI_INTEGER, next, tag, + . MPI_COMM_WORLD, ierr ) + call MPI_Wait( requests(1), status, ierr ) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'rsend and recv' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if + 112 continue +c +c Synchronous sends +c + if (rank .eq. 0) then + print *, ' Ssend' + end if + + tag = 1789 + count = TEST_SIZE / 3 + + do 113 c = 1, TEST_COUNT+3 + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Iprobe(MPI_ANY_SOURCE, tag, + . MPI_COMM_WORLD, flag, status, ierr) + + if (flag) then + print *, 'Iprobe succeeded! source', status(MPI_SOURCE), + . ', tag', status(MPI_TAG) + end if + + call MPI_Ssend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + + do while (.not. flag) + call MPI_Iprobe(MPI_ANY_SOURCE, tag, + . 
MPI_COMM_WORLD, flag, status, ierr) + end do + + if (status(MPI_SOURCE) .ne. prev) then + print *, 'Incorrect source, expected', prev, + . ', got', status(MPI_SOURCE) + end if + + if (status(MPI_TAG) .ne. tag) then + print *, 'Incorrect tag, expected', tag, + . ', got', status(MPI_TAG) + end if + + call MPI_Get_count(status, MPI_REAL, i, ierr) + + if (i .ne. count) then + print *, 'Incorrect count, expected', count, + . ', got', i + end if + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, + $ TEST_SIZE, 'ssend and recv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'ssend and recv' ) + + call MPI_Ssend(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if + 113 continue +c +c Nonblocking normal sends +c + if (rank .eq. 0) then + print *, ' Isend' + end if + + tag = 2123 + count = TEST_SIZE / 5 + + do 114 c = 1, TEST_COUNT+4 + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Isend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + call MPI_Waitall(2, requests, statuses, ierr) + + call rq_check( requests, 2, 'isend and irecv' ) + + call msg_check( recv_buf, prev, tag, count, statuses(1,1), + $ TEST_SIZE, 'isend and irecv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'isend and irecv' ) + + call MPI_Isend(recv_buf, count, MPI_REAL, next, tag, + . 
MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Wait(requests(1), status, ierr) + + call rq_check( requests(1), 1, 'isend and irecv' ) + + end if + 114 continue +c +c Nonblocking ready sends +c + if (rank .eq. 0) then + print *, ' Irsend' + end if + + tag = 2456 + count = TEST_SIZE / 3 + + do 115 c = 1, TEST_COUNT+5 + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . dupcom, status, ierr ) + + call MPI_Irsend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + index = -1 + do while (index .ne. 1) + call MPI_Waitany(2, requests, index, statuses, ierr) + end do + + call rq_check( requests(1), 1, 'irsend and irecv' ) + + call msg_check( recv_buf, prev, tag, count, statuses, + $ TEST_SIZE, 'irsend and irecv' ) + +C +C In case the send didn't complete yet. + call MPI_Waitall( 2, requests, statuses, ierr ) + + else + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . MPI_BOTTOM, 0, MPI_INTEGER, next, 0, + . dupcom, status, ierr ) + + flag = .FALSE. + do while (.not. flag) + call MPI_Test(requests(1), flag, status, ierr) + end do + + call rq_check( requests, 1, 'irsend and irecv (test)' ) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'irsend and irecv' ) + + call MPI_Irsend(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Waitall(1, requests, statuses, ierr) + + call rq_check( requests, 1, 'irsend and irecv' ) + + end if + 115 continue +c +c Nonblocking synchronous sends +c + if (rank .eq. 
0) then + print *, ' Issend' + end if + + tag = 2789 + count = TEST_SIZE / 3 + + do 116 c = 1, TEST_COUNT+6 + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call MPI_Irecv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Issend(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + flag = .FALSE. + do while (.not. flag) + call MPI_Testall(2, requests, flag, statuses, ierr) +C print *, 'flag = ', flag + end do + + call rq_check( requests, 2, 'issend and irecv (testall)' ) + + call msg_check( recv_buf, prev, tag, count, statuses(1,1), + $ TEST_SIZE, 'issend and recv (testall)' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'issend and recv' ) + + call MPI_Issend(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + flag = .FALSE. + do while (.not. flag) + call MPI_Testany(1, requests(1), index, flag, + . statuses(1,1), ierr) +c print *, 'flag = ', flag + end do + + call rq_check( requests, 1, 'issend and recv (testany)' ) + + end if + 116 continue +c +c Persistent normal sends +c + if (rank .eq. 0) then + print *, ' Send_init' + end if + + tag = 3123 + count = TEST_SIZE / 5 + + do 117 c = 1, TEST_COUNT+7 + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Send_init(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Recv_init(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(2), ierr) + + if (rank .eq. 
0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Startall(2, requests, ierr) + call MPI_Waitall(2, requests, statuses, ierr) + + call msg_check( recv_buf, prev, tag, count, statuses(1,2), + $ TEST_SIZE, 'persistent send/recv' ) + + else + + call MPI_Start(requests(2), ierr) + call MPI_Wait(requests(2), status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + * 'persistent send/recv') + + do i = 1,count + send_buf(i) = recv_buf(i) + end do + + call MPI_Start(requests(1), ierr) + call MPI_Wait(requests(1), status, ierr) + + end if + + call MPI_Request_free(requests(1), ierr) + call MPI_Request_free(requests(2), ierr) + 117 continue +c +c Persistent ready sends +c Like the ready send, we must ensure that the receive is posted +c before the ready send is started. +c + if (rank .eq. 0) then + print *, ' Rsend_init' + end if + + tag = 3456 + count = TEST_SIZE / 3 + + do 118 c = 1, TEST_COUNT+8 + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Rsend_init(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(1), ierr) + + call MPI_Recv_init(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(2), ierr) + +c +c receive a clear-to-go from the destination, so that the ready send +c will find the matching receive when it arrives + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Recv( MPI_BOTTOM, 0, MPI_INTEGER, 1, 321, + $ MPI_COMM_WORLD, status, ierr ) + call MPI_Startall(2, requests, ierr) + + index = -1 + do while (index .ne. 2) + call MPI_Waitsome(2, requests, outcount, + . indices, statuses, ierr) + do i = 1,outcount + if (indices(i) .eq. 
2) then + call msg_check( recv_buf, prev, tag, count, + $ statuses(1,i), TEST_SIZE, 'waitsome' ) + index = 2 + end if + end do + end do + + else + + call MPI_Start(requests(2), ierr) + +c Let the target know that is may begin the ready send + call MPI_Send( MPI_BOTTOM, 0, MPI_INTEGER, 0, 321, + $ MPI_COMM_WORLD, ierr ) + + flag = .FALSE. + do while (.not. flag) + call MPI_Test(requests(2), flag, status, ierr) + end do + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + * 'test' ) + + do i = 1,count + send_buf(i) = recv_buf(i) + end do + + call MPI_Start(requests(1), ierr) + call MPI_Wait(requests(1), status, ierr) + + end if + + call MPI_Request_free(requests(1), ierr) + call MPI_Request_free(requests(2), ierr) + 118 continue +c +c Persistent synchronous sends +c + if (rank .eq. 0) then + print *, ' Ssend_init' + end if + + tag = 3789 + count = TEST_SIZE / 3 + + do 119 c = 1, TEST_COUNT+9 + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Ssend_init(send_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, requests(2), ierr) + + call MPI_Recv_init(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . requests(1), ierr) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Startall(2, requests, ierr) + + index = -1 + do while (index .ne. 1) + call MPI_Testsome(2, requests, outcount, + . indices, statuses, ierr) + do i = 1,outcount + if (indices(i) .eq. 1) then + call msg_check( recv_buf, prev, tag, count, + $ statuses(1,i), TEST_SIZE, 'testsome' ) + index = 1 + end if + end do + end do + + else + + call MPI_Start(requests(1), ierr) + + flag = .FALSE. + do while (.not. flag) + call MPI_Testany(1, requests(1), index, flag, + . 
statuses(1,1), ierr) + end do + + call msg_check( recv_buf, prev, tag, count, statuses(1,1), + $ TEST_SIZE, 'testany' ) + + do i = 1,count + send_buf(i) = recv_buf(i) + end do + + call MPI_Start(requests(2), ierr) + call MPI_Wait(requests(2), status, ierr) + + end if + + call MPI_Request_free(requests(1), ierr) + call MPI_Request_free(requests(2), ierr) + 119 continue +c +c Send/receive. +c + if (rank .eq. 0) then + print *, ' Sendrecv' + end if + + tag = 4123 + count = TEST_SIZE / 5 + + do 120 c = 1, TEST_COUNT+10 + call clear_test_data(recv_buf,TEST_SIZE) + + if (rank .eq. 0) then + + call init_test_data(send_buf,TEST_SIZE) + + call MPI_Sendrecv(send_buf, count, MPI_REAL, next, tag, + . recv_buf, count, MPI_REAL, prev, tag, + . MPI_COMM_WORLD, status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'sendrecv' ) + + else + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'recv/send' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if + 120 continue +c +c Send/receive replace. +c + if (rank .eq. 0) then + print *, ' Sendrecv_replace' + end if + + tag = 4456 + count = TEST_SIZE / 3 + + do 121 c = 1, TEST_COUNT+11 + if (rank .eq. 0) then + + call init_test_data(recv_buf, TEST_SIZE) + + do 11 i = count+1,TEST_SIZE + recv_buf(i) = 0.0 + 11 continue + + call MPI_Sendrecv_replace(recv_buf, count, MPI_REAL, + . next, tag, prev, tag, + . MPI_COMM_WORLD, status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 'sendrecvreplace' ) + + else + + call clear_test_data(recv_buf,TEST_SIZE) + + call MPI_Recv(recv_buf, TEST_SIZE, MPI_REAL, + . MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + . status, ierr) + + call msg_check( recv_buf, prev, tag, count, status, TEST_SIZE, + . 
'recv/send for replace' ) + + call MPI_Send(recv_buf, count, MPI_REAL, next, tag, + . MPI_COMM_WORLD, ierr) + end if + + 121 continue + + call MPI_Comm_free( dupcom, ierr ) + return + + end + +c------------------------------------------------------------------------------ +c +c Check for correct source, tag, count, and data in test message. +c +c------------------------------------------------------------------------------ + subroutine msg_check( recv_buf, source, tag, count, status, n, + * name ) + include 'mpif.h' + integer n + real recv_buf(n) + integer source, tag, count, rank, status(MPI_STATUS_SIZE) + character*(*) name + + integer ierr, recv_src, recv_tag, recv_count + + recv_src = status(MPI_SOURCE) + recv_tag = status(MPI_TAG) + call MPI_Comm_rank( MPI_COMM_WORLD, rank, ierr ) + call MPI_Get_count(status, MPI_REAL, recv_count, ierr) + +C Check for null status + if (recv_src .eq. MPI_ANY_SOURCE .and. + * recv_tag .eq. MPI_ANY_TAG .and. + * status(MPI_ERROR) .eq. MPI_SUCCESS) then + print *, '[', rank, '] Unexpected NULL status in ', name + call MPI_Abort( MPI_COMM_WORLD, 104, ierr ) + end if + if (recv_src .ne. source) then + print *, '[', rank, '] Unexpected source:', recv_src, + * ' in ', name + call MPI_Abort(MPI_COMM_WORLD, 101, ierr) + end if + + if (recv_tag .ne. tag) then + print *, '[', rank, '] Unexpected tag:', recv_tag, ' in ', name + call MPI_Abort(MPI_COMM_WORLD, 102, ierr) + end if + + if (recv_count .ne. 
count) then + print *, '[', rank, '] Unexpected count:', recv_count, + * ' in ', name + call MPI_Abort(MPI_COMM_WORLD, 103, ierr) + end if + + call verify_test_data(recv_buf, count, n, name ) + + end +c------------------------------------------------------------------------------ +c +c Check that requests have been set to null +c +c------------------------------------------------------------------------------ + subroutine rq_check( requests, n, msg ) + include 'mpif.h' + integer n, requests(n) + character*(*) msg + integer i +c + do 10 i=1, n + if (requests(i) .ne. MPI_REQUEST_NULL) then + print *, 'Nonnull request in ', msg + endif + 10 continue +c + end +c------------------------------------------------------------------------------ +c +c Initialize test data buffer with integral sequence. +c +c------------------------------------------------------------------------------ + subroutine init_test_data(buf,n) + integer n + real buf(n) + integer i + + do 10 i = 1, n + buf(i) = REAL(i) + 10 continue + end + +c------------------------------------------------------------------------------ +c +c Clear test data buffer +c +c------------------------------------------------------------------------------ + subroutine clear_test_data(buf, n) + integer n + real buf(n) + integer i + + do 10 i = 1, n + buf(i) = 0. + 10 continue + + end + +c------------------------------------------------------------------------------ +c +c Verify test data buffer +c +c------------------------------------------------------------------------------ + subroutine verify_test_data(buf, count, n, name) + include 'mpif.h' + integer n + real buf(n) + character *(*) name + + integer count, ierr, i + + do 10 i = 1, count + if (buf(i) .ne. REAL(i)) then + print 100, buf(i), i, count, name + call MPI_Abort(MPI_COMM_WORLD, 108, ierr) + endif + 10 continue + + do 20 i = count + 1, n + if (buf(i) .ne. 0.) 
then + print 100, buf(i), i, n, name + call MPI_Abort(MPI_COMM_WORLD, 109, ierr) + endif + 20 continue + +100 format('Invalid data', f6.1, ' at ', i4, ' of ', i4, ' in ', a) + + end diff --git a/teshsuite/smpi/mpich-test/pt2pt/allpair2.std b/teshsuite/smpi/mpich-test/pt2pt/allpair2.std new file mode 100644 index 0000000000..f1c4b3c712 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/allpair2.std @@ -0,0 +1,13 @@ +*** Testing pt-2-pt from Fortran (many calls) *** + Send + Rsend + Ssend + Isend + Irsend + Issend + Send_init + Rsend_init + Ssend_init + Sendrecv + Sendrecv_replace +*** Testing pt-2-pt from Fortran (many calls) *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/bsendtest.c b/teshsuite/smpi/mpich-test/pt2pt/bsendtest.c new file mode 100644 index 0000000000..afd2eefe63 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/bsendtest.c @@ -0,0 +1,211 @@ +/* + * Program to test that the "no overtaking messages" semantics + * of point to point communications in MPI is satisfied. + * A long message is sent using MPI_BSend and received using MPI_Recv, + * followed by lots of short messages sent the same way. 
+ * + * Patrick Bridges + * bridges@mcs.anl.gov + * patrick@CS.MsState.Edu + */ + +#include +/* Needed for malloc declaration */ +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 + +static int src = 0; +static int dest = 1; + +/* Which tests to perform (not yet implemented) */ +/* static int Do_Buffer = 1; */ +/* static int Do_Standard = 1; */ + +/* Prototypes for picky compilers */ +void Generate_Data ( double *, int ); +void Normal_Test_Recv ( double *, int ); +void Buffered_Test_Send ( double *, int ); +void Buffered_Test_Ibsend ( double *, int ); +int Check_Data ( double *, int ); +void Clear_Buffer ( double *, int ); + +void Generate_Data(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = (double)i+1; +} + +void Normal_Test_Recv(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + MPI_Status Stat; + double *b; + + b = buffer; + for (j = 0; j < 2; j++) { + /* Receive a long message */ + MPI_Recv(b, (buff_size/2 - 10), MPI_DOUBLE, src, + 2000, MPI_COMM_WORLD, &Stat); + b += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) { + MPI_Recv(b++, 1, MPI_DOUBLE, src, 2000, MPI_COMM_WORLD, &Stat); + } + } +} + +void Buffered_Test_Send(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + void *bbuffer; + int size; + + for (j = 0; j < 2; j++) { + /* send a long message */ + MPI_Bsend(buffer, (buff_size/2 - 10), MPI_DOUBLE, dest, 2000, + MPI_COMM_WORLD); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Bsend(buffer++, 1, MPI_DOUBLE, + dest, 2000, MPI_COMM_WORLD); + /* Force this set of Bsends to complete */ + MPI_Buffer_detach( &bbuffer, &size ); + MPI_Buffer_attach( bbuffer, size ); + } +} + +void Buffered_Test_Ibsend(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + void *bbuffer; + int size; + int cnt; + MPI_Request req[20]; + MPI_Status statuses[20]; + + for (j = 
0; j < 2; j++) { + /* send a long message */ + cnt = 0; + MPI_Ibsend(buffer, (buff_size/2 - 10), MPI_DOUBLE, dest, 2000, + MPI_COMM_WORLD, &req[cnt++]); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Ibsend(buffer++, 1, MPI_DOUBLE, + dest, 2000, MPI_COMM_WORLD, &req[cnt++]); + /* Wait for these to finish (should finish immediately) */ + MPI_Waitall( cnt, req, statuses ); + + /* Force this set of Bsends to complete; this may take longer than + the Waitall */ + MPI_Buffer_detach( &bbuffer, &size ); + MPI_Buffer_attach( bbuffer, size ); + } +} + +int Check_Data(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + int err = 0; + + for (i = 0; i < buff_size; i++) + if (buffer[i] != (i + 1)) { + err++; + fprintf( stderr, "Value at %d is %f, should be %f\n", i, + buffer[i], (double)(i+1) ); + if (err > 10) return 1; + } + return err; +} + +void Clear_Buffer(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + for (i = 0; i < buff_size; i++) + buffer[i] = -1; +} + + +int main(int argc, char **argv) +{ + int rank; /* My Rank (0 or 1) */ + double buffer[SIZE], *tmpbuffer, *tmpbuf; + int tsize, bsize; + char *Current_Test = NULL; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == src) { + Generate_Data(buffer, SIZE); + MPI_Pack_size( SIZE, MPI_DOUBLE, MPI_COMM_WORLD, &bsize ); + tmpbuffer = (double *) malloc( bsize + 22*MPI_BSEND_OVERHEAD ); + if (!tmpbuffer) { + fprintf( stderr, "Could not allocate bsend buffer of size %d\n", + bsize ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + MPI_Buffer_attach( tmpbuffer, bsize + 22*MPI_BSEND_OVERHEAD ); + Buffered_Test_Send(buffer, SIZE); + Buffered_Test_Ibsend(buffer, SIZE); + MPI_Buffer_detach( &tmpbuf, &tsize ); + Test_Waitforall( ); + MPI_Finalize(); + + } else if (rank == dest) { + Test_Init("bsendtest", rank); + /* Test 3 */ + Current_Test = (char*)"Overtaking Test (Buffered Send -> Normal Receive)"; + Clear_Buffer(buffer, 
SIZE); + /* For Bsend */ + Normal_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + + /* For Ibsend */ + Current_Test = (char*)"Overtaking Test (Buffered Isend -> Normal Receive)"; + Clear_Buffer(buffer, SIZE); + Normal_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + + Test_Waitforall( ); + { + int rval = Summarize_Test_Results(); /* Returns number of tests; + that failed */ + Test_Finalize(); + MPI_Finalize(); + return rval; + } + } else { + fprintf(stderr, "*** This program uses exactly 2 processes! ***\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/cancel.c b/teshsuite/smpi/mpich-test/pt2pt/cancel.c new file mode 100644 index 0000000000..e1ca0f341f --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/cancel.c @@ -0,0 +1,117 @@ +/* + * This file shows a typical use of MPI_Cancel to free IRecv's that + * are not wanted. We check for both successful and unsuccessful + * cancels + */ + +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + MPI_Request r1; + int size, rank; + int err = 0; + int partner, buf[10], flag; + MPI_Status status; + + MPI_Init( &argc, &argv ); + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + if (size < 2) { + printf( "Cancel test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* + * Here is the test. 
First, we ensure an unsatisfied Irecv: + * process 0 process size-1 + * Sendrecv Sendrecv + * Irecv ---- + * Cancel ---- + * Sendrecv Sendrecv + * Next, we confirm receipt before canceling + * Irecv Send + * Sendrecv Sendrecv + * Cancel + */ + if (rank == 0) { + partner = size - 1; + /* Cancel succeeds */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Irecv( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Cancel( &r1 ); + MPI_Wait( &r1, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a receive failed where it should succeed.\n" ); + } + + /* Cancel fails */ + MPI_Irecv( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Test( &r1, &flag, &status ); + MPI_Test_cancelled( &status, &flag ); + /* It is technically possible for the cancel to succeed, even though + the message was (at least partially) delivered. I'm leaving + this test in since most of the MPICH devices provide this + behavior. 
*/ + if (flag) { + err++; + printf( "Cancel of a receive succeeded where it shouldn't.\n" ); + } + + if (err) { + printf( "Test failed with %d errors.\n", err ); + } + else { + printf( " No Errors\n" ); + } + } + else if (rank == size - 1) { + partner = 0; + /* Cancel succeeds */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + /* Cancel fails */ + MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + } + + /* + Next test - check that a cancel for a request receive from + MPI_PROC_NULL succeeds (there is some suspicion that some + systems can't handle this - also, MPI_REQUEST_NULL + + Note that a null request is invalid (see the various NULL comments) + r1 = MPI_REQUEST_NULL; + MPI_Cancel( &r1 ); + */ + MPI_Irecv( buf, 10, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &r1 ); + MPI_Cancel( &r1 ); + + MPI_Request_free( &r1 ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/cancel2.c b/teshsuite/smpi/mpich-test/pt2pt/cancel2.c new file mode 100644 index 0000000000..664c20a23e --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/cancel2.c @@ -0,0 +1,230 @@ +/* + * This file shows a typical use of MPI_Cancel to free Persistent Recv's that + * are not wanted. 
We check for both successful and unsuccessful + * cancels + */ + +/* On 10/27/99, a test for MPI_Waitsome/MPI_Testsome was added */ + +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + MPI_Request r1; + int size, rank; + int err = 0; + int partner, buf[10], flag, idx, index; + MPI_Status status; + + MPI_Init( &argc, &argv ); + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + if (size < 2) { + printf( "Cancel test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* + * Here is the test. First, we ensure an unsatisfied Irecv: + * process 0 process size-1 + * Sendrecv Sendrecv + * Irecv ---- + * Cancel ---- + * Sendrecv Sendrecv + * Next, we confirm receipt before canceling + * Irecv Send + * Sendrecv Sendrecv + * Cancel + */ + if (rank == 0) { + partner = size - 1; + /* Cancel succeeds for wait/waitall */ + MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Wait( &r1, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a receive failed where it should succeed (Wait).\n" ); + } + + MPI_Request_free( &r1 ); + + /* Cancel fails for test/testall */ + buf[0] = -1; + MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Test( &r1, &flag, &status ); + MPI_Test_cancelled( &status, &flag ); + if (flag) { + err++; + printf( "Cancel of a receive succeeded where it shouldn't (Test).\n" ); + if (buf[0] != -1) 
{ + printf( "Receive buffer changed even though cancel suceeded! (Test).\n" ); + } + } + MPI_Request_free( &r1 ); + + /* Cancel succeeds for waitany */ + MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Waitany( 1, &r1, &idx, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a receive failed where it should succeed (Waitany).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel fails for testany */ + buf[0] = -1; + MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Testany( 1, &r1, &idx, &flag, &status ); + MPI_Test_cancelled( &status, &flag ); + if (flag) { + err++; + printf( "Cancel of a receive succeeded where it shouldn't (Testany).\n" ); + if (buf[0] != -1) { + printf( "Receive buffer changed even though cancel suceeded! 
(Test).\n" ); + } + } + MPI_Request_free( &r1 ); + + /* Cancel succeeds for waitsome */ + MPI_Recv_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Waitsome( 1, &r1, &idx, &index, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a receive failed where it should succeed (Waitsome).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel fails for testsome*/ + buf[0] = -1; + MPI_Recv_init( buf, 10, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Testsome( 1, &r1, &idx, &index, &status ); + MPI_Test_cancelled( &status, &flag ); + if (flag) { + err++; + printf( "Cancel of a receive succeeded where it shouldn't (Testsome).\n" ); + if (buf[0] != -1) { + printf( "Receive buffer changed even though cancel suceeded! 
(Testsome).\n" ); + } + } + MPI_Request_free( &r1 ); + + if (err) { + printf( "Test failed with %d errors.\n", err ); + } + else { + printf( " No Errors\n" ); + } + } + + else if (rank == size - 1) { + partner = 0; + /* Cancel succeeds for wait/waitall */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + /* Cancel fails for test/testall */ + buf[0] = 3; + MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + + /* Cancel succeeds for waitany */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + /* Cancel fails for testany */ + MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + + /* Cancel succeeds for waitsome */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + /* Cancel fails for waitsome */ + MPI_Send( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + + /* + Next test - check that a cancel for a request receive from + MPI_PROC_NULL succeeds (there is some suspicion that some + systems can't handle this - also, MPI_REQUEST_NULL + */ + /* A null request is an error. 
(null objects are errors unless otherwise + allowed) + r1 = MPI_REQUEST_NULL; + MPI_Cancel( &r1 ); + */ + MPI_Recv_init( buf, 10, MPI_INT, MPI_PROC_NULL, 0, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Request_free( &r1 ); /* Must complete cancel. We know that it + won't complete, so we don't need to do + anything else */ + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/cancel3.c b/teshsuite/smpi/mpich-test/pt2pt/cancel3.c new file mode 100644 index 0000000000..c6a84e7435 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/cancel3.c @@ -0,0 +1,217 @@ +/* + * This file shows a typical use of MPI_Cancel to free Persistent Send's that + * are not wanted. We check for both successful and unsuccessful + * cancels + */ + +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + MPI_Request r1; + int size, rank; + int err = 0; + int partner, buf[10], flag, idx, index; + MPI_Status status; + + MPI_Init( &argc, &argv ); + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + if (size < 2) { + printf( "Cancel test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* + * Here is the test. 
First, we ensure an unsatisfied Irecv: + * process 0 process size-1 + * Sendrecv Sendrecv + * Irecv ---- + * Cancel ---- + * Sendrecv Sendrecv + * Next, we confirm receipt before canceling + * Irecv Send + * Sendrecv Sendrecv + * Cancel + */ + if (rank == 0) { + partner = size - 1; + /* Cancel succeeds for wait/waitall */ + MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Wait( &r1, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a send failed where it should succeed (Wait).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel fails for test/testall */ + buf[0] = 3; + MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Test( &r1, &flag, &status ); + MPI_Test_cancelled( &status, &flag ); + if (flag) { + err++; + printf( "Cancel of a send succeeded where it shouldn't (Test).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel succeeds for waitany */ + MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Waitany( 1, &r1, &idx, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a send failed where it should succeed (Waitany).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel fails for testany */ + buf[0] = 3; 
+ MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Testany( 1, &r1, &idx, &flag, &status ); + MPI_Test_cancelled( &status, &flag ); + if (flag) { + err++; + printf( "Cancel of a send succeeded where it shouldn't (Testany).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel succeeds for waitsome */ + MPI_Send_init( buf, 10, MPI_INT, partner, 0, MPI_COMM_WORLD, &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Start( &r1 ); + MPI_Cancel( &r1 ); + MPI_Waitsome( 1, &r1, &idx, &index, &status ); + MPI_Test_cancelled( &status, &flag ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (!flag) { + err++; + printf( "Cancel of a send failed where it should succeed (Waitsome).\n" ); + } + MPI_Request_free( &r1 ); + + /* Cancel fails for testsome*/ + buf[0] = 3; + MPI_Send_init( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &r1 ); + MPI_Start( &r1 ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Cancel( &r1 ); + MPI_Testsome( 1, &r1, &idx, &index, &status ); + MPI_Test_cancelled( &status, &flag ); + if (flag) { + err++; + printf( "Cancel of a send succeeded where it shouldn't (Testsome).\n" ); + } + MPI_Request_free( &r1 ); + + if (err) { + printf( "Test failed with %d errors.\n", err ); + } + else { + printf( " No Errors\n" ); + } + } + else if (rank == size - 1) { + partner = 0; + /* Cancel succeeds for wait/waitall */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + + /* 
Cancel fails for test/testall */ + buf[0] = -1; + MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + + if (buf[0] == -1) { + printf( "Receive buffer did not change even though cancel should not have suceeded! (Test).\n" ); + } + + /* Cancel succeeds for waitany */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + /* Cancel fails for testany */ + buf[0] = -1; + MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + if (buf[0] == -1) { + printf( "Receive buffer did not change even though cancel should not have suceeded! (Testany).\n" ); + } + + /* Cancel succeeds for waitsome */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + /* Cancel fails for testsome */ + buf[0] = -1; + MPI_Recv( buf, 3, MPI_INT, partner, 2, MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_BOTTOM, 0, MPI_INT, partner, 1, + MPI_COMM_WORLD, &status ); + + if (buf[0] == -1) { + printf( "Receive buffer did not change even though cancel should not have suceeded! 
(Test).\n" ); + } + + } + + MPI_Finalize(); + return 0; +} + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/cancelibm.c b/teshsuite/smpi/mpich-test/pt2pt/cancelibm.c new file mode 100644 index 0000000000..3f83a525ce --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/cancelibm.c @@ -0,0 +1,146 @@ +/**************************************************************************** + + MESSAGE PASSING INTERFACE TEST CASE SUITE + + Copyright IBM Corp. 1995 + + IBM Corp. hereby grants a non-exclusive license to use, copy, modify, and + distribute this software for any purpose and without fee provided that the + above copyright notice and the following paragraphs appear in all copies. + + IBM Corp. makes no representation that the test cases comprising this + suite are correct or are an accurate representation of any standard. + + In no event shall IBM be liable to any party for direct, indirect, special + incidental, or consequential damage arising out of the use of this software + even if IBM Corp. has been advised of the possibility of such damage. + + IBM CORP. SPECIFICALLY DISCLAIMS ANY WARRANTIES INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS AND IBM + CORP. HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, + ENHANCEMENTS, OR MODIFICATIONS. + + **************************************************************************** + + These test cases reflect an interpretation of the MPI Standard. They are + are, in most cases, unit tests of specific MPI behaviors. If a user of any + test case from this set believes that the MPI Standard requires behavior + different than that implied by the test case we would appreciate feedback. 
+ + Comments may be sent to: + Richard Treumann + treumann@kgn.ibm.com + + **************************************************************************** +*/ +#include +#include "mpi.h" + +int main(int argc, char *argv[]) +{ + int me, tasks, data, flag; + int err0 = 0; + int err1 = 0; + int errs, toterrs; + MPI_Request request; + MPI_Status status; + + MPI_Init(&argc,&argv); + MPI_Comm_rank(MPI_COMM_WORLD,&me); + MPI_Comm_size(MPI_COMM_WORLD,&tasks); + + if (tasks < 2) { + printf( "Cancel test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + { int data[100000]; if (me == 0) + { + MPI_Irecv(data, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,&request); + MPI_Cancel(&request); + MPI_Wait(&request,&status); + MPI_Test_cancelled(&status,&flag); + if (!flag) { + err0++; + printf("task %d ERROR: Receive request not cancelled!\n", me); + } + + MPI_Issend(data, 100000, MPI_INT, 1, 1, MPI_COMM_WORLD,&request); + MPI_Cancel(&request); + for (flag = 0;; ) + { + MPI_Test(&request,&flag,&status); + if (flag) break; + } + + MPI_Test_cancelled(&status,&flag); + if (!flag) { + err0++; + printf("task %d ERROR: Send request not cancelled! (1)\n", me); + } + }} + + if (me == 0) + { + data = 5; + MPI_Isend(&data, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,&request); + MPI_Cancel(&request); + MPI_Wait(&request,&status); + MPI_Test_cancelled(&status,&flag); + if (!flag) { + err0++; + printf("task %d ERROR: Send request not cancelled! 
(2)\n", me); + } + MPI_Barrier(MPI_COMM_WORLD); + status.MPI_TAG=MPI_SUCCESS; + data = 6; + MPI_Send(&data, 1, MPI_INT, 1, 5, MPI_COMM_WORLD); + + data = 7; + MPI_Isend(&data, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,&request); + MPI_Barrier(MPI_COMM_WORLD); + MPI_Cancel(&request); + MPI_Wait(&request,&status); + MPI_Test_cancelled(&status,&flag); + if (flag) { + err0++; + printf("task %d ERROR: Send request cancelled!\n", me); + } + } + else if (me == 1) + { + MPI_Barrier(MPI_COMM_WORLD); + data = 0; + MPI_Recv(&data, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,&status); + if (data != 7) { + err1++; + printf("task %d ERROR: Send request not cancelled!\n", me); + } + + MPI_Recv(&data, 1, MPI_INT, 0, 5, MPI_COMM_WORLD,&status); + if (data != 6) { + err1++; + printf("task %d ERROR: Send request not cancelled!\n", me); + } + MPI_Barrier(MPI_COMM_WORLD); + } + else { + /* These are needed when the size of MPI_COMM_WORLD > 2 */ + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Barrier( MPI_COMM_WORLD ); + } + + errs = err0 + err1; + MPI_Reduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); + + if ( errs ) { + printf( "Test failed with %d errors.\n", errs ); + } + if (me == 0 && toterrs == 0) { + printf( " No Errors\n" ); + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/cancelissend.c b/teshsuite/smpi/mpich-test/pt2pt/cancelissend.c new file mode 100644 index 0000000000..277fb735af --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/cancelissend.c @@ -0,0 +1,160 @@ +/**************************************************************************** + + MESSAGE PASSING INTERFACE TEST CASE SUITE + + Copyright IBM Corp. 1995 + + IBM Corp. hereby grants a non-exclusive license to use, copy, modify, and + distribute this software for any purpose and without fee provided that the + above copyright notice and the following paragraphs appear in all copies. + + IBM Corp. 
makes no representation that the test cases comprising this + suite are correct or are an accurate representation of any standard. + + In no event shall IBM be liable to any party for direct, indirect, special + incidental, or consequential damage arising out of the use of this software + even if IBM Corp. has been advised of the possibility of such damage. + + IBM CORP. SPECIFICALLY DISCLAIMS ANY WARRANTIES INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS AND IBM + CORP. HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, + ENHANCEMENTS, OR MODIFICATIONS. + + **************************************************************************** + + These test cases reflect an interpretation of the MPI Standard. They are + are, in most cases, unit tests of specific MPI behaviors. If a user of any + test case from this set believes that the MPI Standard requires behavior + different than that implied by the test case we would appreciate feedback. 
+ + Comments may be sent to: + Richard Treumann + treumann@kgn.ibm.com + + **************************************************************************** +*/ +/* + * WDG - July 6, 2004 + * + * This is a modified version that: + * Uses a shorter message (in case the implementation uses eager delivery + * even with synchronous send) + * Allows control of which process is executing the Issend and which the + * receive (to simplify debugging) + */ +#include +#include "mpi.h" + +int main(int argc, char *argv[]) +{ + int me, tasks, data, flag; + int err0 = 0; + int err1 = 0; + int errs, toterrs; + int master = 1, worker = 0; + MPI_Request request; + MPI_Status status; + + MPI_Init(&argc,&argv); + MPI_Comm_rank(MPI_COMM_WORLD,&me); + MPI_Comm_size(MPI_COMM_WORLD,&tasks); + + if (tasks < 2) { + printf( "Cancel test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* The original test sent 10000 elements with Issend. This + one uses less data but keeps the array the same size */ + { int data[100000]; if (me == master) + { + MPI_Irecv(data, 1, MPI_INT, worker, 1, MPI_COMM_WORLD,&request); + MPI_Cancel(&request); + MPI_Wait(&request,&status); + MPI_Test_cancelled(&status,&flag); + if (!flag) { + err0++; + printf("task %d ERROR: Receive request not cancelled!\n", me); + } + + /* This is short enough to use eager but because it is + a Synchronous send, it must still be possible to + cancel it, even when it is a short message */ + MPI_Issend(data, 100, MPI_INT, worker, 1, MPI_COMM_WORLD,&request); + MPI_Cancel(&request); + for (flag = 0;; ) + { + MPI_Test(&request,&flag,&status); + if (flag) break; + } + + MPI_Test_cancelled(&status,&flag); + if (!flag) { + err0++; + printf("task %d ERROR: Send request not cancelled! 
(1)\n", me); + } + }} + + if (me == master) + { + data = 5; + MPI_Isend(&data, 1, MPI_INT, worker, 1, MPI_COMM_WORLD,&request); + MPI_Cancel(&request); + MPI_Wait(&request,&status); + MPI_Test_cancelled(&status,&flag); + if (!flag) { + err0++; + printf("task %d ERROR: Send request not cancelled! (2)\n", me); + } + MPI_Barrier(MPI_COMM_WORLD); + status.MPI_TAG=MPI_SUCCESS; + data = 6; + MPI_Send(&data, 1, MPI_INT, worker, 5, MPI_COMM_WORLD); + + MPI_Isend(&data, 1, MPI_INT, worker, 1, MPI_COMM_WORLD,&request); + MPI_Barrier(MPI_COMM_WORLD); + MPI_Cancel(&request); + MPI_Wait(&request,&status); + MPI_Test_cancelled(&status,&flag); + if (flag) { + err0++; + printf("task %d ERROR: Send request cancelled!\n", me); + } + } + else if (me == worker) + { + MPI_Barrier(MPI_COMM_WORLD); + data = 0; + MPI_Recv(&data, 1, MPI_INT, master, 1, MPI_COMM_WORLD,&status); + if (data != 6) { + err1++; + printf("task %d ERROR: Send request not cancelled!\n", me); + } + + MPI_Recv(&data, 1, MPI_INT, master, 5, MPI_COMM_WORLD,&status); + if (data != 6) { + err1++; + printf("task %d ERROR: Send request not cancelled!\n", me); + } + MPI_Barrier(MPI_COMM_WORLD); + } + else { + /* These are needed when the size of MPI_COMM_WORLD > 2 */ + MPI_Barrier( MPI_COMM_WORLD ); + MPI_Barrier( MPI_COMM_WORLD ); + } + + errs = err0 + err1; + MPI_Reduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD ); + + if ( errs ) { + printf( "Test failed with %d errors.\n", errs ); + } + if (me == 0 && toterrs == 0) { + printf( " No Errors\n" ); + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/cancelmessages.c b/teshsuite/smpi/mpich-test/pt2pt/cancelmessages.c new file mode 100644 index 0000000000..6ca255ba10 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/cancelmessages.c @@ -0,0 +1,171 @@ +/* + * This file tests to see if short,eager,and rndv messages can all be + * successfully cancelled. 
If they cannot be cancelled, then the + * program still must successfully complete. + */ + +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char *argv[] ) +{ + + double sbuf[20000]; +#ifdef FOO + double rbuf[20000]; +#endif + int rank; + int n, flag, size; + int err = 0; + int verbose = 0; + MPI_Status status; + MPI_Request req; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + if (size < 2) { + printf( "Cancel test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* Short Message Test */ + n = 200; + + if (rank == 1) { /* begin if rank = 1 */ + MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req ); + MPI_Cancel(&req); + MPI_Wait(&req, &status); + MPI_Test_cancelled(&status, &flag); + if (!flag) { + err++; + printf( "Cancelling a short message failed where it should succeed.\n" ); + } + else if (verbose) + { + printf("Cancelling a short message succeeded.\n"); + } + } /* end if rank == 1 */ + +#ifdef FOO +/* Note that MPI-2 specifies that status.MPI_ERROR is only set by + multiple completion (e.g., MPI_Waitsome) and not by test_cancelled. 
+*/ + MPI_Barrier(MPI_COMM_WORLD); + + if (rank == 0) { /* begin if rank == 0 */ + MPI_Recv( rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &status); + } /* end if rank = 0 */ + else if (rank == 1) { /* begin if rank = 1 */ + MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req ); + MPI_Cancel(&req); + MPI_Wait(&req, &status); + MPI_Test_cancelled(&status, &flag); + if (!flag && status.MPI_ERROR != MPI_SUCCESS) { + err++; + printf( "Cancel of a send returned an error in the status field.\n" ); + } + /* end if status.MPI_ERROR */ + } /* end if rank == 1 */ +#endif + + MPI_Barrier(MPI_COMM_WORLD); + + /* Eager Message Test */ + n = 3000; + + if (rank == 1) { /* begin if rank = 1 */ + MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req ); + MPI_Cancel(&req); + MPI_Wait(&req, &status); + MPI_Test_cancelled(&status, &flag); + if (!flag) { + err++; + printf( "Cancelling an eager message (3000 doubles) failed where it should succeed.\n" ); + } + else if (verbose) + { + printf("Cancelling an eager message (3000 doubles) succeeded.\n"); + } + } /* end if rank == 1 */ + +#ifdef FOO + MPI_Barrier(MPI_COMM_WORLD); + + if (rank == 0) { /* begin if rank == 0 */ + MPI_Irecv(rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &req ); + MPI_Wait( &req, &status); + } /* end if rank = 0 */ + else if (rank == 1) { /* begin if rank = 1 */ + MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req ); + MPI_Cancel(&req); + MPI_Wait(&req, &status); + MPI_Test_cancelled(&status, &flag); + if (!flag && status.MPI_ERROR != MPI_SUCCESS) { + err++; + printf( "Cancel of a send returned an error in the status field.\n" ); + } + /* end if status.MPI_ERROR */ + } /* end if rank == 1 */ +#endif + + MPI_Barrier(MPI_COMM_WORLD); + + /* Rndv Message Test */ + n = 20000; + + if (rank == 1) { /* begin if rank = 1 */ + MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req ); + MPI_Cancel(&req); + MPI_Wait(&req, &status); + MPI_Test_cancelled(&status, &flag); + if (!flag) { + err++; + printf( 
"Cancelling a rendezvous message failed (20000 doubles) where it should succeed.\n" ); + } + else if (verbose) + { + printf("Cancelling an rendezvous message (20000 doubles) succeeded.\n"); + } + } /* end if rank == 1 */ + +#ifdef FOO + MPI_Barrier(MPI_COMM_WORLD); + + if (rank == 0) { /* begin if rank == 0 */ + MPI_Irecv(rbuf, n, MPI_DOUBLE, 1, 1, MPI_COMM_WORLD, &req ); + MPI_Wait( &req, &status); + } /* end if rank = 0 */ + else if (rank == 1) { /* begin if rank = 1 */ + MPI_Isend( sbuf, n, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD, &req ); + MPI_Cancel(&req); + MPI_Wait(&req, &status); + MPI_Test_cancelled(&status, &flag); + if (!flag && status.MPI_ERROR != MPI_SUCCESS) { + err++; + printf( "Cancel of a send returned an error in the status field.\n" ); + } + /* end if status.MPI_ERROR */ + } /* end if rank == 1 */ +#endif + + MPI_Barrier(MPI_COMM_WORLD); + + if (rank == 1) { /* begin if rank = 1 */ + if (err) { + printf( "Test failed with %d errors.\n", err ); + } + else { + printf( " No Errors\n" ); + } + } + + MPI_Finalize( ); + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/commit.c b/teshsuite/smpi/mpich-test/pt2pt/commit.c new file mode 100644 index 0000000000..09570f0f95 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/commit.c @@ -0,0 +1,86 @@ +/* + * This is a test of Type_commit. This checks to see if Type_commit + * (or Type_struct) replaces a struct with a contiguous type, and + * that that type is constructed correctly. 
+ */ + +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + int nsize, n2size; + MPI_Aint nlb, nub, n2lb, n2ub; + MPI_Datatype ntype, n2type; + MPI_Aint displs[2]; + MPI_Datatype types[2]; + int blockcounts[2]; + double myarray[10]; + int err = 0; + + MPI_Init( &argc, &argv ); + + MPI_Address( &myarray[0], &displs[0] ); + MPI_Address( &myarray[3], &displs[1] ); + blockcounts[0] = 3; + blockcounts[1] = 1; + displs[1] = displs[1] - displs[0]; + displs[0] = 0; + types[0] = MPI_DOUBLE; + types[1] = MPI_DOUBLE; + MPI_Type_struct( 2, blockcounts, displs, types, &ntype ); + MPI_Type_commit( &ntype ); + + MPI_Type_size( ntype, &nsize ); + MPI_Type_lb( ntype, &nlb ); + MPI_Type_ub( ntype, &nub ); + + if (nlb != 0) { + err++; + printf( "LB for struct is %d\n", (int)nlb ); + } + if (nub != 4 * sizeof(double)) { + err++; + printf( "UB for struct is %d != %d\n", (int)nub, + 4 * (int)sizeof(double) ); + } + if (nsize != 4 * sizeof(double)) { + err++; + printf( "Size for struct %d != %d\n", nsize, 4 * (int)sizeof(double) ); + } + + MPI_Type_contiguous( 3, ntype, &n2type ); + MPI_Type_commit( &n2type ); + + MPI_Type_size( n2type, &n2size ); + MPI_Type_lb( n2type, &n2lb ); + MPI_Type_ub( n2type, &n2ub ); + + if (n2size != 3 * nsize) { + err++; + printf( "Size of contig type %d != %d\n", n2size, 3*nsize ); + } + if (n2lb != 0) { + err++; + printf( "LB for contig is %d\n", (int)n2lb ); + } + if (n2ub != 3 * nub) { + err++; + printf( "UB for contig %d != %d\n", (int)n2ub, 3 * (int)nub ); + } + + if (err) { + printf( "Found %d errors\n", err ); + } + else { + printf( " No Errors\n" ); + } + MPI_Type_free( &ntype ); + MPI_Type_free( &n2type ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/dataalign.c b/teshsuite/smpi/mpich-test/pt2pt/dataalign.c new file mode 100644 index 0000000000..a04bd9a55a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/dataalign.c @@ -0,0 
+1,105 @@ +#include "test.h" +#include +#include "mpi.h" +#include + + +int main( int argc, char *argv[]) +{ + struct a { int i; + char c; + } s[10], s1[10]; + int j; + int errs = 0, toterrs; + int rank, size, tsize; + MPI_Aint text; + int blens[2]; + MPI_Aint disps[2]; + MPI_Datatype bases[2]; + MPI_Datatype str, con; + MPI_Status status; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + for( j = 0; j < 10; j ++ ) { + s[j].i = j + rank; + s[j].c = j + rank + 'a'; + } + + blens[0] = blens[1] = 1; + disps[0] = 0; disps[1] = sizeof(int); + bases[0] = MPI_INT; bases[1] = MPI_CHAR; + MPI_Type_struct( 2, blens, disps, bases, &str ); + MPI_Type_commit( &str ); + MPI_Type_contiguous( 10, str, &con ); + MPI_Type_commit( &con ); + MPI_Type_size( con, &tsize ); + MPI_Type_extent( con, &text ); + +#ifdef DEBUG + printf("Size of MPI array is %d, extent is %d\n", tsize, text ); +#endif + +#ifdef DEBUG + { + void * p1, *p2; + p1 = s; + p2 = &(s[10].i); /* This statement may fail on some systems */ + printf("C array starts at %p and ends at %p for a length of %d\n", + s, &(s[9].c), (char *)p2-(char *)p1 ); + } +#endif + MPI_Type_extent( str, &text ); +#ifdef DEBUG + MPI_Type_size( str, &tsize ); + printf("Size of MPI struct is %d, extent is %d\n", tsize, (int)text ); + printf("Size of C struct is %d\n", sizeof(struct a) ); +#endif + if (text != sizeof(struct a)) { + printf( "Extent of struct a (%d) does not match sizeof (%d)\n", + (int)text, (int)sizeof(struct a) ); + errs++; + } + + MPI_Send( s, 1, con, rank ^ 1, 0, MPI_COMM_WORLD ); + MPI_Recv( s1, 1, con, rank ^ 1, 0, MPI_COMM_WORLD, &status ); + + for( j = 0; j < 10; j++ ) { +#ifdef DEBUG + printf("%d Sent: %d %c, Got: %d %c\n", rank, + s[j].i, s[j].c, s1[j].i, s1[j].c ); +#endif + if ( s1[j].i != j + status.MPI_SOURCE ) { + errs++; + printf( "Got s[%d].i = %d; expected %d\n", j, s1[j].i, + j + status.MPI_SOURCE ); + } + if ( s1[j].c != 'a' + j + 
status.MPI_SOURCE ) { + errs++; + /* If the character is not a printing character, + this can generate an file that diff, for example, + believes is a binary file */ + if (isprint( (int)(s1[j].c) )) { + printf( "Got s[%d].c = %c; expected %c\n", j, s1[j].c, + j + status.MPI_SOURCE + 'a'); + } + else { + printf( "Got s[%d].c = %x; expected %c\n", j, (int)s1[j].c, + j + status.MPI_SOURCE + 'a'); + } + } + } + + MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (rank == 0) { + if (toterrs > 0) printf( "Found %d errors\n", toterrs ); + else printf( " No Errors\n" ); + } + MPI_Type_free( &str ); + MPI_Type_free( &con ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/dtypelife.c b/teshsuite/smpi/mpich-test/pt2pt/dtypelife.c new file mode 100644 index 0000000000..baa3d117af --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/dtypelife.c @@ -0,0 +1,84 @@ +/* + * Program to test that datatypes that are freed with MPI_TYPE_FREE + * are not actually deleted until communication that they are a part of + * has completed. 
+ * + */ + +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 +static int src = 1; +static int dest = 0; + +/* Prototypes for picky compilers */ +void Generate_Data ( int *, int ); + +void Generate_Data(buffer, buff_size) +int *buffer; +int buff_size; +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = i+1; +} + +int main( int argc, char **argv) +{ + int rank; /* My Rank (0 or 1) */ + int tag, count, i, errcnt = 0; + MPI_Request handle; + double data[100]; + MPI_Status status; + MPI_Datatype rowtype; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + tag = 2001; + count = 1; + for (i = 0; i < 100; i++) + data[i] = i; + MPI_Type_vector( 10, 1, 10, MPI_DOUBLE, &rowtype ); + MPI_Type_commit( &rowtype ); + if (rank == src) { + MPI_Irecv(data, count, rowtype, dest, tag, MPI_COMM_WORLD, + &handle ); + MPI_Type_free( &rowtype ); + MPI_Recv( (void *)0, 0, MPI_INT, dest, tag+1, + MPI_COMM_WORLD, &status ); + MPI_Wait( &handle, &status ); + /* Check for correct data */ + for (i = 0; i < 10; i++) if (data[i*10] != i*10) { + errcnt++; + fprintf( stderr, + "[%d](rcv row-row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 10.0*i ); + } + + } else if (rank == dest) { + MPI_Send( (void *)0, 0, MPI_INT, src, tag+1, MPI_COMM_WORLD ); + /* By using an Ssend first, we make sure that the Irecv doesn't + match until after the type has been freed */ + MPI_Isend( data, count, rowtype, src, tag, MPI_COMM_WORLD, + &handle ); + MPI_Type_free( &rowtype ); + MPI_Wait( &handle, &status ); + } + + i = errcnt; + MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (errcnt > 0) { + printf( "Found %d errors in the run\n", errcnt ); + } + Test_Waitforall( ); + MPI_Finalize(); + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/dtyperecv.c b/teshsuite/smpi/mpich-test/pt2pt/dtyperecv.c new file mode 100644 index 0000000000..a65e484edd --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/dtyperecv.c @@ -0,0 
+1,61 @@ + +/* +> so, my second question: +> +> 2. what is the output of that MPI program? +> +> i think it should be 42 -1 42 -1. +> +> but compiling with mpich-1.1.0 an running on solaris machines +> (ch_p4) writes : 42 -1 42 0. +> +> thanks, +> Holger +> +> MPI code: +> ------------------------------------------------------- +*/ +#include "test.h" +#include +#include +#include "mpi.h" + +int main( int argc, char **argv ) +{ + int my_rank, i, data[6]; + MPI_Status status; + MPI_Datatype my_type; + int errs = 0, toterrs; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); + + MPI_Type_vector(2, 1, 2, MPI_INT, &my_type); + MPI_Type_commit(&my_type); + + if (my_rank == 0) { + data[0]=42;data[1]=42; + MPI_Send(&(data[0]), 2, MPI_INT, 1, 42, MPI_COMM_WORLD); + } else { + for (i=0; i<6; i++) + data[i] = -1; + MPI_Recv(&(data[0]), 2, my_type, 0, 42, MPI_COMM_WORLD, &status); + /* Check for correct receipt */ + if (data[0] != 42 || data[1] != -1 || data[2] != 42 || data[3] != -1 + || data[4] != -1 || data[5] != -1) { + errs++; + for (i=0; i<4; i++) + printf("%i ",data[i]); + printf("\n"); + } + } + MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (my_rank == 0) { + if (toterrs > 0) printf( "Found %d errors\n", toterrs ); + else printf( " No Errors\n" ); + } + + MPI_Type_free( &my_type ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/dtypes.c b/teshsuite/smpi/mpich-test/pt2pt/dtypes.c new file mode 100644 index 0000000000..efcd4a0625 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/dtypes.c @@ -0,0 +1,343 @@ +/* This file contains code to generate a variety of MPI datatypes for testing + the various MPI routines. + + To simplify the test code, this generates an array of datatypes, buffers with + data and buffers with no data (0 bits) for use in send and receive + routines of various types. + + In addition, this doesn't even test all of the possibilities. 
For example, + there is currently no test of sending more than one item defined with + MPI_Type_contiguous . + + This routine should be extended as time permits. + + Note also that this test assumes that the sending and receive types are + the same. MPI requires only that the type signatures match, which is + a weaker requirement. + + THIS CODE IS FROM mpich/tsuite AND SHOULD BE CHANGED THERE ONLY + */ + +#include "mpi.h" +#include +#include +#include "dtypes.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* Change this to test only the basic, predefined types */ +static int basic_only = 0; + +/* + Arrays types, inbufs, outbufs, and counts are allocated by the + CALLER. n on input is the maximum number; on output, it is the + number defined . + names contains a string identifying the test + + See AllocateForData below for a routine to allocate these arrays. + + We may want to add a routine to call to check that the proper data + has been received. + */ +/* TYPECNT is the number of instances of each type in a test */ +#define TYPECNT 10 +#define SETUPBASICTYPE(mpi,c,name) { int i; c *a; \ +if (cnt > *n) {*n = cnt; return; }\ +types[cnt] = mpi; \ +inbufs[cnt] = (void *)calloc( TYPECNT,sizeof(c) ); \ +outbufs[cnt] = (void *)malloc( sizeof(c) * TYPECNT ); \ +a = (c *)inbufs[cnt]; for (i=0; i *n) {*n = cnt; return; }\ +MPI_Type_contiguous( TYPECNT, mpi, types + cnt );\ +MPI_Type_commit( types + cnt );\ +inbufs[cnt] = (void *)calloc( TYPECNT, sizeof(c) ); \ +outbufs[cnt] = (void *)malloc( sizeof(c) * TYPECNT ); \ +a = (c *)inbufs[cnt]; for (i=0; i *n) {*n = cnt; return; }\ +MPI_Type_vector( TYPECNT, 1, STRIDE, mpi, types + cnt );\ +MPI_Type_commit( types + cnt );\ +inbufs[cnt] = (void *)calloc( sizeof(c) * TYPECNT * STRIDE,1); \ +outbufs[cnt] = (void *)calloc( sizeof(c) * TYPECNT * STRIDE,1); \ +a = (c *)inbufs[cnt]; for (i=0; i *n) {*n = cnt; return; }\ +lens = (int *)malloc( TYPECNT * sizeof(int) ); \ +disp = (int *)malloc( TYPECNT * 
sizeof(int) ); \ +for (i=0; i *n) {*n = cnt; return; }\ +MPI_Type_struct( 3, cnts, disp, b, types + cnt );\ +MPI_Type_commit( types + cnt );\ +inbufs[cnt] = (void *)calloc( sizeof(struct name) * TYPECNT,1); \ +outbufs[cnt] = (void *)calloc( sizeof(struct name) * TYPECNT,1); \ +a = (struct name *)inbufs[cnt]; for (i=0; i 1 */ +#define SETUPSTRUCTTYPEUB(mpi,c,name) { int i; c *a; \ +int blens[2]; MPI_Aint disps[2]; MPI_Datatype mtypes[2]; \ +if (cnt > *n) {*n = cnt; return; }\ +blens[0] = 1; blens[1] = 1; disps[0] = 0; disps[1] = STRIDE * sizeof(c); \ +mtypes[0] = mpi; mtypes[1] = MPI_UB; \ +MPI_Type_struct( 2, blens, disps, mtypes, types + cnt );\ +MPI_Type_commit( types + cnt );\ +inbufs[cnt] = (void *)calloc( sizeof(c) * TYPECNT * STRIDE,1); \ +outbufs[cnt] = (void *)calloc( sizeof(c) * TYPECNT * STRIDE,1); \ +a = (c *)inbufs[cnt]; for (i=0; i= nbasic_types) + MPI_Type_free( types + i ); + } + free( inbufs ); + free( outbufs ); + free( names ); + free( counts ); + free( bytesize ); +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/dtypes.h b/teshsuite/smpi/mpich-test/pt2pt/dtypes.h new file mode 100644 index 0000000000..7aabe29b91 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/dtypes.h @@ -0,0 +1,13 @@ +#ifndef MPITEST_DTYPES +#define MPITEST_DTYPES + +void GenerateData ( MPI_Datatype *, void **, void **, int *, int *, + char **, int * ); +void AllocateForData ( MPI_Datatype **, void ***, void ***, + int **, int **, char ***, int * ); +int CheckData ( void *, void *, int ); +int CheckDataAndPrint ( void *, void *, int, char *, int ); +void FreeDatatypes ( MPI_Datatype *, void **, void **, + int *, int *, char **, int ); +void BasicDatatypesOnly( void ); +#endif diff --git a/teshsuite/smpi/mpich-test/pt2pt/exittest.c b/teshsuite/smpi/mpich-test/pt2pt/exittest.c new file mode 100644 index 0000000000..09f4c92e95 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/exittest.c @@ -0,0 +1,78 @@ + +/* + A report was made that this program hung on a 2 processor LINUX 
cluster. + We haven't seen that problem, but since this does test whether process 0 + waits for the other processes to complete before exiting, it is a good + test to have. + */ +#include +#include "mpi.h" +#define MAX_NUM_PROCS 10 + +int main( int argc, char *argv[]) +{ + int idx; + int num_procs,my_id; + int s; + int r; + MPI_Status status; + + MPI_Init(&argc,&argv); + MPI_Comm_size(MPI_COMM_WORLD,&num_procs); + MPI_Comm_rank(MPI_COMM_WORLD,&my_id); + + if (num_procs < 3) + { + fprintf(stderr, "Need at least 3 processes for this bug\n"); + MPI_Finalize(); + return 0; + } + +#ifdef DEBUG + fprintf(stderr, "%d Starting ....\n", my_id); + fflush(stderr); +#endif + + if (my_id == 1) + { + idx = 2; + s = 333; +#ifdef DEBUG + fprintf(stdout, "%d start send (%d) to %d\n", my_id, s, idx); + fflush(stdout); +#endif + MPI_Send(&s, 1, MPI_INT, idx, 0, MPI_COMM_WORLD); +#ifdef DEBUG + fprintf(stdout, "%d finished send to %d\n", my_id, idx); + fflush(stdout); +#endif + } + + if (my_id == 2) + { + idx = 1; +#ifdef DEBUG + fprintf(stdout, "%d start recv from %d\n", my_id, idx); + fflush(stdout); +#endif + MPI_Recv (&r, 1, MPI_INT, idx, 0, MPI_COMM_WORLD, &status ); +#ifdef DEBUG + fprintf(stdout, "%d finished recv (%d) from %d\n", my_id, r, idx); + fflush(stdout); +#endif + } + +#ifdef DBUG + fprintf(stdout, "%d Done ....\n",my_id); + fflush(stdout); +#endif + MPI_Barrier( MPI_COMM_WORLD ); + if (my_id == 0) { + /* If we reach here, we're done */ + printf( " No Errors\n" ); + } + + MPI_Finalize(); + return 0; +} + diff --git a/teshsuite/smpi/mpich-test/pt2pt/fairness/README b/teshsuite/smpi/mpich-test/pt2pt/fairness/README new file mode 100644 index 0000000000..61bb15e73d --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fairness/README @@ -0,0 +1,3 @@ +These are programs to test the fairness of mpi. +On some underlying devices, you don't want to know the +results... 
diff --git a/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness-euih.c b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness-euih.c new file mode 100644 index 0000000000..332cc05237 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness-euih.c @@ -0,0 +1,80 @@ +/* + * Program to test the fairness of the MPI implementation over source. + * All of the programs wait on a barrier, then node 0 starts receiving + * small messages using ANY_SOURCE from all of the other nodes who + * send as much as they can. Node 0 collects statistics on the rate + * messages are received from each source. (Every N messages it + * prints out what percentage of the last N received were from each + * source. It does this for times. + * + * This program should be run with at least 8 nodes just to be (un)fair + * + * Patrick Bridges * bridges@mcs.anl.gov * patrick@CS.MsState.Edu + */ + +#include +#include "test.h" +#define MPG 200 +#define MSZ 1 + +int +main(argc, argv) +int argc; +char **argv; +{ + int rank, size, an_int[MSZ]; + int dummy[4], d1, d2; + char *Current_Test = NULL; + int *num_array, i, j; + int dontcare, allgrp; + + /* Initialize the environment */ + mp_environ(&size,&rank); + + /* Get allgrp from the task */ + d1 = 4; d2 = 3; + mp_task_query(dummy,&d1,&d2); + allgrp = dummy[3]; + dontcare = dummy[0]; + + Test_Init("fairness", rank); + + /* Wait for everyone to be ready */ + if (rank == 0) { + /* Initialize an array to keep statistics in */ + num_array = (int *)malloc((size - 1) * sizeof(int)); + + mp_sync(&allgrp); + + for (i = 0; i < size - 1; i++) { + /* Clear the buffer of counts */ + memset(num_array, 0, (size - 1) * sizeof(int)); + for (j = 0; j < MPG; j++) { + d1 = sizeof(int)*MSZ; + d2 = 2000; + mp_brecv(an_int, &d1, &dontcare, &d2); + num_array[d1 - 1]++; + } + Test_Printf("Statistics for message group %d:\n", i + 1); + for (j = 0; j < size -1 ; j++) + Test_Printf("%f%% of last %d messages received \ +were from source %d.\n", + num_array[j]*100.0/MPG, MPG, 
j + 1); + } + free(num_array); + (void)Summarize_Test_Results(); + } else { + mp_sync(&allgrp); + for (i = 0; i < MPG; i++) { + int d3, d4; + + d1 = MSZ*sizeof(int); + d2 = 0; + d3 = 2000; + d4 = 0; + mp_bend(an_int, &d1, &d2, &d3, &d4); + } + } + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness.c b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness.c new file mode 100644 index 0000000000..804162e9b0 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness.c @@ -0,0 +1,72 @@ +/* + * Program to test the fairness of the MPI implementation over source. + * All of the programs wait on a barrier, then node 0 starts receiving + * small messages using ANY_SOURCE from all of the other nodes who + * send as much as they can. Node 0 collects statistics on the rate + * messages are received from each source. (Every N messages it + * prints out what percentage of the last N received were from each + * source. It does this for times. + * + * This program should be run with at least 8 nodes just to be (un)fair + * + * Patrick Bridges * bridges@mcs.anl.gov * patrick@CS.MsState.Edu + */ + +#include +#include "test.h" +#include "mpi.h" +#define MPG 200 +#define MSZ 1 +int main(argc, argv) +int argc; +char **argv; +{ + int rank, size, an_int[MSZ]; + char *Current_Test = NULL; + int *num_array, i, j; + MPI_Status Status; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + Test_Init("fairness", rank); + + /* Wait for everyone to be ready */ + + if (rank == 0) { + /* Initialize an array to keep statistics in */ + num_array = (int *)malloc((size - 1) * sizeof(int)); + + MPI_Barrier(MPI_COMM_WORLD); + + for (i = 0; i < size - 1; i++) { + /* Clear the buffer of counts */ + memset(num_array, 0, (size - 1) * sizeof(int)); + for (j = 0; j < MPG; j++) { + MPI_Recv(an_int, MSZ, MPI_INT, MPI_ANY_SOURCE, 2000, + MPI_COMM_WORLD, &Status); + num_array[Status.MPI_SOURCE - 1]++; + } + 
Test_Printf("Statistics for message group %d:\n", i + 1); + for (j = 0; j < size -1 ; j++) + Test_Printf("%f%% of last %d messages received \ +were from source %d.\n", + num_array[j]*100.0/MPG, MPG, j + 1); + } + free(num_array); + (void)Summarize_Test_Results(); + MPI_Finalize(); + + } else { + MPI_Barrier(MPI_COMM_WORLD); + for (i = 0; i < MPG; i++) { + MPI_Send(an_int, MSZ, MPI_INT, 0, 2000, MPI_COMM_WORLD); + } + MPI_Finalize(); + } + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2.c b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2.c new file mode 100644 index 0000000000..5e295d528b --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2.c @@ -0,0 +1,87 @@ +/* + * This program should be run with at least 8 nodes just to (un)fair + * + * Patrick Bridges * bridges@mcs.anl.gov * patrick@CS.MsState.Edu + */ + +#include +#include "test.h" +#include "mpi.h" + +int main(argc, argv) +int argc; +char **argv; +{ + int rank, size, an_int; + char *Current_Test = NULL; + int *num_array, i, j; + MPI_Status Status; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + Test_Init("fairness2", rank); + + /* Wait for everyone to be ready */ + + if (rank == 0) { + /* Initialize an array to keep statistics in */ + num_array = (int *)malloc((size - 1) * sizeof(int)); + + /* Make sure everyone is ready */ + MPI_Barrier(MPI_COMM_WORLD); + + /* Wait for all of the senders to send all of their messages */ + Test_Message("Waiting for all of the senders to say they're through."); + for (i = 0 ; i < size - 1; i++) + MPI_Recv(&an_int, 1, MPI_INT, MPI_ANY_SOURCE, 5000, + MPI_COMM_WORLD, &Status); + + Test_Message("Starting to dequeue messages..."); + /* Now start dequeuing messages */ + for (i = 0; i < size - 1; i++) { + /* Clear the buffer of counts */ + memset(num_array, 0, (size - 1) * sizeof(int)); + for (j = 0; j < 200; j++) { + MPI_Recv(&an_int, 1, MPI_INT, MPI_ANY_SOURCE, 
2000, + MPI_COMM_WORLD, &Status); + num_array[Status.MPI_SOURCE - 1]++; + } + Test_Printf("Statistics for message group %d:\n", i + 1); + for (j = 0; j < size -1 ; j++) + Test_Printf("%f%% of last 200 messages received \ +were from source %d.\n", + num_array[j]/2.0, j + 1); + } + + free(num_array); + (void)Summarize_Test_Results(); + MPI_Finalize(); + + } else { + MPI_Request ReqArray[200]; + MPI_Status StatArray[200]; + + MPI_Barrier(MPI_COMM_WORLD); + an_int = rank; + + Test_Message("About to send all of the little messages."); + /* Send 200 tiny messages - nonblocking so we don't deadlock */ + for (i = 0; i < 200; i++) + MPI_Isend(&an_int, 1, MPI_INT, 0, 2000, MPI_COMM_WORLD, + &ReqArray[i]); + + Test_Message("Sending the final message."); + /* Tell receiver we've sent all of our messages */ + MPI_Send(&an_int, 1, MPI_INT, 0, 5000, MPI_COMM_WORLD); + Test_Message("Waiting on the nonblocking requests."); + MPI_Waitall(200,ReqArray,StatArray); + (void)Summarize_Test_Results(); + MPI_Finalize(); + } + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2m.c b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2m.c new file mode 100644 index 0000000000..706a95d564 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairness2m.c @@ -0,0 +1,99 @@ +/* + * This program should be run with at least 8 nodes just to (un)fair + * + * Patrick Bridges * bridges@mcs.anl.gov * patrick@CS.MsState.Edu + */ + +#include +#include "test.h" +#include "mpi.h" +#include "mpe.h" + +int main(argc, argv) +int argc; +char **argv; +{ + int rank, size, an_int; + char *Current_Test = NULL; + int *num_array, i, j; + MPI_Status Status; + + MPI_Init(&argc, &argv); + MPE_Init_log(); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + Test_Init("fairness2m", rank); + + /* Wait for everyone to be ready */ + + if (rank == 0) { + /* Initialize an array to keep statistics in */ + num_array = (int *)malloc((size - 1) * sizeof(int)); + + 
/* Make sure everyone is ready */ + MPI_Barrier(MPI_COMM_WORLD); + + /* Wait for all of the senders to send all of their messages */ + Test_Message("Waiting for all of the senders to say they're through."); + for (i = 0 ; i < size - 1; i++) { + MPI_Recv(&an_int, 1, MPI_INT, MPI_ANY_SOURCE, 5000, + MPI_COMM_WORLD, &Status); + MPE_Log_receive(Status.MPI_SOURCE, 5000, sizeof(int)); + } + Test_Message("Starting to dequeue messages..."); + /* Now start dequeuing messages */ + for (i = 0; i < size - 1; i++) { + /* Clear the buffer of counts */ + memset(num_array, 0, (size - 1) * sizeof(int)); + for (j = 0; j < 200; j++) { + MPI_Recv(&an_int, 1, MPI_INT, MPI_ANY_SOURCE, 2000, + MPI_COMM_WORLD, &Status); + MPE_Log_receive(Status.MPI_SOURCE, 2000, sizeof(int)); + num_array[Status.MPI_SOURCE - 1]++; + } + Test_Printf("Statistics for message group %d:\n", i + 1); + for (j = 0; j < size -1 ; j++) + Test_Printf("%f%% of last 200 messages received \ +were from source %d.\n", + num_array[j]/2.0, j + 1); + } + + free(num_array); + (void)Summarize_Test_Results(); + + MPE_Finish_log("/home/bridges/fairness2.log"); + MPI_Finalize(); + + } else { + MPI_Request ReqArray[200]; + MPI_Status StatArray[200]; + + MPI_Barrier(MPI_COMM_WORLD); + an_int = rank; + + Test_Message("About to send all of the little messages."); + /* Send 200 tiny messages - nonblocking so we don't deadlock */ + for (i = 0; i < 200; i++) { + MPI_Isend(&an_int, 1, MPI_INT, 0, 2000, MPI_COMM_WORLD, + &ReqArray[i]); + MPE_Log_send(0, 2000, sizeof(int)); + } + Test_Message("Sending the final message."); + /* Tell receiver we've sent all of our messages */ + MPI_Send(&an_int, 1, MPI_INT, 0, 5000, MPI_COMM_WORLD); + MPE_Log_send(0, 5000, sizeof(int)); + + Test_Message("Waiting on the nonblocking requests."); + MPI_Waitall(200,ReqArray,StatArray); + (void)Summarize_Test_Results(); + + MPE_Finish_log("/home/bridges/fairness2.log"); + MPI_Finalize(); + } + + return 0; +} + + + + diff --git 
a/teshsuite/smpi/mpich-test/pt2pt/fairness/fairnessm.c b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairnessm.c new file mode 100644 index 0000000000..709d08a894 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fairness/fairnessm.c @@ -0,0 +1,79 @@ +/* + * Program to test the fairness of the MPI implementation over source. + * All of the programs wait on a barrier, then node 0 starts receiving + * small messages using ANY_SOURCE from all of the other nodes who + * send as much as they can. Node 0 collects statistics on the rate + * messages are received from each source. (Every N messages it + * prints out what percentage of the last N received were from each + * source. It does this for times. + * + * This program should be run with at least 8 nodes just to be (un)fair + * + * Patrick Bridges * bridges@mcs.anl.gov * patrick@CS.MsState.Edu + */ + +#include +#include "test.h" +#include "mpi.h" +#include "mpe.h" +#define MPG 25 +#define MSZ 1 + +int main(argc, argv) +int argc; +char **argv; +{ + int rank, size, an_int[MSZ]; + char *Current_Test = NULL; + int *num_array, i, j; + MPI_Status Status; + + MPI_Init(&argc, &argv); + MPE_Init_log(); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + Test_Init("fairnessm", rank); + + /* Wait for everyone to be ready */ + + if (rank == 0) { + /* Initialize an array to keep statistics in */ + num_array = (int *)malloc((size - 1) * sizeof(int)); + MPID_SetRecvDebugFlag(1); + MPI_Barrier(MPI_COMM_WORLD); + + for (i = 0; i < size - 1; i++) { + /* Clear the buffer of counts */ + memset(num_array, 0, (size - 1) * sizeof(int)); + for (j = 0; j < MPG; j++) { + MPI_Recv(an_int, MSZ, MPI_INT, MPI_ANY_SOURCE, 2000, + MPI_COMM_WORLD, &Status); + MPE_Log_receive(Status.MPI_SOURCE, 2000, MSZ * sizeof(int)); + num_array[Status.MPI_SOURCE - 1]++; + } + Test_Printf("Statistics for message group %d:\n", i + 1); + for (j = 0; j < size -1 ; j++) + Test_Printf("%f%% of last %d messages received \ +were from source 
%d.\n", + num_array[j]/2.0, MPG, j + 1); + } + free(num_array); + (void)Summarize_Test_Results(); + MPE_Finish_log("/home/bridges/fairness.log"); + MPI_Finalize(); + + } else { + MPI_Barrier(MPI_COMM_WORLD); + for (i = 0; i < MPG; i++) { + MPI_Send(an_int, MSZ, MPI_INT, 0, 2000, MPI_COMM_WORLD); + MPE_Log_send(0, 2000, MSZ * sizeof(int)); + } + MPE_Finish_log("/home/bridges/fairness.log"); + MPI_Finalize(); + } + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/fifth.c b/teshsuite/smpi/mpich-test/pt2pt/fifth.c new file mode 100644 index 0000000000..d06c907405 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/fifth.c @@ -0,0 +1,54 @@ +#include +#include "mpi.h" +#include "test.h" + +int main( int argc, char **argv ) +{ + int rank, np, data = 777; + MPI_Request handle[4]; + MPI_Status status[4]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &np ); + + if (np < 4) { + MPI_Finalize(); + printf( "4 processors or more required, %d done\n", rank ); + return(1); + } + + if (rank == 0) { + MPI_Isend( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle[0] ); + MPI_Irecv( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle[1] ); + MPI_Isend( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle[2] ); + MPI_Irecv( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle[3] ); + MPI_Waitall ( 4, handle, status ); + } + else if (rank == 1) { + MPI_Irecv( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle[0] ); + MPI_Isend( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle[1] ); + MPI_Isend( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle[2] ); + MPI_Irecv( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle[3] ); + MPI_Waitall ( 4, handle, status ); + } + else if (rank == 2) { + MPI_Isend( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle[0] ); + MPI_Irecv( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle[1] ); + MPI_Irecv( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle[2] ); + MPI_Isend( &data, 1, MPI_INT, 0, 0, 
MPI_COMM_WORLD, &handle[3] ); + MPI_Waitall ( 4, handle, status ); + } + else if (rank == 3) { + MPI_Irecv( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle[0] ); + MPI_Isend( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle[1] ); + MPI_Irecv( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle[2] ); + MPI_Isend( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle[3] ); + MPI_Waitall ( 4, handle, status ); + } + Test_Waitforall( ); + MPI_Finalize(); + return(0); +} + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/flood.c b/teshsuite/smpi/mpich-test/pt2pt/flood.c new file mode 100644 index 0000000000..ae25bcea86 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/flood.c @@ -0,0 +1,254 @@ +#include "mpi.h" +#include +#include +#include "test.h" + +#define MAX_REQ 16 +#define DEF_MAX_MSG 2000000 +/* + This program tests a flood of data for both unexpected and expected messages to test any internal message fragmentation or protocol shifts + + An optional argument can change the maximum message size. 
For example, use + flood 9000000 + to stress the memory system (the size is the number of ints, not bytes) + */ + +void SetupData ( int *, int, int ); +void SetupRdata ( int *, int ); +int CheckData ( int *, int, int, MPI_Status * ); + +#ifdef VERBOSE +static int verbose = 1; +#else +static int verbose = 0; +#endif + +int main( int argc, char **argv ) +{ + MPI_Comm comm; + MPI_Request r[MAX_REQ]; + MPI_Status s[MAX_REQ]; + int msgsize, maxmsg, root, i, size, rank, err = 0, toterr; + int max_msg_size = DEF_MAX_MSG; + int *sbuf, *rbuf; + + MPI_Init( &argc, &argv ); + + comm = MPI_COMM_WORLD; + + MPI_Comm_size( comm, &size ); + MPI_Comm_rank( comm, &rank ); + + if (size < 2) { + printf( "This test requires at least 2 processors\n" ); + MPI_Abort( comm, 1 ); + } + + /* Check for a max message argument */ + if (rank == 0) { + if (argc > 1) { + max_msg_size = atoi( argv[1] ); + /* Correct if unrecognized argument */ + if (max_msg_size <= 0) max_msg_size = DEF_MAX_MSG; + } + } + MPI_Bcast( &max_msg_size, 1, MPI_INT, 0, MPI_COMM_WORLD ); + + /* First, try large blocking sends to root */ + root = 0; + + msgsize = 128; + maxmsg = max_msg_size; + if (rank == root && verbose) printf( "Blocking sends: " ); + while (msgsize < maxmsg) { + if (rank == root) { + if (verbose) { printf( "%d ", msgsize ); fflush( stdout ); } + rbuf = (int *)malloc( msgsize * sizeof(int) ); + if (!rbuf) { + printf( "Could not allocate %d words\n", msgsize ); + MPI_Abort( comm, 1 ); + } + for (i=0; i +#include +#include "test.h" + +#define MAX_REQ 32 +#define MAX_MSG_CNT 32000 +#define MAX_MSG 2048 +/* + This program tests a flood of data of short messages to test handling + of both incoming messages and internal message queues + */ + +void SetupData ( int *, int, int ); +void SetupRdata ( int *, int ); +int CheckData ( int *, int, int, MPI_Status * ); + +#ifdef VERBOSE +static int verbose = 1; +#else +static int verbose = 0; +#endif + + +int main( int argc, char **argv ) +{ + MPI_Comm comm; + 
MPI_Request r[MAX_REQ]; + MPI_Status s[MAX_REQ]; + int msgsize, maxmsg, root, i, j, size, rank, err = 0, msgcnt, toterr; + int *sbuf, *rbuf; + + MPI_Init( &argc, &argv ); + + comm = MPI_COMM_WORLD; + + MPI_Comm_size( comm, &size ); + MPI_Comm_rank( comm, &rank ); + + if (size < 2) { + printf( "This test requires at least 2 processors\n" ); + MPI_Abort( comm, 1 ); + } + + /* First, try large blocking sends to root */ + root = 0; + + maxmsg = MAX_MSG; + msgsize = 128; + msgcnt = MAX_MSG_CNT; + if (rank == root && verbose) printf( "Blocking sends: " ); + while (msgsize <= maxmsg) { + if (rank == root) { + if (verbose) { printf( "%d ", msgsize ); fflush( stdout ); } + rbuf = (int *)malloc( msgsize * sizeof(int) ); + if (!rbuf) { + printf( "Could not allocate %d words\n", msgsize ); + MPI_Abort( comm, 1 ); + } + for (i=0; i +#include "mpi.h" +#include "test.h" + +int main( int argc, char **argv ) +{ + int rank, np, data = 777; + MPI_Request handle; + MPI_Status status; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &np ); + + if (np < 4) { + MPI_Finalize(); + printf( "4 processors or more required, %d done\n", rank ); + return(1); + } + + if (rank == 0) { + MPI_Isend( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Irecv( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Isend( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Irecv( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + } + else if (rank == 1) { + MPI_Irecv( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Isend( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Isend( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Irecv( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle ); 
+ MPI_Wait( &handle, &status ); + } + else if (rank == 2) { + MPI_Isend( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Irecv( &data, 1, MPI_INT, 3, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Irecv( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Isend( &data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + } + else if (rank == 3) { + MPI_Irecv( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Isend( &data, 1, MPI_INT, 2, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Irecv( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + MPI_Isend( &data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); + } + Test_Waitforall( ); + MPI_Finalize(); + return(0); +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/gcomm.c b/teshsuite/smpi/mpich-test/pt2pt/gcomm.c new file mode 100644 index 0000000000..3bbb3dd844 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/gcomm.c @@ -0,0 +1,78 @@ +/* + This file generates a few communicators for use in the test suite + + THIS CODE IS FROM mpich/tsuite AND SHOULD BE CHANGED THERE ONLY + */ + +#include "mpi.h" + +#include "gcomm.h" + +void MakeComms( comms, maxn, n, make_intercomm ) +MPI_Comm *comms; +int *n, maxn, make_intercomm; +{ +int cnt = 0; +int rank, size; +int dims[2]; +int periods[2], range[1][3]; +MPI_Group group, newgroup; + +MPI_Comm_rank( MPI_COMM_WORLD, &rank ); +MPI_Comm_size( MPI_COMM_WORLD, &size ); + +comms[cnt++] = MPI_COMM_WORLD; +if (cnt == maxn) {*n = cnt; return; } + +/* Construct a communicator with the ranks reversed */ +MPI_Comm_group( MPI_COMM_WORLD, &group ); +range[0][0] = size-1; +range[0][1] = 0; +range[0][2] = -1; +MPI_Group_range_incl( group, 1, range, &newgroup ); +MPI_Comm_create( MPI_COMM_WORLD, newgroup, &comms[cnt] ); +cnt++; +//MPI_Group_free( 
&group ); +//MPI_Group_free( &newgroup ); +if (cnt == maxn) {*n = cnt; return; } + +if (size > 3) { + /* Divide into odd and even processes */ + MPI_Comm_split( MPI_COMM_WORLD, rank & 0x1, rank, comms + cnt ); + cnt ++; + + /* Use the cartesian constructors */ + dims[0] = 0; dims[1] = 0; + MPI_Dims_create( size, 2, dims ); + periods[0] = 0; periods[1] = 0; + MPI_Cart_create( MPI_COMM_WORLD, 2, dims, periods, 0, comms + cnt ); + cnt ++; + if (cnt == maxn) {*n = cnt; return; } + + /* Create an intercommunicator (point-to-point operations only) + Note that in this case, codes need to use MPI_Comm_remote_size to + (added to MPI_Comm_size) to get the size of the full group */ + if (make_intercomm) { + /* The remote_leader is rank 1 in MPI_COMM_WORLD if we are even + and 0 if we are odd (the remote_leader rank is relative to the + peer communicator) + */ + MPI_Intercomm_create( comms[2], 0, MPI_COMM_WORLD, !(rank&0x1), + 37, comms + cnt ); + cnt ++; + if (cnt == maxn) {*n = cnt; return; } + } + } +*n = cnt; +} + +void FreeComms( comms, n ) +MPI_Comm *comms; +int n; +{ +int i; +for (i=1; i + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +typedef struct { + int len; + double data[1000]; + } buf_t; + +int main( int argc, char **argv ) +{ + int err = 0, toterr; + MPI_Datatype contig1, varstruct1, oldtypes[2], varstruct2; + MPI_Aint displs[2]; + int blens[2]; + MPI_Comm comm; + MPI_Status status; + int world_rank; + int rank, size, partner, count, i; + int send_ibuf[4], recv_ibuf[4]; + buf_t send_buf, recv_buf; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + +/* Form the datatypes */ + MPI_Type_contiguous( 4, MPI_INT, &contig1 ); + MPI_Type_commit( &contig1 ); + blens[0] = 1; + blens[1] = 1000; + oldtypes[0] = MPI_INT; + oldtypes[1] = MPI_DOUBLE; +/* Note that the displacement for the data is probably double aligned */ + MPI_Address( &send_buf.len, &displs[0] ); + MPI_Address( &send_buf.data[0], &displs[1] ); +/* Make 
relative */ + displs[1] = displs[1] - displs[0]; + displs[0] = 0; + MPI_Type_struct( 2, blens, displs, oldtypes, &varstruct1 ); + MPI_Type_commit( &varstruct1 ); + + comm = MPI_COMM_WORLD; + + MPI_Comm_size( comm, &size ); + MPI_Comm_rank( comm, &rank ); + + if (size < 2) { + fprintf( stderr, "This test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + if (rank == size - 1) { + partner = 0; + /* Send contiguous data */ + for (i=0; i<4; i++) + send_ibuf[i] = i; + MPI_Send( send_ibuf, 1, contig1, partner, 0, comm ); + + /* Send partial structure */ + blens[1] = 23; + MPI_Type_struct( 2, blens, displs, oldtypes, &varstruct2 ); + MPI_Type_commit( &varstruct2 ); + + MPI_Send( &send_buf, 1, varstruct2, partner, 1, comm ); + MPI_Type_free( &varstruct2 ); + + /* Send NO data */ + MPI_Send( MPI_BOTTOM, 0, MPI_INT, partner, 2, comm ); + } + else if (rank == 0) { + partner = size - 1; + MPI_Recv( recv_ibuf, 1, contig1, partner, 0, comm, &status ); + MPI_Get_count( &status, MPI_INT, &count ); + if (count != 4) { + err++; + fprintf( stderr, + "Wrong count for contig recv MPI_INT; got %d expected %d\n", + count, 4 ); + } + MPI_Get_count( &status, contig1, &count ); + if (count != 1) { + err++; + fprintf( stderr, + "Wrong count for contig recv (contig); got %d expected %d\n", + count, 1 ); + } + MPI_Get_elements( &status, contig1, &count ); + if (count != 4) { + err++; + fprintf( stderr, + "Wrong elements for contig recv contig; got %d expected %d\n", + count, 4 ); + } + + /* Now, try the partial structure */ + MPI_Recv( &recv_buf, 1, varstruct1, partner, 1, comm, &status ); + MPI_Get_elements( &status, varstruct1, &count ); + if (count != 24) { + err++; + fprintf( stderr, + "Wrong number of elements for struct recv; got %d expected %d\n", + count, 24 ); + } + + { + /* Receive nothing using a 0-sized type */ + MPI_Datatype ztype; + MPI_Type_contiguous( 0, MPI_INT, &ztype ); + MPI_Type_commit( &ztype ); + MPI_Recv( &recv_buf, 10, ztype, partner, 2, 
comm, &status ); + /* Current clarification requires 0 for the result */ + MPI_Get_elements( &status, ztype, &count ); + if (count != 0) { + err++; + fprintf( stderr, + "Wrong number of elements for 0-size datatype; got %d\n", + count ); + } + MPI_Get_count( &status, ztype, &count ); + if (count != 0) { + err++; + fprintf( stderr, + "Wrong count for 0-size datatype; got %d\n", + count ); + } + MPI_Type_free( &ztype ); + } + } + MPI_Type_free( &contig1 ); + MPI_Type_free( &varstruct1 ); + + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) + printf( " No Errors\n" ); + else + printf( "Found %d errors in MPI_Get_elements\n", toterr ); + } + MPI_Finalize( ); + return toterr; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/hindexed.c b/teshsuite/smpi/mpich-test/pt2pt/hindexed.c new file mode 100644 index 0000000000..29b4612075 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/hindexed.c @@ -0,0 +1,106 @@ +#include "mpi.h" +#include +/* stdlib.h needed for malloc declaration */ +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + * This file tests MPI_Type_hindexed by describing parts of a triangular + * matrix, stored in a square matrix, and sending sending it. 
+ * + * The matrix is stored in column-major, and the tests use + * MPI_Type_vector or MPI_Type_struct to define the elements that are sent + */ + +int main( int argc, char **argv ) +{ + MPI_Datatype rowtype, mattype; + int *sbuf, *rbuf; + int rank, mat_n; + static int blens[2] = { 1, 1 }; + MPI_Datatype types[2] = { MPI_INT, MPI_UB }; + int *mat_blens, i ; + MPI_Aint *mat_displs; + MPI_Aint displs[2]; + MPI_Status status; + int err, row, col; + + MPI_Init( &argc, &argv ); + + err = 0; + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + mat_n = 10; + sbuf = (int *) malloc( mat_n * mat_n * sizeof(int) ); + rbuf = (int *) malloc( mat_n * mat_n * sizeof(int) ); + if (!sbuf || !rbuf) { + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* Define a row type based on a strided struct type */ + displs[0] = 0; + displs[1] = mat_n*sizeof(int); + MPI_Type_struct( 2, blens, displs, types, &rowtype ); + MPI_Type_commit( &rowtype ); + + /* Define an hindexed type that defines all of the rows of the + triangular part of sbuf */ + + mat_blens = (int *)malloc( mat_n * sizeof(int) ); + mat_displs = (MPI_Aint *)malloc( mat_n * sizeof(MPI_Aint) ); + for (i=0; i +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + This is a very simple MPI program which can be used to check things + like the behavior of the ADI or heterogeneous code + */ +int main( int argc, char **argv ) +{ +char msg[10]; +char smsg[10]; +int rank, size; +int src, dest; +int count; +MPI_Status status; + +MPI_Init( &argc, &argv ); +MPI_Comm_size( MPI_COMM_WORLD, &size ); +MPI_Comm_rank( MPI_COMM_WORLD, &rank ); +if (size != 2) { + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } +src = 1; +dest = 0; +if (rank == src) { + strcpy( msg, "MPICH!" 
); + MPI_Send( msg, 7, MPI_CHAR, dest, 10, MPI_COMM_WORLD ); + } +else { + MPI_Recv( smsg, 10, MPI_CHAR, src, 10, MPI_COMM_WORLD, &status ); + if (status.MPI_TAG != 10) { + fprintf( stderr, "Error in status tag!\n" ); + } + if (status.MPI_SOURCE != src) { + fprintf( stderr, "Error in status source!\n" ); + } + MPI_Get_count( &status, MPI_CHAR, &count ); + if (count != 7) { + fprintf( stderr, "Error in count, got %d expected 7\n", count ); + } + if (strcmp( smsg, "MPICH!" )) { + fprintf( stderr, "Got wrong msg (%s), expected \"MPICH!\"\n", smsg ); + } + } + +MPI_Finalize(); +return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/hvec.c b/teshsuite/smpi/mpich-test/pt2pt/hvec.c new file mode 100644 index 0000000000..902ba65458 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/hvec.c @@ -0,0 +1,127 @@ +#include "mpi.h" +#include "test.h" +#include + +/* The original version of this was sent by + empierce@tribble.llnl.gov (Elsie M. Pierce) + I've modified it to fit the automated tests requirements + */ +/* Prototypes for picky compilers */ +int iinit ( int *, int, int ); +int ilist1 ( int *, int, int, int ); +void Build_vect ( MPI_Datatype * ); +void Build_ctg ( int, MPI_Datatype *, MPI_Datatype * ); +void Get_d5 ( int ); + +int iinit(a, value, l) +int *a, value, l; +{ + int i; + + for (i=0; i +#include "mpi.h" +#include "test.h" +/* #define SHOWMSG */ + +#ifdef VERBOSE +static int verbose = 1; +#else +static int verbose = 0; +#endif +int main( int argc, char **argv ) +{ + int rank, size, to, from, tag, count, i; + int src, dest; + int st_source, st_tag, st_count; + int errcnt = 0; + MPI_Request handle; + MPI_Status status; + double data[100]; + MPI_Datatype rowtype; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + src = size - 1; + dest = 0; +/* + dest = size - 1; + src = 0; + */ + MPI_Type_vector( 10, 1, 10, MPI_DOUBLE, &rowtype ); + MPI_Type_commit( &rowtype ); + /* First test: send a row */ + 
if (rank == src) { + to = dest; + count = 1; + tag = 2001; + for (i = 0; i < 100; i++) + data[i] = i; + /* Send a row */ + MPI_Send( data, count, rowtype, to, tag, MPI_COMM_WORLD ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + + if (rank == dest) { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + MPI_Recv(data, count, MPI_DOUBLE, from, tag, MPI_COMM_WORLD, + &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i]); printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i] != 10*i) { + errcnt++; + fprintf( stderr, + "[%d](rcv double) %d'th element = %f, should be %f\n", + rank, i, data[i], 10.0*i ); + } + } + + /* Second test: receive a column into row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + for (i = 0; i < 100; i++) + data[i] = i; + /* Send a row */ + MPI_Send( data, count, MPI_DOUBLE, to, tag, MPI_COMM_WORLD ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 1; + from = MPI_ANY_SOURCE; + MPI_Recv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if 
(data[i*10] != i) { + errcnt++; + fprintf( stderr, + "[%d](rcv row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 1.0*i ); + } + } + + /* Third test: send AND receive a row */ + if (rank == src) + { + to = dest; + count = 1; + tag = 2001; + for (i = 0; i < 100; i++) + data[i] = i; + /* Send a row */ + MPI_Send( data, count, rowtype, to, tag, MPI_COMM_WORLD ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 1; + from = MPI_ANY_SOURCE; + MPI_Recv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i*10] != i*10) { + errcnt++; + fprintf( stderr, + "[%d](rcv row-row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 10.0*i ); + } + } + + /* Second Set of Tests: Use Isend and Irecv instead of Send and Recv */ + /* First test: send a row */ + if (rank == src) + { + to = dest; + count = 1; + tag = 2001; + for (i = 0; i < 100; i++) + data[i] = i; + /* Send a row */ + MPI_Isend( data, count, rowtype, to, tag, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + MPI_Irecv(data, count, MPI_DOUBLE, from, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, 
&st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i]); printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i] != 10*i) { + errcnt++; + fprintf( stderr, + "[%d](ircv double) %d'th element = %f, should be %f\n", + rank, i, data[i], 10.0*i ); + } + } + + /* Second test: receive a column into row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + for (i = 0; i < 100; i++) + data[i] = i; + /* Send a row */ + MPI_Isend( data, count, MPI_DOUBLE, to, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 1; + from = MPI_ANY_SOURCE; + MPI_Irecv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i*10] != i) { + errcnt++; + fprintf( stderr, + "[%d](ircv row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 1.0*i ); + } + } + + /* Third test: send AND receive a row */ + if (rank == src) + { + to = dest; + count = 1; + tag = 2001; + for (i = 0; i < 100; i++) + data[i] = i; + /* Send a row */ + MPI_Isend( data, count, rowtype, to, tag, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; 
i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 1; + from = MPI_ANY_SOURCE; + MPI_Irecv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i*10] != i*10) { + errcnt++; + fprintf( stderr, + "[%d](ircv row-row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 10.0*i ); + } + } + + i = errcnt; + MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (errcnt > 0) { + printf( "Found %d errors in the run \n", errcnt ); + } + MPI_Type_free( &rowtype ); + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/hvectest2.c b/teshsuite/smpi/mpich-test/pt2pt/hvectest2.c new file mode 100644 index 0000000000..0414b3d334 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/hvectest2.c @@ -0,0 +1,366 @@ +/* + hvectest2 - test program that sends an array of floats from the first + process of a group to the last, using send and recv and the + struct datatype for variable length vectors +*/ + +#include "mpi.h" +#include +#include +#include "test.h" +/* #define SHOWMSG */ + +/* Prototypes for picky compilers */ +void ClearArray ( double *, int, double ); +void SetArray ( double *, int ); + +#ifdef VERBOSE +static int verbose = 1; +#else +static int verbose = 0; +#endif + +void ClearArray( a, n, v ) +double *a, v; +int n; +{ + int i; + for (i=0; i 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) { + dest = size - 1; + src = 0; + } + else { + src = size - 1; + dest = 0; + } + + 
displs[0] = 0; + displs[1] = 10*sizeof(double); +/* + blens[0] = 1; + blens[1] = 1; + types[0] = MPI_DOUBLE; + types[1] = MPI_UB; + */ + MPI_Type_struct( 2, blens, displs, types, &rowtype ); + MPI_Type_commit( &rowtype ); + /* First test: send a row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + SetArray( data, 100 ); + /* Send a row */ + MPI_Send( data, count, rowtype, to, tag, MPI_COMM_WORLD ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + + ClearArray( data, 100, -1.0 ); + MPI_Recv(data, count, MPI_DOUBLE, from, tag, MPI_COMM_WORLD, + &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i]); printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i] != 10*i) { + errcnt++; + fprintf( stderr, + "[%d](rcv double) %d'th element = %f, should be %f\n", + rank, i, data[i], 10.0*i ); + } + } + + /* Second test: receive a column into row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + SetArray( data, 100 ); + /* Send a row */ + /* MPE_Print_datatype_pack_action( stdout, count, + MPI_DOUBLE, 0, 0 ); */ + MPI_Send( data, count, MPI_DOUBLE, to, tag, MPI_COMM_WORLD ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + ClearArray( data, 100, -1.0 ); + MPI_Recv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &status ); + /* MPE_Print_datatype_unpack_action( stdout, count, rowtype, 0, 0 
); */ + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i*10] != i) { + errcnt++; + fprintf( stderr, + "[%d](rcv row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 1.0*i ); + } + } + + /* Third test: send AND receive a row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + SetArray( data, 100 ); + /* Send a row */ + MPI_Send( data, count, rowtype, to, tag, MPI_COMM_WORLD ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + ClearArray( data, 100, -1.0 ); + MPI_Recv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i*10] != i*10) { + errcnt++; + fprintf( stderr, + "[%d](rcv row-row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 10.0*i ); + } + } + + /* Second Set of Tests: Use Isend and Irecv instead of Send and Recv */ + /* First test: send a row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + SetArray( data, 100 ); + /* Send a row */ + MPI_Isend( data, count, rowtype, to, tag, MPI_COMM_WORLD, &handle ); + 
MPI_Wait( &handle, &status ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + ClearArray( data, 100, -1.0 ); + MPI_Irecv(data, count, MPI_DOUBLE, from, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i]); printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i] != 10*i) { + errcnt++; + fprintf( stderr, + "[%d](ircv double) %d'th element = %f, should be %f\n", + rank, i, data[i], 10.0*i ); + } + } + + /* Second test: receive a column into row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + SetArray( data, 100 ); + /* Send a row */ + MPI_Isend( data, count, MPI_DOUBLE, to, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + ClearArray( data, 100, -1.0 ); + MPI_Irecv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 
10; i++) if (data[i*10] != i) { + errcnt++; + fprintf( stderr, + "[%d](ircv row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 1.0*i ); + } + } + + /* Third test: send AND receive a row */ + if (rank == src) + { + to = dest; + count = 10; + tag = 2001; + SetArray( data, 100 ); + /* Send a row */ + MPI_Isend( data, count, rowtype, to, tag, MPI_COMM_WORLD, &handle ); + MPI_Wait( &handle, &status ); +#ifdef SHOWMSG + printf("%d sent", rank ); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + } + if (rank == dest) + { + tag = MPI_ANY_TAG; + count = 10; + from = MPI_ANY_SOURCE; + ClearArray( data, 100, -1.0 ); + MPI_Irecv(data, count, rowtype, from, tag, MPI_COMM_WORLD, + &handle ); + MPI_Wait( &handle, &status ); + + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + MPI_Get_count( &status, MPI_DOUBLE, &st_count ); + + if (st_source != src || st_tag != 2001 || st_count != 10 || verbose) { + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + } +#ifdef SHOWMSG + printf( "%d received", rank); + for (i = 0; i < 10; i++) printf(" %f",data[i*10]);printf("\n"); +#endif + for (i = 0; i < 10; i++) if (data[i*10] != i*10) { + errcnt++; + fprintf( stderr, + "[%d](ircv row-row) %d'th element = %f, should be %f\n", + rank, i, data[i*10], 10.0*i ); + } + } + + i = errcnt; + MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (errcnt > 0) { + printf( "Found %d errors in the run \n", errcnt ); + } + MPI_Type_free( &rowtype ); + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/irecvtest.c b/teshsuite/smpi/mpich-test/pt2pt/irecvtest.c new file mode 100644 index 0000000000..446059e3df --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/irecvtest.c @@ -0,0 +1,152 @@ +/* + * Program to test that the "no overtaking messages" semantics + * of point to point communications in MPI is satisfied, + * for a simple send/irecv 
operation. + * + * Derived from a program written by + * Patrick Bridges + * bridges@mcs.anl.gov + * patrick@CS.MsState.Edu + */ + +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 + +static int src = 0; +static int dest = 1; + +/* Which tests to perform (not yet implemented) */ +/* static int Do_Buffer = 1; */ +/* static int Do_Standard = 1; */ + +/* Prototypes for picky compilers */ +void Generate_Data ( double *, int ); +void Normal_Test_Send ( double *, int ); +void Async_Test_Recv ( double *, int ); +int Check_Data ( double *, int ); +void Clear_Buffer ( double *, int ); + +void Generate_Data(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = (double)i+1; +} + +#define NSHORT 10 +void Normal_Test_Send(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + + for (j = 0; j < 2; j++) { + /* send a long message */ + MPI_Send(buffer, (buff_size/2 - NSHORT), MPI_DOUBLE, dest, 2000, + MPI_COMM_WORLD); + buffer += buff_size/2 - NSHORT; + /* Followed by NSHORT short ones */ + for (i = 0; i < NSHORT; i++) + MPI_Send(buffer++, 1, MPI_DOUBLE, dest, 2000, MPI_COMM_WORLD); + } +} + +void Async_Test_Recv(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j, req = 0; + MPI_Status Stat[22]; + MPI_Request Hand[22]; + + for (j = 0; j < 2; j++) { + /* Receive a long message */ + MPI_Irecv(buffer, (buff_size/2 - NSHORT), MPI_DOUBLE, src, + 2000, MPI_COMM_WORLD, &(Hand[req++])); + buffer += buff_size/2 - NSHORT; + /* Followed by NSHORT short ones */ + for (i = 0; i < NSHORT; i++) + MPI_Irecv(buffer++, 1, MPI_DOUBLE, src, 2000, + MPI_COMM_WORLD, &(Hand[req++])); + } + MPI_Waitall(req, Hand, Stat); +} + +int Check_Data(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + int err = 0; + + for (i = 0; i < buff_size; i++) + if (buffer[i] != (i + 1)) { + err++; + fprintf( stderr, "Value at %d is %f, should be %f\n", i, + buffer[i], (double)(i+1) ); + fflush( stderr ); 
+ if (err > 10) return 1; + } + return err; +} + +void Clear_Buffer(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + for (i = 0; i < buff_size; i++) + buffer[i] = -1; +} + + +int main( int argc, char **argv) +{ + int rank; /* My Rank (0 or 1) */ + double buffer[SIZE]; + char *Current_Test = NULL; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == src) { + Generate_Data(buffer, SIZE); + Normal_Test_Send(buffer, SIZE); + Test_Waitforall( ); + MPI_Finalize(); + + } else if (rank == dest) { + Test_Init("irecvtest", rank); + /* Test 2 */ + Clear_Buffer(buffer, SIZE); + Current_Test = (char*)"Overtaking Test (Normal Send -> Async Receive)"; + Async_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + Test_Waitforall( ); + + MPI_Finalize(); + { + int rval = Summarize_Test_Results(); /* Returns number of tests; + that failed */ + Test_Finalize(); + return rval; + } + } else { + fprintf(stderr, "*** This program uses exactly 2 processes! ***\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/irsend.c b/teshsuite/smpi/mpich-test/pt2pt/irsend.c new file mode 100644 index 0000000000..9b690de3de --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/irsend.c @@ -0,0 +1,155 @@ +#include "mpi.h" +#include +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* Nonblocking ready sends + + This is similar to a test in allpair.f, but with an expanded range of + datatypes and communicators. 
+ */ + +int main( int argc, char **argv ) +{ + MPI_Datatype *types; + void **inbufs, **outbufs; + char **names; + int *counts, *bytesize, ntype; + MPI_Comm comms[20]; + int ncomm = 20, rank, np, partner, tag; + int i, j, k, err, toterr, world_rank, errloc; + MPI_Status status, statuses[2]; + int flag, index; + char *obuf; + MPI_Request requests[2]; + + + MPI_Init( &argc, &argv ); + + AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); + GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + + MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ + err = 0; + for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } + FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); + FreeComms( comms, ncomm ); + MPI_Finalize(); + + return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/irsendinit.c b/teshsuite/smpi/mpich-test/pt2pt/irsendinit.c new file mode 100644 index 0000000000..d80cf5a397 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/irsendinit.c @@ -0,0 +1,167 @@ +#include "mpi.h" +#include +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* Nonblocking ready persistent sends + + This is similar to a test in allpair.f, but with an expanded range of + datatypes and communicators. + + This is like irsend.c, but with multiple starts of the same persistent + request. 
+ */ + +int main( int argc, char **argv ) +{ + MPI_Datatype *types; + void **inbufs, **outbufs; + char **names; + int *counts, *bytesize, ntype; + MPI_Comm comms[20]; + int ncomm = 20, rank, np, partner, tag; + int i, j, k, err, toterr, world_rank, errloc; + MPI_Status status; + int flag, index; + char *obuf; + MPI_Request requests[2]; + int mcnt; + + + MPI_Init( &argc, &argv ); + + AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); + GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + + MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ + err = 0; + for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } + FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); + FreeComms( comms, ncomm ); + MPI_Finalize(); + + return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/isendf.f b/teshsuite/smpi/mpich-test/pt2pt/isendf.f new file mode 100644 index 0000000000..024ba4cfa8 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/isendf.f @@ -0,0 +1,54 @@ + program main + include 'mpif.h' + integer ierr, errs, toterrs + integer request + integer status(MPI_STATUS_SIZE) + integer rank, size, buf(10) + logical flag +C + call MPI_Init( ierr ) + errs = 0 +C + call MPI_Comm_size( MPI_COMM_WORLD, size, ierr ) + if (size .lt. 2) then + print *, 'Must have at least two processes' + call MPI_Abort( MPI_COMM_WORLD, 1, ierr ) + endif + call MPI_Comm_rank( MPI_COMM_WORLD, rank, ierr ) + if (rank .eq. 0) then + do i = 1, 10 + buf(i) = i + enddo + call MPI_Isend( buf, 10, MPI_INTEGER, size - 1, 1, + $ MPI_COMM_WORLD, request, ierr ) + call MPI_Wait( request, status, ierr ) + endif + if (rank .eq. 
size - 1) then + call MPI_Irecv( buf, 10, MPI_INTEGER, 0, 1, MPI_COMM_WORLD, + $ request, ierr ) +C call MPI_Wait( request, status, ierr ) + flag = .FALSE. + do while (.not. flag) + call MPI_Test( request, flag, status, ierr ) + enddo +C +C Check the results + do i = 1, 10 + if (buf(i) .ne. i) then + errs = errs + 1 + endif + enddo + endif +C + call MPI_Allreduce( errs, toterrs, 1, MPI_INTEGER, MPI_SUM, + $ MPI_COMM_WORLD, ierr ) + if (rank .eq. 0) then + if (toterrs .gt. 0) then + print *, "Found ", toterrs, " Errors" + else + PRINT *, " No Errors" + endif + endif + call MPI_Finalize( ierr ) + stop + end diff --git a/teshsuite/smpi/mpich-test/pt2pt/isendtest.c b/teshsuite/smpi/mpich-test/pt2pt/isendtest.c new file mode 100644 index 0000000000..0a920107b4 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/isendtest.c @@ -0,0 +1,48 @@ +/* + MPICH 1.0.8 on Intel Paragons is alleged to have failed this test. + (Original code from + From: weber@zam212.zam.kfa-juelich.de (M.Weber) + Reply-To: M.Weber@kfa-juelich.de + modified slightly to meet our test rules.) 
+ */ +#include +#include "mpi.h" +#define SIZE 100 +/* SIZE 16 worked on Paragon */ + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char *argv[]) +{ + int num_procs,my_id,flag; + int buf[SIZE][SIZE]; + MPI_Status status; + MPI_Request handle; + + MPI_Init(&argc,&argv); + MPI_Comm_size(MPI_COMM_WORLD,&num_procs); + MPI_Comm_rank(MPI_COMM_WORLD,&my_id); + + if ( my_id == 1 ) { + MPI_Isend (buf, SIZE*SIZE, MPI_INT, 0, 0, MPI_COMM_WORLD, &handle ); + + flag = 0; + while (flag == 0) { + MPI_Test (&handle, &flag, &status); + printf("%d Wait for completition flag = %d handle = %ld ....\n", + my_id, flag, (long) handle); + } + } + else if (my_id == 0 ) { + MPI_Recv (buf, SIZE*SIZE, MPI_INT, 1, 0, MPI_COMM_WORLD, &status ); + } + + printf("%d Done ....\n",my_id); + + MPI_Finalize(); + return 0; +} + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/isndrcv.c b/teshsuite/smpi/mpich-test/pt2pt/isndrcv.c new file mode 100644 index 0000000000..d15c58ac9b --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/isndrcv.c @@ -0,0 +1,507 @@ +/* + * Program to test all of the features of MPI_Send and MPI_Recv + * + * *** What is tested? *** + * 1. Sending and receiving all basic types and many sizes - check + * 2. Tag selectivity - check + * 3. Error return codes for + * a. Invalid Communicator + * b. Invalid destination or source + * c. Count out of range + * d. 
Invalid type + */ + +#include "test.h" +#include +#include +#include +#include "mpi.h" + +#ifdef HAVE_MPICHCONF_H +#include "mpichconf.h" +#endif + +static int src = 1; +static int dest = 0; + +static int verbose = 0; + +#define MAX_TYPES 12 +static MPI_Datatype BasicTypes[MAX_TYPES]; +#if defined(HAVE_LONG_DOUBLE) && (!defined HAS_XDR) +static int ntypes = 12; +#else +static int ntypes = 11; +#endif + +static int maxbufferlen = 10000; +static int stdbufferlen = 300; + +/* Prototypes to keep compilers quiet */ +void AllocateBuffers ( void **, MPI_Datatype *, int, int ); +void FreeBuffers ( void **, int ); +void FillBuffers ( void **, MPI_Datatype *, int, int ); +int CheckBuffer ( void *, MPI_Datatype, int ); +void SetupBasicTypes (void); +void SenderTest1 (void); +void ReceiverTest1 (void); +void SenderTest2 (void); +void ReceiverTest2 (void); +void SenderTest3 (void); +void ReceiverTest3 (void); + +void +AllocateBuffers(void **bufferspace, MPI_Datatype *buffertypes, int num_types, + int bufferlen) +{ + int i; + for (i = 0; i < ntypes; i++) { + if (buffertypes[i] == MPI_CHAR) + bufferspace[i] = malloc(bufferlen * sizeof(char)); + else if (buffertypes[i] == MPI_SHORT) + bufferspace[i] = malloc(bufferlen * sizeof(short)); + else if (buffertypes[i] == MPI_INT) + bufferspace[i] = malloc(bufferlen * sizeof(int)); + else if (buffertypes[i] == MPI_LONG) + bufferspace[i] = malloc(bufferlen * sizeof(long)); + else if (buffertypes[i] == MPI_UNSIGNED_CHAR) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned char)); + else if (buffertypes[i] == MPI_UNSIGNED_SHORT) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned short)); + else if (buffertypes[i] == MPI_UNSIGNED) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned int)); + else if (buffertypes[i] == MPI_UNSIGNED_LONG) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned long)); + else if (buffertypes[i] == MPI_FLOAT) + bufferspace[i] = malloc(bufferlen * sizeof(float)); + else if (buffertypes[i] == MPI_DOUBLE) + 
bufferspace[i] = malloc(bufferlen * sizeof(double)); +#if defined(HAVE_LONG_DOUBLE) && (!defined HAS_XDR) + else if (MPI_LONG_DOUBLE && buffertypes[i] == MPI_LONG_DOUBLE) { + int dlen; + MPI_Type_size( MPI_LONG_DOUBLE, &dlen ); + bufferspace[i] = malloc(bufferlen * dlen); + } +#endif + else if (buffertypes[i] == MPI_BYTE) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned char)); + } +} + +void +FreeBuffers(void **buffers, int nbuffers) +{ + int i; + for (i = 0; i < nbuffers; i++) + free(buffers[i]); +} + +void +FillBuffers(void **bufferspace, MPI_Datatype *buffertypes, int num_types, + int bufferlen) +{ + int i, j; + for (i = 0; i < ntypes; i++) { + for (j = 0; j < bufferlen; j++) { + if (buffertypes[i] == MPI_CHAR) + ((char *)bufferspace[i])[j] = (char)(j & 0x7f); + else if (buffertypes[i] == MPI_SHORT) + ((short *)bufferspace[i])[j] = (short)j; + else if (buffertypes[i] == MPI_INT) + ((int *)bufferspace[i])[j] = (int)j; + else if (buffertypes[i] == MPI_LONG) + ((long *)bufferspace[i])[j] = (long)j; + else if (buffertypes[i] == MPI_UNSIGNED_CHAR) + ((unsigned char *)bufferspace[i])[j] = (unsigned char)j; + else if (buffertypes[i] == MPI_UNSIGNED_SHORT) + ((unsigned short *)bufferspace[i])[j] = (unsigned short)j; + else if (buffertypes[i] == MPI_UNSIGNED) + ((unsigned int *)bufferspace[i])[j] = (unsigned int)j; + else if (buffertypes[i] == MPI_UNSIGNED_LONG) + ((unsigned long *)bufferspace[i])[j] = (unsigned long)j; + else if (buffertypes[i] == MPI_FLOAT) + ((float *)bufferspace[i])[j] = (float)j; + else if (buffertypes[i] == MPI_DOUBLE) + ((double *)bufferspace[i])[j] = (double)j; +#if defined(HAVE_LONG_DOUBLE) && (!defined HAS_XDR) + else if (MPI_LONG_DOUBLE && buffertypes[i] == MPI_LONG_DOUBLE) + ((long double *)bufferspace[i])[j] = (long double)j; +#endif + else if (buffertypes[i] == MPI_BYTE) + ((unsigned char *)bufferspace[i])[j] = (unsigned char)j; + } + } +} + +int +CheckBuffer(bufferspace, buffertype, bufferlen) +void *bufferspace; +MPI_Datatype 
buffertype; +int bufferlen; +{ + int j; + for (j = 0; j < bufferlen; j++) { + if (buffertype == MPI_CHAR) { + if (((char *)bufferspace)[j] != (char)(j & 0x7f)) + return 1; + } else if (buffertype == MPI_SHORT) { + if (((short *)bufferspace)[j] != (short)j) + return 1; + } else if (buffertype == MPI_INT) { + if (((int *)bufferspace)[j] != (int)j) + return 1; + } else if (buffertype == MPI_LONG) { + if (((long *)bufferspace)[j] != (long)j) + return 1; + } else if (buffertype == MPI_UNSIGNED_CHAR) { + if (((unsigned char *)bufferspace)[j] != (unsigned char)j) + return 1; + } else if (buffertype == MPI_UNSIGNED_SHORT) { + if (((unsigned short *)bufferspace)[j] != (unsigned short)j) + return 1; + } else if (buffertype == MPI_UNSIGNED) { + if (((unsigned int *)bufferspace)[j] != (unsigned int)j) + return 1; + } else if (buffertype == MPI_UNSIGNED_LONG) { + if (((unsigned long *)bufferspace)[j] != (unsigned long)j) + return 1; + } else if (buffertype == MPI_FLOAT) { + if (((float *)bufferspace)[j] != (float)j) + return 1; + } else if (buffertype == MPI_DOUBLE) { + if (((double *)bufferspace)[j] != (double)j) + return 1; +#if defined(HAVE_LONG_DOUBLE) && (!defined HAS_XDR) + } else if (MPI_LONG_DOUBLE && buffertype == MPI_LONG_DOUBLE) { + if (((long double *)bufferspace)[j] != (long double)j) + return 1; +#endif + } else if (buffertype == MPI_BYTE) { + if (((unsigned char *)bufferspace)[j] != (unsigned char)j) + return 1; + } + } + return 0; +} + +void SetupBasicTypes( void ) +{ + BasicTypes[0] = MPI_CHAR; + BasicTypes[1] = MPI_SHORT; + BasicTypes[2] = MPI_INT; + BasicTypes[3] = MPI_LONG; + BasicTypes[4] = MPI_UNSIGNED_CHAR; + BasicTypes[5] = MPI_UNSIGNED_SHORT; + BasicTypes[6] = MPI_UNSIGNED; + BasicTypes[7] = MPI_UNSIGNED_LONG; + BasicTypes[8] = MPI_FLOAT; + BasicTypes[9] = MPI_DOUBLE; + + /* Define the last few elements as null just in case */ + BasicTypes[11] = MPI_DATATYPE_NULL; +#if defined (HAVE_LONG_DOUBLE) && (!defined HAS_XDR) + if (MPI_LONG_DOUBLE) { + 
BasicTypes[10] = MPI_LONG_DOUBLE; + BasicTypes[11] = MPI_BYTE; + } + else { + ntypes = 11; + BasicTypes[10] = MPI_BYTE; + } +#else + BasicTypes[10] = MPI_BYTE; +#endif +} + +void +SenderTest1( void ) +{ + void *bufferspace[MAX_TYPES]; + int i, j; + int act_send; + MPI_Request *requests = + (MPI_Request *)malloc(sizeof(MPI_Request) * ntypes * + maxbufferlen/500); + MPI_Status *statuses = + (MPI_Status *)malloc(sizeof(MPI_Status) * ntypes * + maxbufferlen/500); + + AllocateBuffers(bufferspace, BasicTypes, ntypes, maxbufferlen); + FillBuffers(bufferspace, BasicTypes, ntypes, maxbufferlen); + act_send = 0; + for (i = 0; i < ntypes; i++) { + for (j = 0; j < maxbufferlen; j += 500) { + if (BasicTypes[i] == MPI_DATATYPE_NULL) continue; + MPI_Isend(bufferspace[i], j, BasicTypes[i], dest, + 2000, MPI_COMM_WORLD, + &(requests[act_send++])); + } + } + MPI_Waitall( act_send, requests, statuses); + free(requests); + free(statuses); + FreeBuffers(bufferspace, ntypes); +} + +void +ReceiverTest1( void ) +{ + void *bufferspace[MAX_TYPES]; + int i, j; + char message[81]; + MPI_Status Stat; + MPI_Request Req; + int dummy, passed; + + AllocateBuffers(bufferspace, BasicTypes, ntypes, maxbufferlen); + for (i = 0; i < ntypes; i++) { + passed = 1; + /* Try different sized messages */ + for (j = 0; j < maxbufferlen; j += 500) { + /* Skip null datatypes */ + if (!BasicTypes[i]) continue; + MPI_Irecv(bufferspace[i], j, BasicTypes[i], src, + 2000, MPI_COMM_WORLD, &Req); + sprintf(message, "Send-Receive Test, Type %d, Count %d", + i, j); + MPI_Wait(&Req, &Stat); + if (Stat.MPI_SOURCE != src) { + fprintf(stderr, "*** Incorrect Source returned. ***\n"); + Test_Failed(message); + passed = 0; + } else if (Stat.MPI_TAG != 2000) { + fprintf(stderr, "*** Incorrect Tag returned. ***\n"); + Test_Failed(message); + passed = 0; + } else if (MPI_Get_count(&Stat, BasicTypes[i], &dummy) || + dummy != j) { + fprintf(stderr, + "*** Incorrect Count returned, Count = %d. 
***\n", + dummy); + Test_Failed(message); + passed = 0; + } else if(CheckBuffer(bufferspace[i], BasicTypes[i], j)) { + fprintf(stderr, "*** Incorrect Message received. ***\n"); + Test_Failed(message); + passed = 0; + } + } + sprintf(message, "Send-Receive Test, Type %d", + i); + if (passed) + Test_Passed(message); + else + Test_Failed(message); + } + FreeBuffers(bufferspace, ntypes); +} + +/* Test Tag Selectivity */ +void +SenderTest2( void ) +{ + int *buffer; + int i; + MPI_Request requests[10]; + MPI_Status statuses[10]; + + buffer = (int *)malloc(stdbufferlen * sizeof(int)); + + for (i = 0; i < stdbufferlen; i++) + buffer[i] = i; + + for (i = 1; i <= 10; i++) + MPI_Isend(buffer, stdbufferlen, MPI_INT, dest, + 2000+i, MPI_COMM_WORLD, &(requests[i-1])); + MPI_Waitall(10, requests, statuses); + free(buffer); + + return; +} + +void +ReceiverTest2( void ) +{ + int *buffer; + int i, j; + char message[81]; + MPI_Status Stat; + int dummy, passed; + + MPI_Request Req; + + buffer = (int *)malloc(stdbufferlen * sizeof(int)); + passed = 1; + + for (i = 2010; i >= 2001; i--) { + MPI_Irecv(buffer, stdbufferlen, MPI_INT, src, + i, MPI_COMM_WORLD, &Req); + sprintf(message, "Tag Selectivity Test, Tag %d", + i); + MPI_Wait(&Req, &Stat); + if (Stat.MPI_SOURCE != src) { + fprintf(stderr, "*** Incorrect Source returned. ***\n"); + Test_Failed(message); + } else if (Stat.MPI_TAG != i) { + fprintf(stderr, "*** Incorrect Tag returned. ***\n"); + Test_Failed(message); + } else if (MPI_Get_count(&Stat, MPI_INT, &dummy) || + dummy != stdbufferlen) { + fprintf(stderr, + "*** Incorrect Count returned, Count = %d. ***\n", + dummy); + Test_Failed(message); + } else if(CheckBuffer( (void *)buffer, MPI_INT, stdbufferlen)) { + fprintf(stderr, "*** Incorrect Message received. 
***\n"); + Test_Failed(message); + passed = 0; + } + /* Clear out the buffer */ + for (j = 0; j < stdbufferlen; j++) + buffer[j] = -1; + } + strncpy(message, "Tag Selectivity Test", 81); + if (passed) + Test_Passed(message); + else + Test_Failed(message); + free(buffer); + return; +} + +void +SenderTest3( void ) +{ + return; +} + +void +ReceiverTest3( void ) +{ + int buffer[20]; + MPI_Datatype bogus_type = MPI_DATATYPE_NULL; + MPI_Request Req; +#if 0 + MPI_Status Stat; + int err_code; +#endif + if (verbose) + MPI_Errhandler_set(MPI_COMM_WORLD, TEST_ERRORS_WARN); + else + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN); + + if (MPI_Isend(buffer, 20, MPI_INT, dest, + 1, MPI_COMM_NULL, &Req) == MPI_SUCCESS){ + Test_Failed("NULL Communicator Test"); + } + else { + Test_Passed("NULL Communicator Test"); +#if 0 + /* If test passed (i.e. send failed, try waiting on the + request... */ + Test_Message("About to wait on failed request."); + if (MPI_Wait(&Req, &Stat) == MPI_SUCCESS) {; + Test_Failed("Wait on failed isend Test"); + } + else + Test_Passed("Wait on failed isend Test"); + Test_Message("Done waiting on failed request."); +#endif + } +/* + if (MPI_Isend(NULL, 10, MPI_INT, dest, + 1, MPI_COMM_WORLD, &Req) == MPI_SUCCESS){ + Test_Failed("Invalid Buffer Test"); + } + else + Test_Passed("Invalid Buffer Test"); +*/ + if (MPI_Isend(buffer, -1, MPI_INT, dest, + 1, MPI_COMM_WORLD, &Req) == MPI_SUCCESS){ + Test_Failed("Invalid Count Test"); + } + else + Test_Passed("Invalid Count Test"); + + if (MPI_Isend(buffer, 20, bogus_type, dest, + 1, MPI_COMM_WORLD, &Req) == MPI_SUCCESS){ + Test_Failed("Invalid Type Test"); + } + else + Test_Passed("Invalid Type Test"); + + if (MPI_Isend(buffer, 20, MPI_INT, dest, + -1000, MPI_COMM_WORLD, &Req) == MPI_SUCCESS) { + Test_Failed("Invalid Tag Test"); + } + else + Test_Passed("Invalid Tag Test"); + + if (MPI_Isend(buffer, 20, MPI_INT, 300, + 1, MPI_COMM_WORLD, &Req) == MPI_SUCCESS) { + Test_Failed("Invalid Destination Test"); + } + 
else + Test_Passed("Invalid Destination Test"); + return; +} + +int +main( int argc, char **argv ) +{ + int myrank, mysize; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + MPI_Comm_size(MPI_COMM_WORLD, &mysize); + + /* dest writes out the received stats; for the output to be + consistant (with the final check), it should be procees 0 */ + if (argc > 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) { + dest = 1; + src = 0; + } + else { + src = 1; + dest = 0; + } + + Test_Init("isndrcv", myrank); + SetupBasicTypes(); + + if (mysize != 2) { + fprintf(stderr, + "*** This test program requires exactly 2 processes.\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* Turn stdout's buffering to line buffered so it mixes right with + stderr in output files. (hopefully) */ + setvbuf(stdout, NULL, _IOLBF, 0); + + if (myrank == src) { + SenderTest1(); + SenderTest2(); + SenderTest3(); + } else if (myrank == dest) { + ReceiverTest1(); + ReceiverTest2(); + ReceiverTest3(); + } else { + fprintf(stderr, "*** This program uses exactly 2 processes! 
***\n"); + exit(-1); + } + Test_Waitforall( ); + if (myrank == dest) { + int rval; + rval = Summarize_Test_Results(); + Test_Finalize(); + MPI_Finalize(); + return rval; + } + else { + Test_Finalize(); + MPI_Finalize(); + return 0; + } +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/isndrcv2.c b/teshsuite/smpi/mpich-test/pt2pt/isndrcv2.c new file mode 100644 index 0000000000..531e51bc98 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/isndrcv2.c @@ -0,0 +1,41 @@ +#include +#include "mpi.h" + +int main( int argc, char **argv ) +{ + int rank, size; + MPI_Request r1, r2; + MPI_Status s; + int buf[10000], buf2[10000], count, tag1, tag2; + + count = 10000; + tag1 = 100; + tag2 = 1000; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + if (rank == 0) { + MPI_Isend( buf, count, MPI_INT, 1, tag1, MPI_COMM_WORLD, &r1 ); + MPI_Isend( buf2, count, MPI_INT, 1, tag2, MPI_COMM_WORLD, &r2 ); + MPI_Wait( &r1, &s ); + MPI_Wait( &r2, &s ); + } + else if (rank == 1) { + MPI_Irecv( buf2, count, MPI_INT, 0, tag2, MPI_COMM_WORLD, &r2 ); + MPI_Irecv( buf, count, MPI_INT, 0, tag1, MPI_COMM_WORLD, &r1 ); + MPI_Wait( &r2, &s ); + if (s.MPI_TAG != tag2) { + printf( "Error in receive order\n" ); + } + MPI_Wait( &r1, &s ); + } + + MPI_Barrier( MPI_COMM_WORLD ); + if (rank == 0) { + printf( "Test completed\n" ); + } + MPI_Finalize( ); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/issend2.c b/teshsuite/smpi/mpich-test/pt2pt/issend2.c new file mode 100644 index 0000000000..0ff413a4d8 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/issend2.c @@ -0,0 +1,102 @@ +/* -*- Mode: C; c-basic-offset:4 ; -*- */ +/* This program comes from Bert Still, bert@h4p.llnl.gov + It caused problems for the T3D implementation. 
+ */ +#include +#include "mpi.h" +#include "test.h" + +#define MESSAGE_TAG 8 +#define MESSAGE_VALUE 6 +#define MESSAGE_TYPE MPI_BYTE +#define MESSAGE_CTYPE char +static MESSAGE_CTYPE recv_msg[8]; +static MESSAGE_CTYPE send_msg[8]; + +static MPI_Status recv_status; +static MPI_Status send_status[2]; +static MPI_Request request[2]; +static int complete[2]; + +/*------------------------------------------------------------------------*/ + +void fatal ( int,const char * ); + +void fatal(rank, msg) +int rank; +const char *msg; +{ + printf("***FATAL** rank %d: %s\n", rank, msg); + MPI_Abort(MPI_COMM_WORLD, 1); + exit(1); +} + +int verbose = 0; +int main( int argc, char *argv[] ) +{ + int size, rank; + int err=0, toterr; + + if (MPI_Init(&argc, &argv)!=MPI_SUCCESS) fatal(-1, "MPI_Init failed"); + + if (MPI_Comm_size(MPI_COMM_WORLD, &size)!=MPI_SUCCESS) + fatal(-1, "MPI_Comm_size failed"); + if (MPI_Comm_rank(MPI_COMM_WORLD, &rank)!=MPI_SUCCESS) + fatal(-1, "MPI_Comm_rank failed"); + if (size!=2) fatal(rank, "issend2 test requires -np 2\n"); + + if (rank) { + if (MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + &recv_status)!=MPI_SUCCESS) + fatal(rank, "MPI_Probe failed"); + if (recv_status.MPI_SOURCE!=0 || recv_status.MPI_TAG!=MESSAGE_TAG) + fatal(rank, "message source or tag wrong"); + if (MPI_Recv(recv_msg, 8, MESSAGE_TYPE, + recv_status.MPI_SOURCE, recv_status.MPI_TAG, MPI_COMM_WORLD, + &recv_status)!=MPI_SUCCESS) + fatal(rank, "MPI_Recv failed"); + if (recv_msg[0] == MESSAGE_VALUE) { + if (verbose) printf( "test completed successfully\n" ); + } + else { + printf("test failed: rank %d: got %d but expected %d\n", + rank, recv_msg[0], MESSAGE_VALUE ); + err++; + } + + fflush(stdout); + + if (recv_msg[0]!=MESSAGE_VALUE) + fatal(rank, "received message doesn't match sent message"); + + } else { + int n_complete; + + send_msg[0]= MESSAGE_VALUE; + + if (MPI_Issend(send_msg, 1, MESSAGE_TYPE, /*rank*/1, MESSAGE_TAG, + MPI_COMM_WORLD, request) != MPI_SUCCESS) + 
fatal(rank, "MPI_Issend failed"); + if (MPI_Waitsome(1, request, &n_complete, complete,send_status) != + MPI_SUCCESS) + fatal(rank, "MPI_Waitsome failed"); + if (request[0]!=MPI_REQUEST_NULL || n_complete!=1 || complete[0]!=0) + fatal(rank, "Waitsome result is wrong"); + } + + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } + /* printf("rank %d: about to finalize\n", rank); */ + fflush(stdout); + MPI_Finalize(); + /* printf("rank %d: finalize completed\n", rank); */ + fflush(stdout); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/issendinit.c b/teshsuite/smpi/mpich-test/pt2pt/issendinit.c new file mode 100644 index 0000000000..42588d2d20 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/issendinit.c @@ -0,0 +1,104 @@ +/* + * Program to test that the "synchronous send" semantics + * of point to point communications in MPI is (probably) satisfied. + * This is done by starting two synchronous sends and then testing that + * they do not complete until the matchine receives are issued. 
+ */ + +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 +/* Amount of time in seconds to wait for the receipt of the second Ssend + message */ +#define MAX_TIME 20 +static int src = 1; +static int dest = 0; + +/* Prototypes for picky compilers */ +void Generate_Data ( int *, int ); + +void Generate_Data(buffer, buff_size) +int *buffer; +int buff_size; +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = i+1; +} + +int main( int argc, char **argv ) +{ + int rank; /* My Rank (0 or 1) */ + int act_size = 1000; + int flag; + int buffer[SIZE]; + double t0; + char *Current_Test = NULL; + MPI_Status status; + MPI_Request r[2]; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == src) { + Test_Init("issendinit", rank); + Generate_Data(buffer, SIZE); + Current_Test = (char*)"Ssend_init waits for recv"; + MPI_Recv( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD, &status ); + MPI_Send( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD ); + MPI_Ssend_init( buffer, act_size, MPI_INT, dest, 1, MPI_COMM_WORLD, + &r[0] ); + MPI_Ssend_init( buffer, act_size, MPI_INT, dest, 2, MPI_COMM_WORLD, + &r[1] ); + MPI_Startall( 2, r ); + t0 = MPI_Wtime(); + flag = 0; + while (MPI_Wtime() - t0 < MAX_TIME) { + MPI_Test( &r[0], &flag, &status ); + if (flag) { + Test_Failed(Current_Test); + break; + } + } + if (!flag) + Test_Passed(Current_Test); + MPI_Wait( &r[1], &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, dest, 13, + MPI_BOTTOM, 0, MPI_INT, dest, 13, + MPI_COMM_WORLD, &status ); + MPI_Wait( &r[0], &status ); + MPI_Request_free( &r[0] ); + MPI_Request_free( &r[1] ); + Test_Waitforall( ); + { + int rval = Summarize_Test_Results(); /* Returns number of tests; + that failed */ + Test_Finalize(); + MPI_Finalize(); + return rval; + } + + } else if (rank == dest) { + MPI_Send( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD ); + MPI_Recv( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD, &status ); + MPI_Recv( buffer, act_size, MPI_INT, src, 2, 
MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, src, 13, + MPI_BOTTOM, 0, MPI_INT, src, 13, + MPI_COMM_WORLD, &status ); + MPI_Recv( buffer, act_size, MPI_INT, src, 1, MPI_COMM_WORLD, &status ); + /* Test 1 */ + Test_Waitforall( ); + MPI_Finalize(); + } else { + fprintf(stderr, "*** This program uses exactly 2 processes! ***\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/issendtest.c b/teshsuite/smpi/mpich-test/pt2pt/issendtest.c new file mode 100644 index 0000000000..80a9ed1269 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/issendtest.c @@ -0,0 +1,131 @@ +/* + * Program to test that the "synchronous send" semantics + * of point to point communications in MPI is (probably) satisfied. + * This uses tests on the completions of the SENDS (unlike the MPI_Ssend + * test) since the Issends return "immediately" but can not complete + * until the matching receive begins. + * + * This program has been patterned off of "overtake.c" + * + * William Gropp + * gropp@mcs.anl.gov + */ + +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 +/* Amount of time in seconds to wait for the receipt of the second Ssend + message */ +#define MAX_TIME 20 +static int src = 1; +static int dest = 0; + +/* Prototypes for picky compilers */ +void Generate_Data ( int *, int ); + +void Generate_Data( int *buffer, int buff_size) +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = i+1; +} + +int main( int argc, char **argv) +{ + int rank; /* My Rank (0 or 1) */ + int act_size = 1000; + int flag; + int buffer[SIZE]; + double t0; + char *Current_Test = NULL; + MPI_Status status; + MPI_Request r1, r2; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + /* This test depends on a working wtime. 
Make a simple check */ + Current_Test = (char*)"Testing timer"; + t0 = MPI_Wtime(); + if (t0 == 0 && MPI_Wtime() == 0) { + int loopcount = 1000000; + /* This test is too severe (systems with fast + processors and large MPI_Wtick values can + fail. Try harder to test MPI_Wtime */ + while (loopcount-- && MPI_Wtime() == 0) ; + if (loopcount <= 0) { + fprintf( stderr, + "MPI_WTIME is returning 0; a working value is needed\n\ +for this test.\n" ); + Test_Failed(Current_Test); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + t0 = MPI_Wtime(); + } + /* Test that the timer increases */ + Current_Test = (char*)"Testing timer increases"; + for (flag=0; flag<1000000; flag++) { + if (MPI_Wtime() > t0) break; + } + if (flag >= 1000000) { + fprintf( stderr, "MPI_WTIME is not returning increasing values!\n" ); + Test_Failed(Current_Test); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + Current_Test = (char*)"Issend waits for recv"; + if (rank == src) { + Test_Init("issendtest", rank); + Generate_Data(buffer, SIZE); + MPI_Recv( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD, &status ); + MPI_Send( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD ); + MPI_Issend( buffer, act_size, MPI_INT, dest, 1, MPI_COMM_WORLD, &r1 ); + MPI_Issend( buffer, act_size, MPI_INT, dest, 2, MPI_COMM_WORLD, &r2 ); + t0 = MPI_Wtime(); + flag = 0; + while ( (MPI_Wtime() - t0) < MAX_TIME) { + MPI_Test( &r1, &flag, &status ); + if (flag) { + Test_Failed(Current_Test); + break; + } + } + if (!flag) + Test_Passed(Current_Test); + MPI_Wait( &r2, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, dest, 13, + MPI_BOTTOM, 0, MPI_INT, dest, 13, + MPI_COMM_WORLD, &status ); + MPI_Wait( &r1, &status ); + Test_Waitforall( ); + { + int rval = Summarize_Test_Results(); /* Returns number of tests; + that failed */ + Test_Finalize(); + MPI_Finalize(); + return rval; + } + + } else if (rank == dest) { + /* Test 1 */ + MPI_Send( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD ); + MPI_Recv( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD, &status ); + 
MPI_Recv( buffer, act_size, MPI_INT, src, 2, MPI_COMM_WORLD, &status ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, src, 13, + MPI_BOTTOM, 0, MPI_INT, src, 13, + MPI_COMM_WORLD, &status ); + MPI_Recv( buffer, act_size, MPI_INT, src, 1, MPI_COMM_WORLD, &status ); + + Test_Waitforall( ); + Test_Finalize(); + MPI_Finalize(); + } else { + fprintf(stderr, "*** This program uses exactly 2 processes! ***\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/longmsgs.c b/teshsuite/smpi/mpich-test/pt2pt/longmsgs.c new file mode 100644 index 0000000000..154c945ba7 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/longmsgs.c @@ -0,0 +1,201 @@ +#include "test.h" +#include "mpi.h" +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif + +#define MIN_MESSAGE_LENGTH 256 +#define MAX_MESSAGE_LENGTH (16*1024*1024) +#define TAG1 1 +#define TAG2 2 +#define TAG3 3 +#define TAG4 4 +#define TAGSR 101 + +int verbose = 0; + +void Resetbuf( char *, int ); +void Checkbuf( char *, int, MPI_Status * ); + +void Resetbuf( char *buf, int len ) +{ + int i; + for (i=0; i 10) break; + } + ival++; + } + if (err) MPI_Abort( MPI_COMM_WORLD, 1 ); +} + +int main( int argc, char *argv[] ) +{ + int msglen, i; + int msglen_min = MIN_MESSAGE_LENGTH; + int msglen_max = MAX_MESSAGE_LENGTH; + int rank,poolsize,Master; + char *sendbuf,*recvbuf; + char ival; + MPI_Request request; + MPI_Status status; + + MPI_Init(&argc,&argv); + MPI_Comm_size(MPI_COMM_WORLD,&poolsize); + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + + if(poolsize != 2) { + printf("Expected exactly 2 MPI processes\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + +/* + The following test allows this test to run on small-memory systems + that support the sysconf call interface. This test keeps the test from + becoming swap-bound. For example, on an old Linux system or a + Sony Playstation 2 (really!) 
+ */ +#if defined(HAVE_SYSCONF) && defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE) + { + long n_pages, pagesize; + int actmsglen_max; + n_pages = sysconf( _SC_PHYS_PAGES ); + pagesize = sysconf( _SC_PAGESIZE ); + /* We want to avoid integer overflow in the size calculation. + The best way is to avoid computing any products (such + as total memory = n_pages * pagesize) and instead + compute a msglen_max that fits within 1/4 of the available + pages */ + if (n_pages > 0 && pagesize > 0) { + /* Recompute msglen_max */ + int msgpages = 4 * ((msglen_max + pagesize - 1)/ pagesize); + while (n_pages < msgpages) { msglen_max /= 2; msgpages /= 2; } + } + /* printf ( "before = %d\n", msglen_max ); */ + MPI_Allreduce( &msglen_max, &actmsglen_max, 1, MPI_INT, + MPI_MIN, MPI_COMM_WORLD ); + msglen_max = actmsglen_max; + /* printf ( "after = %d\n", msglen_max ); */ + } +#endif + + Master = (rank == 0); + + if(Master && verbose) + printf("Size (bytes)\n------------\n"); + for(msglen = msglen_min; msglen <= msglen_max; msglen *= 2) { + + sendbuf = malloc(msglen); + recvbuf = malloc(msglen); + if(sendbuf == NULL || recvbuf == NULL) { + printf("Can't allocate %d bytes\n",msglen); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + ival = 0; + for (i=0; i +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +#ifndef MAXNP +#define MAXNP 16 +#endif + +/* + Test to make sure that nonblocking routines actually work. This + stresses them by sending large numbers of requests and receiving them + piecemeal. 
+ */ +int main( int argc, char **argv ) +{ + int count, tag, nsend, myid, np, rcnt, scnt, i, j; + int *(sbuf[MAXNP]), *(rbuf[MAXNP]); + MPI_Status status; + MPI_Request *rsend, *rrecv; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &myid ); + MPI_Comm_size( MPI_COMM_WORLD, &np ); + + if (np > MAXNP) { + fprintf( stderr, + "This test must run with at most %d processes\n", MAXNP ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + nsend = 3 * np; + rsend = (MPI_Request *) malloc ( nsend * sizeof(MPI_Request) ); + rrecv = (MPI_Request *) malloc ( nsend * sizeof(MPI_Request) ); + if (!rsend || !rrecv) { + fprintf( stderr, "Failed to allocate space for requests\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + for (count = 1; count < 10000; count *= 2) { + for (i=0; i +#include +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + Test to make sure that nonblocking routines actually work + In this example, we assume that we do not know the message + sizes ahead of time. + + Just like nblock, but with the probe test. 
+*/ + +int main( int argc, char **argv ) +{ + int count, tag, nsend, myid, np, rcnt, scnt, i, j, *send_buf; + int length, finished; + int baselen = 1; + int **recv_buf; + MPI_Status status, rtn_status; + MPI_Request *rsend, *rrecv; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &myid ); + MPI_Comm_size( MPI_COMM_WORLD, &np ); +/* + MPE_Errors_call_dbx_in_xterm( (argv)[0], (char *)0 ); + MPE_Signals_call_debugger(); + */ + if (argc > 2 && argv[1] && strcmp( argv[1], "-first" ) == 0) + baselen = atoi(argv[2]); + +/* malloc buffers */ + nsend = 3 * np; + rsend = (MPI_Request *) malloc ( nsend * sizeof(MPI_Request) ); + rrecv = (MPI_Request *) malloc ( nsend * sizeof(MPI_Request) ); + recv_buf = (int **) malloc ( nsend * sizeof(int *) ); + if (!rsend || !rrecv) { + fprintf( stderr, "Failed to allocate space for requests\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + for (count = baselen; count < 10000; count *= 2) { + /* We'll send/recv from everyone */ + scnt = 0; + rcnt = 0; + + /* do sends */ + send_buf = (int *)malloc( count * sizeof(int) ); + for (j=0; j<3; j++) { + tag = j; + for (i=0; i +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char *argv[] ) +{ + int a[4]; + int i, nproc; + int rank, right, left; + MPI_Status status; + MPI_Request req[4]; + int index, it, count, errcnt = 0; + + /* start up */ + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &nproc); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + /* set up processor chain (Apps should use Cart_create/shift) */ + left = (rank == 0) ? MPI_PROC_NULL : rank - 1; + right = (rank == nproc - 1) ? MPI_PROC_NULL : rank + 1; + + /* initialize local matrix */ + /* globally: a[i] = i, i = 1 .. 
2*nproc */ + /* locally : a[i] = 2*rank+i, i=1,2 */ + a[0] = -1; + a[1] = 2*rank + 1; + a[2] = 2*rank + 2; + a[3] = -1; + + /* start all receives and sends */ + MPI_Irecv(&a[0], 1, MPI_INT, left, 1, MPI_COMM_WORLD, &req[0]); + MPI_Irecv(&a[3], 1, MPI_INT, right, 0, MPI_COMM_WORLD, &req[3]); + MPI_Isend(&a[1], 1, MPI_INT, left, 0, MPI_COMM_WORLD, &req[1]); + MPI_Isend(&a[2], 1, MPI_INT, right, 1, MPI_COMM_WORLD, &req[2]); + + for (it=0; it<4; it++) { + status.MPI_SOURCE = nproc; + status.MPI_TAG = nproc; + MPI_Waitany( 4, req, &index, &status ); + if (index == 0 && left == MPI_PROC_NULL) { + if (status.MPI_TAG != MPI_ANY_TAG || + status.MPI_SOURCE != MPI_PROC_NULL) { + errcnt ++; + fprintf( stderr, "Incorrect null status for left\n" ); + } + MPI_Get_count( &status, MPI_INT, &count ); + if (count != 0) { + errcnt ++; + fprintf( stderr, "Incorrect null status for left (count)\n" ); + } + } + else if (index == 3 && right == MPI_PROC_NULL) { + if (status.MPI_TAG != MPI_ANY_TAG || + status.MPI_SOURCE != MPI_PROC_NULL) { + errcnt ++; + fprintf( stderr, "Incorrect null status for right\n" ); + } + MPI_Get_count( &status, MPI_INT, &count ); + if (count != 0) { + errcnt ++; + fprintf( stderr, "Incorrect null status for right (count)\n" ); + } + } + } + + /* Test results */ + if (left == MPI_PROC_NULL) { + if (a[0] != -1) { + fprintf( stderr, "Expected -1, found %d in left partner\n", a[0] ); + errcnt ++; + } + } + else { + if (a[0] != 2 * left + 2) { + fprintf( stderr, "Expected %d, found %d in left partner\n", + 2 * left + 2, a[0] ); + errcnt ++; + } + } + + if (right == MPI_PROC_NULL) { + if (a[3] != -1) { + fprintf( stderr, "Expected -1, found %d in right partner\n", a[3] ); + errcnt ++; + } + } + else { + if (a[3] != 2 * right + 1) { + fprintf( stderr, "Expected %d, found %d in right partner\n", + 2 * right + 1, a[3] ); + errcnt ++; + } + } + + + i = errcnt; + MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (rank == 0) { + if (errcnt > 0) { + 
printf( "Found %d errors in the run \n", errcnt ); + } + else + printf( "No errors in handling MPI_PROC_NULL\n" ); + } + + /* clean up */ + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/nullproc.std b/teshsuite/smpi/mpich-test/pt2pt/nullproc.std new file mode 100644 index 0000000000..02779e272d --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/nullproc.std @@ -0,0 +1,3 @@ +*** Testing handling of MPI_PROC_NULL *** +No errors in handling MPI_PROC_NULL +*** Testing handling of MPI_PROC_NULL *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/nullproc2.c b/teshsuite/smpi/mpich-test/pt2pt/nullproc2.c new file mode 100644 index 0000000000..fcffd5a7f3 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/nullproc2.c @@ -0,0 +1,129 @@ +/* + * Test for null proc handling with blocking routines + */ + + +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char *argv[] ) +{ + int a[4]; + int i, nproc; + int rank, right, left; + MPI_Status st[2], sts[2]; + MPI_Request req[2]; + int count, errcnt = 0; + + /* start up */ + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &nproc); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + /* set up processor chain (Apps should use Cart_create/shift) */ + left = (rank == 0) ? MPI_PROC_NULL : rank - 1; + right = (rank == nproc - 1) ? MPI_PROC_NULL : rank + 1; + + /* initialize local matrix */ + /* globally: a[i] = i, i = 1 .. 
2*nproc */ + /* locally : a[i] = 2*rank+i, i=1,2 */ + a[0] = -1; + a[1] = 2*rank + 1; + a[2] = 2*rank + 2; + a[3] = -1; + + /* start all receives and sends */ + MPI_Isend(&a[1], 1, MPI_INT, left, 0, MPI_COMM_WORLD, &req[0]); + MPI_Isend(&a[2], 1, MPI_INT, right, 1, MPI_COMM_WORLD, &req[1]); + st[0].MPI_SOURCE = nproc; + st[0].MPI_TAG = -1; + st[1].MPI_SOURCE = nproc; + st[1].MPI_TAG = -1; + MPI_Recv(&a[0], 1, MPI_INT, left, 1, MPI_COMM_WORLD, &st[0]); + MPI_Recv(&a[3], 1, MPI_INT, right, 0, MPI_COMM_WORLD, &st[1]); + MPI_Waitall( 2, req, sts ); + + /* Test the end points */ + if (left == MPI_PROC_NULL) { + if (st[0].MPI_TAG != MPI_ANY_TAG || + st[0].MPI_SOURCE != MPI_PROC_NULL) { + errcnt ++; + fprintf( stderr, "Incorrect null status for left\n" ); + if (st[0].MPI_SOURCE != MPI_PROC_NULL) { + fprintf( stderr, "Source returned was %d but should be %d\n", + st[0].MPI_SOURCE, MPI_PROC_NULL ); + } + } + MPI_Get_count( &st[0], MPI_INT, &count ); + if (count != 0) { + errcnt ++; + fprintf( stderr, "Incorrect null status for left (count)\n" ); + fprintf( stderr, "Count was %d but should be 0\n", count ); + } + } + else if (right == MPI_PROC_NULL) { + if (st[1].MPI_TAG != MPI_ANY_TAG || + st[1].MPI_SOURCE != MPI_PROC_NULL) { + errcnt ++; + fprintf( stderr, "Incorrect null status for right\n" ); + if (st[1].MPI_SOURCE != MPI_PROC_NULL) { + fprintf( stderr, "Source returned was %d but should be %d\n", + st[1].MPI_SOURCE, MPI_PROC_NULL ); + } + } + MPI_Get_count( &st[1], MPI_INT, &count ); + if (count != 0) { + errcnt ++; + fprintf( stderr, "Incorrect null status for right (count)\n" ); + fprintf( stderr, "Count was %d but should be 0\n", count ); + } + } + + /* Test results */ + if (left == MPI_PROC_NULL) { + if (a[0] != -1) { + fprintf( stderr, "Expected -1, found %d in left partner\n", a[0] ); + errcnt ++; + } + } + else { + if (a[0] != 2 * left + 2) { + fprintf( stderr, "Expected %d, found %d in left partner\n", + 2 * left + 2, a[0] ); + errcnt ++; + } + } + + if (right 
== MPI_PROC_NULL) { + if (a[3] != -1) { + fprintf( stderr, "Expected -1, found %d in right partner\n", a[3] ); + errcnt ++; + } + } + else { + if (a[3] != 2 * right + 1) { + fprintf( stderr, "Expected %d, found %d in right partner\n", + 2 * right + 1, a[3] ); + errcnt ++; + } + } + + + i = errcnt; + MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (rank == 0) { + if (errcnt > 0) { + printf( "Found %d errors in the run \n", errcnt ); + } + else + printf( "No errors in handling MPI_PROC_NULL\n" ); + } + + /* clean up */ + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/nullproc2.std b/teshsuite/smpi/mpich-test/pt2pt/nullproc2.std new file mode 100644 index 0000000000..81711eff0e --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/nullproc2.std @@ -0,0 +1,3 @@ +*** Testing handling of MPI_PROC_NULL in blocking Recv *** +No errors in handling MPI_PROC_NULL +*** Testing handling of MPI_PROC_NULL in blocking Recv *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/order.c b/teshsuite/smpi/mpich-test/pt2pt/order.c new file mode 100644 index 0000000000..6795d71797 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/order.c @@ -0,0 +1,71 @@ +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + + +int main( int argc, char *argv[] ) +{ + int easy; + int rank; + int size; + int a; + int b; + MPI_Request request; + MPI_Status status; + double t1, t0; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + /* This test depends on a working wtime. Make a simple check */ + t0 = MPI_Wtime(); + if (t0 == 0 && MPI_Wtime() == 0) { + int loopcount = 1000000; + /* This test is too severe (systems with fast + processors and large MPI_Wtick values can + fail. 
Try harder to test MPI_Wtime */ + while (loopcount-- && MPI_Wtime() == 0) ; + if (loopcount <= 0) { + fprintf( stderr, + "MPI_WTIME is returning 0; a working value is needed\n\ +for this test.\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + t0 = MPI_Wtime(); + } + + easy = 1; + + MPI_Barrier( MPI_COMM_WORLD ); + if (rank == 0) + { + MPI_Irecv(&a, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request); + MPI_Recv(&b, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &status); + MPI_Wait(&request, &status); + /* Check for correct values: */ + if (a == 1 && b == 2) { + printf( " No Errors\n" ); + } + else { + printf("rank = %d, a = %d, b = %d\n", rank, a, b); + } + } + else + { + t1 = MPI_Wtime(); + smpi_sleep(easy); + //while (MPI_Wtime() - t1 < easy) ; + a = 1; + b = 2; + MPI_Send(&a, 1, MPI_INT, 0, 0, MPI_COMM_WORLD); + MPI_Send(&b, 1, MPI_INT, 0, 0, MPI_COMM_WORLD); + } + + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/overtake.c b/teshsuite/smpi/mpich-test/pt2pt/overtake.c new file mode 100644 index 0000000000..209285f75e --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/overtake.c @@ -0,0 +1,290 @@ +/* + * Program to test that the "no overtaking messages" semantics + * of point to point communications in MPI is satisfied. + * A long message is sent using MPI_Send and received using MPI_Recv, + * followed by lots of short messages sent the same way. + * Then Send -> Irecv, Bsend -> Recv, Bsend -> Irecv, + * Isend -> Recv, and Isend -> Irecv are all tried in the + * same way. 
+ * + * Patrick Bridges + * bridges@mcs.anl.gov + * patrick@CS.MsState.Edu + */ + +#include +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 + +static int src = 0; +static int dest = 1; + +/* Which tests to perform (not yet implemented) */ +/* static int Do_Buffer = 1; */ +/* static int Do_Standard = 1; */ +/* In order to quiet noisy C compilers, we provide ANSI-style prototypes + where possible */ +void Generate_Data ( double *, int ); +void Normal_Test_Send ( double *, int ); +void Normal_Test_Recv ( double *, int ); +void Buffered_Test_Send ( double *, int ); +void Buffered_Test_Recv ( double *, int ); +void Async_Test_Send ( double *, int ); +void Async_Test_Recv ( double *, int ); +int Check_Data ( double *, int ); +void Clear_Buffer ( double *, int ); + + +void Generate_Data(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = (double)i+1; +} + +void Normal_Test_Send(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + + for (j = 0; j < 2; j++) { + /* send a long message */ + MPI_Send(buffer, (buff_size/2 - 10), MPI_DOUBLE, dest, 2000, + MPI_COMM_WORLD); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Send(buffer++, 1, MPI_DOUBLE, dest, 2000, MPI_COMM_WORLD); + } +} + +void Normal_Test_Recv(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + MPI_Status Stat; + + for (j = 0; j < 2; j++) { + /* Receive a long message */ + MPI_Recv(buffer, (buff_size/2 - 10), MPI_DOUBLE, src, + 2000, MPI_COMM_WORLD, &Stat); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Recv(buffer++, 1, MPI_DOUBLE, src, 2000, MPI_COMM_WORLD, &Stat); + } +} + +void Buffered_Test_Send(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j; + void *bbuffer; + int size; + + for (j = 0; j < 2; j++) { + /* send a long message */ + MPI_Bsend(buffer, (buff_size/2 - 10), MPI_DOUBLE, 
dest, 2000, + MPI_COMM_WORLD); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Bsend(buffer++, 1, MPI_DOUBLE, + dest, 2000, MPI_COMM_WORLD); + /* Force this set of Bsends to complete */ + MPI_Buffer_detach( &bbuffer, &size ); + MPI_Buffer_attach( bbuffer, size ); + } +} + +void Async_Test_Send(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j, req = 0; + MPI_Status Stat[22]; + MPI_Request Hand[22]; + + for (j = 0; j < 2; j++) { + /* send a long message */ + MPI_Isend(buffer, (buff_size/2 - 10), MPI_DOUBLE, + dest, 2000, MPI_COMM_WORLD, &(Hand[req++])); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Isend(buffer++, 1, MPI_DOUBLE, dest, 2000, + MPI_COMM_WORLD, &(Hand[req++])); + } + MPI_Waitall(req, Hand, Stat); +} + +void Async_Test_Recv(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i, j, req = 0; + MPI_Status Stat[22]; + MPI_Request Hand[22]; + + for (j = 0; j < 2; j++) { + /* Receive a long message */ + MPI_Irecv(buffer, (buff_size/2 - 10), MPI_DOUBLE, src, + 2000, MPI_COMM_WORLD, &(Hand[req++])); + buffer += buff_size/2 - 10; + /* Followed by 10 short ones */ + for (i = 0; i < 10; i++) + MPI_Irecv(buffer++, 1, MPI_DOUBLE, src, 2000, + MPI_COMM_WORLD, &(Hand[req++])); + } + MPI_Waitall(req, Hand, Stat); +} + +int Check_Data(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + int err = 0; + + for (i = 0; i < buff_size; i++) + if (buffer[i] != (i + 1)) { + err++; + fprintf( stderr, "Value at %d is %f, should be %f\n", i, + buffer[i], (double)(i+1) ); + if (err > 10) return 1; + } + return err; +} + +void Clear_Buffer(buffer, buff_size) +double *buffer; +int buff_size; +{ + int i; + for (i = 0; i < buff_size; i++) + buffer[i] = -1; +} + + +int main( int argc, char **argv ) +{ + int rank; /* My Rank (0 or 1) */ + double buffer[SIZE], *tmpbuffer, *tmpbuf; + int tsize, bsize; + char *Current_Test = NULL; + + MPI_Init(&argc, 
&argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == src) { + Generate_Data(buffer, SIZE); + Normal_Test_Send(buffer, SIZE); + Normal_Test_Send(buffer, SIZE); +#if !defined(SIMPLE_SENDS) && !defined(NO_BUFFERED_SENDS) + MPI_Pack_size( SIZE, MPI_DOUBLE, MPI_COMM_WORLD, &bsize ); + tmpbuffer = (double *) malloc( bsize + MPI_BSEND_OVERHEAD ); + if (!tmpbuffer) { + fprintf( stderr, "Could not allocate bsend buffer of size %d\n", + bsize ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + MPI_Buffer_attach( tmpbuffer, bsize + MPI_BSEND_OVERHEAD ); + Buffered_Test_Send(buffer, SIZE); + Buffered_Test_Send(buffer, SIZE); + MPI_Buffer_detach( &tmpbuf, &tsize ); +#endif +#if !defined(SIMPLE_SENDS) && !defined(NO_ASYNC_SENDS) + Async_Test_Send(buffer, SIZE); + Async_Test_Send(buffer, SIZE); +#endif + Test_Waitforall( ); + + } else if (rank == dest) { + Test_Init("overtake", rank); + /* Test 1 */ + Current_Test = (char*)"Overtaking Test (Normal Send -> Normal Recieve)"; + Normal_Test_Recv(buffer, SIZE); + + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + + /* Test 2 */ + Clear_Buffer(buffer, SIZE); + Current_Test = (char*)"Overtaking Test (Normal Send -> Async Receive)"; + Async_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + +#if !defined(SIMPLE_SENDS) && !defined(NO_BUFFERED_SENDS) + /* Test 3 */ + Current_Test = (char*)"Overtaking Test (Buffered Send -> Normal Recieve)"; + Clear_Buffer(buffer, SIZE); + Normal_Test_Recv(buffer, SIZE); + + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + + /* Test 4 */ + Clear_Buffer(buffer, SIZE); + Current_Test = (char*)"Overtaking Test (Buffered Send -> Async Receive)"; + Async_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); +#endif + +#if !defined(SIMPLE_SENDS) && !defined(NO_ASYNC_SENDS) + /* 
Test 5 */ + Current_Test = (char*)"Overtaking Test (Async Send -> Normal Receive)"; + Clear_Buffer(buffer, SIZE); + Normal_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); + + /* Test 6 */ + Clear_Buffer(buffer, SIZE); + Current_Test = (char*)"Overtaking Test (Async Send -> Async Receive)"; + Async_Test_Recv(buffer, SIZE); + if (Check_Data(buffer, SIZE)) + Test_Failed(Current_Test); + else + Test_Passed(Current_Test); +#endif + + Test_Waitforall( ); + { + int rval = Summarize_Test_Results(); /* Returns number of tests; + that failed */ + Test_Finalize(); + MPI_Finalize(); + return rval; + } + } else { + fprintf(stderr, "*** This program uses exactly 2 processes! ***\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + MPI_Finalize(); + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/pack.c b/teshsuite/smpi/mpich-test/pt2pt/pack.c new file mode 100644 index 0000000000..ee9a2ddc68 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/pack.c @@ -0,0 +1,77 @@ +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + Check pack/unpack of mixed datatypes. 
+ */ +#define BUF_SIZE 100 +int main( int argc, char **argv ) +{ + int myrank; + char buffer[BUF_SIZE]; + int n, size, src, dest, errcnt, errs; + double a,b; + int pos; + + MPI_Status status; + MPI_Init(&argc, &argv); + + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + + src = 0; + dest = 1; + + src = 1; + dest = 0; + + errcnt = 0; + if (myrank == src) + { + pos = 0; + n = 10; + a = 1.1; + b = 2.2; + MPI_Pack(&n, 1, MPI_INT, buffer, BUF_SIZE, &pos, MPI_COMM_WORLD); + MPI_Pack(&a, 1, MPI_DOUBLE, buffer, BUF_SIZE, &pos, + MPI_COMM_WORLD); + MPI_Pack(&b, 1, MPI_DOUBLE, buffer, BUF_SIZE, &pos, + MPI_COMM_WORLD); + /* printf( "%d\n", pos ); */ + MPI_Send(&pos, 1, MPI_INT, dest, 999, MPI_COMM_WORLD); + MPI_Send(buffer, pos, MPI_PACKED, dest, 99, MPI_COMM_WORLD); + } + else + { + MPI_Recv(&size, 1, MPI_INT, src, 999, MPI_COMM_WORLD, &status); + MPI_Recv(buffer, size, MPI_PACKED, src, 99, + MPI_COMM_WORLD, &status); + pos = 0; + MPI_Unpack(buffer, size, &pos, &n, 1, MPI_INT, MPI_COMM_WORLD); + MPI_Unpack(buffer, size, &pos, &a, 1, MPI_DOUBLE, MPI_COMM_WORLD); + MPI_Unpack(buffer, size, &pos, &b, 1, MPI_DOUBLE, MPI_COMM_WORLD); + /* Check results */ + if (n != 10) { + errcnt++; + printf( "Wrong value for n; got %d expected %d\n", n, 10 ); + } + if (a != 1.1) { + errcnt++; + printf( "Wrong value for a; got %f expected %f\n", a, 1.1 ); + } + if (b != 2.2) { + errcnt++; + printf( "Wrong value for b; got %f expected %f\n", b, 2.2 ); + } + } + MPI_Allreduce( &errcnt, &errs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (myrank == 0) { + if (errs == 0) printf( "No errors\n" ); + else printf( "%d errors\n", errs ); + } + MPI_Finalize(); +return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/pack.std b/teshsuite/smpi/mpich-test/pt2pt/pack.std new file mode 100644 index 0000000000..8239e2033f --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/pack.std @@ -0,0 +1,3 @@ +*** Testing MPI_Pack *** +No errors +*** Testing MPI_Pack *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/persist.c 
b/teshsuite/smpi/mpich-test/pt2pt/persist.c new file mode 100644 index 0000000000..7ed605ea02 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/persist.c @@ -0,0 +1,54 @@ +#include "mpi.h" +#include +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + * This example causes the IBM SP2 MPI version to generate the message + * ERROR: 0032-158 Persistent request already active (2) in MPI_Startall, task 0 + * in the SECOND set of MPI_Startall (after the MPI_Request_free). + */ +int main( int argc, char **argv ) +{ + MPI_Request r[4]; + MPI_Status statuses[4]; + double sbuf1[10], sbuf2[10]; + double rbuf1[10], rbuf2[10]; + int size, rank, up_nbr, down_nbr, i; + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + up_nbr = (rank + 1) % size; + down_nbr = (size + rank - 1) % size; + + MPI_Recv_init( rbuf1, 10, MPI_DOUBLE, down_nbr, 0, MPI_COMM_WORLD, &r[0] ); + MPI_Recv_init( rbuf2, 10, MPI_DOUBLE, up_nbr, 1, MPI_COMM_WORLD, &r[1] ); + MPI_Send_init( sbuf1, 10, MPI_DOUBLE, up_nbr, 0, MPI_COMM_WORLD, &r[2] ); + MPI_Send_init( sbuf2, 10, MPI_DOUBLE, down_nbr, 1, MPI_COMM_WORLD, &r[3] ); + MPI_Startall( 4, r ); + MPI_Waitall( 4, r, statuses ); + + for (i=0; i<4; i++) { + MPI_Request_free( &r[i] ); + } + + MPI_Recv_init( rbuf1, 10, MPI_DOUBLE, down_nbr, 0, MPI_COMM_WORLD, &r[0] ); + MPI_Recv_init( rbuf2, 10, MPI_DOUBLE, up_nbr, 1, MPI_COMM_WORLD, &r[1] ); + MPI_Send_init( sbuf1, 10, MPI_DOUBLE, up_nbr, 0, MPI_COMM_WORLD, &r[2] ); + MPI_Send_init( sbuf2, 10, MPI_DOUBLE, down_nbr, 1, MPI_COMM_WORLD, &r[3] ); + MPI_Startall( 4, r ); + MPI_Waitall( 4, r, statuses ); + + for (i=0; i<4; i++) { + MPI_Request_free( &r[i] ); + } + + if (rank == 0) printf( "No errors\n" ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/persist.std b/teshsuite/smpi/mpich-test/pt2pt/persist.std new file mode 100644 index 0000000000..d07f2c7255 --- /dev/null +++ 
b/teshsuite/smpi/mpich-test/pt2pt/persist.std @@ -0,0 +1,3 @@ +*** Testing MPI_Startall/Request_free *** +No errors +*** Testing MPI_Startall/Request_free *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/persist2.c b/teshsuite/smpi/mpich-test/pt2pt/persist2.c new file mode 100644 index 0000000000..7bdfeaf21a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/persist2.c @@ -0,0 +1,80 @@ +#include "mpi.h" +#include +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + */ +int main( int argc, char **argv ) +{ + MPI_Request r[4]; + MPI_Status statuses[4]; + double sbuf1[10], sbuf2[10]; + double rbuf1[10], rbuf2[10]; + double userbuf[40+4*MPI_BSEND_OVERHEAD]; + int size, rank, up_nbr, down_nbr, i, err, toterr; + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + up_nbr = (rank + 1) % size; + down_nbr = (size + rank - 1) % size; + + for (i=0; i<10; i++) { + sbuf1[i] = (double)i; + sbuf2[i] = (double)(i+20); + } + MPI_Buffer_attach( userbuf, 40*sizeof(double) + 4 * MPI_BSEND_OVERHEAD ); + + MPI_Recv_init( rbuf1, 10, MPI_DOUBLE, down_nbr, 0, MPI_COMM_WORLD, &r[0] ); + MPI_Recv_init( rbuf2, 10, MPI_DOUBLE, up_nbr, 1, MPI_COMM_WORLD, &r[1] ); + MPI_Bsend_init( sbuf1, 10, MPI_DOUBLE, up_nbr, 0, MPI_COMM_WORLD, &r[2] ); + MPI_Bsend_init( sbuf2, 10, MPI_DOUBLE, down_nbr, 1, MPI_COMM_WORLD, &r[3] ); + MPI_Startall( 4, r ); + MPI_Waitall( 4, r, statuses ); + + for (i=0; i<4; i++) { + MPI_Request_free( &r[i] ); + } + + MPI_Recv_init( rbuf1, 10, MPI_DOUBLE, down_nbr, 0, MPI_COMM_WORLD, &r[0] ); + MPI_Recv_init( rbuf2, 10, MPI_DOUBLE, up_nbr, 1, MPI_COMM_WORLD, &r[1] ); + MPI_Bsend_init( sbuf1, 10, MPI_DOUBLE, up_nbr, 0, MPI_COMM_WORLD, &r[2] ); + MPI_Bsend_init( sbuf2, 10, MPI_DOUBLE, down_nbr, 1, MPI_COMM_WORLD, &r[3] ); + MPI_Startall( 4, r ); + MPI_Waitall( 4, r, statuses ); + + for (i=0; i<4; i++) { + MPI_Request_free( &r[i] ); + } + + /* Check data */ + err = 0; + for (i=0; 
i<10;i++) { + if (rbuf1[i] != i) { + err++; + if (err < 10) + fprintf( stderr, "Expected %d, rbuf1[%d] = %f\n", i, i, + rbuf1[i] ); + } + if (rbuf2[i] != i+20) { + err++; + if (err < 10) + fprintf( stderr, "Expected %d, rbuf2[%d] = %f\n", i+20, i, + rbuf2[i] ); + } + } + + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (rank == 0) { + if (toterr == 0) printf( "No errors\n" ); + else printf( "Found %d errors\n", toterr ); + } + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/persist2.std b/teshsuite/smpi/mpich-test/pt2pt/persist2.std new file mode 100644 index 0000000000..985fbdd10a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/persist2.std @@ -0,0 +1,3 @@ +*** Testing MPI_Startall(Bsend)/Request_free *** +No errors +*** Testing MPI_Startall(Bsend)/Request_free *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/persistent.c b/teshsuite/smpi/mpich-test/pt2pt/persistent.c new file mode 100644 index 0000000000..7c8e21395f --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/persistent.c @@ -0,0 +1,56 @@ +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + int rank, size, i, len, actlen, expected_len; + MPI_Request rq; + MPI_Status status; + double data[100]; + + len = 100; + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + if (size < 3 ) { + fprintf( stderr, "This test requires more than 2 proceses\n" ); + MPI_Finalize(); + return 1; + } + + if (rank == 0) { + MPI_Recv_init( data, len, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, + MPI_COMM_WORLD, &rq ); + for (i=1; i, caused problems +C on t3d with -device=t3d -arch=cray_t3d -no_short_longs -nodevdebug +C +C +C This is a very time-consuming program on a workstation cluster. 
+C For this reason, I've modified it to do fewer tests (1/10 as many) +C +c +c This is a simple benchmark designed to measure the latency and bandwidth +c of a message-passing MIMD computer. It is currently set up to run with +c MPI. +c +c Compile (MPI mpich version 1.0.11 or later) with +c % mpif77 -o pong pong.f +c +c (mpif77 is a script that hides details about libraries from the user) +c +c Execute as +c % mpirun -np 2 pong +c +c Make sure that ~mpi/bin is in your path. +c +c Note that the MPI-specific calls are: +c +c MPI_INIT +c MPI_COMM_RANK +c MPI_COMM_SIZE +c +c MPI_Wtime +c MPI_Wtick +c +c MPI_SEND +c MPI_RECV +c +c MPI_FINALIZE +c +c Some care needs to be taken in using the +c appropriate timing routine. Check the value of MPI_Wtick() to see if +c the clock resolution is reasonable for your tests. +c +c The benchmark measures +c the time to send a message of length N bytes from node 0 to node 1 and +c receive an acknowledging copy of that message which node 1 sends back to +c node 0. Note that node 1 waits for the completion of its receive before +c sending the message back to node 0. Note also that the program is not +c necessarily optimal any given system, but is intended +c to provide a reasonably transparent baseline measurement. +c +c For message lengths len (= num of doubles * sizedouble), +c a total of msgspersample ping-pong message exchanges are made, +c and half of the average round-trip time (i.e. the one-way message +c time) is then fit by a linear function y(N) = a + b*N via a least squares +c linear regression. The coefficient a is then interpreted as the latency +c (time to send a 0-length message) and b as the inverse bandwidth (i.e. 1/b = +c bandwidth in bytes/sec) +c +c The entire procedure is repeated twice, with the bandwidth, latency, and +c measured and fitted values of the message times reported for each instance. 
+c +c The underlying message passing performance characteristics of a +c particular system may not necessarily be accurately modeled by the simple +c linear function assumed here. This may be reflected in a dependency of +c the observed latency and bandwidth on the range of message sizes used. +c +c Original author: +c R. Leary, San Diego Supercomputer Center +c leary@sdsc.edu 9/20/94 +c +c Modified for MPI 10/27/95 +c frost@sdsc.edu + +c +c =========================== program header =========================== +c + + program pong + implicit none + include 'mpif.h' + +c sizedouble = size in bytes of double precision element + integer sizedouble + parameter(sizedouble=8) + +c Note: set these parameters to one of 3 cases: +c 1. size (each sample) < packetization length of architecture +c 2. size (each sample) > packetization length of architecture +c 3. size (1st sample) < packetization length of architecture +c & size (all others) > packetization length of architecture +c +c Some known packetization lengths: +c Paragon ~1500 bytes +c Cray T3D ~1500 bytes +c TCP/IP networks 256-1024 bytes +c +c samples = the number of data points collected + integer samples + parameter(samples=40) +c initsamplesize = # of elements transmitted in 1st sample + integer initsamplesize + parameter(initsamplesize=125) +c samplesizeinc = sample size increase per iteration (linear rate) + integer samplesizeinc + parameter(samplesizeinc=125) +c parameter(samplesizeinc=1) +c msgspersample = the number of messages + integer msgspersample +c parameter(msgspersample=1000) + parameter(msgspersample=100) + +c The buffer array contains the message , while x(i) is the message size +c and y(i) the corresponding measured one-way average time. 
+c Note that buffer is a double precision array +c +c ibufcount = total number of elements in buffer + integer ibufcount + parameter(ibufcount=(initsamplesize+((samples-1)*samplesizeinc))) +c + double precision buffer(ibufcount) + double precision x(samples), y(samples) + double precision t1, t2 + double precision a, b, bandw + double precision sumx, sumy, sumxx, sumxy + double precision det, fit + + integer stat(MPI_STATUS_SIZE) + integer ierr, ierr1, ierr2 + integer nodenum, numprocs + integer idest + integer i, iter, sample + integer num + +c +c =========================== begin =========================== +c + + call MPI_INIT( ierr ) + call MPI_COMM_RANK( MPI_COMM_WORLD, nodenum, ierr ) + call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr ) + + if (numprocs .ne. 2) then + write (6,*) 'This program is only valid for 2 processors' + write (6,*) 'numprocs = ', numprocs + stop + endif + +c Put something into array + do 2 i=1,ibufcount + buffer(i) = dfloat(i) + 2 continue + + if (nodenum .eq. 
0) then + write (6,*) ' MPI pong test' + write (6,*) ' samples = ', samples + write (6,*) ' initsamplesize = ', initsamplesize + write (6,*) ' samplesizeinc = ', samplesizeinc + write (6,*) ' msgspersample = ', msgspersample + write (6,*) ' ibufcount = ', ibufcount + write (6,98) MPI_Wtick() + write (6,*) + endif + 98 format (' clock resolution = ',e10.5) + + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + +c +c =========================== main loop =========================== +c + +c Start main loop - iterate twice to generate two complete sets of timings + do 60 iter = 1,2 + do 40 sample = 1,samples + num = initsamplesize + ((sample-1)*samplesizeinc) + +c debug + write (6,99) nodenum, iter, sample, num + call MPI_BARRIER(MPI_COMM_WORLD, ierr) + 99 format ( 1x, 'PE = ', i1, ', iter = ',i1, + + ', sample = ', i3, ', num = ', i5 ) + +c Find initial elapsed time in seconds + + if(nodenum.eq.0) then +c Send message from node 0 to 1 and receive message from 1 + idest = 1 + t1 = MPI_Wtime() + do 20 i = 1,msgspersample + call MPI_SEND(buffer, num, MPI_DOUBLE_PRECISION, + + idest, 0, MPI_COMM_WORLD, ierr1) + call MPI_RECV(buffer, num, MPI_DOUBLE_PRECISION, + + MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + + stat, ierr2) + 20 continue + t2 = MPI_Wtime() + else +c Send message from node 1 to 0 and receive message from 0 + idest = 0 + t1 = MPI_Wtime() + do 21 i = 1,msgspersample + call MPI_RECV(buffer, num, MPI_DOUBLE_PRECISION, + + MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, + + stat, ierr2) + call MPI_SEND(buffer, num, MPI_DOUBLE_PRECISION, + + idest, 0, MPI_COMM_WORLD, ierr1) + 21 continue + t2 = MPI_Wtime() + endif + +c independent variable is message length: + x(sample) = dfloat(num * sizedouble) + +c dependent variable is average one-way transit time: + y(sample) = ((t2 - t1) * 0.5) / + + dfloat(msgspersample) + + 40 continue + +c now do linear least squares fit to data +c time = a + b*x + + if (nodenum .eq. 0) then + sumy = 0.d0 + sumx = 0.d0 + sumxy = 0.d0 + sumxx = 0. 
d0 + do 45 i=1,samples + sumx = sumx + x(i) + sumy = sumy + y(i) + sumxy = sumxy + ( x(i) * y(i) ) + sumxx = sumxx + ( x(i) * x(i) ) + 45 continue + + det = (dfloat(samples) * sumxx) - (sumx * sumx) + a = (1.d6 * ((sumxx * sumy) - (sumx * sumxy))) / det + b = (1.d6 * ((dfloat(samples) * sumxy) - (sumx * sumy))) / det + + write(6,*) + write(6,*) ' iter = ', iter + write(6,*) + write(6,*) ' least squares fit: time = a + b * (msg length)' + write(6,200) a + write(6,300) b + bandw = 1./b + write(6,400) bandw + write(6,*) + write(6,*) ' message observed fitted' + write(6,*) ' length(bytes) time(usec) time(usec)' + write(6,*) + do 50 i=1,samples + fit = a + b*x(i) + y(i) = y(i)*1.d6 + write(6,100) x(i),y(i),fit + 50 continue + endif + + 60 continue + +c +c =========================== end loop =========================== +c + + 100 format(3x,f8.0,5x,f12.2,5x,f12.2) + 200 format(5x,'a = latency = ',f8.2,' microseconds') + 300 format(5x,'b = inverse bandwidth = ' , f8.5,' secs/Mbyte') + 400 format(5x,'1/b = bandwidth = ',f8.2,' Mbytes/sec') + +c +c =========================== end program =========================== +c + + call MPI_FINALIZE(ierr) + stop + end diff --git a/teshsuite/smpi/mpich-test/pt2pt/probe.c b/teshsuite/smpi/mpich-test/pt2pt/probe.c new file mode 100644 index 0000000000..6d55b48185 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/probe.c @@ -0,0 +1,56 @@ +/* + This is a test of probe to receive a message of unknown length + */ + +#include +#include +#include "mpi.h" +#include "test.h" + +int main( int argc, char **argv ) +{ +int data, to, from, tag, maxlen, np, myid, src, dest; +MPI_Status status; + +MPI_Init( &argc, &argv ); +MPI_Comm_rank( MPI_COMM_WORLD, &myid ); +MPI_Comm_size( MPI_COMM_WORLD, &np ); + +/* dest writes out the received stats; for the output to be + consistant (with the final check), it should be procees 0 */ +if (argc > 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) { + dest = np - 1; + src = 0; + } +else { + src = np - 1; + dest = 
0; + } + +if (myid == src) { + to = dest; + tag = 2000; +#ifdef VERBOSE + printf( "About to send\n" ); +#endif + MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD ); + } +else { + tag = 2000; + from = MPI_ANY_SOURCE; + MPI_Probe( from, tag, MPI_COMM_WORLD, &status ); + MPI_Get_count( &status, MPI_INT, &maxlen ); + /* Here I'd normally allocate space; I'll just check that it is ok */ + if (maxlen > 1) + printf( "Error; size = %d\n", maxlen ); +#ifdef VERBOSE + printf( "About to receive\n" ); +#endif + MPI_Recv( &data, 1, MPI_INT, status.MPI_SOURCE, status.MPI_TAG, + MPI_COMM_WORLD, &status ); + } +MPI_Barrier( MPI_COMM_WORLD ); +Test_Waitforall( ); +MPI_Finalize(); +return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/probe1.c b/teshsuite/smpi/mpich-test/pt2pt/probe1.c new file mode 100644 index 0000000000..389283c39b --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/probe1.c @@ -0,0 +1,78 @@ +/* + This is a test of probe to receive a message of unknown type (used as a + server) + */ +#include +#include +#include "mpi.h" +#include "test.h" + +int main( int argc, char **argv ) +{ +int data, to, from, tag, maxlen, np, myid, flag, dest, src; +MPI_Status status, status1; + +MPI_Init( &argc, &argv ); +MPI_Comm_rank( MPI_COMM_WORLD, &myid ); +MPI_Comm_size( MPI_COMM_WORLD, &np ); + +/* dest writes out the received stats; for the output to be + consistant (with the final check), it should be procees 0 */ +if (argc > 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) { + dest = np - 1; + src = 0; + } +else { + src = np - 1; + dest = 0; + } + +if (myid == src) { + to = dest; + tag = 2000; +#ifdef VERBOSE + printf( "About to send\n" ); +#endif + MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD ); + tag = 2001; +#ifdef VERBOSE + printf( "About to send 'done'\n" ); +#endif + MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD ); + } +else { + /* Server loop */ + while (1) { + tag = MPI_ANY_TAG; + from = MPI_ANY_SOURCE; + /* Should really use MPI_Probe, but 
functionally this will work + (it is less efficient, however) */ + do { + MPI_Iprobe( from, tag, MPI_COMM_WORLD, &flag, &status ); + } while (!flag); + if (status.MPI_TAG == 2001) { +#ifdef VERBOSE + printf( "Received terminate message\n" ); +#endif + /* Actually need to receive it ... */ + MPI_Recv( &data, 1, MPI_INT, status.MPI_SOURCE, + status.MPI_TAG, MPI_COMM_WORLD, &status1 ); + break; + } + if (status.MPI_TAG == 2000) { + MPI_Get_count( &status, MPI_INT, &maxlen ); + if (maxlen > 1) + printf( "Error; size = %d\n", maxlen ); +#ifdef VERBOSE + printf( "About to receive\n" ); +#endif + MPI_Recv( &data, 1, MPI_INT, status.MPI_SOURCE, + status.MPI_TAG, MPI_COMM_WORLD, &status1 ); + } + } + } +MPI_Barrier( MPI_COMM_WORLD ); +Test_Waitforall( ); +MPI_Finalize(); +return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/relrank.c b/teshsuite/smpi/mpich-test/pt2pt/relrank.c new file mode 100644 index 0000000000..6becaf753d --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/relrank.c @@ -0,0 +1,58 @@ +#include "mpi.h" +#include +#include "test.h" + +/* + * Test that receives are done by relative rank, and that the status value + * contains the relative rank + */ +int main( int argc, char **argv ) +{ + int rank, new_world_rank, size, order, errcnt = 0, i; + int tmpint = 0; + MPI_Comm new_world; + MPI_Status s; + + MPI_Init(&argc,&argv); + + MPI_Comm_rank(MPI_COMM_WORLD,&rank); + MPI_Comm_size(MPI_COMM_WORLD,&size); + + order = size - rank - 1; + MPI_Comm_split(MPI_COMM_WORLD, 0, order, &new_world); + + MPI_Comm_rank ( new_world, &new_world_rank ); + + /* Make sure that the split worked correctly */ + if (new_world_rank != order) { + errcnt ++; + fprintf( stderr, "Comm split did not properly order ranks!\n" ); + } + if (new_world_rank==0) { + MPI_Send(&tmpint, 1, MPI_INT, 1, 0, new_world); + /* printf("%d(%d): Sent message to: %d\n", new_world_rank, rank, 1); */ + } + else if (new_world_rank == 1) { + MPI_Recv(&tmpint, 1, MPI_INT, 0, 0, new_world,&s); + if 
(s.MPI_SOURCE != 0) { + errcnt++; + fprintf( stderr, + "Source incorrect in recv status (%d should be %d)\n", + s.MPI_SOURCE, 0 ); + } + /* + printf("%d(%d): Recv message from: -> %d(%d) <- these 2 should equal\n", + new_world_rank, rank, 0, s.MPI_SOURCE); + */ + } + + MPI_Comm_free( &new_world ); + i = errcnt; + MPI_Allreduce( &i, &errcnt, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (errcnt > 0) { + printf( "Found %d errors in the run\n", errcnt ); + } + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/reqcreate.c b/teshsuite/smpi/mpich-test/pt2pt/reqcreate.c new file mode 100644 index 0000000000..16646cba4d --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/reqcreate.c @@ -0,0 +1,85 @@ +#include +#include "mpi.h" +#include +#include "test.h" + +/* Test request creation */ + +int main( int argc, char **argv ) +{ + int i, n, n_goal = 2048, rc, len, buf[1]; + MPI_Request *req_array; + MPI_Status status; + char msg[MPI_MAX_ERROR_STRING]; + + MPI_Init( &argc, &argv ); + MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ); + + n = n_goal; + + req_array = (MPI_Request *)malloc( n * sizeof(MPI_Request) ); + + for (i=0; i +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif +#include "mpi.h" + +#define MAX_REQ 10000 + +#define DEFAULT_REQ 100 +#define DEFAULT_LEN 10000 +#define DEFAULT_LOOP 10 + +int main( int argc, char **argv ) +{ + int rank, size, loop, max_loop = DEFAULT_LOOP, max_req = DEFAULT_REQ; + int buf_len = DEFAULT_LEN; + int i, j, errs = 0, toterrs; + MPI_Request r; + MPI_Status status; + int *(b[MAX_REQ]); + MPI_Datatype dtype; + int sendrank = 0, recvrank = 1; + + MPI_Init( &argc, &argv ); + + dtype = MPI_INT; + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + +/* + The following test allows this test to run on small-memory systems + that support the sysconf call interface. This test keeps the test from + becoming swap-bound. For example, on an old Linux system or a + Sony Playstation 2 (really!) 
+ */ +#if defined(HAVE_SYSCONF) && defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE) + if (rank == sendrank) + { + long n_pages, pagesize; + int msglen_max = max_req * buf_len * sizeof(int); + n_pages = sysconf( _SC_PHYS_PAGES ); + pagesize = sysconf( _SC_PAGESIZE ); + /* printf( "Total mem = %ld\n", n_pages * pagesize ); */ + /* We want to avoid integer overflow in the size calculation. + The best way is to avoid computing any products (such + as total memory = n_pages * pagesize) and instead + compute a msglen_max that fits within 1/4 of the available + pages */ + if (n_pages > 0 && pagesize > 0) { + /* Recompute msglen_max */ + int msgpages = 4 * ((msglen_max + pagesize - 1)/ pagesize); + while (n_pages < msgpages) { + msglen_max /= 2; msgpages /= 2; buf_len /= 2; + } + } + } +#else + /* printf( "No sysconf\n" ); */ +#endif + + /* Check command line args (allow usage even with one processor */ + argv++; + argc--; + while (argc--) { + if (strcmp( "-loop" , *argv ) == 0) { + argv++; argc--; + max_loop = atoi( *argv ); + } + else if (strcmp( "-req", *argv ) == 0) { + argv++; argc--; + max_req = atoi( *argv ); + } + else if (strcmp( "-len", *argv ) == 0) { + argv++; argc--; + buf_len = atoi( *argv ); + } + else { + fprintf( stderr, + "Usage: reqfree [ -loop n ] [ -req n ] [ -len n ]\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + argv++; + } + + MPI_Comm_size( MPI_COMM_WORLD, &size ); + if (size != 2) { + fprintf( stderr, "This program requires two processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* Assume only processor 0 has the command line */ + MPI_Bcast( &max_loop, 1, MPI_INT, 0, MPI_COMM_WORLD ); + MPI_Bcast( &max_req, 1, MPI_INT, 0, MPI_COMM_WORLD ); + MPI_Bcast( &buf_len, 1, MPI_INT, 0, MPI_COMM_WORLD ); + + /* Allocate buffers */ + for (i=0; i> third.out + $mpirun $args -np 2 ./third > third.out 2>&1 + echo '*** Testing Unexpected messages ***' >> third.out + rm -f third.stdo + cat >>third.stdo < /dev/null ; then + rc=0 +# else +# echo "Failed to run 
simple program!" +# echo "Output from run attempt was" +# cat third.out +# echo "mpirun program was $mpirun" +# echo "mpirun command was " +# echo "$mpirun $args -np 2 ./third >third.out 2>&1" +# rc=1 +# fi +# CleanExe third +# rm -f third.out +# exit $rc +fi + +# If the programs are not available, run make. +if [ ! -x sendrecv_mpich -a $makeeach = 0 -a $runtests = 1 ] ; then + $MAKE +fi + +testfiles="" +if [ $runtests = 1 ] ; then +echo '**** Testing MPI Point-to-point routines ****' + +RunTest sendrecv_mpich 2 "**** Testing MPI_Send and MPI_Recv ****" "" "sendrecv-0.out sendrecv-1.out" + +RunTest sendrecv2 2 "**** Testing MPI_Send and MPI_Recv (2) ****" + + +#Uses MPI_Pack and Unpack +#RunTest sendrecv3 2 "**** Testing MPI_Send and MPI_Recv (3) ****" + +RunTest sendrecv4 2 "**** Testing MPI_Send and MPI_Recv (4) ****" + +#not supported +#RunTest bsendtest 2 "**** Testing MPI_Bsend and MPI_Recv (4) ****" "" "bsendtest-0.out bsendtest-1.out" + +RunTest isndrcv 2 "**** Testing MPI_Isend and MPI_Irecv ****" "" "isndrcv-0.out isndrcv-1.out" + +#RunTest irsend 2 "**** Testing MPI_Irsend ****" + +#RunTest irsendinit 2 "**** Testing MPI_Rsend_init ****" + + +#rsend and ssend to implement, removed for now +RunTest longmsgs 2 "**** Testing long messages ****" + +RunTest testsome 2 "**** Testing MPI_Testsome/Testany/Waitsome ****" + +#issend used, replaced by isend - fails +#RunTest testall_mpich 2 "**** Testing MPI_Testall ****" + +#MPI_Cancel, not yet implemented +#RunTest cancel 2 "**** Checking MPI_Cancel (Irecv's) ****" + +#RunTest cancel2 2 "**** Checking MPI_Cancel (Persistent Recv's) ****" + +#RunTest cancel3 2 "**** Checking MPI_Cancel (Persistent Send's) ****" + +#RunTest cancelmessages 2 "**** Checking MPI_Cancel (short/eager/rndv) ****" + +#RunTest cancelibm 2 "**** Checking MPI_Cancel (successful/nonsuccessful) ****" + +# This test exposes a SEGV in the MPICH1 code. However, this is an +# uncommon situtation. 
Users who need this feature should switch to +# MPICH2 (www.mcs.anl.gov/mpi/mpich2) +#RunTest cancelissend 2 "**** Checking MPI_Cancel and MPI_Issend (short msgs) ****" + +RunTest sndrcv 2 "**** Testing MPI_Sendrecv ****" + +RunTest sndrcvrep 2 "**** Testing MPI_Sendrecv_replace ****" + +#fails : check if buffer is correctly moved +RunTest sndrcvrpl2 2 "**** Testing MPI_Sendrecv_replace (long) ****" + +#not implemented :TODO, should be simple, add a nbelements parameter to the datatype, compute it at creation time, then multiply status->count by this number +#RunTest getelm 2 "**** Testing MPI_Get_elements ****" + +#uses Pack_size, Buffer_attach, Bsend, Buffer_detach +#RunTest overtake 2 "**** Verifying that messages are non-overtaking ****" "" "overtake-0.out overtake-1.out" + +#RunTest ssendtest 2 "**** Verifying ssend ****" + +#RunTest ssendtest2 2 "**** Verifying ssend (2) ****" + +#RunTest issendtest 2 "**** Verifying Issend ****" "" "issendtest-1.out" + +#RunTest issend2 2 "**** Verifying Issend (2) ****" + +#uses MPI_Cancel, lets a lot of orphan comms. +#RunTest reqcreate 1 "**** Checking the request creation routines ****" + + +#hangs without reason: TODO debug +#RunTest reqfree 2 "**** Checking request free ****" "-req 2048" + +RunTest typebase 1 "**** Checking the type (sizes) routines ****" + +RunTest typecreate 1 "**** Checking the type creation routines ****" + +RunTest typetest 2 "**** Checking the type routines ****" "" "typetest-0.out typetest-1.out" + +#weird error, because comment says smpi returned value is same as expected from mpich .. modified to handle this value as correct +RunTest typeub 2 "**** Checking the type routines: MPI_UB ****" + +#todo : handle lb correctly ! 
+RunTest typeub2 1 "**** Checking the type routines: MPI_UB(2) ****" + +#types too complex for smpi (structs of vectors) +#RunTest typeub3 1 "**** Checking the type routines: MPI_UB(3) ****" + +#TODO: handle LB +RunTest typelb 1 "**** Checking the type routines: MPI_LB ****" + +RunTest structlb 1 "**** Checking Type_struct routines: MPI_LB ****" + +#ssend, replaced by send +RunTest dtypelife 2 "**** Checking the type routines: MPI_Type_free ****" + +#TODO: handle alignment for extent values +#RunTest dataalign 2 "**** Checking the type alignment ****" + +RunTest dtyperecv 2 "**** Checking the type recv ****" + +RunTest commit 1 "**** Checking the type routines: MPI_Type_commit ****" + +RunTest third 2 "*** Testing Unexpected messages ***" + +RunTest fourth 4 "*** Testing Irecv/Isend/Wait ***" + +RunTest fifth 4 "*** Testing Irecv/Isend/Waitall ***" + +#uses MPI_keyval_create, Attr_put, Attr_get, Attr_delete, Keyval_free +#RunTest sixth 2 "*** Testing attribute manipulation ***" + +RunTest nblock 4 "*** Testing Isend/Irecv (large numbers) ***" + +#TODO : unlock probing ... +#RunTest nbtest 4 "*** Testing Isend/Probe/Recv (large numbers) ***" + +RunTest sendmany 8 "*** Testing Send (many procs) ***" + +# ... 
replaced by smpi_sleep calls +RunTest order 2 "*** Testing Recv ordering ***" + +RunTest sendorder 2 "**** Checking Message Ordering ****" + +RunTest exittest 3 "**** Checking Exit Processing ****" + +#uses MPI_Errors +#RunTest trunc 2 "*** Testing Message truncation ***" +#TODO: handle MPI_ERR_IN_STATUS +#RunTest truncmult 2 '*** Testing Message trunction in multiple completions ***' + +RunTest nullproc 2 "*** Testing handling of MPI_PROC_NULL ***" + +RunTest nullproc2 2 "*** Testing handling of MPI_PROC_NULL in blocking Recv ***" + +RunTest relrank 2 "*** Testing handling of relative rank ***" + +RunTest hvectest 2 "*** Testing Vector type ***" + +RunTest hvectest2 2 "*** Testing struct type for vectors (MPI_UB) ***" + +#too complex for now +RunTest hvec 2 "*** Testing Type_Hvector ***" +#fails +RunTest hindexed 1 "*** Testing Type_Hindexed ***" + +RunTest probe 2 "*** Testing Probe and Get_count ***" + +RunTest probe1 2 "*** Testing Iprobe and Get_count ***" + +RunTest self 1 "*** Testing send to self ***" +#TODO : handle COMM_SELF +#RunTest selfvsworld 2 "*** Testing COMM_SELF and COMM_WORLD ***" + +RunTest testtest1 2 "*** Testing MPI_Test ***" + +RunTest persistent 4 "*** Testing MPI_Recv_init ***" + +RunTest persist 4 "*** Testing MPI_Startall/Request_free ***" +#used MPI_Buffers and Bsends +#RunTest persist2 4 "*** Testing MPI_Startall(Bsend)/Request_free ***" + +RunTest waitall 4 "*** Testing MPI_Waitall ***" + +#uses issend, replaced by isend, and ssend replaced by send +#weirdly fails when launched by make +RunTest waitall2 2 "*** Testing MPI_Waitall (order) ***" + +RunTest waitall3 4 "*** Testing MPI_Waitall (order-irecv) ***" + +RunTest waitall4 4 "*** Testing MPI_Waitall (order-isend) ***" +#semi fails +RunTest waitany 4 "*** Testing MPI_Waitany ***" + +#RunTest pack 2 "*** Testing MPI_Pack ***" + +#calls to ssend replaced by send +RunTest flood 2 "**** Testing large messages ****" +RunTest sendcplx 2 "*** Testing Fortran send ***" +#very long 
+#RunTest flood2 2 "**** Testing large numbers of messages ****" +# +# Run Fortran tests ONLY if Fortran available +if [ 0 = 1 ] ; then + echo "FORTRAN TESTS" + # + #echo "*** secondf ***" >> pt2pt.out + #$mpirun $args -np 2 ./secondf "$@" >> pt2pt.out 2>&1 + # + RunTest isendf 2 "*** Testing isend from Fortran ***" + + RunTest allpair 2 "*** Testing pt-2-pt from Fortran ***" + + RunTest allpair2 2 "*** Testing pt-2-pt from Fortran (many calls) ***" + # + OutTime + testfiles="$testfiles structf.out" + rm -f structf.out + MakeExe structf + echo '*** Testing Type_struct from Fortran ***' + echo '*** Testing Type_struct from Fortran ***' >> structf.out + # This is tricky. Because of a bug in IRIX64, we need to direct + # input from /dev/null (so that we can know that we need a new process + # group). This should be ok for everyone, but SunOS 4.1.4 tends to + # panic (!!) in this case. Since both behaviors represent broken + # operating systems, we test for ARCH=IRIX64 + if [ "LINUX" = "IRIX64" ] ; then + $mpirun $args -np 2 ./structf "$@" >> structf.out 2>&1 < /dev/null + else + $mpirun $args -np 2 ./structf "$@" >> structf.out 2>&1 + fi + echo '*** Testing Type_struct from Fortran ***' >> structf.out + CheckOutput structf + CleanExe structf + # + RunTest send1 2 "*** Testing pt-2-pt from Fortran (2) ***" + + RunTest sendfort 2 "*** Testing Fortran logical datatype ***" + + # +# testfiles="$testfiles pingpong.out" +# rm -f pingpong.out +# MakeExe pingpong +# echo '*** Testing pt-2-pt from Fortran (3) ***' +# echo '*** Testing pt-2-pt from Fortran (3) ***' >> pingpong.out +# $mpirun $args -np 2 ./pingpong "$@" >> pingpong.out 2>&1 +# echo '*** Testing pt-2-pt from Fortran (3) ***' >> pingpong.out +# CheckOutput pingpong +# CleanExe pingpong + # + echo "END OF FORTRAN TESTS" +fi +# +else + # Just run checks + testfiles=`echo *.out` + if test "$testfiles" = "*.out" ; then + echo "No output files remain from previous test!" 
+ exit 1 + fi +fi +# +writesummaryfile=$savewritesummaryfile +echo '*** Checking for differences from expected output ***' +CheckAllOutput pt2pt.diff +exit 0 + diff --git a/teshsuite/smpi/mpich-test/pt2pt/secondf.f b/teshsuite/smpi/mpich-test/pt2pt/secondf.f new file mode 100644 index 0000000000..af3d2ef75f --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/secondf.f @@ -0,0 +1,59 @@ +C +C second - test program that sends an array of floats from the first process +C of a group to the last, using send and recv +C +C + program main + include 'mpif.h' +C + integer rank, size, to, from, tag, count, i, ierr + integer src, dest + integer st_source, st_tag, st_count +C MPI_Status status + integer status(MPI_STATUS_SIZE) + double precision data(100) + + call MPI_INIT( ierr ) +C print *, 'about to call comm rank' + call MPI_COMM_RANK( MPI_COMM_WORLD, rank, ierr ) +C print *, rank, 'about to call comm size' + call MPI_COMM_SIZE( MPI_COMM_WORLD, size, ierr ) + print *, 'Process ', rank, ' of ', size, ' is alive' +C +C src = size - 1 +C dest = 0 + dest = size - 1 + src = 0 +C + if (rank .eq. src) then + to = dest + count = 10 + tag = 2001 + do 10 i=1, 10 + data(i) = i + 10 continue + call MPI_SEND( data, count, MPI_DOUBLE_PRECISION, to, tag, + & MPI_COMM_WORLD, ierr ) + print *, rank, ' sent' + print *, (data(i),i=1,10) + elseif (rank .eq. 
dest) then + tag = MPI_ANY_TAG + count = 10 + from = MPI_ANY_SOURCE + call MPI_RECV(data, count, MPI_DOUBLE_PRECISION, from, tag, + & MPI_COMM_WORLD, status, ierr ) + + call MPI_GET_COUNT( status, MPI_DOUBLE_PRECISION, + & st_count, ierr ) + st_source = status(MPI_SOURCE) + st_tag = status(MPI_TAG) +c + print *, 'Status info: source = ', st_source, + & ' tag = ', st_tag, ' count = ', st_count + print *, rank, ' received', (data(i),i=1,10) + endif + + call MPI_FINALIZE( ierr ) + print *, 'Process ', rank, ' exiting' + end + diff --git a/teshsuite/smpi/mpich-test/pt2pt/self.c b/teshsuite/smpi/mpich-test/pt2pt/self.c new file mode 100644 index 0000000000..1cad66fd27 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/self.c @@ -0,0 +1,63 @@ +#include "mpi.h" +#include +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + * This needs to test long messages as well as short ones. + * The most likely failure mode for this program is that it will + * hang. Sorry about that.... 
+ * + */ +int main( int argc, char **argv ) +{ +int sendbuf[10]; +int sendcount = 10; +int recvbuf[10]; +int recvcount = 10; +int source = 0, recvtag = 2; +int dest = 0, sendtag = 2; +int i, *longsend, *longrecv; + + int mpi_errno = MPI_SUCCESS; + MPI_Status status_array[2]; + MPI_Request req[2]; + + MPI_Init( &argc, &argv ); + if ((mpi_errno = MPI_Irecv ( recvbuf, recvcount, MPI_INT, + source, recvtag, MPI_COMM_WORLD, &req[1] ))) + return mpi_errno; + if ((mpi_errno = MPI_Isend ( sendbuf, sendcount, MPI_INT, dest, + sendtag, MPI_COMM_WORLD, &req[0] ))) + return mpi_errno; + + fprintf( stdout, "[%d] Starting waitall\n", 0 ); + mpi_errno = MPI_Waitall ( 2, req, status_array ); + fprintf( stdout, "[%d] Ending waitall\n", 0 ); + + for (i=16; i<257000; i *= 2) { + longsend = (int *)malloc( i * sizeof(int) ); + longrecv = (int *)malloc( i * sizeof(int) ); + if (!longsend || !longrecv) { + } + if ((mpi_errno = MPI_Irecv ( longrecv, i, MPI_INT, source, recvtag, + MPI_COMM_WORLD, &req[1] ))) + return mpi_errno; + if ((mpi_errno = MPI_Isend ( longsend, i, MPI_INT, dest, sendtag, + MPI_COMM_WORLD, &req[0] ))) + return mpi_errno; + + fprintf( stdout, "[%d] Starting waitall (%d)\n", 0, i ); + mpi_errno = MPI_Waitall ( 2, req, status_array ); + fprintf( stdout, "[%d] Ending waitall\n", 0 ); + + free( longsend ); + free( longrecv ); + } + + MPI_Finalize(); + return (mpi_errno); +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/self.std b/teshsuite/smpi/mpich-test/pt2pt/self.std new file mode 100644 index 0000000000..eff05ec011 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/self.std @@ -0,0 +1,32 @@ +*** Testing send to self *** +[0] Starting waitall +[0] Ending waitall +[0] Starting waitall (16) +[0] Ending waitall +[0] Starting waitall (32) +[0] Ending waitall +[0] Starting waitall (64) +[0] Ending waitall +[0] Starting waitall (128) +[0] Ending waitall +[0] Starting waitall (256) +[0] Ending waitall +[0] Starting waitall (512) +[0] Ending waitall +[0] Starting waitall 
(1024) +[0] Ending waitall +[0] Starting waitall (2048) +[0] Ending waitall +[0] Starting waitall (4096) +[0] Ending waitall +[0] Starting waitall (8192) +[0] Ending waitall +[0] Starting waitall (16384) +[0] Ending waitall +[0] Starting waitall (32768) +[0] Ending waitall +[0] Starting waitall (65536) +[0] Ending waitall +[0] Starting waitall (131072) +[0] Ending waitall +*** Testing send to self *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/selfvsworld.c b/teshsuite/smpi/mpich-test/pt2pt/selfvsworld.c new file mode 100644 index 0000000000..621b99d279 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/selfvsworld.c @@ -0,0 +1,104 @@ +/* ----------------------------------------------------------------------- + * Code: mismatch.c + * Lab: Parallel Processing Performance Tools + * Usage: mismatch + * Run on two nodes + * You will need to stop the deadlocked program with \ + * Author: Roslyn Leibensperger Last revised: 3/19/97 RYL + * + * Modified by Bill Gropp (ANL) to use Iprobe to detect the message and + * always produce output (no need to abort a deadlocked program). + * Unfortunately(?), the version of POE that had this bug is no longer + * available, so we can't test whether using Iprobe would show the same + * problem. 
+ * ------------------------------------------------------------------------ */ +#include +#include "mpi.h" +#define MSGLEN 100 /* length of message in elements */ +#define TAG_A 100 +#define TAG_B 200 + +int main( int argc, char *argv[] ) +{ + float message1 [MSGLEN], /* message buffers */ + message2 [MSGLEN], + message3 [MSGLEN]; + int rank, /* rank of task in communicator */ + dest=0, source=0, /* rank in communicator of destination */ + /* and source tasks */ + send_tag=0, recv_tag=0, /* message tags */ + flag, size, i; + int errs = 0, toterrs; + MPI_Status status; /* status of communication */ + MPI_Status statuses[2]; + MPI_Request requests[2]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + if (size != 2) { + printf( "Must run with exactly 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + /* printf ( " Task %d initialized\n", rank ); */ + + /* initialize message buffers */ + for ( i=0; i +#ifdef HAVE_UNISTD_H +#include +#endif +#include +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +#define MAXPES 32 +#define MYBUFSIZE 16*1024 +static int buffer[MAXPES][MYBUFSIZE]; + +#define NUM_RUNS 10 + + +int main ( int argc, char *argv[] ) +{ + int i; + int count, size; + int self, npes; + double secs; + MPI_Request request[MAXPES]; + MPI_Status status; + + + MPI_Init (&argc, &argv); + MPI_Comm_rank (MPI_COMM_WORLD, &self); + MPI_Comm_size (MPI_COMM_WORLD, &npes); + + assert (npes <= MAXPES); + + for (size = 1; size <= MYBUFSIZE ; size += size) + { + + secs = -MPI_Wtime (); + for (count = 0; count < NUM_RUNS; count++) + { + MPI_Barrier (MPI_COMM_WORLD); + + for (i = 0; i < npes; i++) + { + if (i == self) + continue; + MPI_Irecv (buffer[i], size, MPI_INT, i, + MPI_ANY_TAG, MPI_COMM_WORLD, &request[i]); + } + + for (i = 0; i < npes; i++) + { + if (i == self) + continue; + MPI_Send (buffer[self], size, MPI_INT, i, 0, MPI_COMM_WORLD); + } + + 
for (i = 0; i < npes; i++) + { + if (i == self) + continue; + MPI_Wait (&request[i], &status); + } + + } + MPI_Barrier (MPI_COMM_WORLD); + secs += MPI_Wtime (); + + if (self == 0) + { + secs = secs / (double) NUM_RUNS; + printf ( "length = %d ints\n", size ); + fflush(stdout); +/* + printf ("%f\n", + (double) (size * sizeof (int) * (npes - 1)) / + (secs * 1024.0 * 1024.0)); + */ + } + } + MPI_Finalize(); + return (0); +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/sendmany.std b/teshsuite/smpi/mpich-test/pt2pt/sendmany.std new file mode 100644 index 0000000000..c12c0acbff --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sendmany.std @@ -0,0 +1,17 @@ +*** Testing Send (many procs) *** +length = 1 ints +length = 2 ints +length = 4 ints +length = 8 ints +length = 16 ints +length = 32 ints +length = 64 ints +length = 128 ints +length = 256 ints +length = 512 ints +length = 1024 ints +length = 2048 ints +length = 4096 ints +length = 8192 ints +length = 16384 ints +*** Testing Send (many procs) *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/sendorder.c b/teshsuite/smpi/mpich-test/pt2pt/sendorder.c new file mode 100644 index 0000000000..e226f9d046 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sendorder.c @@ -0,0 +1,173 @@ +/* + Test ordering of messages that differ only in data + + sendorder [ -n number-of-sends ] [ -m length-of-long-sends ] + */ + +#include +#include +#include +#include "mpi.h" + +/* Prototypes */ +void delay( int ); +void CheckStatus( MPI_Status *, int, int, int, int * ); + +/* + This is a delay to make sure that several messages are in the queue when + the MPI_Recv is called + + 10ms delay for now. 
+*/ +void delay( int ms ) +{ + double t, deltat = ms * 0.001; + t = MPI_Wtime(); + //while (MPI_Wtime() - t < deltat) ; + smpi_sleep(deltat); +} + +void CheckStatus( MPI_Status *status, int tag, int src, int cnt, int *err ) +{ + int n; + + if (status->MPI_TAG != tag && status->MPI_SOURCE != src) { + if (*err < 10) { + fprintf( stdout, + "Error in message status! tag = %d and source = %d\n", status->MPI_TAG, + status->MPI_SOURCE ); + } + (void)*err++; + } + MPI_Get_count( status, MPI_INT, &n ); + if (n != cnt) { + if (*err < 10) { + fprintf( stdout, + "Error in message status! length is %d and should be %d\n", n, cnt ); + } + (void)*err++; + } +} + +int main( int argc, char *argv[] ) +{ + int i, n, m, val, *buf; + MPI_Status status; + int src, dest, tag, err = 0, toterr; + int rank, size; + MPI_Comm comm; + + MPI_Init( &argc, &argv ); + + n = 1000; /* Number of tests */ + comm = MPI_COMM_WORLD; + tag = 3; + m = 1000; /* Size in ints of longer buffer */ + + /* Check for options + */ + argc--; argv++; + while (argc > 0) { + if (argv[0] && strcmp( argv[0], "-n" ) == 0) { + argc++; + n = atoi( argv[0] ); + } + else if (argv[0] && strcmp( argv[0], "-m" ) == 0) { + argc++; + m = atoi( argv[0] ); + } + argc--; argv++; + } + /* Ensure that everyone has the values */ + MPI_Bcast( &n, 1, MPI_INT, 0, MPI_COMM_WORLD ); + MPI_Bcast( &m, 1, MPI_INT, 0, MPI_COMM_WORLD ); + + MPI_Comm_rank( comm, &rank ); + MPI_Comm_size( comm, &size ); + if (size < 2) { + fprintf( stderr, "This program requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + src = 0; + dest = size - 1; + + /* Single Int */ + MPI_Barrier( comm ); + if (rank == src) { + for (i=0; i +#include +#include +#include "mpi.h" + +#ifdef HAVE_MPICHCONF_H +#include "mpichconf.h" +#endif + +static int src = 1; +static int dest = 0; + +static int do_test1 = 1; +static int do_test2 = 1; +static int do_test3 = 1; + +static int verbose = 0; + +#define MAX_TYPES 13 +static int ntypes = 0; +static int nolongdouble 
= 0; +static MPI_Datatype BasicTypes[MAX_TYPES]; + +static int maxbufferlen = 10000; +static char *(BasicNames[MAX_TYPES]); + +/* In order to quiet noisy C compilers, we provide ANSI-style prototypes + where possible */ + +void AllocateBuffers ( void **, MPI_Datatype *, int, int ); +void FreeBuffers ( void **, int ); +void FillBuffers ( void **, MPI_Datatype *, int, int ); +int CheckBuffer ( void *, MPI_Datatype, int ); +void SetupBasicTypes (void); +void SenderTest1 (void); +void ReceiverTest1 (void); +void SenderTest2 (void); +void ReceiverTest2 (void); +void SenderTest3 (void); +void ReceiverTest3 (void); + +void +AllocateBuffers(bufferspace, buffertypes, num_types, bufferlen) + void **bufferspace; + MPI_Datatype *buffertypes; + int num_types; + int bufferlen; +{ + int i; + for (i = 0; i < ntypes; i++) { + if (buffertypes[i] == MPI_CHAR) + bufferspace[i] = malloc(bufferlen * sizeof(char)); + else if (buffertypes[i] == MPI_SHORT) + bufferspace[i] = malloc(bufferlen * sizeof(short)); + else if (buffertypes[i] == MPI_INT) + bufferspace[i] = malloc(bufferlen * sizeof(int)); + else if (buffertypes[i] == MPI_LONG) + bufferspace[i] = malloc(bufferlen * sizeof(long)); + else if (buffertypes[i] == MPI_UNSIGNED_CHAR) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned char)); + else if (buffertypes[i] == MPI_UNSIGNED_SHORT) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned short)); + else if (buffertypes[i] == MPI_UNSIGNED) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned int)); + else if (buffertypes[i] == MPI_UNSIGNED_LONG) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned long)); + else if (buffertypes[i] == MPI_FLOAT) + bufferspace[i] = malloc(bufferlen * sizeof(float)); + else if (buffertypes[i] == MPI_DOUBLE) + bufferspace[i] = malloc(bufferlen * sizeof(double)); +#if defined(HAVE_LONG_DOUBLE) && !defined(HAS_XDR) + else if (buffertypes[i] == MPI_LONG_DOUBLE) { + int dlen; + MPI_Type_size( MPI_LONG_DOUBLE, &dlen ); + bufferspace[i] = malloc(bufferlen 
* dlen); + } +#endif +#if defined(HAVE_LONG_LONG_INT) && !defined(HAS_XDR) + else if (buffertypes[i] == MPI_LONG_LONG_INT) + bufferspace[i] = malloc(bufferlen * sizeof(long long) ); +#endif + else if (buffertypes[i] == MPI_BYTE) + bufferspace[i] = malloc(bufferlen * sizeof(unsigned char)); + } +} + +void FreeBuffers(void **buffers, int nbuffers) +{ + int i; + for (i = 0; i < nbuffers; i++) + free(buffers[i]); +} + +void FillBuffers(bufferspace, buffertypes, num_types, bufferlen) + void **bufferspace; + MPI_Datatype *buffertypes; + int num_types; + int bufferlen; +{ + int i, j; + for (i = 0; i < ntypes; i++) { + for (j = 0; j < bufferlen; j++) { + if (buffertypes[i] == MPI_CHAR) + ((char *)bufferspace[i])[j] = (char)(j & 0x7f); + else if (buffertypes[i] == MPI_SHORT) + ((short *)bufferspace[i])[j] = (short)j; + else if (buffertypes[i] == MPI_INT) + ((int *)bufferspace[i])[j] = (int)j; + else if (buffertypes[i] == MPI_LONG) + ((long *)bufferspace[i])[j] = (long)j; + else if (buffertypes[i] == MPI_UNSIGNED_CHAR) + ((unsigned char *)bufferspace[i])[j] = (unsigned char)j; + else if (buffertypes[i] == MPI_UNSIGNED_SHORT) + ((unsigned short *)bufferspace[i])[j] = (unsigned short)j; + else if (buffertypes[i] == MPI_UNSIGNED) + ((unsigned int *)bufferspace[i])[j] = (unsigned int)j; + else if (buffertypes[i] == MPI_UNSIGNED_LONG) + ((unsigned long *)bufferspace[i])[j] = (unsigned long)j; + else if (buffertypes[i] == MPI_FLOAT) + ((float *)bufferspace[i])[j] = (float)j; + else if (buffertypes[i] == MPI_DOUBLE) + ((double *)bufferspace[i])[j] = (double)j; +#if defined(HAVE_LONG_DOUBLE) && !defined(HAS_XDR) + else if (buffertypes[i] == MPI_LONG_DOUBLE) + ((long double *)bufferspace[i])[j] = (long double)j; +#endif +#if defined(HAVE_LONG_LONG_INT) && !defined(HAS_XDR) + else if (buffertypes[i] == MPI_LONG_LONG_INT) + ((long long *)bufferspace[i])[j] = (long long)j; +#endif + else if (buffertypes[i] == MPI_BYTE) + ((unsigned char *)bufferspace[i])[j] = (unsigned char)j; + } + } 
+} + +int +CheckBuffer(bufferspace, buffertype, bufferlen) + void *bufferspace; + MPI_Datatype buffertype; + int bufferlen; +{ + int j; + char valerr[256]; + valerr[0] = 0; + for (j = 0; j < bufferlen; j++) { + if (buffertype == MPI_CHAR) { + if (((char *)bufferspace)[j] != (char)(j & 0x7f)) { + sprintf( valerr, "%x != %x", + ((char *)bufferspace)[j], (char)(j&0x7f) ); + break; + } + } else if (buffertype == MPI_SHORT) { + if (((short *)bufferspace)[j] != (short)j) { + sprintf( valerr, "%d != %d", + ((short *)bufferspace)[j], (short)j ); + break; + } + } else if (buffertype == MPI_INT) { + if (((int *)bufferspace)[j] != (int)j) { + sprintf( valerr, "%d != %d", + ((int *)bufferspace)[j], (int)j ); + break; + } + } else if (buffertype == MPI_LONG) { + if (((long *)bufferspace)[j] != (long)j) { + break; + } + } else if (buffertype == MPI_UNSIGNED_CHAR) { + if (((unsigned char *)bufferspace)[j] != (unsigned char)j) { + break; + } + } else if (buffertype == MPI_UNSIGNED_SHORT) { + if (((unsigned short *)bufferspace)[j] != (unsigned short)j) { + break; + } + } else if (buffertype == MPI_UNSIGNED) { + if (((unsigned int *)bufferspace)[j] != (unsigned int)j) { + break; + } + } else if (buffertype == MPI_UNSIGNED_LONG) { + if (((unsigned long *)bufferspace)[j] != (unsigned long)j) { + break; + } + } else if (buffertype == MPI_FLOAT) { + if (((float *)bufferspace)[j] != (float)j) { + break; + } + } else if (buffertype == MPI_DOUBLE) { + if (((double *)bufferspace)[j] != (double)j) { + break; + } +#if defined(HAVE_LONG_DOUBLE) && !defined(HAS_XDR) + } else if (buffertype == MPI_LONG_DOUBLE) { + if (((long double *)bufferspace)[j] != (long double)j) { + break; + } +#endif +#if defined(HAVE_LONG_LONG_INT) && !defined(HAS_XDR) + } else if (buffertype == MPI_LONG_LONG_INT) { + if (((long long *)bufferspace)[j] != (long long)j) { + break; + } +#endif + } else if (buffertype == MPI_BYTE) { + if (((unsigned char *)bufferspace)[j] != (unsigned char)j) { + break; + } + } + } + /* 
Return +1 so an error in the first location is > 0 */ + if (j < bufferlen) { + if (valerr[0]) fprintf( stderr, "Different value[%d] = %s\n", + j, valerr ); + else + fprintf( stderr, "Different value[%d]\n", j ); + return j+1; + } + return 0; +} + +void +SetupBasicTypes() +{ + BasicTypes[0] = MPI_CHAR; BasicNames[0] = (char*)"MPI_CHAR" ; + BasicTypes[1] = MPI_SHORT; BasicNames[1] = (char*)"MPI_SHORT"; + BasicTypes[2] = MPI_INT; BasicNames[2] = (char*)"MPI_INT" ; + BasicTypes[3] = MPI_LONG; BasicNames[3] = (char*)"MPI_LONG" ; + BasicTypes[4] = MPI_UNSIGNED_CHAR; BasicNames[4] = (char*)"MPI_UNSIGNED_CHAR"; + BasicTypes[5] = MPI_UNSIGNED_SHORT; BasicNames[5] = (char*)"MPI_UNSIGNED_SHORT"; + BasicTypes[6] = MPI_UNSIGNED; BasicNames[6] = (char*)"MPI_UNSIGNED"; + BasicTypes[7] = MPI_UNSIGNED_LONG; BasicNames[7] = (char*)"MPI_UNSIGNED_LONG"; + BasicTypes[8] = MPI_FLOAT; BasicNames[8] = (char*)"MPI_FLOAT"; + BasicTypes[9] = MPI_DOUBLE; BasicNames[9] = (char*)"MPI_DOUBLE"; + BasicTypes[10] = MPI_BYTE; BasicNames[10] = (char*)"MPI_BYTE"; + /* By making the BYTE type LAST, we make it easier to handle heterogeneous + systems that may not support all of the types */ + ntypes = 11; +#if defined (HAVE_LONG_DOUBLE) && !defined(HAS_XDR) + /* This test allows us to use MPI_LONG_DOUBLE, but rely on size > 0 + for "actually implemented" */ + if (!nolongdouble) { + int l; + MPI_Type_size( MPI_LONG_DOUBLE, &l ); + if (l > 0) { + BasicTypes[ntypes] = MPI_LONG_DOUBLE; + BasicNames[ntypes] = (char*)"MPI_LONG_DOUBLE"; + ntypes++; + } + } +#endif +#if defined(HAVE_LONG_LONG_INT) && !defined(HAS_XDR) + BasicTypes[ntypes] = MPI_LONG_LONG_INT; + BasicNames[ntypes] = (char*)"MPI_LONG_LONG_INT"; + ntypes++; +#endif +} + +void +SenderTest1() +{ + void *bufferspace[MAX_TYPES]; + int i, j; + + AllocateBuffers(bufferspace, BasicTypes, ntypes, maxbufferlen); + FillBuffers(bufferspace, BasicTypes, ntypes, maxbufferlen); + for (i = 0; i < ntypes; i++) { + MPI_Send( (void *)0, 0, BasicTypes[i], dest, 
2000, MPI_COMM_WORLD ); + for (j = 0; j < maxbufferlen; j += 500) + MPI_Send(bufferspace[i], j, BasicTypes[i], dest, + 2000, MPI_COMM_WORLD); + } + FreeBuffers(bufferspace, ntypes); +} + +void +ReceiverTest1() +{ + void *bufferspace[MAX_TYPES]; + int i, j; + char message[81]; + MPI_Status Stat; + int dummy, passed; + + AllocateBuffers(bufferspace, BasicTypes, ntypes, maxbufferlen); + for (i = 0; i < ntypes; i++) { + passed = 1; + MPI_Recv( (void *)0, 0, BasicTypes[i], src, + 2000, MPI_COMM_WORLD, &Stat); + if (Stat.MPI_SOURCE != src) { + fprintf(stderr, "*** Incorrect Source returned. ***\n"); + Test_Failed(message); + passed = 0; + } else if (Stat.MPI_TAG != 2000) { + fprintf(stderr, "*** Incorrect Tag returned. ***\n"); + Test_Failed(message); + passed = 0; + } else if (MPI_Get_count(&Stat, BasicTypes[i], &dummy) || + dummy != 0) { + fprintf(stderr, + "*** Incorrect Count returned, Count = %d. ***\n", + dummy); + Test_Failed(message); + passed = 0; + } + /* Try different sized messages */ + for (j = 0; j < maxbufferlen; j += 500) { + MPI_Recv(bufferspace[i], j, BasicTypes[i], src, + 2000, MPI_COMM_WORLD, &Stat); + sprintf(message, "Send-Receive Test, Type %d, Count %d", + i, j); + if (Stat.MPI_SOURCE != src) { + fprintf(stderr, "*** Incorrect Source returned. ***\n"); + Test_Failed(message); + passed = 0; + } else if (Stat.MPI_TAG != 2000) { + fprintf(stderr, "*** Incorrect Tag returned. ***\n"); + Test_Failed(message); + passed = 0; + } else if (MPI_Get_count(&Stat, BasicTypes[i], &dummy) || + dummy != j) { + fprintf(stderr, + "*** Incorrect Count returned, Count = %d (should be %d). ***\n", + dummy, j); + Test_Failed(message); + passed = 0; + } else if(CheckBuffer(bufferspace[i], BasicTypes[i], j)) { + fprintf(stderr, + "*** Incorrect Message received (type = %d (%s), count = %d). 
***\n", + i, BasicNames[i], j ); + Test_Failed(message); + passed = 0; + } +#ifdef VERBOSE + else { + fprintf(stderr, + "Message of count %d, type %d received correctly.\n", + j, i ); + } +#endif + } + sprintf(message, "Send-Receive Test, Type %d (%s)", + i, BasicNames[i] ); + if (passed) + Test_Passed(message); + else + Test_Failed(message); + } + FreeBuffers(bufferspace, ntypes); +} + +#define MAX_ORDER_TAG 2010 +/* Test Tag Selectivity. + Note that we must use non-blocking sends here, since otherwise we could + deadlock waiting to receive/send the first message +*/ +void +SenderTest2() +{ + int *buffer; + int i; + MPI_Request r[10]; + MPI_Status s[10]; + + buffer = (int *)malloc(maxbufferlen * sizeof(int)); + for (i = 0; i < maxbufferlen; i++) + buffer[i] = i; + + for (i = 2001; i <= MAX_ORDER_TAG; i++) + MPI_Isend(buffer, maxbufferlen, MPI_INT, dest, + i, MPI_COMM_WORLD, &r[i-2001] ); + + MPI_Waitall( MAX_ORDER_TAG-2001+1, r, s ); + free(buffer); + + return; +} + +void +ReceiverTest2() +{ + int *buffer; + int i, j; + char message[81]; + MPI_Status Stat; + int dummy, passed; + int errloc; + + buffer = (int *)calloc(maxbufferlen,sizeof(int)); + passed = 1; + + for (i = MAX_ORDER_TAG; i >= 2001; i--) { + MPI_Recv(buffer, maxbufferlen, MPI_INT, src, + i, MPI_COMM_WORLD, &Stat); + sprintf(message, "Tag Selectivity Test, Tag %d", + i); + if (Stat.MPI_SOURCE != src) { + fprintf(stderr, "*** Incorrect Source returned. ***\n"); + Test_Failed(message); + } else if (Stat.MPI_TAG != i) { + fprintf(stderr, "*** Incorrect Tag returned. ***\n"); + Test_Failed(message); + } else if (MPI_Get_count(&Stat, MPI_INT, &dummy) || + dummy != maxbufferlen) { + fprintf(stderr, + "*** Incorrect Count returned, Count = %d. ***\n", + dummy); + Test_Failed(message); + } else if((errloc = + CheckBuffer((void*)buffer, MPI_INT, maxbufferlen))) { + fprintf(stderr, + "*** Incorrect Message received at %d (tag=%d). 
***\n", + errloc-1, i); + Test_Failed(message); + passed = 0; + } + /* Clear out the buffer */ + for (j = 0; j < maxbufferlen; j++) + buffer[j] = -1; + } + strncpy(message, "Tag Selectivity Test", 81); + if (passed) + Test_Passed(message); + else + Test_Failed(message); + free(buffer); + return; +} + +void +SenderTest3() +{ + int ibuf[10]; + + /* A receive test might not fail until it is triggered... */ + MPI_Send( ibuf, 10, MPI_INT, dest, 15, MPI_COMM_WORLD); + + return; +} + +void +ReceiverTest3( void ) +{ + int buffer[20]; + MPI_Datatype bogus_type = MPI_DATATYPE_NULL; + MPI_Status status; + int myrank; + int small_tag; +/* + if (verbose) + MPI_Errhandler_set(MPI_COMM_WORLD, TEST_ERRORS_WARN); + else + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN ); +*/ + MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); + + if (myrank == 0 && verbose) { + fprintf( stderr, +"There should be eight error messages about invalid communicator\n\ +count argument, datatype argument, tag, rank, buffer send and buffer recv\n" ); + } + if (MPI_Send(buffer, 20, MPI_INT, dest, + 1, MPI_COMM_NULL) == MPI_SUCCESS){ + Test_Failed("NULL Communicator Test"); + } + else + Test_Passed("NULL Communicator Test"); + + if (MPI_Send(buffer, -1, MPI_INT, dest, + 1, MPI_COMM_WORLD) == MPI_SUCCESS){ + Test_Failed("Invalid Count Test"); + } + else + Test_Passed("Invalid Count Test"); + + if (MPI_Send(buffer, 20, bogus_type, dest, + 1, MPI_COMM_WORLD) == MPI_SUCCESS){ + Test_Failed("Invalid Type Test"); + } + else + Test_Passed("Invalid Type Test"); + + small_tag = -1; + if (small_tag == MPI_ANY_TAG) small_tag = -2; + if (MPI_Send(buffer, 20, MPI_INT, dest, + small_tag, MPI_COMM_WORLD) == MPI_SUCCESS) { + Test_Failed("Invalid Tag Test"); + } + else + Test_Passed("Invalid Tag Test"); + + /* Form a tag that is too large */ + /*MPI_Attr_get( MPI_COMM_WORLD, MPI_TAG_UB, (void **)&tag_ubp, &flag ); + if (!flag) Test_Failed("Could not get tag ub!" 
); + large_tag = *tag_ubp + 1; + if (large_tag > *tag_ubp) { + if (MPI_Send(buffer, 20, MPI_INT, dest, + large_tag, MPI_COMM_WORLD) == MPI_SUCCESS) { + Test_Failed("Invalid Tag Test"); + } + else + Test_Passed("Invalid Tag Test"); + } +*/ + if (MPI_Send(buffer, 20, MPI_INT, 300, + 1, MPI_COMM_WORLD) == MPI_SUCCESS) { + Test_Failed("Invalid Destination Test"); + } + else + Test_Passed("Invalid Destination Test"); + + if (MPI_Send((void *)0, 10, MPI_INT, dest, + 1, MPI_COMM_WORLD) == MPI_SUCCESS){ + Test_Failed("Invalid Buffer Test (send)"); + } + else + Test_Passed("Invalid Buffer Test (send)"); + + /* A receive test might not fail until it is triggered... */ + if (MPI_Recv((void *)0, 10, MPI_INT, src, + 15, MPI_COMM_WORLD, &status) == MPI_SUCCESS){ + Test_Failed("Invalid Buffer Test (recv)"); + } + else + Test_Passed("Invalid Buffer Test (recv)"); + + /* Just to keep things happy, see if there is a message to receive */ + { int flag, ibuf[10]; + + MPI_Iprobe( src, 15, MPI_COMM_WORLD, &flag, &status ); + if (flag) + MPI_Recv( ibuf, 10, MPI_INT, src, 15, MPI_COMM_WORLD, &status ); + } + MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL ); + return; +} + +/* Allow -nolongdouble to suppress long double testing */ +int main( int argc, char **argv ) +{ + int myrank, mysize; + int rc, itemp, i; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + MPI_Comm_size(MPI_COMM_WORLD, &mysize); + Test_Init("sendrecv", myrank); + SetupBasicTypes(); + + if (mysize != 2) { + fprintf(stderr, + "*** This test program requires exactly 2 processes.\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + /* Get the min of the basic types */ + itemp = ntypes; + MPI_Allreduce( &itemp, &ntypes, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD ); + + /* dest writes out the received stats; for the output to be + consistant (with the final check), it should be procees 0 */ + for (i=1; i +#include "dtypes.h" +#include "gcomm.h" +#include "test.h" + +int verbose = 0; +/* + This program is from 
mpich/tsuite/pt2pt and should be changed there only. + It needs gcomm and dtype from mpich/tsuite, and can be run with + any number of processes > 1. + */ +int main( int argc, char **argv) +{ + MPI_Datatype *types; + void **inbufs, **outbufs; + char **names; + int *counts, *bytesize, ntype; + MPI_Comm comms[20]; + int ncomm = 20, rank, np, partner, tag, count; + int i, j, k, err, toterr, world_rank, errloc; + MPI_Status status; + char *obuf; + + MPI_Init( &argc, &argv ); + + /* + * Check for -basiconly to select only the simple datatypes + */ + for (i=1; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } + FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); + FreeComms( comms, ncomm ); + MPI_Finalize(); + return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/sendrecv3.c b/teshsuite/smpi/mpich-test/pt2pt/sendrecv3.c new file mode 100644 index 0000000000..9664a630a5 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sendrecv3.c @@ -0,0 +1,158 @@ +#include "mpi.h" +#include +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* + This program is from mpich/tsuite/pt2pt and should be changed there only. + It needs gcomm and dtype from mpich/tsuite, and can be run with + any number of processes > 1. + + This version uses Pack to send a message and Unpack OR the datatype + to receive it. 
+ */ +int main( int argc, char **argv ) +{ +MPI_Datatype *types; +void **inbufs, **outbufs; +char **names; +char *packbuf, *unpackbuf; +int packsize, unpacksize, position; +int *counts, *bytesize, ntype; +MPI_Comm comms[20]; +int ncomm = 20, rank, np, partner, tag, count; +int i, j, k, err, toterr, world_rank; +int errloc; +MPI_Status status; +char *obuf; + +MPI_Init( &argc, &argv ); + +AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); +GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ +err = 0; +for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } +MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } +FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); +FreeComms( comms, ncomm ); +MPI_Barrier( MPI_COMM_WORLD ); +MPI_Finalize(); +return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/sendrecv4.c b/teshsuite/smpi/mpich-test/pt2pt/sendrecv4.c new file mode 100644 index 0000000000..a4e5a05f51 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sendrecv4.c @@ -0,0 +1,175 @@ +#include "mpi.h" +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* + This program is from mpich/tsuite/pt2pt and should be changed there only. + It needs gcomm and dtype from mpich/tsuite, and can be run with + any number of processes > 1. + + This version sends and receives EVERYTHING from MPI_BOTTOM, by putting + the data into a structure. + + This code isn't quite correct, since the MPI_Type_struct that is + created for the type may not have the correct extent. 
+ One possible change is to make the struct type include the count, and + send/receive one instance of the data item. + + The GenerateData call should return extents; when the extent of the + created structure doesn't match, we can at least issue an error message. + */ +int main( int argc, char **argv ) +{ +MPI_Datatype *types; +void **inbufs, **outbufs; +char **names; +int *counts, *bytesize, ntype; +MPI_Comm comms[20]; +int ncomm = 20, rank, np, partner, tag, count; +int i, j, k, err, toterr, world_rank, errloc; +MPI_Status status; +char *obuf; +MPI_Datatype offsettype; +int blen; +MPI_Aint displ, extent, natural_extent; + +MPI_Init( &argc, &argv ); + +AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); +GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ +err = 0; +for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } +MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } + +FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); +FreeComms( comms, ncomm ); +MPI_Finalize(); +return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/sixth.c b/teshsuite/smpi/mpich-test/pt2pt/sixth.c new file mode 100644 index 0000000000..5f56e6eac3 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sixth.c @@ -0,0 +1,114 @@ +#include +#include "mpi.h" +#ifdef HAVE_STDLIB_H +#include +#else +extern char *malloc(); +#endif +#include "test.h" + +typedef struct _table { + int references; + int length; + int *value; +} Table; + +/* Prototypes for picky compilers */ +int copy_table ( MPI_Comm, int, void *, void *, void *, int * ); +void create_table ( int, int *, Table ** ); +int delete_table ( 
MPI_Comm, int, void *, void * ); + +/* These are incorrect...*/ +int copy_table ( MPI_Comm oldcomm, int keyval, void *extra_state, + void *attr_in, void *attr_out, int *flag) +{ + Table *table = (Table *)attr_in;; + + table->references++; + (*(void **)attr_out) = attr_in; + (*flag) = 1; + (*(int *)extra_state)++; + return (MPI_SUCCESS); +} + +void create_table ( int num, int *values, Table **table_out ) +{ + int i; + (*table_out) = (Table *)malloc(sizeof(Table)); + (*table_out)->references = 1; + (*table_out)->length = num; + (*table_out)->value = (int *)malloc(sizeof(int)*num); + for (i=0;ivalue[i] = values[i]; +} + +int delete_table ( MPI_Comm comm, int keyval, + void *attr_val, void *extra_state) +{ + Table *table = (Table *)attr_val; + + if ( table->references == 1 ) + free(table); + else + table->references--; + (*(int *)extra_state)--; + return MPI_SUCCESS; +} + +int main ( int argc, char **argv ) +{ + int rank, size; + Table *table; + MPI_Comm new_comm; + int table_key; + int values[3]; + int table_copies = 1; + int found; + int errors = 0; + + MPI_Init ( &argc, &argv ); + MPI_Comm_rank ( MPI_COMM_WORLD, &rank ); + MPI_Comm_size ( MPI_COMM_WORLD, &size ); + + values[0] = 1; values[1] = 2; values[2] = 3; + create_table(3,values,&table); + + MPI_Keyval_create ( copy_table, delete_table, &table_key, + (void *)&table_copies ); + MPI_Attr_put ( MPI_COMM_WORLD, table_key, (void *)table ); + MPI_Comm_dup ( MPI_COMM_WORLD, &new_comm ); + MPI_Attr_get ( new_comm, table_key, (void **)&table, &found ); + + if (!found) { + printf( "did not find attribute on new comm\n" ); + errors++; + } + + if ((table_copies != 2) && (table->references != 2)) { + printf( "table_copies != 2 (=%d) and table->references != 2 (=%d)\n", + table_copies, table->references ); + errors++; + } + + MPI_Comm_free ( &new_comm ); + + if ((table_copies != 1) && (table->references != 1)) { + printf( "table_copies != 1 (=%d) and table->references != 1 (=%d)\n", + table_copies, table->references ); + 
errors++; + } + + MPI_Attr_delete ( MPI_COMM_WORLD, table_key ); + + if ( table_copies != 0 ) { + printf( "table_copies != 0 (=%d)\n", table_copies ); + errors++; + } + if (errors) + printf("[%d] OOPS. %d errors!\n",rank,errors); + + MPI_Keyval_free ( &table_key ); + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/sndrcv.c b/teshsuite/smpi/mpich-test/pt2pt/sndrcv.c new file mode 100644 index 0000000000..9323ccc1df --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sndrcv.c @@ -0,0 +1,138 @@ +#include "mpi.h" +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* + This program is from mpich/tsuite/pt2pt and should be changed there only. + It needs gcomm and dtype from mpich/tsuite, and can be run with + any number of processes > 1. + + This version uses sendrecv and sendrecv_replace (but only in the + head-to-head mode). + */ +int main( int argc, char **argv ) +{ +MPI_Datatype *types; +void **inbufs, **outbufs; +char **names; +int *counts, *bytesize, ntype; +MPI_Comm comms[20]; +int ncomm = 20, rank, np, partner=0, tag, count; +int i, j, k, err, toterr, world_rank, errloc; +MPI_Status status; +char *obuf, *ibuf; + +MPI_Init( &argc, &argv ); + +AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); +GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ +err = 0; +for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } +MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } +FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); +FreeComms( comms, 
ncomm ); +MPI_Finalize(); +return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.c b/teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.c new file mode 100644 index 0000000000..87b599935d --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.c @@ -0,0 +1,68 @@ +#include "mpi.h" +#include +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char **argv ) +{ + MPI_Status status; + int count, dest, source, sendtag, recvtag, len, rc; + int rank, size, errcnt = 0; + MPI_Comm comm; + int *buf; + MPI_Datatype dtype; + MPI_Init( &argc, &argv ); + + MPI_Comm_dup( MPI_COMM_WORLD, &comm ); + MPI_Errhandler_set( comm, MPI_ERRORS_RETURN ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + /* Check recoverable errors */ + if (rank == 0) { + rc = MPI_Sendrecv_replace( (char *)0, 1, MPI_INT, 0, 0, + 0, 0, comm, &status ); + if (!rc) { + errcnt++; + fprintf( stderr, "Failed to detect null buffer\n" ); + } + buf = 0; /* Give buf a value before use */ + rc = MPI_Sendrecv_replace( buf, 1, MPI_DATATYPE_NULL, 0, + 0, 0, 0, comm, &status ); + if (!rc) { + errcnt++; + fprintf( stderr, "Failed to detect null datatype\n" ); + } + /* Could be others */ + } + + /* Check non-contiguous datatypes */ + MPI_Type_vector( 1, 1, 10, MPI_INT, &dtype ); + MPI_Type_commit( &dtype ); + + buf = (int *)malloc( 10 * 10 * sizeof(int) ); + dest = (rank + 1) % size; + source = (rank + size - 1) % size; + + count = 0; + sendtag = 1; + recvtag = 1; + MPI_Sendrecv_replace( buf, count, dtype, dest, + sendtag, source, recvtag, MPI_COMM_WORLD, &status ); + MPI_Get_count( &status, dtype, &len ); + if (len != 0) { + errcnt ++; + fprintf( stderr, "Computed %d for count, should be %d\n", len, 0 ); + } + + MPI_Type_free( &dtype ); + MPI_Comm_free( &comm ); + if (rank == 0) { + printf( "Completed test of MPI_Sendrecv_replace\n" ); + } + MPI_Finalize(); + return 0; +} diff --git 
a/teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.std b/teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.std new file mode 100644 index 0000000000..72502518b4 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sndrcvrep.std @@ -0,0 +1,3 @@ +**** Testing MPI_Sendrecv_replace **** +Completed test of MPI_Sendrecv_replace +**** Testing MPI_Sendrecv_replace **** diff --git a/teshsuite/smpi/mpich-test/pt2pt/sndrcvrpl2.c b/teshsuite/smpi/mpich-test/pt2pt/sndrcvrpl2.c new file mode 100644 index 0000000000..fdb5c81d27 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/sndrcvrpl2.c @@ -0,0 +1,64 @@ + +/* + * Based on a program from James Clippinger (james@cs.dartmouth.edu), + * http://www.cs.dartmouth.edu/~james/. + * + */ +#include "test.h" +#include +#include +#include "mpi.h" + +int main( int argc, char **argv ) +{ + MPI_Status status; + int count, rank, size, dest, source, i, err = 0, toterr; + long *buf; + + /* Initialize MPI and get my rank and total number of + processors */ + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + /* Send-receive-replace the buffer */ + count = 1 << 14; + buf = (long *)malloc( count * sizeof(long) ); + for (i=0; i 10) break; + printf( "Received %ld in buf[%d]; expected %d\n", + buf[i], i, source + size*i ); + } + } +/* + fprintf(stderr, "Done with SRR on proc %d\n", rank); + */ + + /* Finalize everything */ + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (rank == 0) { + if (toterr == 0) + printf( " No Errors\n" ); + else + printf( "Test failed with %d errors!\n", toterr ); + } + free( buf ); + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/ssendtest.c b/teshsuite/smpi/mpich-test/pt2pt/ssendtest.c new file mode 100644 index 0000000000..6de2ca96d2 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/ssendtest.c @@ -0,0 +1,145 @@ +/* + * Program to test that the "synchronous send" semantics + * of point to point communications in MPI is 
(probably) satisfied. + * Two messages are send in one order; the destination uses MPI_Iprobe + * to look for the SECOND message before doing a receive on the first. + * To give a finite-termination, a fixed amount of time is used for + * the Iprobe test. + * + * This program has been patterned off of "overtake.c" + * + * William Gropp + * gropp@mcs.anl.gov + */ + +#include +#include "test.h" +#include "mpi.h" + +#define SIZE 10000 +/* Amount of time in seconds to wait for the receipt of the second Ssend + message */ +#define MAX_TIME 10 +static int src = 0; +static int dest = 1; + +/* Prototypes for picky compilers */ +void Generate_Data ( int *, int ); + +void Generate_Data( int *buffer, int buff_size) +{ + int i; + + for (i = 0; i < buff_size; i++) + buffer[i] = i+1; +} + +int main( int argc, char **argv) +{ + int rank; /* My Rank (0 or 1) */ + int act_size = 0; + int flag, np, rval, i; + int buffer[SIZE]; + double t0; + char *Current_Test = NULL; + MPI_Status status, status1, status2; + int count1, count2; + int sizes[4]; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size( MPI_COMM_WORLD, &np ); + if (np != 2) { + fprintf(stderr, "*** This program uses exactly 2 processes! 
***\n"); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + sizes[0] = 0; + sizes[1] = 1; + sizes[2] = 1000; + sizes[3] = SIZE; +/* for (i = 0; i < 4; i++ ) { */ + for (i = 1; i < 2; i++ ) { + act_size = sizes[i]; + if (rank == src) { + Generate_Data(buffer, SIZE); + MPI_Recv( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD, &status ); + MPI_Send( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD ); + MPI_Ssend( buffer, act_size, MPI_INT, dest, 1, MPI_COMM_WORLD ); + MPI_Ssend( buffer, act_size, MPI_INT, dest, 2, MPI_COMM_WORLD ); + + } else if (rank == dest) { + Test_Init("ssendtest", rank); + /* Test 1 */ + Current_Test = (char*)"Ssend Test (Synchronous Send -> Normal Recieve)"; + MPI_Send( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD ); + MPI_Recv( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD, &status ); + t0 = MPI_Wtime(); + flag = 0; + /* This test depends on a working wtime. Make a simple check */ + if (t0 == 0 && MPI_Wtime() == 0) { + int loopcount = 1000000; + /* This test is too severe (systems with fast + processors and large MPI_Wtick values can + fail. 
Try harder to test MPI_Wtime */ + while (loopcount-- && MPI_Wtime() == 0) ; + if (loopcount <= 0) { + fprintf( stderr, + "MPI_WTIME is returning 0; a working value is needed\n\ +for this test.\n" ); + Test_Failed(Current_Test); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + t0 = MPI_Wtime(); + } + while (MPI_Wtime() - t0 < MAX_TIME) { + MPI_Iprobe( src, 2, MPI_COMM_WORLD, &flag, &status ); + if (flag) { + Test_Failed(Current_Test); + break; + } + } + if (!flag) + Test_Passed(Current_Test); + MPI_Recv( buffer, act_size, MPI_INT, src, 1, MPI_COMM_WORLD, + &status1 ); + MPI_Recv( buffer, act_size, MPI_INT, src, 2, MPI_COMM_WORLD, + &status2 ); + + MPI_Get_count( &status1, MPI_INT, &count1 ); + MPI_Get_count( &status2, MPI_INT, &count2 ); + if (count1 != act_size) { + fprintf( stdout, + "(1) Wrong count from recv of ssend: got %d (%d)\n", + count1, act_size ); + } + if (status1.MPI_TAG != 1) { + fprintf( stdout, "(1) Wrong tag from recv of ssend: got %d\n", + status1.MPI_TAG ); + } + if (count2 != act_size) { + fprintf( stdout, + "(2) Wrong count from recv of ssend: got %d (%d)\n", + count1, act_size ); + } + if (status2.MPI_TAG != 2) { + fprintf( stdout, "(2) Wrong tag from recv of ssend: got %d\n", + status2.MPI_TAG ); + } + + } + } + + Test_Waitforall( ); + rval = 0; + if (rank == dest) { + rval = Summarize_Test_Results(); /* Returns number of tests; + that failed */ + Test_Finalize(); + } + MPI_Finalize(); + return rval; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/ssendtest2.c b/teshsuite/smpi/mpich-test/pt2pt/ssendtest2.c new file mode 100644 index 0000000000..0bc65a89a7 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/ssendtest2.c @@ -0,0 +1,78 @@ +/* + * Test from oertel@ZIB-Berlin.DE + */ + +/* + * Test of MPI_Ssend on MPI implementation on Cray T3D + * + * Process dest should receive numbers 1,...,10 but + * receives 274878030344 instead !!! + * + * Test program works correctly with MPI_Ssend replaced by MPI_Send! 
+ * + * + * Compiler options: /mpp/bin/cc -Tcray-t3d -g -X2 -I"directory of mpi.h" + * + * Output of run with option -mpiversion: + +ssendt3d -mpiversion +MPI model implementation 1.00.11., T3D Device Driver, Version 0.0 +MPI model implementation 1.00.11., T3D Device Driver, Version 0.0 +Configured with -arch=cray_t3d -device=t3d -opt=-g -ar_nolocal -make=gmake +Configured with -arch=cray_t3d -device=t3d -opt=-g -ar_nolocal -make=gmake +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 +Received 274878008072 + + */ + +#include +#include "mpi.h" + + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +#define SIZE 10 + +static int src = 0; +static int dest = 1; + +int main( int argc, char **argv ) +{ + int rank; /* My Rank (0 or 1) */ + int i, ivalue; + MPI_Status Stat; + + MPI_Init(&argc, &argv); + MPI_Comm_rank( MPI_COMM_WORLD, &rank); + + if (rank == src) { + + for (i=1; i<=SIZE; i++) + { + MPI_Ssend( &i, 1, MPI_INT, dest, 2000, MPI_COMM_WORLD); + } + + } else if (rank == dest) { + + for (i=1; i<=SIZE; i++) + { + MPI_Recv( &ivalue, 1, MPI_INT, src, 2000, MPI_COMM_WORLD, &Stat); + printf("Received %d\n", ivalue); fflush(stdout); + } + } + + MPI_Barrier( MPI_COMM_WORLD); + MPI_Finalize(); + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/ssendtest2.std b/teshsuite/smpi/mpich-test/pt2pt/ssendtest2.std new file mode 100644 index 0000000000..33fda5325a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/ssendtest2.std @@ -0,0 +1,12 @@ +**** Verifying ssend (2) **** +Received 1 +Received 2 +Received 3 +Received 4 +Received 5 +Received 6 +Received 7 +Received 8 +Received 9 +Received 10 +**** Verifying ssend (2) **** diff --git a/teshsuite/smpi/mpich-test/pt2pt/structf.f b/teshsuite/smpi/mpich-test/pt2pt/structf.f new file mode 100644 index 0000000000..efb555bc81 --- /dev/null +++ 
b/teshsuite/smpi/mpich-test/pt2pt/structf.f @@ -0,0 +1,87 @@ +C Thanks to +C William R. Magro +C for this test +C +C It has been modifiedly slightly to work with the automated MPI +C tests. +C WDG. +C + program bustit + implicit none + + include 'mpif.h' + + integer ierr + integer comm + integer newtype + integer me + integer position + integer type(5) + integer length(5) + integer disp(5) + integer bufsize + parameter (bufsize=100) + character buf(bufsize) + character name*(10) + integer status(MPI_STATUS_SIZE) + integer i, size + double precision x + integer src, dest + +C Enroll in MPI + call mpi_init(ierr) + +C get my rank + call mpi_comm_rank(MPI_COMM_WORLD, me, ierr) + call mpi_comm_size(MPI_COMM_WORLD, size, ierr ) + if (size .lt. 2) then + print *, "Must have at least 2 processes" + call MPI_Abort( 1, MPI_COMM_WORLD, ierr ) + endif + + comm = MPI_COMM_WORLD + src = 0 + dest = 1 + + if(me.eq.src) then + i=5 + x=5.1234d0 + name="hello" + + type(1)=MPI_CHARACTER + length(1)=5 + call mpi_address(name,disp(1),ierr) + + type(2)=MPI_DOUBLE_PRECISION + length(2)=1 + call mpi_address(x,disp(2),ierr) + + call mpi_type_struct(2,length,disp,type,newtype,ierr) + call mpi_type_commit(newtype,ierr) + call mpi_barrier( MPI_COMM_WORLD, ierr ) + call mpi_send(MPI_BOTTOM,1,newtype,dest,1,comm,ierr) + call mpi_type_free(newtype,ierr) +C write(*,*) "Sent ",name(1:5),x + else +C Everyone calls barrier incase size > 2 + call mpi_barrier( MPI_COMM_WORLD, ierr ) + if (me.eq.dest) then + position=0 + + name = " " + x = 0.0d0 + call mpi_recv(buf,bufsize,MPI_PACKED, src, + . 1, comm, status, ierr) + + call mpi_unpack(buf,bufsize,position, + . name,5,MPI_CHARACTER, comm,ierr) + call mpi_unpack(buf,bufsize,position, + . 
x,1,MPI_DOUBLE_PRECISION, comm,ierr) + print 1, name, x + 1 format( " Received ", a, f7.4 ) + endif + endif + + call mpi_finalize(ierr) + + end diff --git a/teshsuite/smpi/mpich-test/pt2pt/structf.std b/teshsuite/smpi/mpich-test/pt2pt/structf.std new file mode 100644 index 0000000000..fa328cec56 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/structf.std @@ -0,0 +1,3 @@ +*** Testing Type_struct from Fortran *** + Received hello 5.1234 +*** Testing Type_struct from Fortran *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/structlb.c b/teshsuite/smpi/mpich-test/pt2pt/structlb.c new file mode 100644 index 0000000000..a28d29532e --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/structlb.c @@ -0,0 +1,55 @@ +#include "mpi.h" +#include + +int main( int argc, char **argv) +{ + int blockcnt[2], size; + MPI_Datatype tmptype, newtype, oldtypes[2]; + MPI_Aint offsets[2], extent, lb, ub; + + MPI_Init(&argc, &argv); + + blockcnt[0] = 1; + offsets[0] = 1; + oldtypes[0] = MPI_BYTE; + blockcnt[1] = 1; /* set upper bound to avoid padding */ + offsets[1] = 2; + oldtypes[1] = MPI_UB; + MPI_Type_struct(2, blockcnt, offsets, oldtypes, &tmptype); + MPI_Type_commit(&tmptype); + + MPI_Type_size(tmptype, &size); + MPI_Type_lb(tmptype, &lb); + MPI_Type_ub(tmptype, &ub); + MPI_Type_extent(tmptype, &extent); +#ifdef DEBUG + printf("tmptype: size: %d lb: %ld ub: %ld ex: %ld\n", size, lb, ub, + extent); +#endif + + blockcnt[0] = 1; + offsets[0] = 1; + oldtypes[0] = tmptype; + MPI_Type_struct(1, blockcnt, offsets, oldtypes, &newtype); + MPI_Type_commit(&newtype); + + MPI_Type_size(newtype, &size); + MPI_Type_lb(newtype, &lb); + MPI_Type_ub(newtype, &ub); + MPI_Type_extent(newtype, &extent); +#ifdef DEBUG + printf("newtype: size: %ld lb: %ld ub: %ld ex: %d\n", size, lb, ub, + extent); +#endif + if (size != 1 || lb != 2 || ub != 3 || extent != 1) { + printf ("lb = %ld (should be 2), ub = %ld (should be 3) extent = %ld should be 1, size = %d (should be 1)\n", lb, ub, extent, size) ; + } + else 
{ + printf( " No Errors\n" ); + } + MPI_Type_free(&tmptype); + MPI_Type_free(&newtype); + MPI_Finalize(); + + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/systest.c b/teshsuite/smpi/mpich-test/pt2pt/systest.c new file mode 100644 index 0000000000..af5fbb8705 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/systest.c @@ -0,0 +1,433 @@ +#include "mpi.h" +#include + +#define MAX2(a,b) (((a)>(b)) ? (a) : (b)) + +int GlobalReadInteger(); +void Hello(); +/* +void Ring(); +void Stress(); +void Globals(); +*/ + +int main( int argc, char **argv ) +{ + + int me, option; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD,&me); + + fprintf(stderr,"Process %d is alive\n",me); + + while (1) { + + MPI_Barrier(MPI_COMM_WORLD); + + again: + if (me == 0) { + /* Read user input for action */ + (void) printf("\nOptions: 0=quit, 1=Hello, 2=Ring, 3=Stress, "); + (void) printf("4=Globals : "); + (void) fflush(stdout); + } + option = GlobalReadInteger(); + if ( (option < 0) || (option > 4) ) + goto again; + + switch (option) { + case 0: + MPI_Finalize(); + return; + case 1: + Hello(); break; + case 2: + Ring(); break; +/* + case 3: + Stress(); break; + case 4: + Globals(); break; +*/ + default: + fprintf(stderr,"systest: invalid option %d\n", option); break; + } + } +} + +int GlobalReadInteger() +/* + Process zero reads an integer from stdin and broadcasts + to everyone else +*/ +{ + int me, value, *msg, msg_len, type=999 ,zero=0; + + MPI_Comm_rank(MPI_COMM_WORLD, &me); + if (me == 0) { + if (scanf("%d", &value) != 1) + fprintf(stderr,"failed reading integer value from stdin\n"); + } + MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD); + return value; +} + +static void Hello() +/* + Everyone exchanges a hello message with everyone else. + The hello message just comprises the sending and target nodes. 
+*/ +{ + int nproc, me; + int type = 1; + int buffer[2], node, length; + MPI_Status status; + + MPI_Comm_rank(MPI_COMM_WORLD, &me); + MPI_Comm_size(MPI_COMM_WORLD, &nproc); + + if (me == 0) { + printf("\nHello test ... show network integrity\n----------\n\n"); + fflush(stdout); + } + + for (node = 0; node= 4*1024*1024) ) + max_len = 512*1024; + if ( (buffer = malloc((unsigned) max_len)) == (char *) NULL) { + printf("process %d could not allocate buffer of size %d\n",me,max_len); + MPI_Abort(MPI_COMM_WORLD,7777); + } + + lenbuf = 1; + while (lenbuf <= max_len) { + start_ustime = MPI_Wtime(); + if (me == 0) { + MPI_Send(buffer,lenbuf,MPI_CHAR,left, type,MPI_COMM_WORLD); + MPI_Recv(buffer,lenbuf,MPI_CHAR,right,type,MPI_COMM_WORLD,&status); + } + else { + MPI_Recv(buffer,lenbuf,MPI_CHAR,right,type,MPI_COMM_WORLD,&status); + MPI_Send(buffer,lenbuf,MPI_CHAR,left, type,MPI_COMM_WORLD); + } + used_ustime = MPI_Wtime() - start_ustime; + + if (used_ustime > 0) + us_rate = 1.0 * (double) (nproc * lenbuf) / (double) used_ustime; + else + us_rate = 0.0; + if (me == 0) + printf("len=%d bytes, used= %d us, rate=%f Mbytes/sec\n", + lenbuf, used_ustime, us_rate); + + lenbuf *= 2; + } + free(buffer); +} + +double ranf() +/* Returns ran # uniform in (0,1) ... probably rather bad statistics. 
*/ +{ + static unsigned long seed = 54321; + + seed = seed * 1812433253 + 12345; + return (seed & 0x7fffffff) * 4.6566128752458e-10; +} + +static void RandList(lo, hi, list, n) + int lo, hi, *list, n; +/* + Fill list with n random integers between lo & hi inclusively +*/ +{ + int i, ran; + double dran; + + for (i=0; i hi) + ran = hi; + list[i] = ran; + } +} + +static void Stress() +/* + Stress the system by passing messages between a randomly selected + list of nodes +*/ +{ +#define N_LEN 10 +#ifdef NCUBE + /* ncube does not handle msgs larger than + 32K at present (see nwrite) */ + static int len[N_LEN] = {0,1,2,4,8,4096,8192,16384,32768,32768}; +#else + static int len[N_LEN] = {0,1,2,4,8,4096,8192,16384,32768,65536}; +#endif + int me = p4_get_my_id(); + int nproc = p4_num_total_ids(); + int zero = 0; + int type, lenbuf, i, j, from, to; + int *list_i, *list_j, *list_n; + char *buffer; + int n_stress, mod, *msg, msg_len; + + + type = 6; + if (me == 0) { + (void) printf("\nStress test ... randomly exchange messages\n-----------"); + (void) printf("\n\nInput no. 
of messages: "); + (void) fflush(stdout); + } + n_stress = GlobalReadInteger(); + if ( (n_stress <= 0) || (n_stress > 100000) ) + n_stress = 1000; + p4_dprintfl(00,"n_stress=%d\n",n_stress); + + lenbuf = n_stress * sizeof(int); + + if (!(buffer = p4_shmalloc((unsigned) len[N_LEN-1]))) + p4_error("Stress: failed to allocate buffer", len[N_LEN-1]); + + type = 7; + if (me == 0) { /* Make random list of pairs and message lengths */ + if (!(list_i = (int *) p4_shmalloc((unsigned) lenbuf))) + p4_error("Stress: failed to allocate list_i",lenbuf); + if (!(list_j = (int *) p4_shmalloc((unsigned) lenbuf))) + p4_error("Stress: failed to allocate list_j",lenbuf); + if (!(list_n = (int *) p4_shmalloc((unsigned) lenbuf))) + p4_error("Stress: failed to allocate list_n",lenbuf); + + RandList((int) 0, nproc-1, list_i, n_stress); + RandList((int) 0, nproc-1, list_j, n_stress); + RandList((int) 0, N_LEN-1, list_n, n_stress); + for (i=0; i=0 ) ? (a) : -(a)) + int nerrs = 0; + double diff; + + while (n--) { + diff = *a++ - *b++; + if (ABS(diff) > 1.0e-8) + nerrs++; + } + + return nerrs; +} + +static void Globals() +/* + Test out functioning of the global operations. +*/ +{ + int nproc = p4_num_total_ids(); + int me = p4_get_my_id(); + int n, i, start, used, nerrs; + double *a, *b, rate; + +#define DO(string, op) \ + start = p4_clock(); \ + if (p4_global_op(33, (char *) a, n, sizeof(double), op, P4DBL)) \ + p4_error("p4_global_op failed",n); \ + used = p4_clock()-start; \ + rate = (used>0) ? 
n/(1.0e+3 * used) : 0.0; \ + nerrs = CompareVectors(n, a, b); \ + if (me == 0) \ + (void) printf("%s, len=%d, used=%d ms, rate=%f Mop/sec, nerrs=%d\n",\ + string, n, used, rate, nerrs); + + if (me == 0) { + (void) printf("\nGlobal operations test\n----------------------"); + (void) printf("\n\nInput vector length "); + (void) fflush(stdout); + } + n = GlobalReadInteger(); + if ( (n < 0) || (n > 1000000) ) + n = 1000; + + if (!(a = (double *) p4_shmalloc((unsigned) (n*sizeof(double))))) + p4_error("failed to create work space (a)",n); + if (!(b = (double *) p4_shmalloc((unsigned) (n*sizeof(double))))) + p4_error("failed to create work space (b)",n); + + /* Summation */ + + for (i=0; i + +#define MAX2(a,b) (((a)>(b)) ? (a) : (b)) + +int GlobalReadInteger(); +void Hello(); +/* +void Ring(); +void Stress(); +void Globals(); +*/ + +int main(argc,argv) +int argc; +char **argv; +{ + + int me, option; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD,&me); + + fprintf(stderr,"Process %d is alive\n",me); + + while (1) { + + MPI_Barrier(MPI_COMM_WORLD); + + again: + if (me == 0) { + /* Read user input for action */ + (void) printf("\nOptions: 0=quit, 1=Hello, 2=Ring, 3=Stress, "); + (void) printf("4=Globals : "); + (void) fflush(stdout); + } + option = GlobalReadInteger(); + if ( (option < 0) || (option > 4) ) + goto again; + + switch (option) { + case 0: + MPI_Finalize(); + return; + case 1: + Hello(); break; +/* + case 2: + Ring(); break; + case 3: + Stress(); break; + case 4: + Globals(); break; +*/ + default: + fprintf(stderr,"systest: invalid option %d\n", option); break; + } + } +} + +int GlobalReadInteger() +/* + Process zero reads an integer from stdin and broadcasts + to everyone else +*/ +{ + int me, value, *msg, msg_len, type=999 ,zero=0; + + MPI_Comm_rank(MPI_COMM_WORLD, &me); + if (me == 0) { + if (scanf("%d", &value) != 1) + fprintf(stderr,"failed reading integer value from stdin\n"); + } + MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD); + return 
value; +} + +static void Hello() +/* + Everyone exchanges a hello message with everyone else. + The hello message just comprises the sending and target nodes. +*/ +{ + int nproc, me; + int type = 1; + int buffer[2], node, length; + MPI_Status status; + + MPI_Comm_rank(MPI_COMM_WORLD, &me); + MPI_Comm_size(MPI_COMM_WORLD, &nproc); + + if (me == 0) { + printf("\nHello test ... show network integrity\n----------\n\n"); + fflush(stdout); + } + + for (node = 0; node +#include +#include "test.h" + +#if defined(USE_STDARG) +#include +#endif + +static int tests_passed = 0; +static int tests_failed = 0; +static char failed_tests[255][81]; +static char suite_name[255]; +FILE *fileout = NULL; + +void Test_Init(const char *suite, int rank) +{ + char filename[512]; + + sprintf(filename, "%s-%d.out", suite, rank); + strncpy(suite_name, suite, 255); + fileout = fopen(filename, "w"); + if (!fileout) { + fprintf( stderr, "Could not open %s on node %d\n", filename, rank ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + //MPI_Errhandler_create( Test_Errors_warn, &TEST_ERRORS_WARN ); +} + +void Test_Message(const char *mess) +{ + fprintf(fileout, "[%s]: %s\n", suite_name, mess); + fflush(fileout); +} + +void Test_Failed(const char *test) +{ + fprintf(fileout, "[%s]: *** Test '%s' Failed! 
***\n", suite_name, test); + strncpy(failed_tests[tests_failed], test, 81); + fflush(fileout); + tests_failed++; +} + +void Test_Passed(const char *test) +{ +#ifdef VERBOSE + fprintf(fileout, "[%s]: Test '%s' Passed.\n", suite_name, test); + fflush(fileout); +#endif + tests_passed++; +} + +int Summarize_Test_Results() +{ +#ifdef VERBOSE + fprintf(fileout, "For test suite '%s':\n", suite_name); +#else + if (tests_failed > 0) +#endif + { + fprintf(fileout, "Of %d attempted tests, %d passed, %d failed.\n", + tests_passed + tests_failed, tests_passed, tests_failed); + } + if (tests_failed > 0) { + int i; + + fprintf(fileout, "*** Tests Failed:\n"); + for (i = 0; i < tests_failed; i++) + fprintf(fileout, "*** %s\n", failed_tests[i]); + } + return tests_failed; +} + +void Test_Finalize( void ) +{ + //if (TEST_ERRORS_WARN != MPI_ERRHANDLER_NULL) + //MPI_Errhandler_free( &TEST_ERRORS_WARN ); + if (fileout) { + fflush(fileout); + //fclose(fileout); + } +} + +#include "mpi.h" +/* Wait for every process to pass through this point. This test is used + to make sure that all processes complete, and that a test "passes" because + it executed, not because it some process failed. + */ +void Test_Waitforall( void ) +{ + int m, one, myrank, n; + + MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); + MPI_Comm_size( MPI_COMM_WORLD, &n ); + one = 1; + MPI_Allreduce( &one, &m, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + + if (m != n) { + printf( "[%d] Expected %d processes to wait at end, got %d\n", myrank, + n, m ); + } + if (myrank == 0) + printf( " No Errors\n" ); +} + +/* + Handler prints warning messsage and returns. Internal. Not + a part of the standard. + */ +MPI_Errhandler TEST_ERRORS_WARN ; + +#ifdef USE_STDARG +void Test_Errors_warn( MPI_Comm *comm, int *code, ... 
) +{ + char buf[MPI_MAX_ERROR_STRING]; + int myid; + char *string; +#ifdef MPIR_DEBUG + char *file; + int *line; +#endif + static int in_handler = 0; + va_list Argp; + +#ifdef USE_OLDSTYLE_STDARG + va_start( Argp ); +#else + va_start( Argp, code ); +#endif + string = va_arg(Argp,char *); +#ifdef MPIR_DEBUG + /* These are only needed for debugging output */ + file = va_arg(Argp,char *); + line = va_arg(Argp,int *); +#endif + va_end( Argp ); +#else +void Test_Errors_warn( MPI_Comm *comm, int *code, char *string, char *file, + int *line ) +{ + char buf[MPI_MAX_ERROR_STRING]; + int myid, result_len; + static int in_handler = 0; +#endif + + if (in_handler) return; + in_handler = 1; + + MPI_Comm_rank( MPI_COMM_WORLD, &myid ); + //MPI_Error_string( *code, buf, &result_len ); +#ifdef MPIR_DEBUG + /* Generate this information ONLY when debugging MPIR */ + fprintf( stderr, "%d - File: %s Line: %d\n", myid, + file, *line ); +#endif + fprintf( stderr, "%d - %s : %s\n", myid, + string ? string : "", buf ); + fflush( stderr ); + in_handler = 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/test.h b/teshsuite/smpi/mpich-test/pt2pt/test.h new file mode 100644 index 0000000000..fb83d2279c --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/test.h @@ -0,0 +1,29 @@ +/* Header for testing procedures */ + +#ifndef _INCLUDED_TEST_H_ +#define _INCLUDED_TEST_H_ + +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Test_Init (const char *, int); +#ifdef USE_STDARG +void Test_Printf (const char *, ...); +void Test_Errors_warn ( MPI_Comm *, int *, ... 
); +#else +/* No prototype */ +void Test_Printf(); +void Test_Errors_warn(); +#endif +void Test_Message (const char *); +void Test_Failed (const char *); +void Test_Passed (const char *); +int Summarize_Test_Results (void); +void Test_Finalize (void); +void Test_Waitforall (void); + +extern MPI_Errhandler TEST_ERRORS_WARN; +#endif diff --git a/teshsuite/smpi/mpich-test/pt2pt/testall.c b/teshsuite/smpi/mpich-test/pt2pt/testall.c new file mode 100644 index 0000000000..22f471ba7c --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/testall.c @@ -0,0 +1,175 @@ +#include "mpi.h" +#include +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* + Multiple completions + + This is similar to a test in allpair.f, but with an expanded range of + datatypes and communicators. + */ + +int main( int argc, char **argv ) +{ +MPI_Datatype *types; +void **inbufs, **outbufs; +char **names; +int *counts, *bytesize, ntype; +MPI_Comm comms[20]; +int ncomm = 20, rank, np, partner, tag; +int i, j, k, err, toterr, world_rank; +MPI_Status status, statuses[2]; +int flag; +char *obuf; +MPI_Request requests[2]; + + +MPI_Init( &argc, &argv ); + +AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); +GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ +err = 0; +for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } +MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } +FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); +FreeComms( comms, ncomm ); +MPI_Finalize(); + +return err; +} diff --git 
a/teshsuite/smpi/mpich-test/pt2pt/testhetero b/teshsuite/smpi/mpich-test/pt2pt/testhetero new file mode 100755 index 0000000000..5f32e12ac9 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/testhetero @@ -0,0 +1,92 @@ +#! /bin/sh +# +# This is a simple heterogeneous test which exploits the mpicc command and +# mpirun. This is an example of how heterogeneous programs may be built and +# run +# +# +# Parameters for all programs and systems +set -x +device=ch_p4 +mpihome=../../.. +rshcmd=rsh +programs="sendrecv sendrecv2 sendrecv3 sendrecv4 getelm" +# Extra files needed for each program. +sendrecvfiles="test.c" +sendrecvargs="-nolongdouble" +sendrecv2files="dtypes.c gcomm.c" +sendrecv3files="dtypes.c gcomm.c" +sendrecv4files="dtypes.c gcomm.c" +getelmfiles="" +# +# +# arch1 is local, arch2 is remote +arch1=sun4 +arch2=freebsd +name2=dogbert +# +debug_args="" +fail_hard=1 +rebuild=0 +mpirun_args="" +for arg in "$@" ; do + case "$arg" in + -echo) set -x ;; + -noclean) noclean=1 ;; + -debug) debug_args="-p4dbg 99 -p4rdbg 99" ;; + -mpichdebug) debug_args="$debug_args -mpichdebug" ;; + -xxgdb) mpirun_args="-xxgdb" ;; + -soft) fail_hard=0 ;; + -force | -rebuild) rebuild=1 ;; + -alpha) arch2=alpha ; name2=ptera ;; + -help) + echo "Test heterogeneous operation of MPICH with ch_p4 using" + echo "the versions of MPICH built in the current tree." + echo "Should be run on a sun4; it rsh's to other machines as" + echo "necessary." 
+ exit 1 + ;; + *) echo "Unrecognized argument $arg" + exit 1 + ;; + esac +done +# +arches="$arch1 $arch2" +# +mypwd=`pwd` +# Fixup for brain-dead automounters +mypwd=`echo $mypwd | sed s%/tmp_mnt%%g` +# +# Build local versions +if [ 1 = 1 ] ; then + for pgm in $programs ; do + eval extrafiles=$"${pgm}files" + $mpihome/lib/$arch1/$device/mpicc -o $pgm.$arch1 $pgm.c $extrafiles + done +fi +# +# Build remote versions +for pgm in $programs ; do + eval extrafiles=$"${pgm}files" + $rshcmd -n $name2 "(cd $mypwd ; $mpihome/lib/$arch2/$device/mpicc \ + -o $pgm.$arch2 $pgm.c $extrafiles)" +done +# +# Run the programs +for pgm in $programs ; do + echo "Running $pgm..." + eval extraargs=$"${pgm}args" + $mpihome/lib/$arch1/$device/mpirun $mpirun_args \ + -arch $arch1 -np 1 -arch $arch2 -np 1 $pgm.%a $debug_args \ + $extraargs +done +# +# Remove the executables +if [ -z "$noclean" ] ; then + for arch in $arches ; do + for pgm in $programs ; do + rm -f $pgm.$arch + done + done +fi diff --git a/teshsuite/smpi/mpich-test/pt2pt/testsome.c b/teshsuite/smpi/mpich-test/pt2pt/testsome.c new file mode 100644 index 0000000000..955e503baf --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/testsome.c @@ -0,0 +1,173 @@ +#include "mpi.h" +#include +#include +#include "dtypes.h" +#include "gcomm.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int verbose = 0; +/* + Multiple completions + + This is similar to a test in allpair.f, but with an expanded range of + datatypes and communicators. 
+ */ + +int main( int argc, char **argv ) +{ +MPI_Datatype *types; +void **inbufs, **outbufs; +char **names; +int *counts, *bytesize, ntype; +MPI_Comm comms[20]; +int ncomm = 20, rank, np, partner, tag; +int i, j, k, err, toterr, world_rank; +MPI_Status status, statuses[2]; +int flag, index, outcount, indices[2]; +char *obuf; +MPI_Request requests[2]; + + +MPI_Init( &argc, &argv ); + +AllocateForData( &types, &inbufs, &outbufs, &counts, &bytesize, + &names, &ntype ); +GenerateData( types, inbufs, outbufs, counts, bytesize, names, &ntype ); + +MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); +MakeComms( comms, 20, &ncomm, 0 ); + +/* Test over a wide range of datatypes and communicators */ +err = 0; +for (i=0; i 0) { + fprintf( stderr, "%d errors on %d\n", err, rank ); + } + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) { + printf( " No Errors\n" ); + } + else { + printf (" Found %d errors\n", toterr ); + } + } +FreeDatatypes( types, inbufs, outbufs, counts, bytesize, names, ntype ); +FreeComms( comms, ncomm ); +MPI_Finalize(); + +return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/testtest1.c b/teshsuite/smpi/mpich-test/pt2pt/testtest1.c new file mode 100644 index 0000000000..e087567a89 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/testtest1.c @@ -0,0 +1,83 @@ +/* + This is a test of MPI_Test to receive a message of known length (used as a + server) + */ +#include "mpi.h" +#include +#include +#include "test.h" + +int main( int argc, char **argv ) +{ + int data, to, from, tag, maxlen, np, myid, flag, dest, src; + MPI_Status status; + MPI_Request request; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &myid ); + MPI_Comm_size( MPI_COMM_WORLD, &np ); + +/* dest writes out the received stats; for the output to be + consistant (with the final check), it should be procees 0 */ + if (argc > 1 && argv[1] && strcmp( "-alt", argv[1] ) == 0) { + dest = np - 1; + src = 0; + } + else { 
+ src = np - 1; + dest = 0; + } + + if (myid == src) { + to = dest; + tag = 2000; + data = 100; +#ifdef VERBOSE + printf( "About to send\n" ); +#endif + MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD ); + tag = 2001; + data = 0; +#ifdef VERBOSE + printf( "About to send 'done'\n" ); +#endif + MPI_Send( &data, 1, MPI_INT, to, tag, MPI_COMM_WORLD ); + } + else { + /* Server loop */ + while (1) { + tag = MPI_ANY_TAG; + from = MPI_ANY_SOURCE; + MPI_Irecv( &data, 1, MPI_INT, from, tag, MPI_COMM_WORLD, + &request ); + /* Should really use MPI_Wait, but functionally this will work + (it is less efficient, however) */ + do { + MPI_Test( &request, &flag, &status ); + } while (!flag); + if (status.MPI_TAG == 2001) { +#ifdef VERBOSE + printf( "Received terminate message\n" ); +#endif + break; + } + if (status.MPI_TAG == 2000) { + MPI_Get_count( &status, MPI_INT, &maxlen ); + if (maxlen != 1) { + fprintf( stderr, "Should have received one integer; got %d\n", + maxlen ); + } + /* Check data: */ + if (data != 100) { + fprintf( stderr, + "Did not receive correct data: %d instead of %d\n", + data, 100 ); + } + } + } + } + MPI_Barrier( MPI_COMM_WORLD ); + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/testtypes.c b/teshsuite/smpi/mpich-test/pt2pt/testtypes.c new file mode 100644 index 0000000000..be041816e3 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/testtypes.c @@ -0,0 +1,37 @@ +#include +#include "mpi.h" +#include "test.h" + +int main( int argc, char **argv ) +{ + int i_size; + MPI_Aint i_extent; + + MPI_Init(&argc, &argv); + + + MPI_Type_extent(MPI_INT, &i_extent); + printf("MPI_Type_extent (MPI_INT) = %ld\n", i_extent); + + MPI_Type_size(MPI_INT, &i_size); + printf("MPI_Type_size (MPI_INT) = %d\n", i_size); + + + MPI_Type_extent(MPI_UNSIGNED, &i_extent); + printf("MPI_Type_extent (MPI_UNSIGNED) = %ld\n", i_extent); + + MPI_Type_size(MPI_UNSIGNED, &i_size); + printf("MPI_Type_size (MPI_UNSIGNED) = %d\n", i_size); + 
+#if defined(HAVE_LONG_DOUBLE) + MPI_Type_extent(MPI_LONG_DOUBLE, &i_extent); + printf("MPI_Type_extent (MPI_LONG_DOUBLE) = %ld\n", i_extent); + + MPI_Type_size(MPI_LONG_DOUBLE, &i_size); + printf("MPI_Type_size (MPI_LONG_DOUBLE) = %d\n", i_size); +#endif + + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/third.c b/teshsuite/smpi/mpich-test/pt2pt/third.c new file mode 100644 index 0000000000..f8a2f24950 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/third.c @@ -0,0 +1,121 @@ +/* + third - test program that tests queueing by sending messages with various + tags, receiving them in particular order. + */ + +#include +#include +#include "mpi.h" +#include "test.h" +#ifdef HAVE_UNISTD_H +/* For sleep */ +#include +#endif + +#ifndef HAVE_SLEEP +void sleep( int secs ) +{ +#ifdef VX_WORKS + /* Also needs include ? */ + struct timespec rqtp = { 10, 0 }; + nanosleep(&rqtp, NULL); +#else + double t; + t = MPI_Wtime(); + while (MPI_Wtime() - t < (double)secs) ; +#endif +} +#endif + +/* Define VERBOSE to get printed output */ +int main( int argc, char **argv ) +{ + int rank, size, to, from, tag, count; + int src, dest, waiter; + int st_count; +#ifdef VERBOSE + int st_source, st_tag; +#endif + MPI_Status status; + char data[100]; + MPI_Request rq[2]; + MPI_Status statuses[2]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); +/* + src = size - 1; + dest = 0; + */ + src = 0; + dest = size - 1; + /* waiter = dest; */ /* Receiver delays, so msgs unexpected */ + /* waiter = src; */ /* Sender delays, so recvs posted */ + waiter = 10000; /* nobody waits */ + + if (rank == src) + { + if (waiter == src) + sleep(10); + to = dest; + tag = 2001; + sprintf(data,"First message, type 2001"); + count = strlen(data) + 1; + MPI_Isend( data, count, MPI_CHAR, to, tag, MPI_COMM_WORLD, &rq[0] ); +#ifdef VERBOSE + printf("%d sent :%s:\n", rank, data ); +#endif + tag = 2002; + 
sprintf(data,"Second message, type 2002"); + count = strlen(data) + 1; + MPI_Isend( data, count, MPI_CHAR, to, tag, MPI_COMM_WORLD, &rq[1] ); + MPI_Waitall( 2, rq, statuses ); +#ifdef VERBOSE + printf("%d sent :%s:\n", rank, data ); +#endif + } + else + if (rank == dest) + { + if (waiter == dest) + sleep(10); + from = MPI_ANY_SOURCE; + count = 100; + + tag = 2002; + MPI_Recv(data, count, MPI_CHAR, from, tag, MPI_COMM_WORLD, &status ); + + MPI_Get_count( &status, MPI_CHAR, &st_count ); + if (st_count != strlen("Second message, type 2002") + 1) { + printf( "Received wrong length!\n" ); + } +#ifdef VERBOSE + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG; + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + printf( "%d received :%s:\n", rank, data); +#endif + tag = 2001; + MPI_Recv(data, count, MPI_CHAR, from, tag, MPI_COMM_WORLD, &status ); + + MPI_Get_count( &status, MPI_CHAR, &st_count ); + if (st_count != strlen("First message, type 2001") + 1) { + printf( "Received wrong length!\n" ); + } +#ifdef VERBOSE + st_source = status.MPI_SOURCE; + st_tag = status.MPI_TAG;\ + printf( "Status info: source = %d, tag = %d, count = %d\n", + st_source, st_tag, st_count ); + printf( "%d received :%s:\n", rank, data); +#endif + } +#ifdef VERBOSE + printf( "Process %d exiting\n", rank ); +#endif + Test_Waitforall( ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/trunc.c b/teshsuite/smpi/mpich-test/pt2pt/trunc.c new file mode 100644 index 0000000000..d07993a13a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/trunc.c @@ -0,0 +1,163 @@ +/* + * This file tests that message truncation errors are properly detected and + * handled (in particular, that data is NOT overwritten). 
+ */ + +#include "mpi.h" +#include +#include +#include "test.h" +/* Prototypes for picky compilers */ +int SetupRecvBuf ( int * ); +int CheckRecvErr ( int, MPI_Status *, int *, const char * ); + +int main( int argc, char **argv ) +{ + int err = 0, toterr; + int world_rank; + MPI_Comm comm, dupcomm; + int rank, size; + int partner, merr, flag; + MPI_Status status; + MPI_Request request; + int i, sendbuf[10], recvbuf[10]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + + comm = MPI_COMM_WORLD; + MPI_Comm_dup( comm, &dupcomm ); + MPI_Comm_rank( comm, &rank ); + MPI_Comm_size( comm, &size ); + +/* We'll RECEIVE into rank 0, just to simplify any debugging. The tests are + sender receiver + send( count = 10 ) recv(count = 1) + isend( count = 10 ) + sendrecv sendrecv + wait recv(count=1) (unexpected recv) + irecv( count = 1) + sendrecv sendrecv + send( count = 10) wait (expected/err trunc) + irecv( count = 1) + sendrecv sendrecv + send( count = 10) test (expected/err trunc) + */ + + if (size < 2) { + fprintf( stderr, "This test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if (rank == 0) { + /* Only return on the RECEIVERS side */ + MPI_Errhandler_set( comm, MPI_ERRORS_RETURN ); + partner = size - 1; + SetupRecvBuf( recvbuf ); + merr = MPI_Recv( recvbuf, 1, MPI_INT, partner, 1, comm, &status ); + err += CheckRecvErr( merr, &status, recvbuf, "Recv" ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + + SetupRecvBuf( recvbuf ); + merr = MPI_Recv( recvbuf, 1, MPI_INT, partner, 2, comm, &status ); + err += CheckRecvErr( merr, &status, recvbuf, "Unexpected Recv" ); + + SetupRecvBuf( recvbuf ); + merr = MPI_Irecv( recvbuf, 1, MPI_INT, partner, 3, comm, &request ); + + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + merr = MPI_Wait( &request, &status ); + err += CheckRecvErr( merr, &status, 
recvbuf, "Irecv/Wait" ); + + SetupRecvBuf( recvbuf ); + merr = MPI_Irecv( recvbuf, 1, MPI_INT, partner, 4, comm, &request ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + do { + merr = MPI_Test( &request, &flag, &status ); + } while (merr == 0 && flag == 0); + err += CheckRecvErr( merr, &status, recvbuf, "Irecv/Test" ); + } + else if (rank == size - 1) { + partner = 0; + for (i=0; i<10; i++) + sendbuf[i] = 100 + i; + MPI_Send( sendbuf, 10, MPI_INT, partner, 1, comm ); + MPI_Isend( sendbuf, 10, MPI_INT, partner, 2, comm, &request ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + MPI_Wait( &request, &status ); + + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + MPI_Send( sendbuf, 10, MPI_INT, partner, 3, comm ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + MPI_Send( sendbuf, 10, MPI_INT, partner, 4, comm ); + } + MPI_Comm_free( &dupcomm ); + + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) + printf( " No Errors\n" ); + else + printf( "Found %d errors in Truncated Message test\n", toterr ); + } + MPI_Finalize( ); + return toterr; +} + +int SetupRecvBuf( recvbuf ) +int *recvbuf; +{ + int i; + for (i=0; i<10; i++) + recvbuf[i] = i+1; + return 0; +} + +int CheckRecvErr( merr, status, recvbuf, msg ) +int merr, *recvbuf; +MPI_Status *status; +const char *msg; +{ + int class; + int err = 0, rlen; + char buf[MPI_MAX_ERROR_STRING]; + +/* Get the MPI Error class from merr */ + MPI_Error_class( merr, &class ); + switch (class) { + case MPI_ERR_TRUNCATE: + /* Check that data buf is ok */ + if (recvbuf[1] != 2) { + err++; + fprintf( stderr, + "Receive buffer overwritten! 
Found %d in 2nd pos.\n", + recvbuf[1] ); + } + break; + + case MPI_ERR_IN_STATUS: + /* Check for correct message */ + /* ERR IN STATUS is correct ONLY for multiple completion routines */ +/* if (status->MPI_ERROR == MPI_ERR_TRUNCATE) + break; */ + /* Else, fall through into default... */ + default: + /* Wrong error; get message and print */ + MPI_Error_string( merr, buf, &rlen ); + fprintf( stderr, + "Got unexpected error message from %s: %s\n", msg, buf ); + err++; + } + return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/truncmult.c b/teshsuite/smpi/mpich-test/pt2pt/truncmult.c new file mode 100644 index 0000000000..7f1e68b075 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/truncmult.c @@ -0,0 +1,258 @@ +/* + * This file tests that message truncation errors are properly detected and + * handled (in particular, that data is NOT overwritten). + * + * This version checks the multiple completion routines + */ + +#include "mpi.h" +#include +#include +#include "test.h" +/* Prototypes for picky compilers */ +int SetupRecvBuf ( int * ); +int CheckRecvErr ( int, MPI_Status *, int *, const char * ); +int CheckRecvOk ( MPI_Status *, int *, int, const char * ); + +int main( int argc, char **argv ) +{ + int err = 0, toterr; + int world_rank; + MPI_Comm comm, dupcomm; + int rank, size; + int partner, merr; + MPI_Status statuses[4], status; + MPI_Request requests[4]; + int i, sendbuf[10], + recvbuf1[10], recvbuf2[10], recvbuf3[10], recvbuf4[10]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &world_rank ); + + comm = MPI_COMM_WORLD; + MPI_Comm_dup( comm, &dupcomm ); + MPI_Comm_rank( comm, &rank ); + MPI_Comm_size( comm, &size ); + +/* We'll RECEIVE into rank 0, just to simplify any debugging. Just in + case the MPI implementation tests for errors when the irecv is issued, + we make sure that the matching sends don't occur until the receives + are posted. 
+ + sender receiver + irecv(tag=1,count=1) + irecv(tag=2,count=1) + sendrecv sendrecv + send(tag=1,count=1) + send(tag=2,count=10) + waitall() + error in status, err trunc + wait for tag = 1 if necessary + sendrecv sendrecv + Ditto, but with 2 truncated messages + Ditto, but with testall. (not done yet) + All of the above, but with waitsome/testsome (not done yet) + */ + + if (rank == 0) { + /* Only return on the RECEIVERS side */ + MPI_Errhandler_set( comm, MPI_ERRORS_RETURN ); + partner = size - 1; + + SetupRecvBuf( recvbuf1 ); + SetupRecvBuf( recvbuf2 ); + merr = MPI_Irecv( recvbuf1, 1, MPI_INT, partner, 1, comm, + &requests[0] ); /* this will succeed */ + merr = MPI_Irecv( recvbuf2, 1, MPI_INT, partner, 2, comm, + &requests[1] ); /* this will fail */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + merr = MPI_Waitall( 2, requests, statuses ); + if (merr != MPI_ERR_IN_STATUS) { + err++; + fprintf( stderr, "Did not return MPI_ERR_IN_STATUS\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if (statuses[0].MPI_ERROR == MPI_ERR_PENDING) { + /* information - first send is not yet complete */ + if ((statuses[0].MPI_ERROR = MPI_Wait( &requests[0], &statuses[0] )) == MPI_SUCCESS) { + err++; + fprintf( stderr, "failed to complete legal request (1)\n" ); + } + } + if (statuses[0].MPI_ERROR != MPI_SUCCESS) { + err ++; + fprintf( stderr, "Could not complete legal send-receive\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + err += CheckRecvErr( merr, &statuses[1], recvbuf2, "Irecv" ); + + SetupRecvBuf( recvbuf1 ); + SetupRecvBuf( recvbuf2 ); + SetupRecvBuf( recvbuf3 ); + SetupRecvBuf( recvbuf4 ); + merr = MPI_Irecv( recvbuf1, 1, MPI_INT, partner, 1, comm, + &requests[0] ); /* this will succeed */ + merr = MPI_Irecv( recvbuf2, 1, MPI_INT, partner, 2, comm, + &requests[1] ); /* this will fail */ + merr = MPI_Irecv( recvbuf3, 1, MPI_INT, partner, 3, comm, + &requests[2] ); /* this will fail */ + merr = MPI_Irecv( 
recvbuf4, 1, MPI_INT, partner, 4, comm, + &requests[3] ); /* this will succeed */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + merr = MPI_Waitall( 4, requests, statuses ); + if (merr != MPI_ERR_IN_STATUS) { + err++; + fprintf( stderr, "Did not return MPI_ERR_IN_STATUS (4)\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if (statuses[0].MPI_ERROR == MPI_ERR_PENDING) { + /* information - first send is not yet complete */ + if ((statuses[0].MPI_ERROR = MPI_Wait( &requests[0], &statuses[0] )) != MPI_SUCCESS) { + err++; + fprintf( stderr, "failed to complete legal request (1a)\n" ); + } + } + /* Check for correct completion */ + err += CheckRecvOk( &statuses[0], recvbuf1, 1, "4-1" ); + + if (statuses[3].MPI_ERROR == MPI_ERR_PENDING) { + /* information - first send is not yet complete */ + if ((statuses[3].MPI_ERROR = MPI_Wait( &requests[3], &statuses[3] )) != MPI_SUCCESS) { + err++; + fprintf( stderr, "failed to complete legal request (3a)\n" ); + } + } + /* Check for correct completion */ + err += CheckRecvOk( &statuses[3], recvbuf4, 4, "4-4" ); + + if (statuses[0].MPI_ERROR != MPI_SUCCESS) { + err ++; + fprintf( stderr, "Could not complete legal send-receive-0\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + if (statuses[3].MPI_ERROR != MPI_SUCCESS) { + err ++; + fprintf( stderr, "Could not complete legal send-receive-3\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + + if (statuses[1].MPI_ERROR == MPI_ERR_PENDING) { + statuses[1].MPI_ERROR = MPI_Wait( &requests[1], &statuses[1] ); + } + err += CheckRecvErr( merr, &statuses[1], recvbuf2, "Irecv-2" ); + if (statuses[2].MPI_ERROR == MPI_ERR_PENDING) { + statuses[2].MPI_ERROR = MPI_Wait( &requests[2], &statuses[2] ); + } + err += CheckRecvErr( merr, &statuses[2], recvbuf3, "Irecv-3" ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + } + else if (rank == size - 1) { + partner = 0; + for (i=0; i<10; 
i++) + sendbuf[i] = 100 + i; + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + MPI_Send( sendbuf, 1, MPI_INT, partner, 1, comm ); + MPI_Send( sendbuf, 10, MPI_INT, partner, 2, comm ); + + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + MPI_Send( sendbuf, 1, MPI_INT, partner, 1, comm ); + MPI_Send( sendbuf, 10, MPI_INT, partner, 2, comm ); + MPI_Send( sendbuf, 10, MPI_INT, partner, 3, comm ); + MPI_Send( sendbuf, 1, MPI_INT, partner, 4, comm ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_INT, partner, 0, + MPI_BOTTOM, 0, MPI_INT, partner, 0, + dupcomm, &status ); + } + MPI_Comm_free( &dupcomm ); + + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (world_rank == 0) { + if (toterr == 0) + printf( " No Errors\n" ); + else + printf( "Found %d errors in Truncated Message Multiple Completion test\n", toterr ); + } + MPI_Finalize( ); + return toterr; +} + +int SetupRecvBuf( recvbuf ) +int *recvbuf; +{ + int i; + for (i=0; i<10; i++) + recvbuf[i] = i+1; + return 0; +} + +int CheckRecvOk( status, recvbuf, tag, msg ) +int *recvbuf, tag; +MPI_Status *status; +const char *msg; +{ + int err = 0, count; + + if (status->MPI_TAG != tag) { + err++; + fprintf( stderr, "Wrong tag; was %d should be %d (%s)\n", + status->MPI_TAG, tag, msg ); + } + MPI_Get_count( status, MPI_INT, &count ); + if (count != 1) { + err++; + fprintf( stderr, "Wrong count; was %d expected 1 (%s)\n", count, msg ); + } + return err; +} + +int CheckRecvErr( merr, status, recvbuf, msg ) +int merr, *recvbuf; +MPI_Status *status; +const char *msg; +{ + int class; + int err = 0, rlen; + char buf[MPI_MAX_ERROR_STRING]; + +/* Get the MPI Error class from merr */ + MPI_Error_class( merr, &class ); + switch (class) { + case MPI_ERR_TRUNCATE: + /* Check that data buf is ok */ + if (recvbuf[1] != 2) { + err++; + fprintf( stderr, + "Receive buffer overwritten! 
Found %d in 2nd pos.\n", + recvbuf[1] ); + } + break; + + case MPI_ERR_IN_STATUS: + /* Check for correct message */ + MPI_Error_class(status->MPI_ERROR, &class); + if (class != MPI_ERR_TRUNCATE) { + MPI_Error_string( status->MPI_ERROR, buf, &rlen ); + fprintf( stderr, + "Unexpected error message for err in status for %s: %s\n", + msg, buf ); + } + break; + default: + /* Wrong error; get message and print */ + MPI_Error_string( merr, buf, &rlen ); + fprintf( stderr, + "Got unexpected error message from %s: %s\n", msg, buf ); + err++; + } + return err; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/typebase.c b/teshsuite/smpi/mpich-test/pt2pt/typebase.c new file mode 100644 index 0000000000..bdd784a2a5 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typebase.c @@ -0,0 +1,126 @@ +/* + */ +#include "mpi.h" +#include +#include "test.h" + +/* + * This program checks that the type inquiry routines work with the + * basic types + */ + +#define MAX_TYPES 14 +static int ntypes; +static MPI_Datatype BasicTypes[MAX_TYPES]; +static char *(BasicTypesName[MAX_TYPES]); +static int BasicSizes[MAX_TYPES]; + +/* Prototypes for picky compilers */ +void SetupBasicTypes (void); + +void +SetupBasicTypes() +{ + BasicTypes[0] = MPI_CHAR; + BasicTypes[1] = MPI_SHORT; + BasicTypes[2] = MPI_INT; + BasicTypes[3] = MPI_LONG; + BasicTypes[4] = MPI_UNSIGNED_CHAR; + BasicTypes[5] = MPI_UNSIGNED_SHORT; + BasicTypes[6] = MPI_UNSIGNED; + BasicTypes[7] = MPI_UNSIGNED_LONG; + BasicTypes[8] = MPI_FLOAT; + BasicTypes[9] = MPI_DOUBLE; + + BasicTypesName[0] = (char*)"MPI_CHAR"; + BasicTypesName[1] = (char*)"MPI_SHORT"; + BasicTypesName[2] = (char*)"MPI_INT"; + BasicTypesName[3] = (char*)"MPI_LONG"; + BasicTypesName[4] = (char*)"MPI_UNSIGNED_CHAR"; + BasicTypesName[5] = (char*)"MPI_UNSIGNED_SHORT"; + BasicTypesName[6] = (char*)"MPI_UNSIGNED"; + BasicTypesName[7] = (char*)"MPI_UNSIGNED_LONG"; + BasicTypesName[8] = (char*)"MPI_FLOAT"; + BasicTypesName[9] = (char*)"MPI_DOUBLE"; + + BasicSizes[0] = 
sizeof(char); + BasicSizes[1] = sizeof(short); + BasicSizes[2] = sizeof(int); + BasicSizes[3] = sizeof(long); + BasicSizes[4] = sizeof(unsigned char); + BasicSizes[5] = sizeof(unsigned short); + BasicSizes[6] = sizeof(unsigned); + BasicSizes[7] = sizeof(unsigned long); + BasicSizes[8] = sizeof(float); + BasicSizes[9] = sizeof(double); + + ntypes = 10; +#ifdef HAVE_LONG_DOUBLE + BasicTypes[ntypes] = MPI_LONG_DOUBLE; + BasicSizes[ntypes] = sizeof(long double); + BasicTypesName[ntypes] = (char*)"MPI_LONG_DOUBLE"; + ntypes++; +#endif + BasicTypes[ntypes] = MPI_BYTE; + BasicSizes[ntypes] = sizeof(unsigned char); + BasicTypesName[ntypes] = (char*)"MPI_BYTE"; + ntypes++; + +#ifdef HAVE_LONG_LONG_INT + BasicTypes[ntypes] = MPI_LONG_LONG_INT; + BasicSizes[ntypes] = sizeof(long long); + BasicTypesName[ntypes] = "MPI_LONG_LONG_INT"; + ntypes++; +#endif + } + +int main( int argc, char **argv ) +{ +int i, errs; +int size; +MPI_Aint extent, lb, ub; + +MPI_Init( &argc, &argv ); + +/* This should be run by a single process */ + +SetupBasicTypes(); + +errs = 0; +for (i=0; i +#include "mpi.h" +#include "test.h" +#include + +int main( int argc, char **argv ) +{ + int i, n, n_goal = 2048, rc, len; + MPI_Datatype *type_array; + char msg[MPI_MAX_ERROR_STRING]; + + MPI_Init( &argc, &argv ); + MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ); + + n = n_goal; + + type_array = (MPI_Datatype *)malloc( n * sizeof(MPI_Datatype) ); + + for (i=0; i + +int +main( int argc, char **argv) +{ + int blockcnt[2], rank; + MPI_Aint offsets[2], lb, ub, extent; + MPI_Datatype tmp_type, newtype; + + MPI_Init(&argc, &argv); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + if (rank == 0) { + blockcnt[0] = 1; + offsets[0] = 3; + MPI_Type_hindexed(1, blockcnt, offsets, MPI_BYTE, &tmp_type); + blockcnt[0] = 1; + offsets[0] = 1; + MPI_Type_hindexed(1, blockcnt, offsets, tmp_type, &newtype); + MPI_Type_commit(&newtype); + + MPI_Type_lb(newtype, &lb); + MPI_Type_extent(newtype, &extent); + MPI_Type_ub(newtype, 
&ub); + + /* Check that the results are correct */ +#ifdef DEBUG + printf("lb=%ld, ub=%ld, extent=%ld\n", lb, ub, extent); + printf("Should be lb=4, ub=5, extent=1\n"); +#endif + if (lb != 4 || ub != 5 || extent != 1) { + printf ("lb = %ld (should be 4), ub = %ld (should be 5) extent = %ld should be 1\n", lb, ub, extent) ; + } + else { + printf( " No Errors\n" ); + } + + MPI_Type_free(&tmp_type); + MPI_Type_free(&newtype); + + } + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/typetest.c b/teshsuite/smpi/mpich-test/pt2pt/typetest.c new file mode 100644 index 0000000000..593728cad8 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typetest.c @@ -0,0 +1,310 @@ +/* + * Patrick Bridges * bridges@mcs.anl.gov * patrick@CS.MsState.Edu + * + * Modified by William Gropp + */ + +#include +#include +#include "test.h" +#include "mpi.h" +#include +/* CM5 users need to comment out the next include (memory.h) because + of an error in the CM5 include file (memory.h is inconsistent with + string.h) */ +/* #include */ + +struct struct1 { + double d1; + char c1[8]; +}; + + +struct struct2 { + double d1; + double d2; + char c1[8]; + char c2[8]; + double d3; + char c3[8]; + double d4; + char c4[8]; +}; + +struct struct3 { + double d1[2]; + char c1[2][8]; + struct struct1 s1[2]; +}; + +/* Structure with probable gap */ +struct struct4 { + int a1; + char c1, c2; + int a2; +}; + +int main( int argc, char **argv ) +{ + int rank, size, ret; + MPI_Status Status; + MPI_Datatype struct1_t, struct2_t, struct3_t, struct4_t, struct4a_t, + astruct1_t, carray_t; + static int block1[2] = {1, 1}; + static int block2[6] = {2, 2, 1, 1, 1, 1}; + static int block3[3] = {2, 2, 1}; + static int block4[4] = {1, 1, 1, 1}; + static int block4a[3] = {1, 2, 1}; + MPI_Aint disp1[2], disp2[6], disp3[6], disp4[4], disp4a[3]; + MPI_Datatype type1[2], type2[6], type3[3]; + MPI_Datatype type4[4] = {MPI_INT, MPI_CHAR, MPI_CHAR, MPI_INT}; + MPI_Datatype type4a[3] = {MPI_INT, MPI_CHAR, 
MPI_INT}; + struct struct1 dummy1; + struct struct2 dummy2; + struct struct3 dummy3; + struct struct4 dummy4; + int i, master_rank = 0, slave_rank = 1; + + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + for (i=1; i +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +/* + * Trying to manipulate the extent of a datatype with succesive + * calls to MPI_Type_struct. Tests that a MPI_UB buried within + * a structure is found. From kalns@canidae.cps.msu.edu (modified to + * fit test structure). + */ +int main( int argc, char **argv ) +{ + int errs = 0, toterrs, rank; + MPI_Aint extent; + int blens[2]; + MPI_Aint displ[2]; + MPI_Datatype types[2]; + MPI_Datatype type1,type2,type3; + MPI_Aint extent1, extent2, extent3; + + MPI_Init( &argc, &argv ); + + /* 2 blocks of 1 int each, stride of 4 ; expect extent to be 20 + */ + MPI_Type_vector( 2, 1, 4, MPI_INT, &type1 ); + MPI_Type_commit( &type1 ); + MPI_Type_extent( type1, &extent ); + extent1 = 5 * sizeof(int); + if (extent != extent1) { + errs++; + printf("extent(type1)=%ld\n",(long)extent); + } + + blens[0]=1; + blens[1]=1; + displ[0]=0; + displ[1]=sizeof(int)*4; + types[0]=type1; + types[1]=MPI_UB; + extent2 = displ[1]; + + /* using MPI_UB and Type_struct, monkey with the extent, making it 16 + */ + MPI_Type_struct( 2, blens, displ, types, &type2 ); + MPI_Type_commit( &type2 ); + MPI_Type_extent( type2, &extent ); + if (extent != extent2) { + errs++; + printf("extent(type2)=%ld\n",(long)extent); + } + + /* monkey with the extent again, making it 4 + * ===> MPICH gives 4 + * ===> MPIF gives 16, the old extent + */ + displ[1]=sizeof(int); + types[0]=type2; + types[1]=MPI_UB; + extent3 = extent2; + + MPI_Type_struct( 2, blens, displ, types, &type3 ); + MPI_Type_commit( &type3 ); + + MPI_Type_extent( type3, &extent ); + if (extent != extent3 && extent != 4) { + errs++; + printf("extent(type3)=%ld\n",(long)extent); + 
} + + MPI_Type_free( &type1 ); + MPI_Type_free( &type2 ); + MPI_Type_free( &type3 ); + + MPI_Allreduce( &errs, &toterrs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + if (rank == 0) { + if (toterrs == 0) printf( "No errors\n" ); + else printf( "Found %d errors\n", toterrs ); + } + + + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/typeub.std b/teshsuite/smpi/mpich-test/pt2pt/typeub.std new file mode 100644 index 0000000000..618153b712 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typeub.std @@ -0,0 +1,3 @@ +**** Checking the type routines: MPI_UB **** +No errors +**** Checking the type routines: MPI_UB **** diff --git a/teshsuite/smpi/mpich-test/pt2pt/typeub2.c b/teshsuite/smpi/mpich-test/pt2pt/typeub2.c new file mode 100644 index 0000000000..68951f2b94 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typeub2.c @@ -0,0 +1,75 @@ +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char *argv[] ) +{ + MPI_Datatype dt1, dt2, dt3; + MPI_Aint ex1, ex2, ex3; + int sz1, sz2, sz3; + MPI_Aint lb,ub; + MPI_Aint disp[3]; + MPI_Datatype types[3]; + int blocklen[3]; + + MPI_Init(&argc, &argv); + + blocklen[0] = 1; blocklen[1] = 1; blocklen[2] = 1; + disp[0] = -3; disp[1] = 0; disp[2] = 6; + types[0] = MPI_LB; types[1] = MPI_INT; types[2] = MPI_UB; + + MPI_Type_struct(3,blocklen,disp, types,&dt1); + MPI_Type_commit(&dt1); + + MPI_Type_lb(dt1, &lb); MPI_Type_ub(dt1, &ub); + MPI_Type_extent(dt1,&ex1); MPI_Type_size(dt1,&sz1); + + /* Values should be lb = -3, ub = 6 extent 9; + size depends on implementation */ + if (lb != -3 || ub != 6 || ex1 != 9) { + printf("Example 3.26 type1 lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex1, sz1); + } + else + printf("Example 3.26 type1 correct\n" ); + + MPI_Type_contiguous(2,dt1,&dt2); + MPI_Type_lb(dt2, &lb); MPI_Type_ub(dt2, &ub); + MPI_Type_extent(dt2,&ex2); MPI_Type_size(dt2,&sz2); + 
/* Values should be lb = -3, ub = 15, extent = 18, size + depends on implementation */ + if (lb != -3 || ub != 15 || ex2 != 18) { + printf("Example 3.26 type2 lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex2, sz2); + } + else + printf( "Example 3.26 type2 correct\n" ); + + types[0]=dt1; types[1]=dt1; + blocklen[0]=1; blocklen[1]=1; + disp[0]=0; disp[1]=ex1; + + MPI_Type_struct(2, blocklen, disp, types, &dt3); + MPI_Type_commit(&dt3); + + MPI_Type_lb(dt3, &lb); MPI_Type_ub(dt3, &ub); + MPI_Type_extent(dt3,&ex3); MPI_Type_size(dt3,&sz3); + /* Another way to express type2 */ + if (lb != -3 || ub != 15 || ex3 != 18) { + printf("type3 lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex3, sz2); + } + else + printf( "type3 correct\n" ); + + MPI_Type_free( &dt1 ); + MPI_Type_free( &dt2 ); + MPI_Type_free( &dt3 ); + + MPI_Finalize(); + return 0; +} + diff --git a/teshsuite/smpi/mpich-test/pt2pt/typeub2.std b/teshsuite/smpi/mpich-test/pt2pt/typeub2.std new file mode 100644 index 0000000000..63ebaed6cc --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typeub2.std @@ -0,0 +1,5 @@ +**** Checking the type routines: MPI_UB(2) **** +Example 3.26 type1 correct +Example 3.26 type2 correct +type3 correct +**** Checking the type routines: MPI_UB(2) **** diff --git a/teshsuite/smpi/mpich-test/pt2pt/typeub3.c b/teshsuite/smpi/mpich-test/pt2pt/typeub3.c new file mode 100644 index 0000000000..8cc3ea5907 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typeub3.c @@ -0,0 +1,104 @@ +/* This test checks that all of the MPI Type routines correctly compute + the UB and LB of a datatype from the greatest/least instance */ + +#include "mpi.h" +#include + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +int main( int argc, char *argv[] ) +{ + MPI_Datatype dt1, dt2, dt3, dt4, dt5; + MPI_Aint ex; + int sz; + MPI_Aint lb,ub; + MPI_Aint disp[3]; + MPI_Datatype types[3]; + int blocklen[3]; + int idisp[3]; + + MPI_Init(&argc, &argv); + + /* Create a 
datatype with explicit LB and UB */ + blocklen[0] = 1; blocklen[1] = 1; blocklen[2] = 1; + disp[0] = -3; disp[1] = 0; disp[2] = 6; + types[0] = MPI_LB; types[1] = MPI_INT; types[2] = MPI_UB; + + /* Generate samples for contiguous, hindexed, hvector, indexed, + and vector (struct and contiguous tested in typeub2) */ + + MPI_Type_struct(3,blocklen,disp, types,&dt1); + MPI_Type_commit(&dt1); + + /* This type is the same as in typeub2, and is tested there */ + + types[0]=dt1; types[1]=dt1; + blocklen[0]=1; blocklen[1]=1; + disp[0]=-4; disp[1]=7; + idisp[0]=-4; idisp[1]=7; + + MPI_Type_hindexed( 2, blocklen, disp, dt1, &dt2 ); + MPI_Type_commit( &dt2 ); + + MPI_Type_lb( dt2, &lb ); MPI_Type_ub( dt2, &ub ); + MPI_Type_extent( dt2, &ex ); MPI_Type_size( dt2, &sz ); + + if (lb != -7 || ub != 13 || ex != 20) { + printf("hindexed lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex, sz); + } + else + printf( "hindexed ok\n" ); + + MPI_Type_indexed( 2, blocklen, idisp, dt1, &dt3 ); + MPI_Type_commit( &dt3 ); + + MPI_Type_lb( dt3, &lb ); MPI_Type_ub( dt3, &ub ); + MPI_Type_extent( dt3, &ex ); MPI_Type_size( dt3, &sz ); + + if (lb != -39 || ub != 69 || ex != 108) { + printf("indexed lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex, sz); + } + else + printf( "indexed ok\n" ); + + MPI_Type_hvector( 2, 1, 14, dt1, &dt4 ); + MPI_Type_commit( &dt4 ); + + MPI_Type_lb( dt4, &lb ); MPI_Type_ub( dt4, &ub ); + MPI_Type_extent( dt4, &ex ); MPI_Type_size( dt4, &sz ); + + if (lb != -3 || ub != 20 || ex != 23) { + printf("hvector lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex, sz); + } + else + printf( "hvector ok\n" ); + + MPI_Type_vector( 2, 1, 14, dt1, &dt5 ); + MPI_Type_commit( &dt5 ); + + MPI_Type_lb( dt5, &lb ); MPI_Type_ub( dt5, &ub ); + MPI_Type_extent( dt5, &ex ); MPI_Type_size( dt5, &sz ); + + + if (lb != -3 || ub != 132 || ex != 135) { + printf("vector lb %d ub %d extent %d size %d\n", + (int)lb, (int)ub, (int)ex, sz); + } + else + printf( 
"vector ok\n" ); + + MPI_Type_free( &dt1 ); + MPI_Type_free( &dt2 ); + MPI_Type_free( &dt3 ); + MPI_Type_free( &dt4 ); + MPI_Type_free( &dt5 ); + + MPI_Finalize(); + return 0; +} + diff --git a/teshsuite/smpi/mpich-test/pt2pt/typeub3.std b/teshsuite/smpi/mpich-test/pt2pt/typeub3.std new file mode 100644 index 0000000000..363339f476 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/typeub3.std @@ -0,0 +1,6 @@ +**** Checking the type routines: MPI_UB(3) **** +hindexed ok +indexed ok +hvector ok +vector ok +**** Checking the type routines: MPI_UB(3) **** diff --git a/teshsuite/smpi/mpich-test/pt2pt/waitall.c b/teshsuite/smpi/mpich-test/pt2pt/waitall.c new file mode 100644 index 0000000000..3fd97ed022 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/waitall.c @@ -0,0 +1,120 @@ +/* + * This code tests waitall; in particular, the that ordering requirement + * on nonblocking communication is observed. + */ + +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +#ifdef HAVE_UNISTD_H +/* For sleep */ +#include +#endif + +#define MAX_REQ 32 + +#ifndef HAVE_SLEEP +void sleep( int secs ) +{ +#ifdef VX_WORKS + /* Also needs include ? 
*/ + struct timespec rqtp = { 10, 0 }; + nanosleep(&rqtp, NULL); +#else + double t; + t = MPI_Wtime(); + while (MPI_Wtime() - t < (double)secs) ; +#endif +} +#endif + +int main( int argc, char **argv ) +{ + int rank, size; + int i, j, count, err = 0, toterr; + MPI_Request r[MAX_REQ]; + MPI_Status s[MAX_REQ]; + int buf[MAX_REQ][MAX_REQ]; + + MPI_Init( &argc, &argv ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + if (size < 2) { + fprintf( stderr, "This test requires at least 2 processes\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } + /* First, cause the wait all to happen AFTER the Sends */ + if (rank == 0) { + for (i=0; i +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Pause( double ); + +void Pause( double sec ) +{ + /*double t1 =*/ MPI_Wtime(); + smpi_sleep(sec); +//while (MPI_Wtime() - t1 < sec) ; +} + +int main( int argc, char **argv ) +{ + int size, rank, flag, i; + int *buf1, *buf2, cnt; + double t0; + MPI_Status statuses[2]; + MPI_Request req[2]; + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + if (size < 2) { + printf( "This test requires at least 2 processors\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } + + /* Large enough that almost certainly a rendezvous algorithm will be used + by Issend. buflimit.c will give you a more reliable value */ + cnt = 35000; + + /* Test: + process 0 process 1 + Irecv1 + Irecv2 + Sendrecv Sendrecv + pause(2 sec) pause(2 sec) + Issend2 Waitall + test(2) for 5 secs + Ssend1 + Wait(2) if necessary + + If the test for Issend2 never succeeds, then the waitall appears to be + waiting for req1 first. By using Issend, we can keep the program from + hanging. 
+ */ + buf1 = (int *)malloc( cnt * sizeof(int) ); + buf2 = (int *)malloc( cnt * sizeof(int) ); + if (!buf1 || !buf2) { + printf( "Could not allocate buffers of size %d\n", cnt ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } + + for (i=0; i MPI_Wtime() && !flag) + MPI_Test( &req[0], &flag, &statuses[0] ); + MPI_Send( buf1, cnt, MPI_INT, size-1, 1, MPI_COMM_WORLD ); + if (!flag) { + printf( + "*ERROR: MPI_Waitall appears to be waiting for requests in the order\n\ +they appear in the request list\n" ); + MPI_Wait( &req[0], &statuses[0] ); + } + else { + printf( "No errors\n" ) ; + } + } + else if (rank == size - 1) { + MPI_Irecv( buf1, cnt, MPI_INT, 0, 1, MPI_COMM_WORLD, &req[0] ); + MPI_Irecv( buf2, cnt, MPI_INT, 0, 2, MPI_COMM_WORLD, &req[1] ); + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 0, 3, + MPI_BOTTOM, 0, MPI_BYTE, 0, 3, MPI_COMM_WORLD, &statuses[0] ); + Pause( 2.0 ); + MPI_Waitall( 2, req, statuses ); + } + + free( buf1 ); + free( buf2 ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/waitall2.std b/teshsuite/smpi/mpich-test/pt2pt/waitall2.std new file mode 100644 index 0000000000..a04ff38118 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/waitall2.std @@ -0,0 +1,3 @@ +*** Testing MPI_Waitall (order) *** +No errors +*** Testing MPI_Waitall (order) *** diff --git a/teshsuite/smpi/mpich-test/pt2pt/waitall3.c b/teshsuite/smpi/mpich-test/pt2pt/waitall3.c new file mode 100644 index 0000000000..0b76a88b95 --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/waitall3.c @@ -0,0 +1,124 @@ +/* + Test of waitall. This makes sure that the requests in a wait can occur + in any order. + + Run with 4 processes. This checks for code that listens to a specified + process. This is similar to the test in waitall2, except the incoming + messages come from processes 1 and 2. (no message comes from process 3). 
+ */ + +#include +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Pause( double ); + +void Pause( double sec ) +{ + /*double t1 =*/ MPI_Wtime(); +smpi_sleep(sec); +// while (MPI_Wtime() - t1 < sec) ; +} + +int main( int argc, char **argv ) +{ + int size, rank, flag, i; + int *buf1, *buf2, cnt; + double t0; + MPI_Status statuses[2]; + MPI_Request req[2]; + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + if (size < 3) { + printf( "This test requires at least 3 processors\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } + + /* Large enough that almost certainly a rendezvous algorithm will be used + by Issend. buflimit.c will give you a more reliable value */ + cnt = 35000; + + /* Test: + process 0 process 1 process 2 + Irecv1 + Irecv2 + Barrier Barrier Barrier + pause(2 sec) pause(2 sec) + issend2 Waitall + test(2) for 5 secs + sendrecv (process 2) sendrecv(process0) + ssend1 + wait(2) if necessary + + If the test for Issend2 never succeeds, then the waitall appears to be + waiting for req1 first. By using Issend, we can keep the program from + hanging. 
+ */ + buf1 = (int *)malloc( cnt * sizeof(int) ); + buf2 = (int *)malloc( cnt * sizeof(int) ); + if (!buf1 || !buf2) { + printf( "Could not allocate buffers of size %d\n", cnt ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } + + for (i=0; i MPI_Wtime() && !flag) + MPI_Test( &req[0], &flag, &statuses[0] ); +/* Tell process 2 to go ahead */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 2, 3, + MPI_BOTTOM, 0, MPI_BYTE, 2, 3, MPI_COMM_WORLD, &statuses[0] ); + if (!flag) { + printf( + "*ERROR: MPI_Waitall appears to be waiting for requests in the order\n\ +they appear in the request list\n" ); + /* We can wait now since process 2 should have allowed the wait + to proceed */ + MPI_Wait( &req[0], &statuses[0] ); + } + else { + printf( " No Errors\n" ) ; + } + } + else if (rank == 1) { + MPI_Irecv( buf1, cnt, MPI_INT, 2, 1, MPI_COMM_WORLD, &req[0] ); + MPI_Irecv( buf2, cnt, MPI_INT, 0, 2, MPI_COMM_WORLD, &req[1] ); + MPI_Barrier( MPI_COMM_WORLD ); + Pause( 2.0 ); + MPI_Waitall( 2, req, statuses ); + } + else if (rank == 2) { + MPI_Barrier( MPI_COMM_WORLD ); + /* Wait for process 0 to tell us to go ahead */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 0, 3, + MPI_BOTTOM, 0, MPI_BYTE, 0, 3, MPI_COMM_WORLD, &statuses[0] ); + MPI_Send( buf1, cnt, MPI_INT, 1, 1, MPI_COMM_WORLD ); + } + else { + MPI_Barrier( MPI_COMM_WORLD ); + } + + free( buf1 ); + free( buf2 ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/waitall4.c b/teshsuite/smpi/mpich-test/pt2pt/waitall4.c new file mode 100644 index 0000000000..bb4dc1d05a --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/waitall4.c @@ -0,0 +1,127 @@ +/* + Test of waitall. This makes sure that the requests in a wait can occur + in any order. + + Run with 4 processes. This checks for code that listens to a specified + process. This is similar to the test in waitall3, except that the + wait is on sends instead of receives. Messages are sent by process 2 to + processes 0 and 1. Process 3 is uninvolved. 
+ */ + +#include +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Pause( double ); + +void Pause( double sec ) +{ + /*double t1 =*/ MPI_Wtime(); + smpi_sleep(sec); +// while (MPI_Wtime() - t1 < sec) ; +} + +int main( int argc, char **argv ) +{ + int size, rank, flag, i; + int *buf1, *buf2, cnt; + double t0; + MPI_Status statuses[2]; + MPI_Request req[2]; + + MPI_Init( &argc, &argv ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + + if (size < 3) { + printf( "This test requires at least 3 processors\n" ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } + + /* Large enough that almost certainly a rendezvous algorithm will be used + by Issend. buflimit.c will give you a more reliable value */ + cnt = 35000; + + /* Test: + process 0 process 1 process 2 + Issend1 + Issend0 + Barrier Barrier Barrier + pause(2 sec) pause(2 sec) pause(1 sec) + irecv2 waitall + test(2) for 5 secs + sendrecv (process 1) sendrecv(process0) + recv2 + wait(2) if necessary + + If the test for Irecv2 never succeeds, then the waitall appears to be + waiting for req1 first. By using Issend, we can keep the program from + hanging. 
+ */ + buf1 = (int *)malloc( cnt * sizeof(int) ); + buf2 = (int *)malloc( cnt * sizeof(int) ); + if (!buf1 || !buf2) { + printf( "Could not allocate buffers of size %d\n", cnt ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + return 1; + } + + for (i=0; i MPI_Wtime() && !flag) + MPI_Test( &req[0], &flag, &statuses[0] ); + /* printf( "Test succeeded at %f with flag %d\n", MPI_Wtime()-t0, flag ); */ + /* Tell process 2 to go ahead */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 1, 3, + MPI_BOTTOM, 0, MPI_BYTE, 1, 3, MPI_COMM_WORLD, &statuses[0] ); + if (!flag) { + printf( + "*ERROR: MPI_Waitall appears to be waiting for requests in the order\n\ +they appear in the request list\n" ); + /* We can wait now since process 2 should have allowed the wait + to proceed */ + MPI_Wait( &req[0], &statuses[0] ); + } + else { + printf( " No Errors\n" ) ; + } + } + else if (rank == 2) { + MPI_Isend( buf1, cnt, MPI_INT, 1, 1, MPI_COMM_WORLD, &req[0] ); + MPI_Isend( buf2, cnt, MPI_INT, 0, 2, MPI_COMM_WORLD, &req[1] ); + MPI_Barrier( MPI_COMM_WORLD ); + Pause( 1.0 ); + MPI_Waitall( 2, req, statuses ); + } + else if (rank == 1) { + MPI_Status status; + MPI_Barrier( MPI_COMM_WORLD ); + /* Wait for process 0 to tell us to go ahead */ + MPI_Sendrecv( MPI_BOTTOM, 0, MPI_BYTE, 0, 3, + MPI_BOTTOM, 0, MPI_BYTE, 0, 3, MPI_COMM_WORLD, &statuses[0] ); + MPI_Recv( buf1, cnt, MPI_INT, 2, 1, MPI_COMM_WORLD, &status ); + } + else { + MPI_Barrier( MPI_COMM_WORLD ); + } + + free( buf1 ); + free( buf2 ); + MPI_Finalize(); + return 0; +} diff --git a/teshsuite/smpi/mpich-test/pt2pt/waitany.c b/teshsuite/smpi/mpich-test/pt2pt/waitany.c new file mode 100644 index 0000000000..2e71fd08de --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/waitany.c @@ -0,0 +1,104 @@ +/* + * This code tests waitany; in one version of MPICH, it uncovered some + * problems with the ADI Test calls. 
+ */ +/* #define i_ntotin 256 */ /* ok */ +/* #define i_ntotin 257 */ /* fails */ +#define i_ntotin 256 /* fails */ + +#include +#include "mpi.h" + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +#define DAR 32 /* ``Data: ARray'' */ + + +int main( int argc, char **argv ) + { + int locId ; + int data [i_ntotin] ; + + MPI_Init(&argc, &argv) ; + MPI_Comm_rank(MPI_COMM_WORLD, &locId) ; + + if(locId == 0) { + + /* The server... */ + + MPI_Status status[2] ; + MPI_Request events [2] ; + + int eventId ; + + int dstId = 1 ; + + int i ; + + for(i = 0 ; i < i_ntotin ; i++) + data [i] = i + 1 ; + + events [0] = MPI_REQUEST_NULL ; + events [1] = MPI_REQUEST_NULL ; + + MPI_Isend(data, i_ntotin, MPI_INT, dstId, DAR, + MPI_COMM_WORLD, events + 1) ; + /* enable send of data */ + + /*_begin_trace_code */ + /* printf("locId = %d: MPI_Isend(%x, %d, %x, %d, %d, %x, %x)\n", + locId, data, i_ntotin, MPI_INT, dstId, DAR, MPI_COMM_WORLD, events [1]); + */ + /*_end_trace_code */ + + /*_begin_trace_code */ + /* printf("locId = %d: MPI_Waitany(%d, [%x, %x], %x %x)...", + locId, 2, events [0], events [1], &eventId, &status) ; */ + /*_end_trace_code */ + + MPI_Waitany(2, events, &eventId, status) ; + + /*_begin_trace_code */ + printf("done. eventId = %x\n", eventId) ; + /*_end_trace_code */ + } + + if(locId == 1) { + + /* The Client... 
*/ + + MPI_Status status ; + + int srcId = MPI_ANY_SOURCE ; + + /*_begin_trace_code */ + /* + printf("locId = %d: MPI_Recv(%x, %d, %x, %d, %d, %x, %x)...", + locId, data, i_ntotin, MPI_INT, srcId, DAR, MPI_COMM_WORLD, &status) ; + */ + /*_end_trace_code */ + + MPI_Recv(data, i_ntotin, MPI_INT, srcId, DAR, + MPI_COMM_WORLD, &status) ; + + /*_begin_trace_code */ + /*printf("done.\n") ;*/ + /*_end_trace_code */ + + /* + printf("locId = %d: data [0] = %d, data [%d] = %d\n", + locId, data [0], i_ntotin - 1, data [i_ntotin - 1]) ; + */ + } + + MPI_Barrier( MPI_COMM_WORLD ); + if (locId == 0) + printf( "Test complete\n" ); + MPI_Finalize() ; + return 0; +} + + + diff --git a/teshsuite/smpi/mpich-test/pt2pt/waitany.std b/teshsuite/smpi/mpich-test/pt2pt/waitany.std new file mode 100644 index 0000000000..db3ac7200c --- /dev/null +++ b/teshsuite/smpi/mpich-test/pt2pt/waitany.std @@ -0,0 +1,4 @@ +*** Testing MPI_Waitany *** +done. eventId = 1 +Test complete +*** Testing MPI_Waitany *** diff --git a/teshsuite/smpi/mpich-test/runbase b/teshsuite/smpi/mpich-test/runbase new file mode 100644 index 0000000000..543d534b8e --- /dev/null +++ b/teshsuite/smpi/mpich-test/runbase @@ -0,0 +1,252 @@ +#! /bin/sh +# +# This file contains support shell routines and steps common to all +# runtests scripts. +# +# Find MPIRUN +# +# Some people don't include "." in their path (! in case an ls trojan horse, +# I guess - if someone does that to you, you have bigger problems). This +# code tests to see if you have a path to mpirun; if not, it tries ./mpirun. +# +# One particular problem is having multiple mpiruns in the path. An +# absolute path for mpirun will fix many problems +FindMPIRUN () { + if [ -z "$MPICH_USE_LIB_MPIRUN" -a ! 
-x "$mpirun" ] ; then + IFS="${IFS= }"; saveifs="$IFS"; IFS="${IFS}:" + for dir in $PATH ; do + if [ -x $dir/mpirun ] ; then + if [ -n "${MPICH_VERSION}" ] ; then + # Test that we've found the correct mpirun + if strings $dir/mpirun | grep "MPIRUN for MPICH" > /dev/null ; then + : + else + # echo "$dir/mpirun isn't for MPICH" + continue + fi + fi + mpirun="mpirun" + break + fi + done + IFS="$saveifs" + fi + if [ -z "$mpirun" -a -x "./mpirun" ] ; then + mpirun=./mpirun + fi + # + if [ -z "$mpirun" ] ; then + echo "No mpirun in path. Testing can not proceed." + exit 1 + fi +} + +# MakeExe program-name +MakeExe() { + if [ -s $STOPFILE ] ; then + echo "Found stopfile $STOPFILE; exiting" + exit 0 + fi + if [ ! -x $1 ] ; then + $MAKE $1 + if [ ! -x $1 ] ; then + if [ "$MPITEST_CONTINUE" = "always" ] ; then + echo "Could not build executable $1; skipping this test" + else + echo "Could not build executable $1; aborting tests" + exit 1 + fi + fi + fi +} + +# CleanExe program-name +CleanExe() { + if [ $makeeach = 1 ] ; then + /bin/rm -f $1 $1.o + fi +} + +# Output marker +OutTime() { + if [ $quiet = 0 ] ; then + if [ -z "$hostname" ] ; then + hostname=`hostname` + fi + d=`date` + echo "$hostname : $d" + fi +} + +# Do an "on the fly" check for problems. +# Checkout testname difffile +# difffile may be empty, in which case stdout is used. +# If $writesummaryfile is yes, output the results to $summaryfile. +# Use XML-style output for the summary file: +# +# +# directory +# +# text from different +# +# +# +# We'd also like to support +# $np +# but we don't have that information when this routine is called +CheckOutput() { + bfile=$1 + difffile=$2 + fileok="no" + if [ ! -s ${bfile}.out ] ; then + echo "No output file ${bfile}.out!" 
+ else + cmpfile="" + # Handle Fortran systems that generate stop statements + rm -f ${bfile}.tout + grep -v 'FORTRAN STOP' ${bfile}.out > ${bfile}.tout + for stdfile in $srcdir/${bfile}.std $srcdir/${bfile}.std2 \ + $srcdir/std/${bfile}.std ${bfile}.stdo ; do + if [ -z "$cmpfile" -a -s "$stdfile" ] ; then + cmpfile=$stdfile + fi + if test -s $stdfile && diff -b ${bfile}.tout $stdfile > /dev/null ; then + fileok=yes + break; + fi + done + if [ $fileok = "no" ] ; then + if [ -n "$difffile" ] ; then + if [ -n "$cmpfile" ] ; then + echo "Differences in ${bfile}.out" >> ${difffile} + diff -b ${bfile}.tout $cmpfile >> ${difffile} + else + echo "Cannot find a file to compare against for test ${bfile}.out" + fi + else + if [ -n "$cmpfile" ] ; then + echo "Differences in ${bfile}.out" + diff -b ${bfile}.tout $cmpfile + else + echo "Cannot find a file to compare against for test ${bfile}.out" + fi + fi + nodiff=0 + fi + if [ "$writesummaryfile" = "yes" ] ; then + if [ $fileok = "yes" ] ; then + passed=pass + else + passed=fail + fi + mydir=`pwd` + cat >>$summaryfile < +$bfile +$mydir +$passed +EOF + if [ -n "$np" ] ; then + echo "$np" >> $summaryfile + fi + if [ $fileok = "no" ] ; then + echo "" >> $summaryfile + if [ ! -s ${bfile}.out ] ; then + echo "No output file" >>$summaryfile + else + if [ -z "$cmpfile" ] ; then + cmpfile="/dev/null" + fi + diff -b ${bfile}.tout $cmpfile | \ + sed -e 's/&/-AMP-amp;/g' -e 's//-AMP-gt;/g' | \ + sed -e 's/-AMP-/\&/g' >> $summaryfile + fi + echo "" >> $summaryfile + fi + if [ -s "$bfile.tbk" ] ; then + echo "" >> $summaryfile + echo "$bfile.tbk" >>$summaryfile + echo "" >> $summaryfile + fi + echo "" >> $summaryfile + fi + rm -f ${bfile}.tout + fi +} + +# Runtest pgm-name np marker-test args outfiles +# filename.tbk is a traceback file. Use a tool like pardump $1 > $1.tbk +# to get such files +RunTest() { + OutTime + pgm=$1 + np=$2 + testfiles="$testfiles $pgm.out" + /bin/rm -f $pgm.out $pgm.tbk + MakeExe $1 + if [ ! 
-x $pgm ] ; then + # If no executable, put the make data into $1.out + $MAKE $pgm > $pgm.out 2>&1 + else + mname=$3 + if [ -z "$mname" ] ; then mname="*** $1 ***" ; fi + echo "$mname" >> $pgm.out + echo "$mname" + mvarg="" + if [ -n "$5" ] ; then rm -f $5 ; + if [ -n "$MPIRUNMVBACK" ] ; then mvarg="$MPIRUNMVBACK \"$5\"" ; fi + fi + # The eval is necessary to ensure that the mvarg value is properly + # tokenized. The ./$1 ensures that the program will be found, + # even if . is not in the PATH. + + eval $mpirun $args -np $np $mvarg ./$pgm $4 > $pgm.out 2>&1 + if [ -n "$5" ] ; then + for file in $5 ; do + if [ -s $file ] ; then + cat $file >> $pgm.out ; rm -f $file + fi + done + fi + echo "$mname" >> $pgm.out + if [ ! -s $srcdir/$pgm.std -a ! -s $pgm.stdo ] ; then + # We don't need a special file if the output is just "No Errors" + cat >>$pgm.stdo < +#include "test.h" + +#define NUM_DIMS 2 + +int main( int argc, char **argv ) +{ + int rank, size, i; + int errors=0; + int dims[NUM_DIMS]; + int periods[NUM_DIMS]; + int coords[NUM_DIMS]; + int new_coords[NUM_DIMS]; + int reorder = 1; + MPI_Comm comm_temp, comm_cart, new_comm; + int topo_status; + int ndims; + int new_rank; + int remain_dims[NUM_DIMS]; + int newnewrank; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* Clear dims array and get dims for topology */ + for(i=0;i +#include "test.h" + +#define NUM_DIMS 2 + +int verbose = 0; + +int main( int argc, char **argv ) +{ + int rank, size, i; + int dims[NUM_DIMS]; + int periods[NUM_DIMS]; + int new_coords[NUM_DIMS]; + int new_new_coords[NUM_DIMS]; + int reorder = 1; + int left, right, top, bottom; + MPI_Comm comm_cart; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* Clear dims array and get dims for topology */ + for(i=0;i +/* stdlib.h Needed for malloc declaration */ +#include +#include "test.h" + +#define NUM_DIMS 2 + 
+int main( int argc, char **argv ) +{ + int rank, size, i; + int errors=0; + int dims[NUM_DIMS]; + int periods[NUM_DIMS]; + int *rbuf, *sbuf; + int new_rank; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* Clear dims array and get dims for topology */ + for(i=0;i +#include "test.h" + +/* This test makes sure that the ordering if reorder is FALSE is + as specified in 6.2, virtual topologies + + At the same time, it duplicates the tests in cart.c, but + with reorder = 0. +*/ + +#define NUM_DIMS 2 + +int main( int argc, char **argv ) +{ + int rank, size, i; + int errors=0; + int dims[NUM_DIMS]; + int periods[NUM_DIMS]; + int coords[NUM_DIMS]; + int new_coords[NUM_DIMS]; + int reorder = 0; + MPI_Comm comm_temp, comm_cart, new_comm; + int topo_status; + int ndims; + int new_rank; + int remain_dims[NUM_DIMS]; + int newnewrank; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + MPI_Comm_size( MPI_COMM_WORLD, &size ); + + /* Clear dims array and get dims for topology */ + for(i=0;i + +int main( int argc, char *argv[] ) +{ + int dims[10]; + int i, j, ndims, totnodes, err, errcnt = 0; + + MPI_Init( &argc, &argv ); + + MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ); + + /* Try for error checks */ + dims[0] = 2; + dims[1] = 2; + dims[2] = 0; + err = MPI_Dims_create( 26, 3, dims ); + if (err == MPI_SUCCESS) { + printf( "The product of the specified dims does not divide the nnodes and MPI_Dims_create did not return an error\n" ); + for (i=0; i<3; i++) { + printf( "dims[%d] = %d\n", i, dims[i] ); + } + errcnt++; + } + + /* Check for a few reasonable decompositions */ + dims[0] = dims[1] = 0; + err = MPI_Dims_create( 16, 2, dims ); + if (err) { + char msg[MPI_MAX_ERROR_STRING]; + int result_len; + MPI_Error_string( err, msg, &result_len ); + printf( "Unexpected error return from dims_create (16,2) %s\n", msg ); + errcnt++; + } + else { + if (dims[0] * dims[1] != 16) { + 
printf( "Returned dimensions do not match request\n" ); + errcnt++; + } +#ifdef MPICH_NAME + if (dims[0] != 4) { + errcnt++; + printf( "Expected 4 x 4, got %d x %d\n", dims[0],dims[1] ); + } +#endif + } + + dims[0] = dims[1] = 0; + /* 60 = 2 * 2 * 3 * 5 */ + err = MPI_Dims_create( 60, 2, dims ); + if (err) { + char msg[MPI_MAX_ERROR_STRING]; + int result_len; + MPI_Error_string( err, msg, &result_len ); + printf( "Unexpected error return from dims_create (16,2) %s\n", msg ); + errcnt++; + } + else { + if (dims[0] * dims[1] != 60) { + printf( "Returned dimensions do not match request (%d)\n", + dims[0] * dims[1] ); + errcnt++; + } +#ifdef MPICH_NAME + if (dims[0] == 1 || dims[1] == 1) { + errcnt++; + printf( "Expected rectangular decomp, got %d x %d\n", + dims[0],dims[1] ); + } +#endif + } + + /* Test a range of values */ + for (ndims=1; ndims<=4; ndims++) { + for (i=2; i<64; i++) { + for (j=0; j 1) { + printf( "Dims = " ); + for (j=0; j +/* stdlib.h Needed for malloc declaration */ +#include +#include "test.h" + +void NumberEdges ( int **, int **, int, int, int ); +void PrintGraph ( int, int *, int * ); + +int main( int argc, char **argv ) +{ + MPI_Comm comm, new_comm; + int reorder; + int nbrarray[3], baseindex; + int size, i, j, nnodes, nedges, q_nnodes, q_nedges, q_nnbrs, newrank; + int *index, *edges, *q_index, *q_edges, *rankbuf; + int worldrank, err = 0, toterr; + + MPI_Init( &argc, &argv ); + + MPI_Comm_rank( MPI_COMM_WORLD, &worldrank ); + +/* Generate the graph for a binary tree. + + Note that EVERY process must have the SAME data + */ + comm = MPI_COMM_WORLD; + MPI_Comm_size( comm, &size ); + + index = (int *)malloc( (size + 1) * sizeof(int) ); + edges = (int *)malloc( (size + 1) * 3 * sizeof(int) ); + reorder = 0; + for (i=0; i < size; i++) { + index[i] = 0; + } + NumberEdges( &index, &edges, -1, 0, size - 1 ); + nedges= index[0]; + for (i=1; i 0) ? 
index[i-1] : 0; + for (j=0; j= size) { + err++; + printf( "Rank %d missing in graph_map\n", i ); + } + } + + MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + if (worldrank == 0) { + if (toterr == 0) + printf( "No errors in MPI Graph routines\n" ); + else + printf( "Found %d errors in MPI Graph routines\n", toterr ); + } + + MPI_Comm_free( &new_comm ); + free( index ); + free( edges ); + free( q_index ); + free( q_edges ); + free( rankbuf ); + MPI_Finalize( ); + return 0; +} + +/* + * Routine to print out a graph for debugging + */ +void PrintGraph( nnodes, index, edges ) +int nnodes, *index, *edges; +{ + int i, lastidx, j; + lastidx=0; + printf( "rank\tindex\tedges\n" ); + for (i=0; i= 0) { +#ifdef DEBUG + printf( "Adding parent %d to %d\n", parent, first ); +#endif + *index = *index + 1; + *edges++ = parent; + } + if (first >= last) { + /* leaf */ + index++; + if (parent >= 0) { + *Index = index; + *Edges = edges; + } + return; + } + +/* Internal node. Always at least a left child */ +#ifdef DEBUG + printf( "Adding left child %d to %d\n", first + 1, first ); +#endif + *index = *index + 1; + *edges++ = first + 1; + +/* Try to add a right child */ + right = (last - first)/2; + right = first + right + 1; + if (right == first + 1) + right++; + if (right <= last) { + /* right child */ +#ifdef DEBUG + printf( "Adding rightchild %d to %d\n", right, first ); +#endif + *index = *index + 1; + *edges++ = right; + } + index++; + if (first + 1 <= last && right - 1 > first) { + NumberEdges( &index, &edges, first, first + 1, + (right <= last) ? 
right - 1: last ); + } + if (right <= last) { + NumberEdges( &index, &edges, first, right, last ); + } + if (parent >= 0) { + *Index = index; + *Edges = edges; + } +} diff --git a/teshsuite/smpi/mpich-test/topol/graphtest.std b/teshsuite/smpi/mpich-test/topol/graphtest.std new file mode 100644 index 0000000000..0182e23abe --- /dev/null +++ b/teshsuite/smpi/mpich-test/topol/graphtest.std @@ -0,0 +1,5 @@ +**** Testing MPI_Graph_create etc **** +Checking graph_get +Checking graph_map +No errors in MPI Graph routines +**** Testing MPI_Graph_create etc **** diff --git a/teshsuite/smpi/mpich-test/topol/test.c b/teshsuite/smpi/mpich-test/topol/test.c new file mode 100644 index 0000000000..9c7b2997bc --- /dev/null +++ b/teshsuite/smpi/mpich-test/topol/test.c @@ -0,0 +1,102 @@ +/* Procedures for recording and printing test results */ + +#include +#include +#include "test.h" +#include "mpi.h" + +static int tests_passed = 0; +static int tests_failed = 0; +static char failed_tests[255][81]; +static char suite_name[255]; +FILE *fileout = NULL; + +void Test_Init(suite, rank) +char *suite; +int rank; +{ + char filename[512]; + + sprintf(filename, "%s-%d.out", suite, rank); + strncpy(suite_name, suite, 255); + fileout = fopen(filename, "w"); + if (!fileout) { + fprintf( stderr, "Could not open %s on node %d\n", filename, rank ); + MPI_Abort( MPI_COMM_WORLD, 1 ); + } +} + +void Test_Message(mess) +char *mess; +{ + fprintf(fileout, "[%s]: %s\n", suite_name, mess); + fflush(fileout); +} + +void Test_Failed(test) +char *test; +{ + fprintf(fileout, "[%s]: *** Test '%s' Failed! 
***\n", suite_name, test); + strncpy(failed_tests[tests_failed], test, 81); + fflush(fileout); + tests_failed++; +} + +void Test_Passed(test) +char *test; +{ +#ifdef VERBOSE + fprintf(fileout, "[%s]: Test '%s' Passed.\n", suite_name, test); + fflush(fileout); +#endif + tests_passed++; +} + +int Summarize_Test_Results() +{ +#ifdef VERBOSE + fprintf(fileout, "For test suite '%s':\n", suite_name); +#else + if (tests_failed > 0) +#endif + { + fprintf(fileout, "Of %d attempted tests, %d passed, %d failed.\n", + tests_passed + tests_failed, tests_passed, tests_failed); + } + if (tests_failed > 0) { + int i; + + fprintf(fileout, "*** Tests Failed:\n"); + for (i = 0; i < tests_failed; i++) + fprintf(fileout, "*** %s\n", failed_tests[i]); + } + return tests_failed; +} + +void Test_Finalize() +{ + fflush(fileout); + fclose(fileout); +} + +#include "mpi.h" +/* Wait for every process to pass through this point. This test is used + to make sure that all processes complete, and that a test "passes" because + it executed, not because it some process failed. 
+ */ +void Test_Waitforall( ) +{ +int m, one, myrank, n; + +MPI_Comm_rank( MPI_COMM_WORLD, &myrank ); +MPI_Comm_size( MPI_COMM_WORLD, &n ); +one = 1; +MPI_Allreduce( &one, &m, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); + +if (m != n) { + printf( "[%d] Expected %d processes to wait at end, got %d\n", myrank, + n, m ); + } +if (myrank == 0) + printf( " No Errors\n" ); +} diff --git a/teshsuite/smpi/mpich-test/topol/test.h b/teshsuite/smpi/mpich-test/topol/test.h new file mode 100644 index 0000000000..b79cd2c6ac --- /dev/null +++ b/teshsuite/smpi/mpich-test/topol/test.h @@ -0,0 +1,24 @@ +/* Header for testing procedures */ + +#ifndef _INCLUDED_TEST_H_ +#define _INCLUDED_TEST_H_ + +#if defined(NEEDS_STDLIB_PROTOTYPES) +#include "protofix.h" +#endif + +void Test_Init (char *, int); +#ifdef USE_STDARG +void Test_Printf (char *, ...); +#else +/* No prototype */ +void Test_Printf(); +#endif +void Test_Message (char *); +void Test_Failed (char *); +void Test_Passed (char *); +int Summarize_Test_Results (void); +void Test_Finalize (void); +void Test_Waitforall (void); + +#endif diff --git a/teshsuite/smpi/mpich-test/topol/twod.f b/teshsuite/smpi/mpich-test/topol/twod.f new file mode 100644 index 0000000000..6d791b31d5 --- /dev/null +++ b/teshsuite/smpi/mpich-test/topol/twod.f @@ -0,0 +1,291 @@ +c********************************************************************** +c twod.f - a solution to the Poisson problem by using Jacobi +c interation on a 2-d decomposition +c +c .... the rest of this is from pi3.f to show the style ... +c +c Each node: +c 1) receives the number of rectangles used in the approximation. +c 2) calculates the areas of it's rectangles. +c 3) Synchronizes for a global summation. +c Node 0 prints the result. +c +c Variables: +c +c pi the calculated result +c n number of points of integration. 
+c x midpoint of each rectangle's interval +c f function to integrate +c sum,pi area of rectangles +c tmp temporary scratch space for global summation +c i do loop index +c +c This code is included (without the prints) because one version of +c MPICH SEGV'ed (probably because of errors in handling send/recv of +c MPI_PROC_NULL source/destination). +c +c**************************************************************************** + program main + include "mpif.h" + integer maxn + parameter (maxn = 128) + double precision a(maxn,maxn), b(maxn,maxn), f(maxn,maxn) + integer nx, ny + integer myid, numprocs, it, rc, comm2d, ierr, stride + integer nbrleft, nbrright, nbrtop, nbrbottom + integer sx, ex, sy, ey + integer dims(2) + logical periods(2) + double precision diff2d, diffnorm, dwork + double precision t1, t2 + external diff2d + data periods/2*.false./ + + call MPI_INIT( ierr ) + call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr ) + call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr ) +c print *, "Process ", myid, " of ", numprocs, " is alive" + if (myid .eq. 0) then +c +c Get the size of the problem +c +c print *, 'Enter nx' +c read *, nx + nx = 10 + endif +c print *, 'About to do bcast on ', myid + call MPI_BCAST(nx,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + ny = nx +c +c Get a new communicator for a decomposition of the domain. Let MPI +c find a "good" decomposition +c + dims(1) = 0 + dims(2) = 0 + call MPI_DIMS_CREATE( numprocs, 2, dims, ierr ) + call MPI_CART_CREATE( MPI_COMM_WORLD, 2, dims, periods, .true., + * comm2d, ierr ) +c +c Get my position in this communicator +c + call MPI_COMM_RANK( comm2d, myid, ierr ) +c print *, "Process ", myid, " of ", numprocs, " is alive" +c +c My neighbors are now +/- 1 with my rank. Handle the case of the +c boundaries by using MPI_PROCNULL. 
+ call fnd2dnbrs( comm2d, nbrleft, nbrright, nbrtop, nbrbottom ) +c print *, "Process ", myid, ":", +c * nbrleft, nbrright, nbrtop, nbrbottom +c +c Compute the decomposition +c + call fnd2ddecomp( comm2d, nx, sx, ex, sy, ey ) +c print *, "Process ", myid, ":", sx, ex, sy, ey +c +c Create a new, "strided" datatype for the exchange in the "non-contiguous" +c direction +c + call mpi_Type_vector( ey-sy+1, 1, ex-sx+3, + $ MPI_DOUBLE_PRECISION, stride, ierr ) + call mpi_Type_commit( stride, ierr ) +c +c +c Initialize the right-hand-side (f) and the initial solution guess (a) +c + call twodinit( a, b, f, nx, sx, ex, sy, ey ) +c +c Actually do the computation. Note the use of a collective operation to +c check for convergence, and a do-loop to bound the number of iterations. +c + call MPI_BARRIER( MPI_COMM_WORLD, ierr ) + t1 = MPI_WTIME() + do 10 it=1, 100 + call exchng2( a, b, sx, ex, sy, ey, comm2d, stride, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + call sweep2d( b, f, nx, sx, ex, sy, ey, a ) + call exchng2( b, a, sx, ex, sy, ey, comm2d, stride, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + call sweep2d( a, f, nx, sx, ex, sy, ey, b ) + dwork = diff2d( a, b, nx, sx, ex, sy, ey ) + call MPI_Allreduce( dwork, diffnorm, 1, MPI_DOUBLE_PRECISION, + $ MPI_SUM, comm2d, ierr ) + if (diffnorm .lt. 1.0e-5) goto 20 +c if (myid .eq. 0) print *, 2*it, ' Difference is ', diffnorm +10 continue + if (myid .eq. 0) print *, 'Failed to converge' +20 continue + t2 = MPI_WTIME() +c if (myid .eq. 0) then +c print *, 'Converged after ', 2*it, ' Iterations in ', t2 - t1, +c $ ' secs ' +c endif +c +c + call MPI_Type_free( stride, ierr ) + call MPI_Comm_free( comm2d, ierr ) + if (myid .eq. 
0) then + print *, ' No Errors' + endif + call MPI_FINALIZE(rc) + end +c +c Perform a Jacobi sweep for a 2-d decomposition +c + subroutine sweep2d( a, f, n, sx, ex, sy, ey, b ) + integer n, sx, ex, sy, ey + double precision a(sx-1:ex+1, sy-1:ey+1), f(sx-1:ex+1, sy-1:ey+1), + + b(sx-1:ex+1, sy-1:ey+1) +c + integer i, j + double precision h +c + h = 1.0d0 / dble(n+1) + do 10 i=sx, ex + do 10 j=sy, ey + b(i,j) = 0.25 * (a(i-1,j)+a(i,j+1)+a(i,j-1)+a(i+1,j)) - + + h * h * f(i,j) + 10 continue + return + end + + subroutine exchng2( a, b, sx, ex, sy, ey, + $ comm2d, stride, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + include "mpif.h" + integer sx, ex, sy, ey, stride + double precision a(sx-1:ex+1, sy-1:ey+1), + $ b(sx-1:ex+1, sy-1:ey+1) + integer nbrleft, nbrright, nbrtop, nbrbottom, comm2d + integer status(MPI_STATUS_SIZE), ierr, nx +c + nx = ex - sx + 1 +c These are just like the 1-d versions, except for less data + call MPI_SENDRECV( b(ex,sy), nx, MPI_DOUBLE_PRECISION, + $ nbrtop, 0, + $ a(sx-1,sy), nx, MPI_DOUBLE_PRECISION, + $ nbrbottom, 0, comm2d, status, ierr ) + call MPI_SENDRECV( b(sx,sy), nx, MPI_DOUBLE_PRECISION, + $ nbrbottom, 1, + $ a(ex+1,sy), nx, MPI_DOUBLE_PRECISION, + $ nbrtop, 1, comm2d, status, ierr ) +c +c This uses the "strided" datatype + call MPI_SENDRECV( b(sx,ey), 1, stride, nbrright, 0, + $ a(sx,sy-1), 1, stride, nbrleft, 0, + $ comm2d, status, ierr ) + call MPI_SENDRECV( b(sx,sy), 1, stride, nbrleft, 1, + $ a(sx,ey+1), 1, stride, nbrright, 1, + $ comm2d, status, ierr ) + return + end + +c +c The rest of the 2-d program +c + double precision function diff2d( a, b, nx, sx, ex, sy, ey ) + integer nx, sx, ex, sy, ey + double precision a(sx-1:ex+1, sy-1:ey+1), b(sx-1:ex+1, sy-1:ey+1) +c + double precision sum + integer i, j +c + sum = 0.0d0 + do 10 j=sy,ey + do 10 i=sx,ex + sum = sum + (a(i,j) - b(i,j)) ** 2 + 10 continue +c + diff2d = sum + return + end + subroutine twodinit( a, b, f, nx, sx, ex, sy, ey ) + integer nx, sx, ex, sy, ey + double 
precision a(sx-1:ex+1, sy-1:ey+1), b(sx-1:ex+1, sy-1:ey+1), + & f(sx-1:ex+1, sy-1:ey+1) +c + integer i, j +c + do 10 j=sy-1,ey+1 + do 10 i=sx-1,ex+1 + a(i,j) = 0.0d0 + b(i,j) = 0.0d0 + f(i,j) = 0.0d0 + 10 continue +c +c Handle boundary conditions +c + if (sx .eq. 1) then + do 20 j=sy,ey + a(0,j) = 1.0d0 + b(0,j) = 1.0d0 + 20 continue + endif + if (ex .eq. nx) then + do 21 j=sy,ey + a(nx+1,j) = 0.0d0 + b(nx+1,j) = 0.0d0 + 21 continue + endif + if (sy .eq. 1) then + do 30 i=sx,ex + a(i,0) = 1.0d0 + b(i,0) = 1.0d0 + 30 continue + endif +c + return + end + +c +c This file contains a routine for producing a decomposition of a 1-d array +c when given a number of processors. It may be used in "direct" product +c decomposition. The values returned assume a "global" domain in [1:n] +c + subroutine MPE_DECOMP1D( n, numprocs, myid, s, e ) + integer n, numprocs, myid, s, e + integer nlocal + integer deficit +c + nlocal = n / numprocs + s = myid * nlocal + 1 + deficit = mod(n,numprocs) + s = s + min(myid,deficit) + if (myid .lt. deficit) then + nlocal = nlocal + 1 + endif + e = s + nlocal - 1 + if (e .gt. n .or. myid .eq. numprocs-1) e = n + return + end +c +c This routine show how to determine the neighbors in a 2-d decomposition of +c the domain. This assumes that MPI_Cart_create has already been called +c + subroutine fnd2dnbrs( comm2d, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + integer comm2d, nbrleft, nbrright, nbrtop, nbrbottom +c + integer ierr +c + call MPI_Cart_shift( comm2d, 0, 1, nbrleft, nbrright, ierr ) + call MPI_Cart_shift( comm2d, 1, 1, nbrbottom, nbrtop, ierr ) +c + return + end +c +c Note: THIS IS A TEST PROGRAM. THE ACTUAL VALUES MOVED ARE NOT +c CORRECT FOR A POISSON SOLVER. 
+c + subroutine fnd2ddecomp( comm2d, n, sx, ex, sy, ey ) + integer comm2d + integer n, sx, ex, sy, ey + integer dims(2), coords(2), ierr + logical periods(2) +c + call MPI_Cart_get( comm2d, 2, dims, periods, coords, ierr ) + + call MPE_DECOMP1D( n, dims(1), coords(1), sx, ex ) + call MPE_DECOMP1D( n, dims(2), coords(2), sy, ey ) +c + return + end diff --git a/teshsuite/smpi/mpich-test/topol/twod2.f b/teshsuite/smpi/mpich-test/topol/twod2.f new file mode 100644 index 0000000000..eb5ea7565b --- /dev/null +++ b/teshsuite/smpi/mpich-test/topol/twod2.f @@ -0,0 +1,289 @@ +c********************************************************************** +c twod.f - a solution to the Poisson problem by using Jacobi +c interation on a 2-d decomposition +c +c .... the rest of this is from pi3.f to show the style ... +c +c Each node: +c 1) receives the number of rectangles used in the approximation. +c 2) calculates the areas of it's rectangles. +c 3) Synchronizes for a global summation. +c Node 0 prints the result. +c +c Variables: +c +c pi the calculated result +c n number of points of integration. +c x midpoint of each rectangle's interval +c f function to integrate +c sum,pi area of rectangles +c tmp temporary scratch space for global summation +c i do loop index +c +c This code is included (without the prints) because one version of +c MPICH SEGV'ed (probably because of errors in handling send/recv of +c MPI_PROC_NULL source/destination). 
+c +c**************************************************************************** + program main + include "mpif.h" + integer maxn + parameter (maxn = 128) + double precision a(maxn,maxn), b(maxn,maxn), f(maxn,maxn) + integer nx, ny + integer myid, numprocs, it, rc, comm2d, ierr, stride + integer nbrleft, nbrright, nbrtop, nbrbottom + integer sx, ex, sy, ey + integer dims(2) + logical periods(2) + double precision diff2d, diffnorm, dwork + double precision t1, t2 + external diff2d + data periods/2*.false./ + + call MPI_INIT( ierr ) + call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr ) + call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr ) +c print *, "Process ", myid, " of ", numprocs, " is alive" + if (myid .eq. 0) then +c +c Get the size of the problem +c +c print *, 'Enter nx' +c read *, nx + nx = 10 + endif +c print *, 'About to do bcast on ', myid + call MPI_BCAST(nx,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr) + ny = nx +c +c Get a new communicator for a decomposition of the domain. Let MPI +c find a "good" decomposition +c + dims(1) = 0 + dims(2) = 0 + call MPI_DIMS_CREATE( numprocs, 2, dims, ierr ) + call MPI_CART_CREATE( MPI_COMM_WORLD, 2, dims, periods, .true., + * comm2d, ierr ) +c +c Get my position in this communicator +c + call MPI_COMM_RANK( comm2d, myid, ierr ) +c print *, "Process ", myid, " of ", numprocs, " is alive" +c +c My neighbors are now +/- 1 with my rank. Handle the case of the +c boundaries by using MPI_PROCNULL. 
+ call fnd2dnbrs( comm2d, nbrleft, nbrright, nbrtop, nbrbottom ) +c print *, "Process ", myid, ":", +c * nbrleft, nbrright, nbrtop, nbrbottom +c +c Compute the decomposition +c + call fnd2ddecomp( comm2d, nx, sx, ex, sy, ey ) +c print *, "Process ", myid, ":", sx, ex, sy, ey +c +c Create a new, "strided" datatype for the exchange in the "non-contiguous" +c direction +c + call mpi_Type_vector( ey-sy+1, 1, ex-sx+3, + $ MPI_DOUBLE_PRECISION, stride, ierr ) + call mpi_Type_commit( stride, ierr ) +c +c +c Initialize the right-hand-side (f) and the initial solution guess (a) +c + call twodinit( a, b, f, nx, sx, ex, sy, ey ) +c +c Actually do the computation. Note the use of a collective operation to +c check for convergence, and a do-loop to bound the number of iterations. +c + call MPI_BARRIER( MPI_COMM_WORLD, ierr ) + t1 = MPI_WTIME() + do 10 it=1, 100 + call exchng2( b, sx, ex, sy, ey, comm2d, stride, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + call sweep2d( b, f, nx, sx, ex, sy, ey, a ) + call exchng2( a, sx, ex, sy, ey, comm2d, stride, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + call sweep2d( a, f, nx, sx, ex, sy, ey, b ) + dwork = diff2d( a, b, nx, sx, ex, sy, ey ) + call MPI_Allreduce( dwork, diffnorm, 1, MPI_DOUBLE_PRECISION, + $ MPI_SUM, comm2d, ierr ) + if (diffnorm .lt. 1.0e-5) goto 20 + if (myid .eq. 0) print *, 2*it, ' Difference is ', diffnorm +10 continue + if (myid .eq. 0) print *, 'Failed to converge' +20 continue + t2 = MPI_WTIME() +c if (myid .eq. 
0) then +c print *, 'Converged after ', 2*it, ' Iterations in ', t2 - t1, +c $ ' secs ' +c endif +c +c + call MPI_Type_free( stride, ierr ) + call MPI_Comm_free( comm2d, ierr ) + call MPI_FINALIZE(rc) + end +c +c Perform a Jacobi sweep for a 2-d decomposition +c + subroutine sweep2d( a, f, n, sx, ex, sy, ey, b ) + integer n, sx, ex, sy, ey + double precision a(sx-1:ex+1, sy-1:ey+1), f(sx-1:ex+1, sy-1:ey+1), + + b(sx-1:ex+1, sy-1:ey+1) +c + integer i, j + double precision h +c + h = 1.0d0 / dble(n+1) + do 10 j=sy, ey + do 10 i=sx, ex + b(i,j) = 0.25 * (a(i-1,j)+a(i,j+1)+a(i,j-1)+a(i+1,j)) - + + h * h * f(i,j) + 10 continue + return + end +c + subroutine exchng2( a, sx, ex, sy, ey, + $ comm2d, stride, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + include "mpif.h" + integer sx, ex, sy, ey, stride + double precision a(sx-1:ex+1, sy-1:ey+1) + integer nbrleft, nbrright, nbrtop, nbrbottom, comm2d + integer status(MPI_STATUS_SIZE), ierr, nx +c + nx = ex - sx + 1 +c These are just like the 1-d versions, except for less data + call MPI_SENDRECV( a(sx,ey), nx, MPI_DOUBLE_PRECISION, + $ nbrtop, 0, + $ a(sx,sy-1), nx, MPI_DOUBLE_PRECISION, + $ nbrbottom, 0, comm2d, status, ierr ) + call MPI_SENDRECV( a(sx,sy), nx, MPI_DOUBLE_PRECISION, + $ nbrbottom, 1, + $ a(sx,ey+1), nx, MPI_DOUBLE_PRECISION, + $ nbrtop, 1, comm2d, status, ierr ) +c +c This uses the "strided" datatype + call MPI_SENDRECV( a(ex,sy), 1, stride, nbrright, 0, + $ a(sx-1,sy), 1, stride, nbrleft, 0, + $ comm2d, status, ierr ) + call MPI_SENDRECV( a(sx,sy), 1, stride, nbrleft, 1, + $ a(ex+1,sy), 1, stride, nbrright, 1, + $ comm2d, status, ierr ) + return + end + +c +c The rest of the 2-d program +c + double precision function diff2d( a, b, nx, sx, ex, sy, ey ) + integer nx, sx, ex, sy, ey + double precision a(sx-1:ex+1, sy-1:ey+1), b(sx-1:ex+1, sy-1:ey+1) +c + double precision sum + integer i, j +c + sum = 0.0d0 + do 10 j=sy,ey + do 10 i=sx,ex + sum = sum + (a(i,j) - b(i,j)) ** 2 + 10 continue +c + diff2d = sum + 
return + end + subroutine twodinit( a, b, f, nx, sx, ex, sy, ey ) + integer nx, sx, ex, sy, ey + double precision a(sx-1:ex+1, sy-1:ey+1), b(sx-1:ex+1, sy-1:ey+1), + & f(sx-1:ex+1, sy-1:ey+1) +c + integer i, j +c + do 10 j=sy-1,ey+1 + do 10 i=sx-1,ex+1 + a(i,j) = 0.0d0 + b(i,j) = 0.0d0 + f(i,j) = 0.0d0 + 10 continue +c +c Handle boundary conditions +c + if (sx .eq. 1) then + do 20 j=sy,ey + a(0,j) = 1.0d0 + b(0,j) = 1.0d0 + 20 continue + endif + if (ex .eq. nx) then + do 21 j=sy,ey + a(nx+1,j) = 0.0d0 + b(nx+1,j) = 0.0d0 + 21 continue + endif + if (sy .eq. 1) then + do 30 i=sx,ex + a(i,0) = 1.0d0 + b(i,0) = 1.0d0 + 30 continue + endif +c + return + end + +c +c This file contains a routine for producing a decomposition of a 1-d array +c when given a number of processors. It may be used in "direct" product +c decomposition. The values returned assume a "global" domain in [1:n] +c + subroutine MPE_DECOMP1D( n, numprocs, myid, s, e ) + integer n, numprocs, myid, s, e + integer nlocal + integer deficit +c + nlocal = n / numprocs + s = myid * nlocal + 1 + deficit = mod(n,numprocs) + s = s + min(myid,deficit) + if (myid .lt. deficit) then + nlocal = nlocal + 1 + endif + e = s + nlocal - 1 + if (e .gt. n .or. myid .eq. numprocs-1) e = n + return + end +c +c This routine show how to determine the neighbors in a 2-d decomposition of +c the domain. This assumes that MPI_Cart_create has already been called +c + subroutine fnd2dnbrs( comm2d, + $ nbrleft, nbrright, nbrtop, nbrbottom ) + integer comm2d, nbrleft, nbrright, nbrtop, nbrbottom +c + integer ierr +c + call MPI_Cart_shift( comm2d, 0, 1, nbrleft, nbrright, ierr ) + call MPI_Cart_shift( comm2d, 1, 1, nbrbottom, nbrtop, ierr ) +c + return + end +c +c Note: THIS IS A TEST PROGRAM. THE ACTUAL VALUES MOVED ARE NOT +c CORRECT FOR A POISSON SOLVER. 
+c + subroutine fnd2ddecomp( comm2d, n, sx, ex, sy, ey ) + integer comm2d + integer n, sx, ex, sy, ey + integer dims(2), coords(2), ierr + logical periods(2) +c + call MPI_Cart_get( comm2d, 2, dims, periods, coords, ierr ) + + call MPE_DECOMP1D( n, dims(1), coords(1), sx, ex ) + call MPE_DECOMP1D( n, dims(2), coords(2), sy, ey ) +c + return + end + + -- 2.20.1