From: Martin Quinson
Date: Thu, 24 Mar 2022 19:38:59 +0000 (+0100)
Subject: Sync MBI generators with upstream modifications
X-Git-Tag: v3.32~402
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/531226ecb9f68ada77348e52433ffd19c6151f93

Sync MBI generators with upstream modifications
---

diff --git a/teshsuite/smpi/MBI/MBIutils.py b/teshsuite/smpi/MBI/MBIutils.py
index 59b22f8780..f084a4e09f 100644
--- a/teshsuite/smpi/MBI/MBIutils.py
+++ b/teshsuite/smpi/MBI/MBIutils.py
@@ -12,14 +12,14 @@ import signal
 import hashlib
 
 class AbstractTool:
-    def ensure_image(self, params=""):
+    def ensure_image(self, params="", dockerparams=""):
         """Verify that this is executed from the right docker image, and complain if not."""
         if os.path.exists("/MBI") or os.path.exists("trust_the_installation"):
             print("This seems to be a MBI docker image. Good.")
         else:
             print("Please run this script in a MBI docker image. Run these commands:")
             print(" docker build -f Dockerfile -t mpi-bugs-initiative:latest . # Only the first time")
-            print(f" docker run -it --rm --name MIB --volume $(pwd):/MBI mpi-bugs-initiative /MBI/MBI.py {params}")
+            print(f" docker run -it --rm --name MIB --volume $(pwd):/MBI {dockerparams}mpi-bugs-initiative /MBI/MBI.py {params}")
             sys.exit(1)
 
     def build(self, rootdir, cached=True):
@@ -169,7 +169,7 @@ def categorize(tool, toolname, test_id, expected):
             diagnostic = f'hard timeout'
         else:
             diagnostic = f'timeout after {elapsed} sec'
-    elif outcome == 'failure':
+    elif outcome == 'failure' or outcome == 'segfault':
         res_category = 'failure'
         diagnostic = f'tool error, or test not run'
     elif outcome == 'UNIMPLEMENTED':
diff --git a/teshsuite/smpi/MBI/P2PMatchingGenerator.py b/teshsuite/smpi/MBI/P2PMatchingGenerator.py
index 4698b13976..46e48ad228 100755
--- a/teshsuite/smpi/MBI/P2PMatchingGenerator.py
+++ b/teshsuite/smpi/MBI/P2PMatchingGenerator.py
@@ -35,7 +35,7 @@ END_MBI_TESTS
 #include 
 #include 
 
-#define buff_size 128
+#define buff_size 1
 
 int main(int argc, char **argv) {
   int nprocs = -1;
diff --git a/teshsuite/smpi/MBI/RMARemoteLocalConcurrencyGenerator.py b/teshsuite/smpi/MBI/RMARemoteLocalConcurrencyGenerator.py
index 4a8e83ca12..5eecc704ea 100755
--- a/teshsuite/smpi/MBI/RMARemoteLocalConcurrencyGenerator.py
+++ b/teshsuite/smpi/MBI/RMARemoteLocalConcurrencyGenerator.py
@@ -80,7 +80,7 @@ int main(int argc, char **argv) {
 
 for e in epoch:
     for p1 in get:
-        for p2 in rput + rstore + rload + rget :
+        for p2 in put + rstore + rload + get :
             patterns = {}
             patterns = {'e': e, 'p1': p1, 'p2': p2}
             patterns['generatedby'] = f'DO NOT EDIT: this file was generated by {os.path.basename(sys.argv[0])}. DO NOT EDIT.'
@@ -100,12 +100,19 @@ for e in epoch:
             replace['longdesc'] = 'Global Concurrency error. @{p2}@ conflicts with @{p1}@'
             replace['outcome'] = 'ERROR: GlobalConcurrency'
             replace['errormsg'] = 'Global Concurrency error. @{p2}@ at @{filename}@:@{line:MBIERROR2}@ conflicts with @{p1}@ line @{line:MBIERROR1}@'
+
+            # Replace Put and Get first argument
+            if p2 in put:
+                replace['operation2'] = 'MPI_Put(&winbuf[20], N, MPI_INT, target, 0, N, type, win);'
+            if p2 in get:
+                replace['operation2'] = 'MPI_Get(&winbuf[20], N, MPI_INT, target, 0, N, type, win);'
+
             make_file(template, f'GlobalConcurrency_rl_{e}_{p1}_{p2}_nok.c', replace)
 
 
 for e in epoch:
     for p1 in put:
-        for p2 in rstore + rload + rput:
+        for p2 in rstore + rload + put:
             patterns = {}
             patterns = {'e': e, 'p1': p1, 'p2': p2}
             patterns['generatedby'] = f'DO NOT EDIT: this file was generated by {os.path.basename(sys.argv[0])}. DO NOT EDIT.'
@@ -125,4 +132,9 @@ for e in epoch:
             replace['longdesc'] = 'Global Concurrency error. @{p2}@ conflicts with @{p1}@'
             replace['outcome'] = 'ERROR: LocalConcurrency'
             replace['errormsg'] = 'Global Concurrency error. @{p2}@ at @{filename}@:@{line:MBIERROR2}@ conflicts with @{p1}@ line @{line:MBIERROR1}@'
+
+            # Replace Put first argument
+            if p2 in put:
+                replace['operation2'] = 'MPI_Put(&winbuf[20], N, MPI_INT, target, 0, N, type, win);'
+
             make_file(template, f'GlobalConcurrency_rl_{e}_{p1}_{p2}_nok.c', replace)
diff --git a/teshsuite/smpi/MBI/RMAWinBufferGenerator.py b/teshsuite/smpi/MBI/RMAWinBufferGenerator.py
index a737150842..db49af8b61 100755
--- a/teshsuite/smpi/MBI/RMAWinBufferGenerator.py
+++ b/teshsuite/smpi/MBI/RMAWinBufferGenerator.py
@@ -55,10 +55,22 @@ int main(int argc, char *argv[]) {
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   printf("Hello from rank %d \\n", rank);
 
+  if (numProcs < 2)
+    printf("MBI ERROR: This test needs at least 2 processes to produce a bug!\\n");
+
   MPI_Win win;
   get_win(&win);
 
+  MPI_Win_fence(0, win);
+
+  if (rank == 0) {
+    int localbuf[N] = {12345};
+    MPI_Put(&localbuf, N, MPI_INT, 1, 0, N, MPI_INT, win);
+  }
+
+  MPI_Win_fence(0, win);
+
   MPI_Win_free(&win);
 
   @{bufferfree}@
diff --git a/teshsuite/smpi/MBI/generator_utils.py b/teshsuite/smpi/MBI/generator_utils.py
index 3ef5d5c937..3991a497d7 100644
--- a/teshsuite/smpi/MBI/generator_utils.py
+++ b/teshsuite/smpi/MBI/generator_utils.py
@@ -15,7 +15,7 @@ icoll4op = ['MPI_Ireduce', 'MPI_Iallreduce']
 coll4root = ['MPI_Reduce', 'MPI_Bcast', 'MPI_Gather', 'MPI_Scatter']
 icoll4root = ['MPI_Ireduce', 'MPI_Ibcast', 'MPI_Igather', 'MPI_Iscatter']
 pcoll = []
-tcoll = ['MPI_Comm_split', 'MPI_Op_create', 'MPI_Comm_group', 'MPI_Comm_dup', 'MPI_Type_contiguous', 'MPI_Comm_create', 'MPI_Group_excl']
+tcoll = ['MPI_Comm_split', 'MPI_Op_create', 'MPI_Comm_dup', 'MPI_Type_contiguous', 'MPI_Comm_create', 'MPI_Group_excl'] # MPI_Comm_dup removed
 tcoll4color = ['MPI_Comm_split']
 tcoll4topo = ['MPI_Cart_get']
 
@@ -36,9 +36,7 @@ probe = ['MPI_Probe']
 epoch = ['MPI_Win_fence', 'MPI_Win_lock', 'MPI_Win_lock_all']
 rma = ['MPI_Get', 'MPI_Put']
 get = ['MPI_Get']
-rget = ['MPI_RGet']
 put = ['MPI_Put']
-rput = ['MPI_RPut']
 store = ['store']
 load = ['load']
 rstore = ['rstore']
@@ -396,32 +394,26 @@ finEpoch['MPI_Win_lock'] =lambda n: 'MPI_Win_unlock(target, win);'
 epoch['MPI_Win_lock_all'] =lambda n: 'MPI_Win_lock_all(0,win);'
 finEpoch['MPI_Win_lock_all'] =lambda n: 'MPI_Win_unlock_all(win);'
 
-init['MPI_Put'] = lambda n: f'int localbuf{n} = 12345;'
+init['MPI_Put'] = lambda n: f'int localbuf{n}[N] = {{12345}};'
 operation['MPI_Put'] = lambda n: f'MPI_Put(&localbuf{n}, N, MPI_INT, target, 0, N, type, win);'
 
-init['MPI_RPut'] = lambda n: ""
-operation['MPI_RPut'] = lambda n: f'MPI_Put(&winbuf[20], N, MPI_INT, target, 0, N, type, win);'
-
-init['MPI_Get'] = lambda n: f'int localbuf{n} = 54321;'
+init['MPI_Get'] = lambda n: f'int localbuf{n}[N] = {{54321}};'
 operation['MPI_Get'] = lambda n: f'MPI_Get(&localbuf{n}, N, MPI_INT, target, 0, N, type, win);'
 
-init['MPI_RGet'] = lambda n: ""
-operation['MPI_RGet'] = lambda n: f'MPI_Get(&winbuf[20], N, MPI_INT, target, 0, N, type, win);'
-
-init['store'] = lambda n: f'int localbuf{n} = 0;'
-operation['store'] = lambda n: f'localbuf{n} = 8;'
+init['store'] = lambda n: f'int localbuf{n}[N] = {{0}};'
+operation['store'] = lambda n: f'localbuf{n}[0] = 8;'
 
 init['rstore'] = lambda n: ""
 operation['rstore'] = lambda n: f'winbuf[20] = 12346;'
 
-init['load'] = lambda n: f'int localbuf{n} = 0;'
-operation['load'] = lambda n: f'int load = localbuf{n};'
+init['load'] = lambda n: f'int localbuf{n}[N] = {{0}};'
+operation['load'] = lambda n: f'int load = localbuf{n}[0];'
 
 init['rload'] = lambda n: ""
 operation['rload'] = lambda n: "int load = winbuf[20];"
 
-init['loadstore'] = lambda n: f'int localbuf{n} = 0;'
-operation['loadstore'] = lambda n: f'if (localbuf{n} % 2 == 0) localbuf{n}++; '
+init['loadstore'] = lambda n: f'int localbuf{n}[N] = {{0}};'
+operation['loadstore'] = lambda n: f'if (localbuf{n}[0] % 2 == 0) localbuf{n}[0]++; '