AND (Algorithmique Numérique Distribuée)

Public GIT Repository
Import (some bits of) the MBI test suite
author    Martin Quinson <martin.quinson@ens-rennes.fr>
          Sun, 6 Mar 2022 13:41:03 +0000 (14:41 +0100)
committer Martin Quinson <martin.quinson@ens-rennes.fr>
          Sun, 6 Mar 2022 13:46:17 +0000 (14:46 +0100)
Not activated on the robots for now, as some tests fail.

17 files changed:
CMakeLists.txt
COPYING
ChangeLog
MANIFEST.in
docs/source/Installing_SimGrid.rst
teshsuite/smpi/MBI/CMakeLists.txt [new file with mode: 0644]
teshsuite/smpi/MBI/CollMatchingGenerator.py [new file with mode: 0755]
teshsuite/smpi/MBI/MBI.py [new file with mode: 0755]
teshsuite/smpi/MBI/MBIutils.py [new file with mode: 0644]
teshsuite/smpi/MBI/generator_utils.py [new file with mode: 0644]
teshsuite/smpi/MBI/simgrid.py [new file with mode: 0644]
tools/cmake/DefinePackages.cmake
tools/cmake/Option.cmake
tools/jenkins/Coverage.sh
tools/jenkins/Flags.sh
tools/jenkins/Sanitizers.sh
tools/jenkins/build.sh

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c234bf3..d1b67ba 100644 (file)
@@ -960,6 +960,7 @@ endif()
 message("        Compile Smpi ................: ${HAVE_SMPI}")
 message("          Smpi fortran ..............: ${SMPI_FORTRAN}")
 message("          MPICH3 testsuite ..........: ${enable_smpi_MPICH3_testsuite}")
+message("          MBI testsuite .............: ${enable_smpi_MBI_testsuite}")
 message("          Privatization .............: ${HAVE_PRIVATIZATION}")
 message("          PAPI support...............: ${HAVE_PAPI}")
 message("        Compile Boost.Context support: ${HAVE_BOOST_CONTEXTS}")
diff --git a/COPYING b/COPYING
index c2b447e..ae29638 100644 (file)
--- a/COPYING
+++ b/COPYING
@@ -185,6 +185,12 @@ Copyright:
   Copyright (c) 2018 Two Blue Cubes Ltd.
 License: BSL-1.0
 
+Files: teshsuite/smpi/MBI/*
+Copyright:
+  Copyright (C) 2021-2022, The MBI project.
+Comment: The MBI.py script was written for SimGrid while the other files are kept in sync with the MBI source tree.
+License: GPL-3
+
 Files: src/include/xxhash.hpp
 Copyright:
   Copyright (C) 2012-2018, Yann Collet.
diff --git a/ChangeLog b/ChangeLog
index 2684e57..135a869 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -7,7 +7,8 @@ MC:
  - Support mutex, semaphore and barrier in DPOR reduction
  - Seems to work on Arm64 architectures too.
  - Display a nice error message when ptrace is not usable.
- - Remove the ISP test suite: it's not free software, and it's superseeded by MBI
+ - New test suite, imported from the MPI Bugs Initiative (MBI). Not all MBI generators are integrated yet.
+ - Remove the ISP test suite: it's not free software, and it's superseded by MBI.
 
 SMPI:
  - fix for FG#100 by ensuring small asynchronous messages never overtake larger
diff --git a/MANIFEST.in b/MANIFEST.in
index db097f1..7f575a2 100644 (file)
@@ -846,6 +846,11 @@ include teshsuite/s4u/wait-all-for/wait-all-for.cpp
 include teshsuite/s4u/wait-all-for/wait-all-for.tesh
 include teshsuite/s4u/wait-any-for/wait-any-for.cpp
 include teshsuite/s4u/wait-any-for/wait-any-for.tesh
+include teshsuite/smpi/MBI/CollMatchingGenerator.py
+include teshsuite/smpi/MBI/MBI.py
+include teshsuite/smpi/MBI/MBIutils.py
+include teshsuite/smpi/MBI/generator_utils.py
+include teshsuite/smpi/MBI/simgrid.py
 include teshsuite/smpi/auto-shared/auto-shared.c
 include teshsuite/smpi/auto-shared/auto-shared.tesh
 include teshsuite/smpi/bug-17132/bug-17132.c
@@ -2608,6 +2613,7 @@ include teshsuite/platforms/CMakeLists.txt
 include teshsuite/python/CMakeLists.txt
 include teshsuite/s4u/CMakeLists.txt
 include teshsuite/smpi/CMakeLists.txt
+include teshsuite/smpi/MBI/CMakeLists.txt
 include teshsuite/smpi/mpich3-test/CMakeLists.txt
 include teshsuite/smpi/mpich3-test/attr/CMakeLists.txt
 include teshsuite/smpi/mpich3-test/coll/CMakeLists.txt
diff --git a/docs/source/Installing_SimGrid.rst b/docs/source/Installing_SimGrid.rst
index 80cb783..510603d 100644 (file)
@@ -280,7 +280,7 @@ enable_ns3 (on/OFF)
 enable_smpi (ON/off)
   Allows one to run MPI code on top of SimGrid.
 
-enable_smpi_ISP_testsuite (on/OFF)
+enable_smpi_MBI_testsuite (on/OFF)
   Adds many extra tests for the model checker module.
 
 enable_smpi_MPICH3_testsuite (on/OFF)
diff --git a/teshsuite/smpi/MBI/CMakeLists.txt b/teshsuite/smpi/MBI/CMakeLists.txt
new file mode 100644 (file)
index 0000000..46d3f0f
--- /dev/null
@@ -0,0 +1,73 @@
+# Copyright 2021-2022. The SimGrid Team. All rights reserved. 
+
+# Integrates the MBI tests into the SimGrid build chain when asked to
+
+# Only the Python scripts are embedded in the archive; the C test files are generated at config time using these scripts.
+# These Python scripts are copied over from the MBI repository with as few changes as possible.
+
+set(generator_scripts CollMatchingGenerator.py) # More generators to come
+
+if (enable_smpi_MBI_testsuite)
+  if (NOT enable_smpi)
+    message(FATAL_ERROR "MBI test suite cannot be enabled without SMPI. Please change either setting.")
+  endif()
+  if (NOT enable_model-checking)
+    message(FATAL_ERROR "MBI test suite cannot be enabled without the Mc SimGrid model-checker. Please change either setting.")
+  endif()
+
+  message(STATUS "Generating the MBI scripts")
+  file(REMOVE_RECURSE  ${CMAKE_BINARY_DIR}/MBI/tmp)
+  file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/MBI/tmp)
+  file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/generator_utils.py DESTINATION ${CMAKE_BINARY_DIR}/MBI/tmp)
+  foreach (script ${generator_scripts})
+    message(STATUS "  $ ${CMAKE_CURRENT_SOURCE_DIR}/${script}")
+    execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${script}
+                    WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/MBI/tmp)
+  endforeach()
+
+  set(CMAKE_C_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicc")
+  set(CMAKE_CXX_COMPILER "${CMAKE_BINARY_DIR}/smpi_script/bin/smpicxx")
+  include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
+
+  # Connect the MBI tests to the other tests
+  add_custom_target(tests-mbi COMMENT "Recompiling the MBI tests and tools.")
+  add_dependencies(tests tests-mbi)
+
+  file(GLOB cfiles RELATIVE ${CMAKE_BINARY_DIR}/MBI/tmp ${CMAKE_BINARY_DIR}/MBI/tmp/*.c )
+  foreach(cfile ${cfiles})
+    # Copy the generated files only if different
+    file(COPY_FILE ${CMAKE_BINARY_DIR}/MBI/tmp/${cfile} ${CMAKE_BINARY_DIR}/MBI/${cfile} ONLY_IF_DIFFERENT)
+    string(REGEX REPLACE "[.]c" "" basefile ${cfile})
+    
+    # Generate an executable for each of them
+    add_executable(mbi_${basefile} EXCLUDE_FROM_ALL ${CMAKE_BINARY_DIR}/MBI/${cfile})
+    target_link_libraries(mbi_${basefile} simgrid)
+    target_compile_options(mbi_${basefile} PRIVATE "-Wno-error")
+    set_target_properties(mbi_${basefile} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/MBI)
+    add_dependencies(tests-mbi mbi_${basefile})
+
+    # Generate a test case for each source file, using the MBI runner
+    ADD_TEST(NAME mbi-${basefile}
+             COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/MBI.py ${CMAKE_BINARY_DIR} ./mbi_${basefile} ${cfile}
+             WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/MBI)
+    SET_TESTS_PROPERTIES(mbi-${basefile} PROPERTIES DEPENDS simgrid-mc)
+  endforeach()
+
+  if("${CMAKE_BINARY_DIR}" STREQUAL "${CMAKE_HOME_DIRECTORY}")
+  else()
+    file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/MBIutils.py DESTINATION ${CMAKE_BINARY_DIR}/MBI) 
+  endif()
+endif()
+
+# Add the needed files to the distribution
+foreach(script ${generator_scripts})
+  set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${script})
+endforeach()
+
+set(teshsuite_src ${teshsuite_src}
+                  ${CMAKE_CURRENT_SOURCE_DIR}/generator_utils.py
+                  ${CMAKE_CURRENT_SOURCE_DIR}/MBI.py
+                  ${CMAKE_CURRENT_SOURCE_DIR}/MBIutils.py
+                  ${CMAKE_CURRENT_SOURCE_DIR}/simgrid.py
+                  PARENT_SCOPE)
\ No newline at end of file
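For reference, a sketch of how one might exercise the new suite from a build tree (not part of the commit; the option and target names come from the diff above, the paths and invocation are assumptions):

    # Sketch: configure with the MBI suite enabled, build the generated tests, run them.
    import subprocess
    subprocess.run("cmake -Denable_smpi=ON -Denable_model-checking=ON"
                   " -Denable_smpi_MBI_testsuite=ON .", shell=True, check=True)
    subprocess.run("make tests-mbi", shell=True, check=True)    # target defined above
    subprocess.run("ctest -R '^mbi-'", shell=True, check=True)  # tests are named mbi-<basefile>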
diff --git a/teshsuite/smpi/MBI/CollMatchingGenerator.py b/teshsuite/smpi/MBI/CollMatchingGenerator.py
new file mode 100755 (executable)
index 0000000..19bb925
--- /dev/null
@@ -0,0 +1,174 @@
+#! /usr/bin/env python3
+import sys
+from generator_utils import *
+
+template = """// @{generatedby}@
+/* ///////////////////////// The MPI Bugs Initiative ////////////////////////
+
+  Origin: MBI
+
+  Description: @{shortdesc}@
+    @{longdesc}@
+
+        Version of MPI: Conforms to MPI 1.1, does not require MPI 2 implementation
+
+BEGIN_MPI_FEATURES
+       P2P!basic: Lacking
+       P2P!nonblocking: Lacking
+       P2P!persistent: Lacking
+       COLL!basic: @{collfeature}@
+       COLL!nonblocking: @{icollfeature}@
+       COLL!persistent: Lacking
+       COLL!tools: Lacking
+       RMA: Lacking
+END_MPI_FEATURES
+
+BEGIN_MBI_TESTS
+  $ mpirun -np 2 ${EXE}
+  | @{outcome}@
+  | @{errormsg}@
+END_MBI_TESTS
+//////////////////////       End of MBI headers        /////////////////// */
+
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define buff_size 128
+
+int main(int argc, char **argv) {
+  int nprocs = -1;
+  int rank = -1;
+  int root = 0;
+
+  MPI_Init(&argc, &argv);
+  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  printf("Hello from rank %d \\n", rank);
+
+  if (nprocs < 2)
+    printf("MBI ERROR: This test needs at least 2 processes to produce a bug.\\n");
+
+  MPI_Comm newcom = MPI_COMM_WORLD;
+  MPI_Datatype type = MPI_INT;
+  MPI_Op op = MPI_SUM;
+
+  int dbs = sizeof(int)*nprocs; /* Size of the dynamic buffers for alltoall and friends */
+  @{init1}@
+  @{init2}@
+
+  if (@{change_cond}@) {
+    @{operation1a}@ /* MBIERROR1 */
+    @{fini1a}@
+    @{operation2a}@
+    @{fini2a}@
+  } else {
+    @{operation1b}@ /* MBIERROR2 */
+    @{fini1b}@
+    @{operation2b}@
+    @{fini2b}@
+  }
+
+  @{free1}@
+  @{free2}@
+  
+  MPI_Finalize();
+  printf("Rank %d finished normally\\n", rank);
+  return 0;
+}
+"""
+
+for c1 in coll + icoll + ibarrier:
+    for c2 in coll + icoll + ibarrier:
+        patterns = {'c1': c1, 'c2': c2}
+        patterns['generatedby'] = f'DO NOT EDIT: this file was generated by {sys.argv[0]}. DO NOT EDIT.'
+        patterns['collfeature'] = 'Yes' if c1 in coll or c2 in coll else 'Lacking'
+        patterns['icollfeature'] = 'Yes' if c1 in icoll + ibarrier or c2 in icoll + ibarrier else 'Lacking'
+        patterns['init1'] = init[c1]("1")
+        patterns['init2'] = init[c2]("2")
+        patterns['fini1a'] = fini[c1]("1")
+        patterns['fini2a'] = fini[c2]("2")
+        patterns['fini1b'] = fini[c1]("1")
+        patterns['fini2b'] = fini[c2]("2")
+        patterns['free1'] = free[c1]("1")
+        patterns['free2'] = free[c2]("2")
+        patterns['operation1a'] = operation[c1]("1")
+        patterns['operation1b'] = operation[c1]("1")
+        patterns['operation2a'] = operation[c2]("2")
+        patterns['operation2b'] = operation[c2]("2")
+        patterns['change_cond'] = 'rank % 2'
+
+        if c1 == c2:
+            # Generate the correct code using the same collective twice
+            replace = patterns
+            replace['shortdesc'] = 'Correct collective ordering'
+            replace['longdesc'] = f'All ranks call {c1} twice'
+            replace['outcome'] = 'OK'
+            replace['errormsg'] = ''
+            make_file(template, f'CallOrdering_{c1}_{c2}_ok.c', replace)
+            # Generate the correct code using the collective once
+            replace = patterns
+            replace['shortdesc'] = 'Correct collective ordering'
+            replace['longdesc'] = f'All ranks call {c1} once'
+            replace['outcome'] = 'OK'
+            replace['errormsg'] = ''
+            replace['init2'] = ''
+            replace['operation2a'] = ''
+            replace['operation2b'] = ''
+            replace['fini2a'] = ''
+            replace['fini2b'] = ''
+            replace['free2'] = ''
+            make_file(template, f'CallOrdering_{c1}_ok.c', replace)
+        else:
+            # Generate the correct ordering with two different collectives
+            replace = patterns
+            replace['shortdesc'] = 'Correct collective ordering'
+            replace['longdesc'] = f'All ranks call {c1} and then {c2}'
+            replace['outcome'] = 'OK'
+            replace['errormsg'] = ''
+            make_file(template, f'CallOrdering_{c1}_{c2}_ok.c', replace)
+            # Generate the incorrect ordering with two different collectives
+            replace = patterns
+            replace['shortdesc'] = 'Incorrect collective ordering'
+            replace['longdesc'] = f'Odd ranks call {c1} and then {c2} while even ranks call these collectives in the other order'
+            replace['outcome'] = 'ERROR: CallMatching'
+            replace['errormsg'] = 'Collective mismatch. @{c1}@ at @{filename}@:@{line:MBIERROR1}@ is matched with @{c2}@ at @{filename}@:@{line:MBIERROR2}@.'
+            replace['operation1b'] = operation[c2]("2")  # Inversion
+            replace['operation2b'] = operation[c1]("1")
+            replace['fini1a'] = fini[c1]("1") # Inversion
+            replace['fini2a'] = fini[c2]("2")
+            replace['fini1b'] = fini[c2]("2") # Inversion
+            replace['fini2b'] = fini[c1]("1")
+            replace['free1'] = free[c2]("2") 
+            replace['free2'] = free[c1]("1")
+
+            make_file(template, f'CallOrdering_{c1}_{c2}_nok.c', replace)
+
+    # Generate the incorrect ordering with one collective
+    replace = patterns
+    replace['shortdesc'] = 'Incorrect collective ordering'
+    replace['longdesc'] = f'Odd ranks call {c1} while even ranks do not call any collective'
+    replace['outcome'] = 'ERROR: CallMatching'
+    replace['errormsg'] = 'Collective mismatch. @{c1}@ at @{filename}@:@{line:MBIERROR1}@ is not matched.'
+    replace['operation1b'] = ''  # Remove functions
+    replace['operation2b'] = ''
+    replace['operation2a'] = ''
+    replace['fini1b'] = ''
+    replace['fini2a'] = ''
+    replace['fini2b'] = ''
+    make_file(template, f'CallOrdering_{c1}_none_nok.c', replace)
+    # Generate a correct ordering with a conditional not depending on ranks
+    replace = patterns
+    replace['shortdesc'] = 'Correct collective ordering'
+    replace['longdesc'] = f'All ranks call {c1} and then {c2}, or in the reverse order'
+    replace['outcome'] = 'OK'
+    replace['errormsg'] = ''
+    replace['change_cond'] = 'nprocs<256'
+    replace['operation2b'] = '' # Remove functions
+    replace['operation2a'] = ''
+    replace['fini2b'] = ''
+    replace['fini2a'] = ''
+    make_file(template, f'CallOrdering_{c1}_none_ok.c', replace)
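To make the substitution mechanics concrete, here is a minimal standalone rendition of the @{...}@ expansion that make_file() (defined in generator_utils.py below) applies to the template; the template string and collective names are arbitrary examples:

    import re
    template = 'All ranks call @{c1}@ and then @{c2}@'
    replace = {'c1': 'MPI_Barrier', 'c2': 'MPI_Ibarrier'}  # example values
    output = template
    while re.search(r'@\{([^@:]*)\}@', output):
        m = re.search(r'@\{([^@:]*)\}@', output)
        output = re.sub(r'@\{' + m.group(1) + r'\}@', replace[m.group(1)], output)
    print(output)  # All ranks call MPI_Barrier and then MPI_Ibarrier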
diff --git a/teshsuite/smpi/MBI/MBI.py b/teshsuite/smpi/MBI/MBI.py
new file mode 100755 (executable)
index 0000000..bed5d75
--- /dev/null
@@ -0,0 +1,49 @@
+#! /usr/bin/env python3
+
+# Copyright 2021-2022. The SimGrid Team. All rights reserved. 
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the license (GNU LGPL) which comes with this package.
+
+# This script runs a previously compiled MBI test and checks its result.
+# This file was written for the SimGrid project. The parts coming from the MBI project are MBIutils.py and simgrid.py.
+
+import sys
+import os
+import re
+
+from MBIutils import *
+import simgrid
+
+if len(sys.argv) != 4:
+    print(f"Usage: MBI.py binary source (received: {sys.argv})")
+    sys.exit(1)
+
+if not os.path.exists("cluster.xml"):
+    with open('cluster.xml', 'w') as outfile:
+        outfile.write("<?xml version='1.0'?>\n")
+        outfile.write("<!DOCTYPE platform SYSTEM \"https://simgrid.org/simgrid.dtd\">\n")
+        outfile.write('<platform version="4.1">\n')
+        outfile.write(' <cluster id="acme" prefix="node-" radical="0-99" suffix="" speed="1Gf" bw="125MBps" lat="50us"/>\n')
+        outfile.write('</platform>\n')
+
+
+tool = simgrid.Tool()
+
+(name, path, binary, filename) = sys.argv
+for test in parse_one_code(filename):
+    execcmd = re.sub("mpirun", f"{path}/smpi_script/bin/smpirun -wrapper '{path}/bin/simgrid-mc --log=mc_safety.t:info' -platform ./cluster.xml -analyze --cfg=smpi/finalization-barrier:on --cfg=smpi/list-leaks:10 --cfg=model-check/max-depth:10000", test['cmd'])
+    execcmd = re.sub(r'\${EXE}', binary, execcmd)
+    execcmd = re.sub(r'\$zero_buffer', "--cfg=smpi/buffering:zero", execcmd)
+    execcmd = re.sub(r'\$infty_buffer', "--cfg=smpi/buffering:infty", execcmd)
+
+    if os.path.exists(f'{filename}.txt'):
+        os.remove(f'{filename}.txt')
+    run_cmd(buildcmd="", execcmd=execcmd, cachefile=filename, filename=filename, binary=binary, timeout=300, batchinfo="", read_line_lambda=None)
+
+    (res_category, elapsed, diagnostic, outcome) = categorize(tool, "simgrid", filename, test['expect'])
+
+    if res_category != "TRUE_NEG" and res_category != "TRUE_POS":
+        print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n")
+        print(f"SimGrid gave the wrong result ({outcome} instead of {test['expect']}).")
+        sys.exit(1)
\ No newline at end of file
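As an illustration of the command rewriting above (the path and binary name are made up), a header line `$ mpirun -np 2 ${EXE}` ends up running smpirun under the model checker:

    import re
    path, binary = '/path/to/build', './mbi_CallOrdering_MPI_Barrier_ok'  # illustrative values
    cmd = 'mpirun -np 2 ${EXE}'
    cmd = re.sub('mpirun', f"{path}/smpi_script/bin/smpirun"
                           f" -wrapper '{path}/bin/simgrid-mc' -platform ./cluster.xml", cmd)
    cmd = re.sub(r'\$\{EXE\}', binary, cmd)
    print(cmd)
    # /path/to/build/smpi_script/bin/smpirun -wrapper '/path/to/build/bin/simgrid-mc' -platform ./cluster.xml -np 2 ./mbi_CallOrdering_MPI_Barrier_ok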
diff --git a/teshsuite/smpi/MBI/MBIutils.py b/teshsuite/smpi/MBI/MBIutils.py
new file mode 100644 (file)
index 0000000..4c56706
--- /dev/null
@@ -0,0 +1,323 @@
+# Copyright 2021-2022. The MBI project. All rights reserved. 
+# This program is free software; you can redistribute it and/or modify it under the terms of the license (GNU GPL).
+
+import os
+import time
+import subprocess
+import sys
+import re
+import shlex
+import select
+import signal
+import hashlib
+
+class AbstractTool:
+    def ensure_image(self, params=""):
+        """Verify that this is executed from the right docker image, and complain if not."""
+        if os.path.exists("/MBI") or os.path.exists("trust_the_installation"):
+            print("This seems to be a MBI docker image. Good.")
+        else:
+            print("Please run this script in a MBI docker image. Run these commands:")
+            print("  docker build -f Dockerfile -t mpi-bugs-initiative:latest . # Only the first time")
+            print(f"  docker run -it --rm --name MIB --volume $(pwd):/MBI mpi-bugs-initiative /MBI/MBI.py {params}")
+            sys.exit(1)
+
+    def build(self, rootdir, cached=True):
+        """Rebuilds the tool binaries. By default, we try to reuse the existing build."""
+        print ("Nothing to do to rebuild the tool binaries.")
+
+    def setup(self, rootdir):
+        """
+        Ensure that this tool (previously built) is usable in this environment: setup the PATH, etc.
+        This is called only once for all tests, from the logs directory.
+        """
+        pass
+
+    def run(self, execcmd, filename, binary, id, timeout, batchinfo):
+        """Compile that test code and analyse it with the Tool if needed (a cache system should be used)"""
+        pass
+
+    def teardown(self):
+        """
+        Clean the results of all test runs: remove temp files and binaries.
+        This is called only once for all tests, from the logs directory.
+        """
+        pass
+
+    def parse(self, cachefile):
+        """Read the result of a previous run from the cache, and compute the test outcome"""
+        return 'failure'
+
+# Associate all possible detailed outcomes with a given error scope. Scopes must be sorted alphabetically.
+possible_details = {
+    # scope limited to one call
+    'InvalidBuffer':'AInvalidParam', 'InvalidCommunicator':'AInvalidParam', 'InvalidDatatype':'AInvalidParam', 'InvalidRoot':'AInvalidParam', 'InvalidTag':'AInvalidParam', 'InvalidWindow':'AInvalidParam', 'InvalidOperator':'AInvalidParam', 'InvalidOtherArg':'AInvalidParam', 'ActualDatatype':'AInvalidParam',
+    'InvalidSrcDest':'AInvalidParam', 
+    # scope: Process-wide
+#    'OutOfInitFini':'BInitFini', 
+    'CommunicatorLeak':'BResLeak', 'DatatypeLeak':'BResLeak', 'GroupLeak':'BResLeak', 'OperatorLeak':'BResLeak', 'TypeLeak':'BResLeak', 'RequestLeak':'BResLeak',
+    'MissingStart':'BReqLifecycle', 'MissingWait':'BReqLifecycle',
+    'LocalConcurrency':'BLocalConcurrency',
+    # scope: communicator
+    'CallMatching':'DMatch', 
+    'CommunicatorMatching':'CMatch', 'DatatypeMatching':'CMatch', 'OperatorMatching':'CMatch', 'RootMatching':'CMatch', 'TagMatching':'CMatch',
+    'MessageRace':'DRace', 
+    
+    'GlobalConcurrency':'DGlobalConcurrency',
+    # larger scope
+#    'BufferingHazard':'EBufferingHazard',
+    'OK':'FOK'}
+
+error_scope = {
+    'AInvalidParam':'single call',
+    'BResLeak':'single process',
+#    'BInitFini':'single process',
+    'BReqLifecycle':'single process',
+    'BLocalConcurrency':'single process',
+    'CMatch':'multi-processes',
+    'DRace':'multi-processes',
+    'DMatch':'multi-processes',
+    'DGlobalConcurrency':'multi-processes',
+#    'EBufferingHazard':'system',
+    'FOK':'correct executions'
+}
+
+displayed_name = {
+    'AInvalidParam':'Invalid parameter',
+    'BResLeak':'Resource leak',
+#    'BInitFini':'MPI call before initialization/after finalization',
+    'BReqLifecycle':'Request lifecycle',
+    'BLocalConcurrency':'Local concurrency',
+    'CMatch':'Parameter matching',
+    'DMatch':"Call ordering",
+    'DRace':'Message race',
+    'DGlobalConcurrency':'Global concurrency',
+    'EBufferingHazard':'Buffering hazard',
+    'FOK':"Correct execution",
+
+    'aislinn':'Aislinn','civl':'CIVL','hermes':'Hermes', 'isp':'ISP','itac':'ITAC', 'simgrid':'Mc SimGrid', 'smpi':'SMPI','smpivg':'SMPI+VG', 'mpisv':'MPI-SV', 'must':'MUST', 'parcoach':'PARCOACH'
+}
+
+def parse_one_code(filename):
+    """
+    Reads the header of the provided filename, and extract a list of todo item, each of them being a (cmd, expect, test_num) tupple.
+    The test_num is useful to build a log file containing both the binary and the test_num, when there is more than one test in the same binary.
+    """
+    res = []
+    test_num = 0
+    with open(filename, "r") as input:
+        state = 0  # 0: before header; 1: in header; 2; after header
+        line_num = 1
+        for line in input:
+            if re.match(".*BEGIN_MBI_TESTS.*", line):
+                if state == 0:
+                    state = 1
+                else:
+                    raise Exception(f"MBI_TESTS header appears a second time at line {line_num}: \n{line}")
+            elif re.match(".*END_MBI_TESTS.*", line):
+                if state == 1:
+                    state = 2
+                else:
+                    raise Exception(f"Unexpected end of MBI_TESTS header at line {line_num}: \n{line}")
+            if state == 1 and re.match(r"\s+\$ ?.*", line):
+                m = re.match(r'\s+\$ ?(.*)', line)
+                cmd = m.group(1)
+                nextline = next(infile)
+                detail = 'OK'
+                if re.match('[ |]*OK *', nextline):
+                    expect = 'OK'
+                else:
+                    m = re.match('[ |]*ERROR: *(.*)', nextline)
+                    if not m:
+                        raise Exception(
+                            f"\n{filename}:{line_num}: MBI parse error: Test not followed by a proper 'ERROR' line:\n{line}{nextline}")
+                    expect = 'ERROR'
+                    detail = m.group(1)
+                    if detail not in possible_details:
+                        raise Exception(
+                            f"\n{filename}:{line_num}: MBI parse error: Detailled outcome {detail} is not one of the allowed ones.")
+                test = {'filename': filename, 'id': test_num, 'cmd': cmd, 'expect': expect, 'detail': detail}
+                res.append(test.copy())
+                test_num += 1
+
+    if state == 0:
+        raise Exception(f"MBI_TESTS header not found in file '{filename}'.")
+    if state == 1:
+        raise Exception(f"MBI_TESTS header not properly ended in file '{filename}'.")
+
+    if len(res) == 0:
+        raise Exception(f"No test found in {filename}. Please fix it.")
+    return res
+
+def categorize(tool, toolname, test_ID, expected):
+    outcome = tool.parse(test_ID)
+
+    if not os.path.exists(f'{test_ID}.elapsed') and not os.path.exists(f'logs/{toolname}/{test_ID}.elapsed'):
+        if outcome == 'failure':
+            elapsed = 0
+        else:
+            raise Exception(f"Invalid test result: {test_ID}.txt exists but not {test_ID}.elapsed")
+    else:
+        with open(f'{test_ID}.elapsed' if os.path.exists(f'{test_ID}.elapsed') else f'logs/{toolname}/{test_ID}.elapsed', 'r') as infile:
+            elapsed = infile.read()
+
+    # Properly categorize this run
+    if outcome == 'timeout':
+        res_category = 'timeout'
+        if elapsed is None:
+            diagnostic = f'hard timeout'
+        else:
+            diagnostic = f'timeout after {elapsed} sec'
+    elif outcome == 'failure':
+        res_category = 'failure'
+        diagnostic = f'tool error, or test not run'
+    elif outcome == 'UNIMPLEMENTED':
+        res_category = 'unimplemented'
+        diagnostic = f'coverage issue'
+    elif outcome == 'other':
+        res_category = 'other'
+        diagnostic = f'inconclusive run'
+    elif expected == 'OK':
+        if outcome == 'OK':
+            res_category = 'TRUE_NEG'
+            diagnostic = f'correctly reported no error'
+        else:
+            res_category = 'FALSE_POS'
+            diagnostic = f'reported an error in a correct code'
+    elif expected == 'ERROR':
+        if outcome == 'OK':
+            res_category = 'FALSE_NEG'
+            diagnostic = f'failed to detect an error'
+        else:
+            res_category = 'TRUE_POS'
+            diagnostic =  f'correctly detected an error'
+    else:
+        raise Exception(f"Unexpected expectation: {expected} (must be OK or ERROR)")
+
+    return (res_category, elapsed, diagnostic, outcome)
+
+
+def run_cmd(buildcmd, execcmd, cachefile, filename, binary, timeout, batchinfo, read_line_lambda=None):
+    """
+    Runs the test on need. Returns True if the test was ran, and False if it was cached.
+    
+    The result is cached if possible, and the test is rerun only if the `test.txt` (containing the tool output) or the `test.elapsed` (containing the timing info) do not exist, or if `test.md5sum` (containing the md5sum of the code to compile) does not match.
+
+    Parameters:
+     - buildcmd and execcmd are shell commands to run. buildcmd can be any shell line (incuding && groups), but execcmd must be a single binary to run. 
+     - cachefile is the name of the test
+     - filename is the source file containing the code
+     - binary the file name in which to compile the code
+     - batchinfo: something like "1/1" to say that this run is the only batch (see -b parameter of MBI.py)
+     - read_line_lambda: a lambda to which each line of the tool output is feed ASAP. It allows MUST to interrupt the execution when a deadlock is reported.
+    """
+    if os.path.exists(f'{cachefile}.txt') and os.path.exists(f'{cachefile}.elapsed') and os.path.exists(f'{cachefile}.md5sum'):
+        hash_md5 = hashlib.md5()
+        with open(filename, 'rb') as sourcefile:
+            for chunk in iter(lambda: sourcefile.read(4096), b""):
+                hash_md5.update(chunk)
+        newdigest = hash_md5.hexdigest()
+        with open(f'{cachefile}.md5sum', 'r') as md5file:
+            olddigest = md5file.read()
+        #print(f'Old digest: {olddigest}; New digest: {newdigest}')
+        if olddigest == newdigest:
+            print(f" (result cached -- digest: {olddigest})")
+            return False
+        else:
+            os.remove(f'{cachefile}.txt')
+
+    print(f"Wait up to {timeout} seconds")
+
+    start_time = time.time()
+    if buildcmd is None:
+        output = f"No need to compile {binary}.c (batchinfo:{batchinfo})\n\n"
+    else:
+        output = f"Compiling {binary}.c (batchinfo:{batchinfo})\n\n"
+        output += f"$ {buildcmd}\n"
+
+        compil = subprocess.run(buildcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        if compil.stdout is not None:
+            output += str(compil.stdout, errors='replace')
+        if compil.returncode != 0:
+            output += f"Compilation of {binary}.c raised an error (retcode: {compil.returncode})"
+            for line in (output.split('\n')):
+                print(f"| {line}", file=sys.stderr)
+            with open(f'{cachefile}.elapsed', 'w') as outfile:
+                outfile.write(str(time.time() - start_time))
+            with open(f'{cachefile}.txt', 'w') as outfile:
+                outfile.write(output)
+            return True
+
+    output += f"\n\nExecuting the command\n $ {execcmd}\n"
+    for line in (output.split('\n')):
+        print(f"| {line}", file=sys.stderr)
+
+    # We run the subprocess and parse its output line by line, so that we can kill it as soon as it detects a timeout
+    process = subprocess.Popen(shlex.split(execcmd), stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT, preexec_fn=os.setsid)
+    poll_obj = select.poll()
+    poll_obj.register(process.stdout, select.POLLIN)
+
+    pid = process.pid
+    pgid = os.getpgid(pid)  # We need that to forcefully kill subprocesses when leaving
+    outcome = None
+    while True:
+        if poll_obj.poll(5):  # Something to read? Do check the timeout status every 5 sec if not
+            line = process.stdout.readline()
+            # From byte array to string, replacing non-representable strings with question marks
+            line = str(line, errors='replace')
+            output = output + line
+            print(f"| {line}", end='', file=sys.stderr)
+            if read_line_lambda is not None:
+                read_line_lambda(line, process)
+        if time.time() - start_time > timeout:
+            outcome = 'timeout'
+            with open(f'{cachefile}.timeout', 'w') as outfile:
+                outfile.write(f'{time.time() - start_time} seconds')
+            break
+        if process.poll() is not None:  # The subprocess ended. Grab all existing output, and return
+            line = 'more'
+            while line is not None and line != '':
+                line = process.stdout.readline()
+                if line is not None:
+                    # From byte array to string, replacing non-representable strings with question marks
+                    line = str(line, errors='replace')
+                    output = output + line
+                    print(f"| {line}", end='', file=sys.stderr)
+
+            break
+
+    # We want to clean all forked processes in all cases, no matter whether they are still running (timeout) or supposed to be off. The runners easily get clogged with zombies :(
+    try:
+        os.killpg(pgid, signal.SIGTERM)  # Terminate all forked processes, to make sure it's clean whatever the tool does
+        process.terminate()  # No op if it's already stopped but useful on timeouts
+        time.sleep(0.2)  # allow some time for the tool to finish its children
+        os.killpg(pgid, signal.SIGKILL)  # Finish 'em all, manually
+        os.kill(pid, signal.SIGKILL)  # die! die! die!
+    except ProcessLookupError:
+        pass  # OK, it's gone now
+
+    elapsed = time.time() - start_time
+
+    rc = process.poll()
+    if rc < 0:
+        status = f"Command killed by signal {-rc}, elapsed time: {elapsed}\n"
+    else:
+        status = f"Command return code: {rc}, elapsed time: {elapsed}\n"
+    print(status)
+    output += status
+
+    with open(f'{cachefile}.elapsed', 'w') as outfile:
+        outfile.write(str(elapsed))
+
+    with open(f'{cachefile}.txt', 'w') as outfile:
+        outfile.write(output)
+    with open(f'{cachefile}.md5sum', 'w') as outfile:
+        hash_md5 = hashlib.md5()
+        with open(filename, 'rb') as sourcefile:
+            for chunk in iter(lambda: sourcefile.read(4096), b""):
+                hash_md5.update(chunk)
+        outfile.write(hash_md5.hexdigest())
+    
+    return True
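A quick sketch of parse_one_code() in action on the header format used by the generated tests (the file name and content are a minimal made-up example):

    from MBIutils import parse_one_code

    with open('demo.c', 'w') as f:
        f.write('BEGIN_MBI_TESTS\n'
                '  $ mpirun -np 2 ${EXE}\n'
                '  | ERROR: CallMatching\n'
                'END_MBI_TESTS\n')
    for test in parse_one_code('demo.c'):
        print(test['cmd'], test['expect'], test['detail'])
        # mpirun -np 2 ${EXE} ERROR CallMatching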
diff --git a/teshsuite/smpi/MBI/generator_utils.py b/teshsuite/smpi/MBI/generator_utils.py
new file mode 100644 (file)
index 0000000..e978b1e
--- /dev/null
@@ -0,0 +1,477 @@
+# Copyright 2021-2022. The MBI project. All rights reserved. 
+# This program is free software; you can redistribute it and/or modify it under the terms of the license (GNU GPL).
+
+# This is a simple templating system, dedicated to the systematic generation of MPI source code
+
+import os
+import re
+
+# Collectives
+coll = ['MPI_Barrier','MPI_Bcast', 'MPI_Reduce', 'MPI_Gather', 'MPI_Scatter', 'MPI_Scan', 'MPI_Exscan', 'MPI_Allgather', 'MPI_Allreduce', 'MPI_Allgatherv', 'MPI_Alltoall', 'MPI_Alltoallv']
+icoll = ['MPI_Ibcast', 'MPI_Ireduce', 'MPI_Igather', 'MPI_Iscatter', 'MPI_Iscan', 'MPI_Iexscan', 'MPI_Iallgather', 'MPI_Iallreduce', 'MPI_Iallgatherv', 'MPI_Ialltoall', 'MPI_Ialltoallv']
+ibarrier = ['MPI_Ibarrier']
+coll4op = ['MPI_Reduce', 'MPI_Allreduce']
+icoll4op = ['MPI_Ireduce', 'MPI_Iallreduce']
+coll4root =  ['MPI_Reduce', 'MPI_Bcast', 'MPI_Gather', 'MPI_Scatter']
+icoll4root = ['MPI_Ireduce', 'MPI_Ibcast', 'MPI_Igather', 'MPI_Iscatter']
+pcoll = []
+tcoll = ['MPI_Comm_split', 'MPI_Op_create', 'MPI_Comm_group', 'MPI_Comm_dup', 'MPI_Type_contiguous', 'MPI_Comm_create', 'MPI_Group_excl']
+tcoll4color = ['MPI_Comm_split'] 
+tcoll4topo = ['MPI_Cart_get']
+
+# P2P
+allsend = ['MPI_Send', 'MPI_Isend', 'MPI_Ssend', 'MPI_Bsend','MPI_Send_init']
+allrecv = ['MPI_Recv', 'MPI_Irecv', 'MPI_Recv_init'] 
+send = ['MPI_Send']
+ssend = ['MPI_Ssend']
+bsend = ['MPI_Bsend']
+isend = ['MPI_Isend']
+psend = ['MPI_Send_init']
+recv = ['MPI_Recv'] 
+irecv = ['MPI_Irecv'] 
+precv = ['MPI_Recv_init'] 
+probe = ['MPI_Probe']
+
+# RMA
+epoch = ['MPI_Win_fence', 'MPI_Win_lock', 'MPI_Win_lock_all']
+rma = ['MPI_Get', 'MPI_Put']
+get = ['MPI_Get']
+rget = ['MPI_RGet']
+put = ['MPI_Put'] 
+rput = ['MPI_RPut'] 
+store = ['store']
+load = ['load']
+rstore = ['rstore']
+rload = ['rload']
+loadstore = ['loadstore']
+
+
+# setup
+init = {}
+start = {}
+operation = {}
+fini = {}
+free = {} 
+write = {}
+error = {}
+epoch = {}
+finEpoch = {}
+
+
+### COLL:basic
+
+init['MPI_Bcast'] = lambda n: f'int buf{n}[buff_size];'
+start['MPI_Bcast'] = lambda n: ""
+operation['MPI_Bcast'] = lambda n: f'MPI_Bcast(buf{n}, buff_size, type, root, newcom);'
+fini['MPI_Bcast'] = lambda n: ""
+free['MPI_Bcast'] = lambda n: ""
+write['MPI_Bcast'] = lambda n: ""
+
+init['MPI_Barrier'] = lambda n: ""
+start['MPI_Barrier'] = lambda n: ""
+operation['MPI_Barrier'] = lambda n: 'MPI_Barrier(newcom);'
+fini['MPI_Barrier'] = lambda n: ""
+free['MPI_Barrier'] = lambda n: ""
+write['MPI_Barrier'] = lambda n: ""
+
+init['MPI_Reduce'] = lambda n: f"int sum{n}, val{n} = 1;"
+start['MPI_Reduce'] = lambda n: ""
+operation['MPI_Reduce'] = lambda n: f"MPI_Reduce(&val{n}, &sum{n}, 1, type, op, root, newcom);"
+fini['MPI_Reduce'] = lambda n: ""
+free['MPI_Reduce'] = lambda n: ""
+write['MPI_Reduce'] = lambda n: ""
+
+init['MPI_Gather'] = lambda n: f"int val{n}=1, buf{n}[buff_size];"
+start['MPI_Gather'] = lambda n: ""
+operation['MPI_Gather'] = lambda n: f"MPI_Gather(&val{n}, 1, type, buf{n},1, type, root, newcom);"
+fini['MPI_Gather'] = lambda n: ""
+free['MPI_Gather'] = lambda n: ""
+write['MPI_Gather'] = lambda n: ""
+
+init['MPI_Scatter'] = lambda n: f"int val{n}, buf{n}[buff_size];"
+start['MPI_Scatter'] = lambda n: ""
+operation['MPI_Scatter'] = lambda n: f"MPI_Scatter(&buf{n}, 1, type, &val{n}, 1, type, root, newcom);"
+fini['MPI_Scatter'] = lambda n: ""
+free['MPI_Scatter'] = lambda n: ""
+write['MPI_Scatter'] = lambda n: ""
+
+init['MPI_Allreduce'] = lambda n: f"int sum{n}, val{n} = 1;"
+start['MPI_Allreduce'] = lambda n: ""
+operation['MPI_Allreduce'] = lambda n: f"MPI_Allreduce(&val{n}, &sum{n}, 1, type, op, newcom);"
+fini['MPI_Allreduce'] = lambda n: ""
+free['MPI_Allreduce'] = lambda n: ""
+write['MPI_Allreduce'] = lambda n: ""
+
+init['MPI_Scan'] = lambda n: f"int outbuf{n}[buff_size], inbuf{n}[buff_size];"
+start['MPI_Scan'] = lambda n: ""
+operation['MPI_Scan'] = lambda n: f"MPI_Scan(&outbuf{n}, inbuf{n}, buff_size, type, op, newcom);"
+fini['MPI_Scan'] = lambda n: ""
+free['MPI_Scan'] = lambda n: ""
+write['MPI_Scan'] = lambda n: ""
+
+init['MPI_Exscan'] = lambda n: f"int outbuf{n}[buff_size], inbuf{n}[buff_size];"
+start['MPI_Exscan'] = lambda n: ""
+operation['MPI_Exscan'] = lambda n: f"MPI_Exscan(&outbuf{n}, inbuf{n}, buff_size, type, op, newcom);"
+fini['MPI_Exscan'] = lambda n: ""
+free['MPI_Exscan'] = lambda n: ""
+write['MPI_Exscan'] = lambda n: ""
+
+init['MPI_Allgather'] = lambda n: f"int val{n}=1, *rbuf{n} = (int*)malloc(dbs);"
+start['MPI_Allgather'] = lambda n: "" 
+operation['MPI_Allgather'] = lambda n: f"MPI_Allgather(&val{n}, 1, type, rbuf{n}, 1, type, newcom);"
+fini['MPI_Allgather'] = lambda n: ""
+free['MPI_Allgather'] = lambda n: f"free(rbuf{n});" 
+write['MPI_Allgather'] = lambda n: "" 
+
+init['MPI_Alltoallv'] = lambda n: (f"int *sbuf{n}=(int*)malloc(dbs*2), *rbuf{n}=(int*)malloc(dbs*2), *scounts{n}=(int*)malloc(dbs), *rcounts{n}=(int*)malloc(dbs), *sdispls{n}=(int*)malloc(dbs), *rdispls{n}=(int*)malloc(dbs);\n"
+  +  "  for (int i = 0; i < nprocs; i++) {\n"
+  + f"    scounts{n}[i] = 2;\n"
+  + f"    rcounts{n}[i] = 2;\n"
+  + f"    sdispls{n}[i] = (nprocs - (i + 1)) * 2;\n"
+  + f"    rdispls{n}[i] = i * 2;\n"
+  +  "  }")
+start['MPI_Alltoallv'] = lambda n: "" 
+operation['MPI_Alltoallv'] = lambda n: f"MPI_Alltoallv(sbuf{n}, scounts{n}, sdispls{n}, type, rbuf{n}, rcounts{n}, rdispls{n}, type, newcom);"
+fini['MPI_Alltoallv'] = lambda n: "" 
+free['MPI_Alltoallv'] = lambda n: f"free(sbuf{n});free(rbuf{n});free(scounts{n});free(rcounts{n});free(sdispls{n});free(rdispls{n});"
+write['MPI_Alltoallv'] = lambda n: "" 
+
+init['MPI_Alltoall'] = lambda n: f"int *sbuf{n} = (int*)malloc(dbs), *rbuf{n} = (int*)malloc(dbs);"
+start['MPI_Alltoall'] = lambda n: "" 
+operation['MPI_Alltoall'] = lambda n: f"MPI_Alltoall(sbuf{n}, 1, type, rbuf{n}, 1, type, newcom);"
+fini['MPI_Alltoall'] = lambda n: "" 
+free['MPI_Alltoall'] = lambda n: f"free(sbuf{n});free(rbuf{n});"
+write['MPI_Alltoall'] = lambda n: "" 
+
+init['MPI_Allgatherv'] = lambda n: (f"int *rbuf{n} = (int*)malloc(dbs*2), *rcounts{n}=(int*)malloc(dbs),  *displs{n}=(int*)malloc(dbs);\n" 
+  +  "  for (int i = 0; i < nprocs; i++) {\n"
+  + f"    rcounts{n}[i] = 1;\n"
+  + f"    displs{n}[i] = 2 * (nprocs - (i + 1));\n"
+  +  "  }")
+start['MPI_Allgatherv'] = lambda n: "" 
+operation['MPI_Allgatherv'] = lambda n: f"MPI_Allgatherv(&rank, 1, type, rbuf{n}, rcounts{n}, displs{n}, type, newcom);"
+fini['MPI_Allgatherv'] = lambda n: "" 
+free['MPI_Allgatherv'] = lambda n: f"free(rbuf{n});free(rcounts{n});free(displs{n});"
+write['MPI_Allgatherv'] = lambda n: "" 
+
+
+### COLL:nonblocking
+
+init['MPI_Ibarrier'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL; MPI_Status stat{n};"
+start['MPI_Ibarrier'] = lambda n: ""
+operation['MPI_Ibarrier'] = lambda n: f'MPI_Ibarrier(newcom, &req{n});'
+fini['MPI_Ibarrier'] = lambda n: f"MPI_Wait(&req{n}, &stat{n});"
+free['MPI_Ibarrier'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Ibarrier'] = lambda n: ""
+
+init['MPI_Ireduce'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL; MPI_Status stat{n}; int sum{n}, val{n} = 1;"
+start['MPI_Ireduce'] = lambda n: ""
+operation['MPI_Ireduce'] = lambda n: f"MPI_Ireduce(&val{n}, &sum{n}, 1, type, op, root, newcom, &req{n});"
+fini['MPI_Ireduce'] = lambda n: f"MPI_Wait(&req{n}, &stat{n});" 
+free['MPI_Ireduce'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Ireduce'] = lambda n: f"sum{n}++;"
+
+init['MPI_Iallreduce'] = lambda n: f'MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status stat{n}; int sum{n}, val{n} = 1;'
+start['MPI_Iallreduce'] = lambda n: ""
+operation['MPI_Iallreduce'] = lambda n: f'MPI_Iallreduce(&val{n}, &sum{n}, 1, type, op, newcom, &req{n});'
+fini['MPI_Iallreduce'] = lambda n: f'MPI_Wait(&req{n}, &stat{n});'
+free['MPI_Iallreduce'] = lambda n: f"if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});"
+write['MPI_Iallreduce'] = lambda n: f"sum{n}++;"
+
+init['MPI_Ibcast'] = lambda n: f'MPI_Request req{n}=MPI_REQUEST_NULL; MPI_Status sta{n};int buf{n}[buff_size];'
+start['MPI_Ibcast'] = lambda n: ""
+operation['MPI_Ibcast'] = lambda n: f'MPI_Ibcast(buf{n}, buff_size, type, root, newcom, &req{n});'
+fini['MPI_Ibcast'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Ibcast'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Ibcast'] = lambda n: f'buf{n}[0]++;'
+
+init['MPI_Igather'] = lambda n: f"int val{n}=1, buf{n}[buff_size];MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};"
+start['MPI_Igather'] = lambda n: "" 
+operation['MPI_Igather'] = lambda n: f'MPI_Igather(&val{n}, 1, type, &buf{n},1, type, root, newcom, &req{n});'
+write['MPI_Igather'] = lambda n: f'val{n}=3;'
+fini['MPI_Igather'] = lambda n: f'MPI_Wait(&req{n},&sta{n});'
+free['MPI_Igather'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});' 
+
+init['MPI_Iscatter'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};int val{n}, buf{n}[buff_size];"
+start['MPI_Iscatter'] = lambda n: ""
+operation['MPI_Iscatter'] = lambda n: f"MPI_Iscatter(&buf{n}, 1, type, &val{n}, 1, type, root, newcom,&req{n});"
+fini['MPI_Iscatter'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Iscatter'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Iscatter'] = lambda n: f'buf{n}[0]++;'
+
+init['MPI_Iscan'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n}; int outbuf{n}[buff_size], inbuf{n}[buff_size];"
+start['MPI_Iscan'] = lambda n: ""
+operation['MPI_Iscan'] = lambda n: f"MPI_Iscan(&outbuf{n}, inbuf{n}, buff_size, type, op, newcom,&req{n});"
+fini['MPI_Iscan'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Iscan'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Iscan'] = lambda n: f'outbuf{n}[0]++;'
+
+init['MPI_Iexscan'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};int outbuf{n}[buff_size], inbuf{n}[buff_size];"
+start['MPI_Iexscan'] = lambda n: ""
+operation['MPI_Iexscan'] = lambda n: f"MPI_Iexscan(&outbuf{n}, inbuf{n}, buff_size, type, op, newcom,&req{n});"
+fini['MPI_Iexscan'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Iexscan'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Iexscan'] = lambda n: f'outbuf{n}[0]++;'
+
+init['MPI_Iallgather'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};int val{n}=1, *rbuf{n} = (int*)malloc(dbs);"
+start['MPI_Iallgather'] = lambda n: "" 
+operation['MPI_Iallgather'] = lambda n: f"MPI_Iallgather(&val{n}, 1, type, rbuf{n}, 1, type, newcom,&req{n});"
+fini['MPI_Iallgather'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Iallgather'] = lambda n: f"free(rbuf{n});" 
+write['MPI_Iallgather'] = lambda n: f'val{n}++;'
+
+init['MPI_Iallgatherv'] = lambda n: (f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};int *rbuf{n} = (int*)malloc(dbs*2), *rcounts{n}=(int*)malloc(dbs),  *displs{n}=(int*)malloc(dbs);\n" 
+  +  "  for (int i = 0; i < nprocs; i++) {\n"
+  + f"    rcounts{n}[i] = 1;\n"
+  + f"    displs{n}[i] = 2 * (nprocs - (i + 1));\n"
+  +  "  }")
+start['MPI_Iallgatherv'] = lambda n: "" 
+operation['MPI_Iallgatherv'] = lambda n: f"MPI_Iallgatherv(&rank, 1, type, rbuf{n}, rcounts{n}, displs{n}, type, newcom,&req{n});"
+fini['MPI_Iallgatherv'] = lambda n: f"MPI_Wait(&req{n},&sta{n});" 
+free['MPI_Iallgatherv'] = lambda n: f"free(rbuf{n});free(rcounts{n});free(displs{n});"
+write['MPI_Iallgatherv'] = lambda n: f"rbuf{n}[0]++;" 
+
+init['MPI_Ialltoall'] = lambda n: f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};int *sbuf{n} = (int*)malloc(dbs), *rbuf{n} = (int*)malloc(dbs);"
+start['MPI_Ialltoall'] = lambda n: "" 
+operation['MPI_Ialltoall'] = lambda n: f"MPI_Ialltoall(sbuf{n}, 1, type, rbuf{n}, 1, type, newcom, &req{n});"
+fini['MPI_Ialltoall'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Ialltoall'] = lambda n: f"free(sbuf{n});free(rbuf{n});"
+write['MPI_Ialltoall'] = lambda n: f"rbuf{n}[0]++;"
+
+init['MPI_Ialltoallv'] = lambda n: (f"MPI_Request req{n}=MPI_REQUEST_NULL;MPI_Status sta{n};int *sbuf{n}=(int*)malloc(dbs*2), *rbuf{n}=(int*)malloc(dbs*2), *scounts{n}=(int*)malloc(dbs), *rcounts{n}=(int*)malloc(dbs), *sdispls{n}=(int*)malloc(dbs), *rdispls{n}=(int*)malloc(dbs);\n"
+  +  "  for (int i = 0; i < nprocs; i++) {\n"
+  + f"    scounts{n}[i] = 2;\n"
+  + f"    rcounts{n}[i] = 2;\n"
+  + f"    sdispls{n}[i] = (nprocs - (i + 1)) * 2;\n"
+  + f"    rdispls{n}[i] = i * 2;\n"
+  +  "  }")
+start['MPI_Ialltoallv'] = lambda n: "" 
+operation['MPI_Ialltoallv'] = lambda n: f"MPI_Ialltoallv(sbuf{n}, scounts{n}, sdispls{n}, type, rbuf{n}, rcounts{n}, rdispls{n}, type, newcom,&req{n});"
+fini['MPI_Ialltoallv'] = lambda n: f"MPI_Wait(&req{n},&sta{n});"
+free['MPI_Ialltoallv'] = lambda n: f"free(sbuf{n});free(rbuf{n});free(scounts{n});free(rcounts{n});free(sdispls{n});free(rdispls{n});"
+write['MPI_Ialltoallv'] = lambda n: f"rbuf{n}[0]++;"
+
+### COLL:persistent
+
+
+
+### COLL:tools
+
+init['MPI_Comm_split'] = lambda n: f'MPI_Comm com[size]; color = rank % 2; int key = 1;'
+start['MPI_Comm_split'] = lambda n: ""
+operation['MPI_Comm_split'] = lambda n: 'MPI_Comm_split(MPI_COMM_WORLD,color,key, &com[j]);'
+error['MPI_Comm_split'] = 'CommunicatorLeak'
+fini['MPI_Comm_split'] = lambda n: "if(com[j] != MPI_COMM_NULL) MPI_Comm_free(&com[j]);"
+free['MPI_Comm_split'] = lambda n: ""
+
+
+init['MPI_Cart_get'] = lambda n: ""
+start['MPI_Cart_get'] = lambda n: ""
+operation['MPI_Cart_get'] = lambda n: f'MPI_Cart_get(newcom, 2, dims, periods, coords);'
+write['MPI_Cart_get'] = lambda n: ""
+fini['MPI_Cart_get'] = lambda n: ""
+free['MPI_Cart_get'] = lambda n: ""
+
+
+init['MPI_Op_create'] = lambda n: 'MPI_Op op[size];'
+operation['MPI_Op_create'] = lambda n: 'MPI_Op_create((MPI_User_function *)myOp, 0, &op[j]);'
+error['MPI_Op_create'] = 'OperatorLeak'
+fini['MPI_Op_create'] = lambda n: "MPI_Op_free(&op[j]);"
+free['MPI_Op_create'] = lambda n: ""
+
+init['MPI_Comm_group'] = lambda n: 'MPI_Group grp[size];'
+operation['MPI_Comm_group'] = lambda n: 'MPI_Comm_group(MPI_COMM_WORLD, &grp[j]);'
+error['MPI_Comm_group'] = 'GroupLeak'
+fini['MPI_Comm_group'] = lambda n: "MPI_Group_free(&grp[j]);"
+free['MPI_Comm_group'] = lambda n: "" 
+
+init['MPI_Group_excl'] = lambda n: 'MPI_Group worldgroup, grp[size];\n MPI_Comm_group(MPI_COMM_WORLD, &worldgroup);'
+operation['MPI_Group_excl'] = lambda n: 'MPI_Group_excl(worldgroup, 1, &rank, &grp[j]);' 
+error['MPI_Group_excl'] = 'GroupLeak'
+fini['MPI_Group_excl'] = lambda n: "MPI_Group_free(&grp[j]);"
+free['MPI_Group_excl'] = lambda n: "MPI_Group_free(&worldgroup);"
+
+init['MPI_Comm_create'] = lambda n: 'MPI_Comm com[size]; MPI_Group grp[size];'
+operation['MPI_Comm_create'] = lambda n: 'MPI_Comm_group(MPI_COMM_WORLD, &grp[j]);\n MPI_Comm_create(MPI_COMM_WORLD, grp[j], &com[j]);\n MPI_Group_free(&grp[j]);'
+error['MPI_Comm_create'] = 'CommunicatorLeak'
+fini['MPI_Comm_create'] = lambda n: "MPI_Comm_free(&com[j]);"
+free['MPI_Comm_create'] = lambda n: ""
+
+init['MPI_Comm_dup'] = lambda n: f'MPI_Comm com[size];'
+operation['MPI_Comm_dup'] = lambda n: 'MPI_Comm_dup(MPI_COMM_WORLD, &com[j]);'
+error['MPI_Comm_dup'] = 'CommunicatorLeak'
+fini['MPI_Comm_dup'] = lambda n: "MPI_Comm_free(&com[j]);"
+free['MPI_Comm_dup'] = lambda n: "" 
+
+init['MPI_Type_contiguous'] = lambda n: 'MPI_Datatype type[size];'
+operation['MPI_Type_contiguous'] = lambda n: 'MPI_Type_contiguous(2, MPI_DOUBLE, &type[j]);'
+error['MPI_Type_contiguous'] = 'TypeLeak'
+fini['MPI_Type_contiguous'] = lambda n: "MPI_Type_free(&type[j]);"
+free['MPI_Type_contiguous'] = lambda n: "" 
+
+
+
+
+### P2P:basic 
+
+init['MPI_Send'] = lambda n: f'int buf{n}=rank;'
+start['MPI_Send'] = lambda n: ""
+operation['MPI_Send'] = lambda n: f'MPI_Send(&buf{n}, buff_size, type, dest, stag, newcom);'
+fini['MPI_Send'] = lambda n: ""
+free['MPI_Send'] = lambda n: ""
+write['MPI_Send'] = lambda n: ""
+
+init['MPI_Ssend'] = lambda n: f'int buf{n}=rank;'
+start['MPI_Ssend'] = lambda n: ""
+operation['MPI_Ssend'] = lambda n: f'MPI_Ssend(&buf{n}, buff_size, type, dest, stag, newcom);'
+fini['MPI_Ssend'] = lambda n: ""
+free['MPI_Ssend'] = lambda n: ""
+write['MPI_Ssend'] = lambda n: ""
+
+init['MPI_Bsend'] = lambda n: (f'int buf{n}=rank;\n'
+            + f'int buffer_attached_size{n} = MPI_BSEND_OVERHEAD + sizeof(int);\n' 
+            + f'char* buffer_attached{n} = (char*)malloc(buffer_attached_size{n});\n'
+            + f'MPI_Buffer_attach(buffer_attached{n}, buffer_attached_size{n});')
+start['MPI_Bsend'] = lambda n: ""
+operation['MPI_Bsend'] = lambda n: f'MPI_Bsend(&buf{n}, buff_size, type, dest, stag, newcom);'
+fini['MPI_Bsend'] = lambda n: ""
+free['MPI_Bsend'] = (lambda n: f'MPI_Buffer_detach(&buffer_attached{n}, &buffer_attached_size{n});\n'
+            + f'free(buffer_attached{n});')
+write['MPI_Bsend'] = lambda n: ""
+
+init['MPI_Recv'] = lambda n: f'int buf{n}=-1; MPI_Status sta{n};'
+start['MPI_Recv'] = lambda n: ""
+operation['MPI_Recv'] = lambda n: f'MPI_Recv(&buf{n}, buff_size, type, src, rtag, newcom, &sta{n});'
+fini['MPI_Recv'] = lambda n: ""
+free['MPI_Recv'] = lambda n: ""
+write['MPI_Recv'] = lambda n: ""
+
+init['MPI_Probe'] = lambda n: ""
+start['MPI_Probe'] = lambda n: ""
+operation['MPI_Probe'] = lambda n: f'MPI_Probe(src, 0, newcom, &sta);'
+fini['MPI_Probe'] = lambda n: ""
+free['MPI_Probe'] = lambda n: ""
+write['MPI_Probe'] = lambda n: ""
+
+
+
+### P2P:nonblocking
+
+init['MPI_Isend'] = lambda n: f'int buf{n}=rank; MPI_Request req{n}=MPI_REQUEST_NULL;'
+start['MPI_Isend'] = lambda n: "" 
+operation['MPI_Isend'] = lambda n: f'MPI_Isend(&buf{n}, buff_size, type, dest, stag, newcom, &req{n});'
+fini['MPI_Isend'] = lambda n: f'MPI_Wait(&req{n}, MPI_STATUS_IGNORE);'
+free['MPI_Isend'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Isend'] = lambda n: f'buf{n}=4;'
+
+init['MPI_Irecv'] = lambda n: f'int buf{n}=-1; MPI_Request req{n}=MPI_REQUEST_NULL;'
+start['MPI_Irecv'] = lambda n: "" 
+operation['MPI_Irecv'] = lambda n: f'MPI_Irecv(&buf{n}, buff_size, type, src, rtag, newcom, &req{n});'
+fini['MPI_Irecv'] = lambda n: f' MPI_Wait(&req{n}, MPI_STATUS_IGNORE);'
+free['MPI_Irecv'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Irecv'] = lambda n: f'buf{n}++;' 
+
+### P2P:persistent
+
+init['MPI_Send_init'] = lambda n: f'int buf{n}=rank; MPI_Request req{n}=MPI_REQUEST_NULL;'
+operation['MPI_Send_init'] = lambda n: f'MPI_Send_init(&buf{n}, buff_size, type, dest, stag, newcom, &req{n});' 
+start['MPI_Send_init'] = lambda n: f'MPI_Start(&req{n});'
+fini['MPI_Send_init'] = lambda n: f'MPI_Wait(&req{n}, MPI_STATUS_IGNORE);'
+free['MPI_Send_init'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Send_init'] = lambda n: f'buf{n}=4;' 
+
+init['MPI_Recv_init'] = lambda n: f'int buf{n}=-1; MPI_Request req{n}=MPI_REQUEST_NULL;'
+start['MPI_Recv_init'] = lambda n: f'MPI_Start(&req{n});'
+operation['MPI_Recv_init'] = lambda n: f'MPI_Recv_init(&buf{n}, buff_size, type, src, rtag, newcom, &req{n});'
+fini['MPI_Recv_init'] = lambda n: f'MPI_Wait(&req{n}, MPI_STATUS_IGNORE);'
+free['MPI_Recv_init'] = lambda n: f'if(req{n} != MPI_REQUEST_NULL) MPI_Request_free(&req{n});'
+write['MPI_Recv_init'] = lambda n: f'buf{n}++;' 
+
+### RMA
+
+epoch['MPI_Win_fence'] =lambda n: 'MPI_Win_fence(0, win);'
+finEpoch['MPI_Win_fence'] =lambda n: 'MPI_Win_fence(0, win);'
+epoch['MPI_Win_lock'] =lambda n: 'MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);'
+finEpoch['MPI_Win_lock'] =lambda n: 'MPI_Win_unlock(target, win);'
+epoch['MPI_Win_lock_all'] =lambda n: 'MPI_Win_lock_all(0,win);'
+finEpoch['MPI_Win_lock_all'] =lambda n: 'MPI_Win_unlock_all(win);'
+
+init['MPI_Put'] = lambda n: f'int localbuf{n} = 12345;'
+operation['MPI_Put'] = lambda n: f'MPI_Put(&localbuf{n}, N, MPI_INT, target, 0, N, type, win);'
+
+init['MPI_RPut'] = lambda n: "" 
+operation['MPI_RPut'] = lambda n: f'MPI_Put(&winbuf[20], N, MPI_INT, target, 0, N, type, win);'
+
+init['MPI_Get'] = lambda n: f'int localbuf{n} = 54321;'
+operation['MPI_Get'] = lambda n: f'MPI_Get(&localbuf{n}, N, MPI_INT, target, 0, N, type, win);' 
+
+init['MPI_RGet'] = lambda n: ""
+operation['MPI_RGet'] = lambda n: f'MPI_Get(&winbuf[20], N, MPI_INT, target, 0, N, type, win);' 
+
+init['store'] = lambda n: f'int localbuf{n} = 0;'
+operation['store'] = lambda n: f'localbuf{n} = 8;'
+
+init['rstore'] = lambda n: ""
+operation['rstore'] = lambda n: f'winbuf[20] = 12346;'
+
+init['load'] = lambda n: f'int localbuf{n} = 0;'
+operation['load'] = lambda n: f'int load = localbuf{n};'
+
+init['rload'] = lambda n: "" 
+operation['rload'] = lambda n: "int load = winbuf[20];"
+
+init['loadstore'] = lambda n: f'int localbuf{n} = 0;'
+operation['loadstore'] = lambda n: f'if (localbuf{n} % 2 == 0)  localbuf{n}++; '
+
+
+
+
+def find_line(content, target, filename):
+    res = 1
+    for line in content.split('\n'):
+        if re.search(f'[^:]{target}', line):
+            #print(f'Found {target} at {line}')
+            return res
+        res += 1
+    raise Exception(f"Line target {target} not found in {filename}.")
+
+
+def make_file(template, filename, replace):
+    output = template
+    filename = re.sub("_MPI_", "_", filename)
+    replace['filename'] = filename
+    # Replace all variables that don't have a ':' in their name
+    while re.search("@\{[^@:]*\}@", output):
+        m = re.search("@\{([^@:]*)\}@", output)
+        target = m.group(1)
+        #print(f"Replace @{{{target}}}@")
+        if target in replace.keys():
+            output = re.sub(f'@\{{{target}\}}@', replace[target], output)
+            #print(f"Replace {target} -> {replace[target]}")
+        else:
+            raise Exception(f"Variable {target} used in template, but not defined.")
+    # Now replace all variables with a ':' in their name: line targets are like that, and we don't want to resolve them before the others change the lines
+    while re.search("@\{([^:@]*):([^@]*)\}@", output):
+        m = re.search("@\{([^:@]*):([^@]*)\}@", output)
+        (kind, target) = (m.group(1), m.group(2))
+        if kind == 'line':
+            replace = f'{find_line(output, target, filename)}'
+            #print(f"Replace @{{line:{target}}}@ with '{replace}'")
+            output = re.sub(f'@\{{line:{target}\}}@', replace, output)
+        else:
+            raise Exception(f"Unknown variable kind: {kind}:{target}")
+
+    if os.path.exists(filename):
+        with open(filename, 'r') as file:
+            prev = file.read().split('\n')[0]
+            prev = re.sub('^.*?scripts/','scripts/', prev)
+            prev = re.sub('. DO NOT EDIT.', '', prev)
+        now = output.split('\n')[0]
+        now = re.sub('^.*?scripts/','scripts/', now)
+        now = re.sub('. DO NOT EDIT.', '', now)
+
+        print(f'WARNING: overwriting {filename}. Previously generated by: {prev}; regenerated by {now}')
+
+    # Ready to output it
+    with open(filename, 'w') as outfile:
+        outfile.write(output)
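The per-call tables defined above compose into C snippets; for instance (standalone sketch, where the argument '1' is the variable-name suffix used by the generators):

    from generator_utils import init, operation, fini, free

    c = 'MPI_Ireduce'
    print(init[c]('1'))       # MPI_Request req1=MPI_REQUEST_NULL; MPI_Status stat1; int sum1, val1 = 1;
    print(operation[c]('1'))  # MPI_Ireduce(&val1, &sum1, 1, type, op, root, newcom, &req1);
    print(fini[c]('1'))       # MPI_Wait(&req1, &stat1);
    print(free[c]('1'))       # if(req1 != MPI_REQUEST_NULL) MPI_Request_free(&req1);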
diff --git a/teshsuite/smpi/MBI/simgrid.py b/teshsuite/smpi/MBI/simgrid.py
new file mode 100644 (file)
index 0000000..1dc6bef
--- /dev/null
@@ -0,0 +1,105 @@
+# Copyright 2021-2022. The MBI project. All rights reserved. 
+# This program is free software; you can redistribute it and/or modify it under the terms of the license (GNU GPL).
+
+import re
+import os
+import subprocess
+from MBIutils import *
+
+class Tool(AbstractTool):
+    def identify(self):
+        return "SimGrid wrapper"
+
+    def build(self, rootdir, cached=True):
+        if cached and (os.path.exists(f"{rootdir}/builds/SimGrid/bin/smpicc") or os.path.exists('/usr/bin/simgrid-mc')):
+            return
+
+        here = os.getcwd() # Save where we were
+        os.chdir(rootdir)
+        # Get a GIT checkout. Either create it, or refresh it
+        if os.path.exists("tools/simgrid/.git"):
+            subprocess.run("cd tools/simgrid && git pull &&  cd ../..", shell=True, check=True)
+        else:
+            subprocess.run("rm -rf tools/simgrid && git clone --depth=1 https://framagit.org/simgrid/simgrid.git tools/simgrid", shell=True, check=True)
+
+        # Build and install it
+        os.chdir("tools/simgrid")
+        subprocess.run(f"cmake -DCMAKE_INSTALL_PREFIX={rootdir}/builds/SimGrid -Denable_model-checking=ON .", shell=True, check=True)
+        subprocess.run("make -j$(nproc) install VERBOSE=1", shell=True, check=True)
+
+        # Back to our previous directory
+        os.chdir(here)
+
+
+    def ensure_image(self):
+        AbstractTool.ensure_image(self, "-x simgrid")
+
+    def setup(self, rootdir):
+        os.environ['PATH'] = os.environ['PATH'] + ":" + rootdir + "/builds/SimGrid/bin"
+        os.environ['VERBOSE'] = '1'
+
+    def run(self, execcmd, filename, binary, id, timeout, batchinfo):
+        cachefile = f'{binary}_{id}'
+
+        if not os.path.exists("cluster.xml"):
+            with open('cluster.xml', 'w') as outfile:
+                outfile.write("<?xml version='1.0'?>\n")
+                outfile.write("<!DOCTYPE platform SYSTEM \"https://simgrid.org/simgrid.dtd\">\n")
+                outfile.write('<platform version="4.1">\n')
+                outfile.write(' <cluster id="acme" prefix="node-" radical="0-99" suffix="" speed="1Gf" bw="125MBps" lat="50us"/>\n')
+                outfile.write('</platform>\n')
+
+        execcmd = re.sub("mpirun", "smpirun -wrapper simgrid-mc -platform ./cluster.xml -analyze --cfg=smpi/finalization-barrier:on --cfg=smpi/list-leaks:10 --cfg=model-check/max-depth:10000", execcmd)
+        if re.search("Concurrency", binary):  # DPOR reduction in simgrid cannot deal with RMA calls as they contain mutexes
+            execcmd = re.sub("smpirun", "smpirun --cfg=model-check/reduction:none", execcmd)
+        execcmd = re.sub(r'\${EXE}', binary, execcmd)
+        execcmd = re.sub(r'\$zero_buffer', "--cfg=smpi/buffering:zero", execcmd)
+        execcmd = re.sub(r'\$infty_buffer', "--cfg=smpi/buffering:infty", execcmd)
+
+        run_cmd(
+            buildcmd=f"smpicc {filename} -trace-call-location -g -Wl,-znorelro -Wl,-znoseparate-code -o {binary}",
+            execcmd=execcmd,
+            cachefile=cachefile,
+            filename=filename,
+            binary=binary,
+            timeout=timeout,
+            batchinfo=batchinfo)
+
+    def teardown(self): 
+        subprocess.run("find -type f -a -executable | xargs rm -f", shell=True, check=True) # Remove generated cruft (binary files)
+        subprocess.run("rm -f smpitmp-* core", shell=True, check=True) 
+
+    def parse(self, cachefile):
+        if os.path.exists(f'{cachefile}.timeout') or os.path.exists(f'logs/simgrid/{cachefile}.timeout'):
+            return 'timeout'
+        if not (os.path.exists(f'{cachefile}.txt') or os.path.exists(f'logs/simgrid/{cachefile}.txt')):
+            return 'failure'
+
+        with open(f'{cachefile}.txt' if os.path.exists(f'{cachefile}.txt') else f'logs/simgrid/{cachefile}.txt', 'r') as infile:
+            output = infile.read()
+
+        if re.search(r'Compilation of .*? raised an error \(retcode: ', output):
+            return 'UNIMPLEMENTED'
+
+        if re.search('MBI_MSG_RACE', output):
+            return 'MBI_MSG_RACE'
+
+        if re.search('MC is currently not supported here', output):
+            return 'failure'
+
+        if re.search('DEADLOCK DETECTED', output):
+            return 'deadlock'
+        if re.search('returned MPI_ERR', output):
+            return 'mpierr'
+        if re.search('Not yet implemented', output):
+            return 'UNIMPLEMENTED'
+        if re.search('CRASH IN THE PROGRAM', output):
+            return 'failure'
+        if re.search('Probable memory leaks in your code: SMPI detected', output):
+            return 'resleak'
+        if re.search('DFS exploration ended.', output):
+            return 'OK'
+
+        print (f">>>>[ INCONCLUSIVE ]>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ({cachefile})")
+        print(output)
+        print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
+        return 'other'
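A sketch of how parse() is consumed (the cache file content is made up): categorize() in MBIutils.py compares the outcome it returns against the expectation from the test header.

    from simgrid import Tool

    tool = Tool()
    with open('demo_0.txt', 'w') as f:
        f.write('DEADLOCK DETECTED\n')
    print(tool.parse('demo_0'))  # deadlock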
diff --git a/tools/cmake/DefinePackages.cmake b/tools/cmake/DefinePackages.cmake
index 3709408..a78c0a6 100644 (file)
@@ -1028,6 +1028,7 @@ set(CMAKEFILES_TXT
   teshsuite/surf/CMakeLists.txt
   teshsuite/xbt/CMakeLists.txt
 
+  teshsuite/smpi/MBI/CMakeLists.txt
   teshsuite/smpi/mpich3-test/CMakeLists.txt
   teshsuite/smpi/mpich3-test/attr/CMakeLists.txt
   teshsuite/smpi/mpich3-test/coll/CMakeLists.txt
diff --git a/tools/cmake/Option.cmake b/tools/cmake/Option.cmake
index 2515b91..b165f35 100644 (file)
@@ -48,7 +48,7 @@ else()
   option(enable_smpi_papi    "Whether SMPI supports PAPI bindings." off)
 endif()
 option(enable_smpi_MPICH3_testsuite "Whether the test suite form MPICH 3 should be built" off)
-option(enable_smpi_ISP_testsuite "Whether the test suite from ISP should be built." off)
+option(enable_smpi_MBI_testsuite "Whether the test suite from MBI should be built." off)
 
 # Internal targets used by jenkins
 ###
diff --git a/tools/jenkins/Coverage.sh b/tools/jenkins/Coverage.sh
index 0487ff1..6b240b2 100755 (executable)
@@ -72,7 +72,7 @@ cmake -Denable_documentation=OFF \
       -Denable_ns3=ON \
       -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=ON -Denable_model-checking=ON \
       -Denable_smpi_papi=ON \
-      -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=ON \
+      -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_MBI_testsuite=OFF \
       -Denable_coverage=ON -DLTO_EXTRA_FLAG="auto" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "$WORKSPACE"
 
 #build with sonarqube scanner wrapper
diff --git a/tools/jenkins/Flags.sh b/tools/jenkins/Flags.sh
index c07669c..046a5ad 100755 (executable)
@@ -90,7 +90,7 @@ cmake -Denable_documentation=OFF -Denable_java=${buildjava} -Denable_msg=${build
       -Denable_compile_optimizations=OFF -Denable_compile_warnings=ON \
       -Denable_mallocators=ON -Denable_debug=${builddebug} \
       -Denable_smpi=${buildsmpi} -Denable_smpi_MPICH3_testsuite=${buildsmpi} -Denable_model-checking=${buildmc} \
-      -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=OFF \
+      -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_MBI_testsuite=OFF \
       -Denable_ns3=$(onoff test "$buildmc" != "ON") -Denable_coverage=OFF -DLTO_EXTRA_FLAG="auto" "$WORKSPACE"
 
 make -j$NUMPROC tests
diff --git a/tools/jenkins/Sanitizers.sh b/tools/jenkins/Sanitizers.sh
index a07ead4..bcbe854 100755 (executable)
@@ -79,7 +79,7 @@ cmake -Denable_documentation=OFF -Denable_java=OFF \
       -Denable_mallocators=OFF \
       -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=ON -Denable_model-checking=OFF \
       -Denable_ns3=ON \
-      -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=ON -Denable_coverage=OFF\
+      -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_MBI_testsuite=OFF -Denable_coverage=OFF\
       -Denable_fortran=OFF -Denable_python=OFF -DLTO_EXTRA_FLAG="auto" ${SANITIZER_OPTIONS} "$WORKSPACE"
 
 make -j$NUMPROC tests
diff --git a/tools/jenkins/build.sh b/tools/jenkins/build.sh
index e22568a..9e47e12 100755 (executable)
@@ -207,7 +207,7 @@ fi
 cmake -G"$GENERATOR" ${INSTALL:+-DCMAKE_INSTALL_PREFIX=$INSTALL} \
   -Denable_debug=ON -Denable_documentation=OFF -Denable_coverage=OFF \
   -Denable_model-checking=$(onoff test "$build_mode" = "ModelChecker") \
-  -Denable_smpi_ISP_testsuite=$(onoff test "$build_mode" = "ModelChecker") \
+  -Denable_smpi_MBI_testsuite=OFF \
   -Denable_compile_optimizations=$(onoff test "$build_mode" != "DynamicAnalysis") \
   -Denable_smpi_MPICH3_testsuite=$(onoff test "$build_mode" = "Debug") \
   -Denable_mallocators=$(onoff test "$build_mode" != "DynamicAnalysis") \