From: Martin Quinson
Date: Wed, 30 Sep 2020 19:09:50 +0000 (+0200)
Subject: Merge branch 'master' into adrien
X-Git-Tag: v3.26~418^2~2^2
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/b9625f82f86db0674e911887addce45dca31b57f?hp=aa4c8eeb3051e5adfef317cda2ae590ca13e6f3e

Merge branch 'master' into adrien
---

diff --git a/.github/workflows/jarfile.yml b/.github/workflows/jarfile.yml
new file mode 100644
index 0000000000..9d7766b62f
--- /dev/null
+++ b/.github/workflows/jarfile.yml
@@ -0,0 +1,81 @@
+name: SimGrid complete jar file generation
+
+on: [workflow_dispatch]
+
+jobs:
+  build:
+    runs-on: ${{ matrix.config.os }}-latest
+    strategy:
+      matrix:
+        config:
+          - { name: "Windows MingW", os: windows, cc: "gcc", cxx: "g++", generator: "MinGW Makefiles", cmake_extra_options: "-Denable_lto=OFF" }
+          - { name: "Ubuntu gcc", os: ubuntu, cc: "gcc", cxx: "g++", generator: "Unix Makefiles", cmake_extra_options: "-DLTO_EXTRA_FLAG=auto" }
+          - { name: "MacOS clang", os: macos, cc: "clang", cxx: "clang++", generator: "Unix Makefiles", cmake_extra_options: "-DLTO_EXTRA_FLAG=auto" }
+    steps:
+      - uses: actions/checkout@v2
+      # install dependencies
+      - name: Init options
+        run: |
+          echo "::set-env name=CC::${{ matrix.config.cc }}"
+          echo "::set-env name=CXX::${{ matrix.config.cxx }}"
+      - name: Install boost on ubuntu
+        if: matrix.config.os == 'ubuntu'
+        run: sudo apt-get update && sudo apt-get install -yq libboost-dev
+      - name: Install boost on macos
+        if: matrix.config.os == 'macos'
+        run: brew install boost
+      - name: Install boost and gcc on windows
+        if: matrix.config.os == 'windows'
+        run: |
+          Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
+          scoop install gcc --global
+          echo "::set-env name=BOOST_ROOT::$env:BOOST_ROOT_1_72_0"
+          echo "::set-env name=BOOST_INCLUDEDIR::$env:BOOST_ROOT\boost\include"
+          echo "::set-env name=BOOST_LIBRARYDIR::$env:BOOST_ROOT\lib"
+      - name: Build jar with Cmake
+        run: |
+          mkdir build
+          cd build
+          cmake -Denable_documentation=OFF -Denable_java=ON -Denable_msg=ON -Denable_lib_in_jar=ON -Dminimal-bindings=ON -Denable_compile_optimizations=ON -Denable_smpi=OFF ${{ matrix.config.cmake_extra_options }} -G "${{ matrix.config.generator }}" ..
+          make -j2 simgrid-java_jar
+      - name: Upload jar
+        uses: actions/upload-artifact@v2
+        with:
+          name: jar-${{ matrix.config.os }}
+          path: build/simgrid.jar
+
+  create_jar:
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Download all jars from ubuntu
+        uses: actions/download-artifact@v2
+      - name: Build final jar
+        run: |
+          patch=$(grep -r set\(SIMGRID_VERSION_PATCH ./CMakeLists.txt | sed 's/.*"\([[:digit:]]\+\)".*/\1/g')
+          major=$(grep -r set\(SIMGRID_VERSION_MAJOR ./CMakeLists.txt | sed 's/.*"\([[:digit:]]\+\)".*/\1/g')
+          minor=$(grep -r set\(SIMGRID_VERSION_MINOR ./CMakeLists.txt | sed 's/.*"\([[:digit:]]\+\)".*/\1/g')
+          if [ $patch -ne 0 ]; then
+            version="$major.$minor.$patch"
+          else
+            version="$major.$minor"
+          fi
+          mkdir content
+          cd content
+          for j in ubuntu macos windows ; do unzip -n ../jar-$j/simgrid.jar ; done
+          strip NATIVE/*/*/*.so
+          x86_64-linux-gnu-strip NATIVE/*/*/lib*dll
+          zip -r ../simgrid-${version}.jar *
+      - name: Upload jar
+        uses: actions/upload-artifact@v2
+        with:
+          name: jar-final
+          path: simgrid-*.jar
+      - name: cleanup artifacts
+        uses: geekyeggo/delete-artifact@v1
+        with:
+          name: |
+            jar-ubuntu
+            jar-windows
+            jar-macos
diff --git a/.gitignore b/.gitignore
index e835918df4..94d8c39b30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -273,7 +273,7 @@ examples/smpi/replay_multiple_manual_deploy/workload_mixed2
 examples/smpi/replay_multiple_manual_deploy/workload_nojob
 examples/smpi/replay/one_trace
 examples/smpi/replay/smpi_replay
-examples/smpi/smpi_msg_masterslave/masterslave_mailbox_smpi
+examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi
 examples/smpi/trace_call_location/smpi_trace_call_location
 examples/smpi/trace_simple/smpi_trace_simple
 examples/smpi/trace/smpi_trace
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ae5ebb7d2a..fc9bff366d 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -54,13 +54,14 @@ pages:
   stage: deploy
   script:
   - pip3 install --requirement docs/requirements.txt
-  - cd docs
+  - cd docs/source/_ext/javasphinx; python3 setup.py build; python3 setup.py install
+  - cd ../../..
   - LC_ALL=C.UTF-8 ./Build.sh
   - mv build/html ../public
 # - The CSS contains a reference to a font or something, not something we gonna fix on our side
 # - The javasphinx output does not exist in the git, so the "edit on FramaGit" link is broken.
 # I'd like to report this as a bug, but javasphinx seems abandonned upstream.
-  - linkchecker --ignore-url='.*\.css$' --ignore-url=public/java/org ../public
+#not installed - linkchecker --ignore-url='.*\.css$' --ignore-url=public/java/org ../public
 # From time to time, we should check external links with the
 # following, but it has a lot of false positive
 # - linkchecker --ignore-url='.*\.css$' --ignore-url=public/java/org --check-extern ../public
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f511ad011c..bae099b0eb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -179,7 +179,7 @@ if(WIN32)
   unset(CMAKE_INCLUDE_WIN)
 endif()
 
-# library dependency cannot start with a space (CMP0004), so initialize it with something that is never desactivated.
+# library dependency cannot start with a space (CMP0004), so initialize it with something that is never deactivated.
set(SIMGRID_DEP "-lm") ### Determine the assembly flavor that we need today @@ -555,10 +555,10 @@ file(READ ${CMAKE_HOME_DIRECTORY}/src/smpi/smpitools.sh SMPITOOLS_SH) # Definiti ### SMPI script used when simgrid is installed set(exec_prefix ${CMAKE_INSTALL_PREFIX}) -set(includeflag "-I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR} -I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/smpi") set(includedir "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") set(libdir "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") -set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${CMAKE_INSTALL_LIBDIR}") +set(includeflag "-I${includedir} -I${includedir}/smpi") +set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${libdir}") if(NS3_LIBRARY_PATH) set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}") endif() @@ -583,17 +583,17 @@ endforeach() ### SMPI scripts used when compiling simgrid set(exec_prefix "${CMAKE_BINARY_DIR}/smpi_script/") -set(includeflag "-I${CMAKE_HOME_DIRECTORY}/include -I${CMAKE_HOME_DIRECTORY}/include/smpi") -set(includeflag "${includeflag} -I${CMAKE_BINARY_DIR}/include -I${CMAKE_BINARY_DIR}/include/smpi") set(includedir "${CMAKE_HOME_DIRECTORY}/include") set(libdir "${CMAKE_BINARY_DIR}/lib") -set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${CMAKE_BINARY_DIR}/lib") +set(includeflag "-I${includedir} -I${includedir}/smpi") +set(includeflag "${includeflag} -I${CMAKE_BINARY_DIR}/include -I${CMAKE_BINARY_DIR}/include/smpi") +set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${libdir}") if(NS3_LIBRARY_PATH) set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}") endif() set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}\"") -set(SMPIMAIN ${CMAKE_BINARY_DIR}/lib/simgrid/smpimain) -set(SMPIREPLAYMAIN ${CMAKE_BINARY_DIR}/lib/simgrid/smpireplaymain) +set(SMPIMAIN ${libdir}/simgrid/smpimain) +set(SMPIREPLAYMAIN ${libdir}/simgrid/smpireplaymain) foreach(script cc cxx ff f90 run) configure_file(${CMAKE_HOME_DIRECTORY}/src/smpi/smpi${script}.in ${CMAKE_BINARY_DIR}/smpi_script/bin/smpi${script} @ONLY) diff --git a/ChangeLog b/ChangeLog index e1df82b8dd..ce874a1eda 100644 --- a/ChangeLog +++ b/ChangeLog @@ -64,6 +64,7 @@ Fixed bugs (FG#.. -> FramaGit bugs; FG!.. -> FG merge requests) - GH#128: Parallelization of simulation with --cfg=contexts/nthreads - GH#139: Allow pthread creation in SMPI - GH#336: Packet-level simulation using SMPI? 
+ - GH#346: [SMPI] error while loading shared libraries: libsimgrid.so - GH!337: Fix link_energy plugin for wifi platforms - GH!339: Add Mailbox set_receiver method to python binding diff --git a/MANIFEST.in b/MANIFEST.in index b281628e94..8b1f81f942 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -66,18 +66,6 @@ include examples/c/app-pingpong/app-pingpong.tesh include examples/c/app-pingpong/app-pingpong_d.xml include examples/c/app-token-ring/app-token-ring.c include examples/c/app-token-ring/app-token-ring.tesh -include examples/c/async-wait/async-wait.c -include examples/c/async-wait/async-wait.tesh -include examples/c/async-wait/async-wait2_d.xml -include examples/c/async-wait/async-wait3_d.xml -include examples/c/async-wait/async-wait4_d.xml -include examples/c/async-wait/async-wait_d.xml -include examples/c/async-waitall/async-waitall.c -include examples/c/async-waitall/async-waitall.tesh -include examples/c/async-waitall/async-waitall_d.xml -include examples/c/async-waitany/async-waitany.c -include examples/c/async-waitany/async-waitany.tesh -include examples/c/async-waitany/async-waitany_d.xml include examples/c/cloud-capping/cloud-capping.c include examples/c/cloud-capping/cloud-capping.tesh include examples/c/cloud-masterworker/cloud-masterworker.c @@ -86,6 +74,18 @@ include examples/c/cloud-migration/cloud-migration.c include examples/c/cloud-migration/cloud-migration.tesh include examples/c/cloud-simple/cloud-simple.c include examples/c/cloud-simple/cloud-simple.tesh +include examples/c/comm-wait/comm-wait.c +include examples/c/comm-wait/comm-wait.tesh +include examples/c/comm-wait/comm-wait2_d.xml +include examples/c/comm-wait/comm-wait3_d.xml +include examples/c/comm-wait/comm-wait4_d.xml +include examples/c/comm-wait/comm-wait_d.xml +include examples/c/comm-waitall/comm-waitall.c +include examples/c/comm-waitall/comm-waitall.tesh +include examples/c/comm-waitall/comm-waitall_d.xml +include examples/c/comm-waitany/comm-waitany.c +include examples/c/comm-waitany/comm-waitany.tesh +include examples/c/comm-waitany/comm-waitany_d.xml include examples/c/dht-kademlia/answer.c include examples/c/dht-kademlia/answer.h include examples/c/dht-kademlia/common.h @@ -317,12 +317,12 @@ include examples/python/actor-suspend/actor-suspend.py include examples/python/actor-suspend/actor-suspend.tesh include examples/python/actor-yield/actor-yield.py include examples/python/actor-yield/actor-yield.tesh -include examples/python/async-wait/async-wait.py -include examples/python/async-wait/async-wait.tesh -include examples/python/async-waitall/async-waitall.py -include examples/python/async-waitall/async-waitall.tesh -include examples/python/async-waitany/async-waitany.py -include examples/python/async-waitany/async-waitany.tesh +include examples/python/comm-wait/comm-wait.py +include examples/python/comm-wait/comm-wait.tesh +include examples/python/comm-waitall/comm-waitall.py +include examples/python/comm-waitall/comm-waitall.tesh +include examples/python/comm-waitany/comm-waitany.py +include examples/python/comm-waitany/comm-waitany.tesh include examples/python/exec-async/exec-async.py include examples/python/exec-async/exec-async.tesh include examples/python/exec-basic/exec-basic.py @@ -373,21 +373,6 @@ include examples/s4u/app-pingpong/s4u-app-pingpong.tesh include examples/s4u/app-pingpong/simix-breakpoint.tesh include examples/s4u/app-token-ring/s4u-app-token-ring.cpp include examples/s4u/app-token-ring/s4u-app-token-ring.tesh -include examples/s4u/async-ready/s4u-async-ready.cpp -include 
examples/s4u/async-ready/s4u-async-ready.tesh -include examples/s4u/async-ready/s4u-async-ready_d.xml -include examples/s4u/async-wait/s4u-async-wait.cpp -include examples/s4u/async-wait/s4u-async-wait.tesh -include examples/s4u/async-wait/s4u-async-wait_d.xml -include examples/s4u/async-waitall/s4u-async-waitall.cpp -include examples/s4u/async-waitall/s4u-async-waitall.tesh -include examples/s4u/async-waitall/s4u-async-waitall_d.xml -include examples/s4u/async-waitany/s4u-async-waitany.cpp -include examples/s4u/async-waitany/s4u-async-waitany.tesh -include examples/s4u/async-waitany/s4u-async-waitany_d.xml -include examples/s4u/async-waituntil/s4u-async-waituntil.cpp -include examples/s4u/async-waituntil/s4u-async-waituntil.tesh -include examples/s4u/async-waituntil/s4u-async-waituntil_d.xml include examples/s4u/cloud-capping/s4u-cloud-capping.cpp include examples/s4u/cloud-capping/s4u-cloud-capping.tesh include examples/s4u/cloud-migration/s4u-cloud-migration.cpp @@ -396,6 +381,24 @@ include examples/s4u/cloud-simple/s4u-cloud-simple.cpp include examples/s4u/cloud-simple/s4u-cloud-simple.tesh include examples/s4u/comm-dependent/s4u-comm-dependent.cpp include examples/s4u/comm-dependent/s4u-comm-dependent.tesh +include examples/s4u/comm-ready/s4u-comm-ready.cpp +include examples/s4u/comm-ready/s4u-comm-ready.tesh +include examples/s4u/comm-ready/s4u-comm-ready_d.xml +include examples/s4u/comm-suspend/s4u-comm-suspend.cpp +include examples/s4u/comm-suspend/s4u-comm-suspend.tesh +include examples/s4u/comm-suspend/s4u-comm-suspend_d.xml +include examples/s4u/comm-wait/s4u-comm-wait.cpp +include examples/s4u/comm-wait/s4u-comm-wait.tesh +include examples/s4u/comm-wait/s4u-comm-wait_d.xml +include examples/s4u/comm-waitall/s4u-comm-waitall.cpp +include examples/s4u/comm-waitall/s4u-comm-waitall.tesh +include examples/s4u/comm-waitall/s4u-comm-waitall_d.xml +include examples/s4u/comm-waitany/s4u-comm-waitany.cpp +include examples/s4u/comm-waitany/s4u-comm-waitany.tesh +include examples/s4u/comm-waitany/s4u-comm-waitany_d.xml +include examples/s4u/comm-waituntil/s4u-comm-waituntil.cpp +include examples/s4u/comm-waituntil/s4u-comm-waituntil.tesh +include examples/s4u/comm-waituntil/s4u-comm-waituntil_d.xml include examples/s4u/dht-chord/s4u-dht-chord-node.cpp include examples/s4u/dht-chord/s4u-dht-chord.cpp include examples/s4u/dht-chord/s4u-dht-chord.hpp @@ -477,6 +480,8 @@ include examples/s4u/network-ns3/one_cluster_d.xml include examples/s4u/network-ns3/onelink_d.xml include examples/s4u/network-ns3/s4u-network-ns3.cpp include examples/s4u/network-ns3/s4u-network-ns3.tesh +include examples/s4u/network-wifi/s4u-network-wifi.cpp +include examples/s4u/network-wifi/s4u-network-wifi.tesh include examples/s4u/platform-failures/s4u-platform-failures.cpp include examples/s4u/platform-failures/s4u-platform-failures.tesh include examples/s4u/platform-failures/s4u-platform-failures_d.xml @@ -629,9 +634,9 @@ include examples/smpi/replay_multiple_manual_deploy/workload_compute_consecutive include examples/smpi/replay_multiple_manual_deploy/workload_compute_simple include examples/smpi/replay_multiple_manual_deploy/workload_mixed2_same_time include examples/smpi/replay_multiple_manual_deploy/workload_mixed2_same_time_and_resources -include examples/smpi/smpi_s4u_masterslave/deployment_masterslave_mailbox_smpi.xml -include examples/smpi/smpi_s4u_masterslave/masterslave_mailbox_smpi.cpp -include examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh +include 
examples/smpi/smpi_s4u_masterworker/deployment_masterworker_mailbox_smpi.xml +include examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi.cpp +include examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh include examples/smpi/trace/trace.c include examples/smpi/trace/trace.tesh include examples/smpi/trace_call_location/adjust @@ -1814,6 +1819,20 @@ include docs/source/Tutorial_Algorithms.rst include docs/source/Tutorial_MPI_Applications.rst include docs/source/XML_Reference.rst include docs/source/_ext/autodoxy.py +include docs/source/_ext/javasphinx/LICENSE +include docs/source/_ext/javasphinx/MANIFEST.in +include docs/source/_ext/javasphinx/README.md +include docs/source/_ext/javasphinx/doc/conf.py +include docs/source/_ext/javasphinx/doc/index.rst +include docs/source/_ext/javasphinx/javasphinx/__init__.py +include docs/source/_ext/javasphinx/javasphinx/apidoc.py +include docs/source/_ext/javasphinx/javasphinx/compiler.py +include docs/source/_ext/javasphinx/javasphinx/domain.py +include docs/source/_ext/javasphinx/javasphinx/extdoc.py +include docs/source/_ext/javasphinx/javasphinx/formatter.py +include docs/source/_ext/javasphinx/javasphinx/htmlrst.py +include docs/source/_ext/javasphinx/javasphinx/util.py +include docs/source/_ext/javasphinx/setup.py include docs/source/_ext/showfile.css include docs/source/_ext/showfile.js include docs/source/_ext/showfile.py @@ -1940,13 +1959,12 @@ include examples/platforms/two_hosts_profiles.xml include examples/platforms/two_peers.xml include examples/platforms/vivaldi.xml include examples/platforms/wifi.xml -include examples/platforms/wifi_decay_2STA.xml include examples/python/CMakeLists.txt include examples/python/actor-create/actor-create_d.xml include examples/python/actor-lifetime/actor-lifetime_d.xml -include examples/python/async-wait/async-wait_d.xml -include examples/python/async-waitall/async-waitall_d.xml -include examples/python/async-waitany/async-waitany_d.xml +include examples/python/comm-wait/comm-wait_d.xml +include examples/python/comm-waitall/comm-waitall_d.xml +include examples/python/comm-waitany/comm-waitany_d.xml include examples/s4u/CMakeLists.txt include examples/smpi/CMakeLists.txt include examples/smpi/NAS/CMakeLists.txt @@ -1954,7 +1972,7 @@ include examples/smpi/mc/non_deterministic.tesh include examples/smpi/mc/only_send_deterministic.tesh include examples/smpi/replay_multiple/CMakeLists.txt include examples/smpi/replay_multiple_manual_deploy/CMakeLists.txt -include examples/smpi/smpi_s4u_masterslave/CMakeLists.txt +include examples/smpi/smpi_s4u_masterworker/CMakeLists.txt include include/simgrid/Exception.hpp include include/simgrid/actor.h include include/simgrid/barrier.h @@ -2061,6 +2079,7 @@ include include/xbt/mallocator.h include include/xbt/misc.h include include/xbt/module.h include include/xbt/parmap.h +include include/xbt/parse_units.hpp include include/xbt/random.hpp include include/xbt/range.hpp include include/xbt/replay.hpp @@ -2655,6 +2674,7 @@ include src/xbt/xbt_log_layout_simple.cpp include src/xbt/xbt_main.cpp include src/xbt/xbt_os_file.cpp include src/xbt/xbt_os_time.c +include src/xbt/xbt_parse_units.cpp include src/xbt/xbt_replay.cpp include src/xbt/xbt_str.cpp include src/xbt/xbt_str_test.cpp diff --git a/NEWS b/NEWS index 1e54108843..8bfbd6e9dc 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,7 @@ __ _____ _ __ ___(_) ___ _ __ |___ / |___ \ / /_ \_/ \___|_| |___/_|\___/|_| |_| |____(_)_____|\___/ (unreleased) + * SMPI: improved support of the proxy apps (including those using petsc) _ _____ 
____ ____ __ _____ _ __ ___(_) ___ _ __ |___ / |___ \| ___| \ \ / / _ \ '__/ __| |/ _ \| '_ \ |_ \ __) |___ \ diff --git a/contrib/benchmarking_code_block/Rdhist.R b/contrib/benchmarking_code_block/Rdhist.R index ac5baa1404..502a6f861a 100644 --- a/contrib/benchmarking_code_block/Rdhist.R +++ b/contrib/benchmarking_code_block/Rdhist.R @@ -176,7 +176,7 @@ create.base.plot <- function() { ##' ##' Used to add raw data or summary statistics to the plot of a distribution. ##' The height of Y is arbitrary, and can be set to optimize visualization. -##' If SE estimates are available, tehse wil be plotted +##' If SE estimates are available, these will be plotted ##' @name plot.data ##' @title Add data to plot ##' @param trait.data data to be plotted diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index ecab1a3b49..0efc6f8b4d 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -1595,7 +1595,7 @@ UML_LOOK = NO # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more -# managable. Set this to 0 for no limit. Note that the threshold may be +# manageable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 diff --git a/doc/doxygen/FAQ.doc b/doc/doxygen/FAQ.doc index 2509ee9f1d..674f55dac4 100644 --- a/doc/doxygen/FAQ.doc +++ b/doc/doxygen/FAQ.doc @@ -9,7 +9,7 @@ This document is the FAQ of the MSG interface. Some entries are a bit aging and You are at the right place... To understand what you can do or cannot do with SimGrid, you should read the tutorial -slides from the SimGrid's website. You may find more uptodate +slides from the SimGrid's website. You may find more up-to-date material on the blog of Martin Quinson. @@ -91,7 +91,7 @@ condition variables or a SimGrid semaphore, as described in @ref msg_synchro (in Java, only semaphores are available). But actually, many synchronization patterns can be encoded with communication on mailboxes. Typically, if you need one process to notify another one, -you could use a condition variable or a semphore, but sending a +you could use a condition variable or a semaphore, but sending a message to a specific mailbox does the trick in most cases. @subsubsection faq_MIA_communication_time How can I get the *real* communication time? @@ -109,7 +109,7 @@ int sender() m_task_t task = MSG_task_create("Task", task_comp_size, task_comm_size, calloc(1,sizeof(double))); *((double*) task->data) = MSG_get_clock(); - MSG_task_put(task, slaves[i % slaves_count], PORT_22); + MSG_task_put(task, workers[i % workers_count], PORT_22); XBT_INFO("Send completed"); return 0; } diff --git a/doc/doxygen/inside.doc b/doc/doxygen/inside.doc index 3945ed00c6..d6eb7d0ac0 100644 --- a/doc/doxygen/inside.doc +++ b/doc/doxygen/inside.doc @@ -2,8 +2,8 @@ This page describes the software infrastructure behind the SimGrid project. This is not the components' organisation (described in @ref -uhood_arch) but informations on how to extend the framework, how the -automatic tests are run, and so on. These informations are split on +uhood_arch) but information on how to extend the framework, how the +automatic tests are run, and so on. 
These information are split on several pages, as follows: - @ref uhood_tech_inside @@ -69,10 +69,10 @@ ln -s $(realpath tools/git-hooks/clang-format.pre-commit) .git/hooks/pre-commit @endverbatim This will add an extra verification before integrating any commit that -you could prepare. If your code does not respects our formating code, +you could prepare. If your code does not respects our formatting code, git will say so, and provide a ready to use patch that you can apply to improve your commit. Just carefully read the error message you get -to find the exact command with git-apply to fix your formating. +to find the exact command with git-apply to fix your formatting. If you find that for a specific commit, the formatter does a very bad job, then add --no-verify to your git commit command line. diff --git a/doc/doxygen/inside_extending.doc b/doc/doxygen/inside_extending.doc index f0f02b6110..1a7f221dbb 100644 --- a/doc/doxygen/inside_extending.doc +++ b/doc/doxygen/inside_extending.doc @@ -34,7 +34,7 @@ the classes of the corresponding interfaces. For instance, if you want to add a new cup model called `Plop`, create two files cpu_plop.hpp and cpu_plop_cpp which contains classes CpuPlopModel, CpuPlop and -CpuPlopAction implementating respectively the interfaces CpuModel, Cpu and +CpuPlopAction implementing respectively the interfaces CpuModel, Cpu and CpuAction. You also need to define a initializing function like this: ~~~~ @@ -101,7 +101,7 @@ void sg_my_network_plugin_init() { } ~~~~ -Then you need to add an entry in surf_interface.cpp refering to your +Then you need to add an entry in surf_interface.cpp referring to your initialization function. ~~~~ @@ -145,13 +145,13 @@ The workflow of a simcall is the following: - If the simcall is not marked as "blocking" in its definition, call `ActorImpl::simcall_answer()` that adds back the issuer process to the list of processes to run in the next scheduling round. - It is thus the responsability of the blocking simcalls to call + It is thus the responsibility of the blocking simcalls to call `ActorImpl::simcall_answer()` themselves in their handler. Note that empty HANDLERs can be omitted. These functions usually do some parameter checking, or retrieve some information about the simcall issuer, but when there no need for such things, the handler -can be omited. In that case, we directly call the function +can be omitted. In that case, we directly call the function `simcall_()`. To simplify the simcall creation, a python script generates most of @@ -204,7 +204,7 @@ in. For simcalls which might block, `kernel_sync()` can be used. It takes a C++ callback and executes it immediately in maestro. This C++ callback is expected to return a `simgrid::kernel::Future` reprensenting the operation -in the kernal. When the operations completes, the user process is waken up +in the kernel. 
When the operations completes, the user process is waken up with the result: ~~~ diff --git a/doc/doxygen/inside_release.doc b/doc/doxygen/inside_release.doc index f3609b57f9..34614e2bb0 100644 --- a/doc/doxygen/inside_release.doc +++ b/doc/doxygen/inside_release.doc @@ -122,7 +122,7 @@ Release numbering semantic: - 3.X.Y where Y is odd: git current status between two releases - No expectations on such versions - Example - - 3.22.4: unannounced/losely documented stable release + - 3.22.4: unannounced/loosely documented stable release - 3.22.5: git status somewhere between the release of 3.22.4 and the next one - 3.23: Documented and announced stable release diff --git a/doc/doxygen/inside_tests.doc b/doc/doxygen/inside_tests.doc index 10a394a924..e7aba36bbb 100644 --- a/doc/doxygen/inside_tests.doc +++ b/doc/doxygen/inside_tests.doc @@ -61,7 +61,7 @@ It is important to keep your tests fast. We run them very very often, and you should strive to make them as fast as possible, to not bother the other developers. Do not hesitate to stress test your code, but make sure that it runs reasonably fast, or nobody will run "ctest" -before commiting code. +before committing code. @section inside_tests_add_integration Adding integration tests @@ -89,10 +89,10 @@ To add a new integration test, you thus have 3 things to do: details.@n Tesh is sometimes annoying as you have to ensure that the expected output will always be exactly the same. In particular, your should - not output machine dependent informations such as absolute data - path, nor memory adresses as they would change on each run. Several + not output machine dependent information such as absolute data + path, nor memory addresses as they would change on each run. Several steps can be used here, such as the obfucation of the memory - adresses unless the verbose logs are displayed (using the + addresses unless the verbose logs are displayed (using the #XBT_LOG_ISENABLED() macro), or the modification of the log formats to hide the timings when they depend on the host machine.@n The script located in /tools/tesh/generate_tesh can @@ -130,7 +130,7 @@ ADD_TEST(my-test-name ${CMAKE_BINARY_DIR}/bin/tesh As usual, you must run "make distcheck" after modifying the cmake files, to ensure that you did not forget any files in the distributed archive. -@section inside_tests_ci Continous Integration +@section inside_tests_ci Continuous Integration We use several systems to automatically test SimGrid with a large set of parameters, across as many platforms as possible. diff --git a/doc/doxygen/module-index.doc b/doc/doxygen/module-index.doc index d1f2962b06..096e81d84d 100644 --- a/doc/doxygen/module-index.doc +++ b/doc/doxygen/module-index.doc @@ -47,7 +47,7 @@ This is not the way it goes in SimGrid: the network routing is defined in a global and compact way instead. This eases the modeling of very large systems, and allows highly optimized datastructures and algorithms in the simulator. The proposed description mechanism is -thus much more convinient and efficient. In addition, it is more +thus much more convenient and efficient. In addition, it is more expressive than the classical solution based on forwarding tables on each host and router. @@ -104,7 +104,7 @@ in other languages). Until then, please head to @ref platform. @defgroup SURF_API SURF @brief Internal kernel of all the simulators used in SimGrid, and associated models. 
-SURF provides the core functionnalities to simulate a virtual +SURF provides the core functionalities to simulate a virtual platform. It is very low-level and is not intended to be used by end users, but rather to serve as a basis for higher-level simulators. Its interfaces are not frozen (and probably never will be), and the diff --git a/doc/doxygen/module-sd.doc b/doc/doxygen/module-sd.doc index e739dca3d1..90276fac3c 100644 --- a/doc/doxygen/module-sd.doc +++ b/doc/doxygen/module-sd.doc @@ -2,12 +2,12 @@ @defgroup SD_API SimDag: Legacy handling of DAG algorithms @brief Programming environment for DAG applications -SimDag provides functionnalities to simulate parallel task scheduling +SimDag provides functionalities to simulate parallel task scheduling arranged in DAGs (Direct Acyclic Graphs). Only centralized algorithms can be expressed with SimDag; consider using @ref MSG_API "MSG" for distributed algorithms). -SimDag is the oldest interface in SimGrid, even if it was temporarly +SimDag is the oldest interface in SimGrid, even if it was temporarily removed when the new superfast kernel was added in SimGrid v3.0. It will certainly be deprecated by future releases of the S4U API, when inter-activity dependencies are added. diff --git a/doc/doxygen/module-xbt.doc b/doc/doxygen/module-xbt.doc index b43ac117c2..d3dc154d1f 100644 --- a/doc/doxygen/module-xbt.doc +++ b/doc/doxygen/module-xbt.doc @@ -40,7 +40,7 @@ /** @defgroup XBT_ex Exception support (C++) */ /** @defgroup XBT_ex_c Exception support (C) */ /** @defgroup XBT_log Logging support */ - /** @defgroup XBT_error Assert macro familly */ + /** @defgroup XBT_error Assert macro family */ /** @defgroup XBT_config Configuration support */ /** @defgroup XBT_mallocator Mallocators */ /** @} */ diff --git a/doc/doxygen/outcomes_logs.doc b/doc/doxygen/outcomes_logs.doc index e22541c2f3..bdebfd449c 100644 --- a/doc/doxygen/outcomes_logs.doc +++ b/doc/doxygen/outcomes_logs.doc @@ -236,7 +236,7 @@ The most common setting is to control which logging event will get displayed by setting a threshold to each category through the threshold keyword. -For example, @verbatim --log=root.threshold:debug@endverbatim will make +For example, @verbatim --log=root.thresh:debug@endverbatim will make SimGrid extremely verbose while @verbatim --log=root.thres:critical@endverbatim should shut it almost completely off. @@ -245,7 +245,7 @@ Note that the threshold keyword can be abbreviated here. For example, all the following notations have the same result. @verbatim --log=root.threshold:debug ---log=root.threshol:debug +--log=root.threshold:debug --log=root.thresho:debug --log=root.thresh:debug --log=root.thres:debug @@ -261,7 +261,7 @@ The full list of recognized thresholds is the following: - trace: enter and return of some functions - debug: crufty output - verbose: verbose output for the user wanting more - - info: output about the regular functionning + - info: output about the regular functioning - warning: minor issue encountered - error: issue encountered - critical: major issue encountered diff --git a/doc/doxygen/platform.doc b/doc/doxygen/platform.doc index 2e9967f6f6..831b6ec8c9 100644 --- a/doc/doxygen/platform.doc +++ b/doc/doxygen/platform.doc @@ -111,7 +111,7 @@ c-99.me is set to ``Cluster``. The ``<cabinet />`` tag is, like the @ref pf_tag_cluster "<cluster>" tag, -a meta-tag. This means that it is simply a shortcut for creating a set of (homogenous) hosts and links quickly; +a meta-tag. 
This means that it is simply a shortcut for creating a set of (homogeneous) hosts and links quickly; unsurprisingly, this tag was introduced to setup cabinets in data centers quickly. Unlike <cluster>, however, the <cabinet> assumes that you create the backbone and routers yourself; see our examples below. @@ -159,7 +159,7 @@ The hosts generated in the above example are named host-1.cluster, host-2.cluste etc. -@subsection pf_ne Network equipments +@subsection pf_ne Network equipment There are two tags at all times available to represent network entities and several other tags that are available only in certain contexts. @@ -514,7 +514,7 @@ router1 must belong to zone1 and router2 must belong to zone2. The purpose of this entity is to define a route between two NetZones. Recall that all zones form a tree, so to connect two -sibiling zones, you must give such a zoneRoute specifying the source +sibling zones, you must give such a zoneRoute specifying the source and destination zones, along with the gateway in each zone (ie, the point to reach within that zone to reach the netzone), and the list of links in the ancestor zone to go from one zone to another. @@ -780,7 +780,7 @@ you say that the file trace must be used by the entity. | Attribute name | Mandatory | Values | Description | | --------------- | --------- | ---------------------- | ----------- | | id | yes | String | Identifier of this trace; this is the name you pass on to @c trace_connect. | -| file | no | String | Filename of the file that contains the information - the path must follow the style of your OS. You can omit this, but then you must specifiy the values inside of <trace> and </trace> - see the example below. | +| file | no | String | Filename of the file that contains the information - the path must follow the style of your OS. You can omit this, but then you must specify the values inside of <trace> and </trace> - see the example below. | | trace_periodicity | yes | String | This is the same as for @ref pf_tag_host "hosts" (see there for details) | Here is an example of trace when no file name is provided: diff --git a/doc/doxygen/uhood.doc b/doc/doxygen/uhood.doc index 14350bdd0e..747aa5ee76 100644 --- a/doc/doxygen/uhood.doc +++ b/doc/doxygen/uhood.doc @@ -24,13 +24,13 @@ We provide an uniform interface to them: - the Maestro object and the corresponding S4U object have the same lifetime (and share the same reference count). -The ability to manipulate thge objects thought pointers and have the ability +The ability to manipulate the objects through pointers and have the ability to use explicit reference count management is useful for creating C wrappers to the S4U and should play nicely with other language bindings (such as SWIG-based ones). Some objects currently live for the whole duration of the simulation and do -not have refertence counts. We still provide dummy `intrusive_ptr_add_ref(p)`, +not have reference counts. We still provide dummy `intrusive_ptr_add_ref(p)`, `intrusive_ptr_release(p)` and `FooPtr` for consistency. In many cases, we try to have a API which is consistent with the API or @@ -203,7 +203,7 @@ The current implementation of the model-checker uses two distinct processes: - the SimGrid model-checker (`simgrid-mc`) itself lives in the parent process; - - it spaws a child process for the SimGrid simulator/maestro and the simulated + - it spawns a child process for the SimGrid simulator/maestro and the simulated processes. 
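The model-checker notes in this section describe `simgrid-mc` spawning the simulator/maestro side as a child process and (as noted just below) exchanging messages with it over an `AF_UNIX` `SOCK_SEQPACKET` socket. As a rough illustration of that pattern only — SimGrid's real implementation is C++ and is not reproduced here, and the message names below are placeholders — a sequenced-packet channel between a parent and a forked child can be sketched in Python:

~~~
import os
import socket

# Illustration of the checker/child split described in this section; not SimGrid code.
# SOCK_SEQPACKET preserves message boundaries, so each send() is one message.
checker_end, child_end = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)

if os.fork() == 0:                     # child: plays the simulator/maestro role
    checker_end.close()
    child_end.send(b"HELLO")           # placeholder message
    reply = child_end.recv(4096)       # wait for the checker's answer
    child_end.close()
    os._exit(0)

child_end.close()                      # parent: plays the simgrid-mc role
message = checker_end.recv(4096)
checker_end.send(b"CONTINUE")          # placeholder message
os.wait()
~~~
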
They communicate using a `AF_UNIX` `SOCK_SEQPACKET` socket and exchange messages @@ -218,7 +218,7 @@ process using the following techniques: - the model-cheker `ptrace()`s the model-checked process and is thus able to know the state of the model-checked process if it crashes; -- DWARF debug informations are used to unwind the stack and identify local +- DWARF debug information are used to unwind the stack and identify local variables; - a custom heap is enabled in the model-checked process which allows the model @@ -245,11 +245,11 @@ Additional helper class include: [ELF](http://refspecs.linuxbase.org/elf/elf.pdf) is a standard executable file and dynamic libraries file format. -[DWARF](http://dwarfstd.org/) is a standard for debug informations. +[DWARF](http://dwarfstd.org/) is a standard for debug information. Both are used on GNU/Linux systems and exploited by the model-checker to understand the model-checked process: - - `ObjectInformation` represents the informations about a given ELF module + - `ObjectInformation` represents the information about a given ELF module (executable or shared-object); - `Frame` represents a subprogram scope (either a subprogram or a scope within diff --git a/doc/doxygen/uhood_arch.doc b/doc/doxygen/uhood_arch.doc index f8663f4332..732ff1cc56 100644 --- a/doc/doxygen/uhood_arch.doc +++ b/doc/doxygen/uhood_arch.doc @@ -3,7 +3,7 @@ This page presents the current code organization, as you will see it if you dig into the src/ directory. But things will change during the current Gran Refactoring leading to SimGrid 4. So take the -information on this page with a grain of salt, and don't be affraid if +information on this page with a grain of salt, and don't be afraid if things are not exactly as documented here. At some point, we at least extend this page to present the overall @@ -26,7 +26,7 @@ page. @section ug_overview Overview of the toolkit components -@subsection ug_overview_envs Programing environments layer +@subsection ug_overview_envs Programming environments layer SimGrid provides several programming environments built on top of a unique simulation kernel. 
Each environment targets a specific audience and diff --git a/docs/Build.sh b/docs/Build.sh index 0bb46f2da8..cc536cb689 100755 --- a/docs/Build.sh +++ b/docs/Build.sh @@ -7,7 +7,7 @@ # Python needs to find simgrid on my machine, but not ctest -- sorry for the hack if [ -e /opt/simgrid ] ; then chmod +x /opt/simgrid; fi -set -e +set -ex set -o pipefail if [ "x$1" != 'xdoxy' ] && [ -e build/xml ] ; then @@ -21,7 +21,17 @@ if [ "x$1" != 'xjava' ] && [ -e source/java ] ; then echo "javasphinx not rerun: 'java' was not provided as an argument" else rm -rf source/java - javasphinx-apidoc --force -o source/java/ ../src/bindings/java/org/simgrid/msg + + # Use that script without installing javasphinx: javasphinx-apidoc --force -o source/java/ ../src/bindings/java/org/simgrid/msg + PYTHONPATH=${PYTHONPATH}:source/_ext/javasphinx python3 - --force -o source/java/ ../src/bindings/java/org/simgrid/msg <&1 \ - | grep -v 'WARNING: cpp:identifier reference target not found: simgrid$' \ - | grep -v 'WARNING: cpp:identifier reference target not found: simgrid::s4u$' \ - | grep -v 'WARNING: cpp:identifier reference target not found: boost' +PYTHONPATH=../lib:source/_ext/javasphinx sphinx-build -M html source build ${SPHINXOPTS} 2>&1 set +x diff --git a/docs/find-missing.ignore b/docs/find-missing.ignore index cce9236018..e2ec4edbd1 100644 --- a/docs/find-missing.ignore +++ b/docs/find-missing.ignore @@ -1,5 +1,5 @@ This file lists the symbols ignored in the documentation. -It follows the RST syntact but is completely ignored by sphinx. +It follows the RST syntax but is completely ignored by sphinx. It is only used by find-missing, that will not report any definition linked here as missing. # These ones trigger a bug in autodoxy, that get confused with the const in the function parameter diff --git a/docs/manpages/tesh.pod b/docs/manpages/tesh.pod index 259a2fe72f..da7ef424f0 100755 --- a/docs/manpages/tesh.pod +++ b/docs/manpages/tesh.pod @@ -189,7 +189,7 @@ same timestamp. Here is a SimGrid example: ! output sort 19 $ ./some_simgrid_simulator --log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n -This approach may seem surprizing at the first glance but it does its job: +This approach may seem surprising at the first glance but it does its job: =over 4 @@ -213,7 +213,7 @@ usable by others, who knows? =head2 Ignoring some output -Some outputed lines can be ignored by setting the ignore command followed +Some outputted lines can be ignored by setting the ignore command followed by a regular expression: ! ignore .*0x[0-9A-F]+\. @@ -224,7 +224,7 @@ by a regular expression: =head2 Colored and formatted text -Tesh removes ANSI/VT100 control sequences from outputed text to make easier the writing of tests. +Tesh removes ANSI/VT100 control sequences from outputted text to make easier the writing of tests. $ printf "I \033[0;31mlove\033[0m tesh\n" > I love tesh diff --git a/docs/requirements.txt b/docs/requirements.txt index 4302bfe80f..f357a3bd96 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,5 @@ breathe -sphinx>=1.8.0 +sphinx>=3.2.1 sphinx_rtd_theme -sphinx_tabs -javasphinx +# sphinx_tabs v1.2.1 is required for Sphinx 2 +sphinx_tabs>=1.2.1 diff --git a/docs/source/Configuring_SimGrid.rst b/docs/source/Configuring_SimGrid.rst index 70fbf7e176..e7a968578b 100644 --- a/docs/source/Configuring_SimGrid.rst +++ b/docs/source/Configuring_SimGrid.rst @@ -67,7 +67,7 @@ with :cpp:func:`simgrid::s4u::Engine::set_config` or :cpp:func:`MSG_config`. 
int main(int argc, char *argv[]) { simgrid::s4u::Engine e(&argc, argv); - e.set_config("Item:Value"); + simgrid::s4u::Engine::set_config("Item:Value"); // Rest of your code } @@ -415,7 +415,7 @@ Simulating Asynchronous Send It is possible to specify that messages below a certain size (in bytes) will be sent as soon as the call to MPI_Send is issued, without waiting for -the correspondant receive. This threshold can be configured through +the correspondent receive. This threshold can be configured through the ``smpi/async-small-thresh`` item. The default value is 0. This behavior can also be manually set for mailboxes, by setting the receiving mode of the mailbox with a call to @@ -444,7 +444,7 @@ Configuring the Storage model .. _cfg=storage/max_file_descriptors: -File Descriptor Cound per Host +File Descriptor Count per Host ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ **Option** ``storage/max_file_descriptors`` **Default:** 1024 @@ -530,7 +530,7 @@ Specifying a liveness property If you want to specify liveness properties, you have to pass them on the command line, specifying the name of the file containing the property, as formatted by the `ltl2ba `_ program. -Note that ltl2ba is not part of SimGrid and must be installed separatly. +Note that ltl2ba is not part of SimGrid and must be installed separately. .. code-block:: shell @@ -719,7 +719,7 @@ MC-related options, keep non-MC-related ones and add Currently, if the path is of the form ``X;Y;Z``, each number denotes the actor's pid that is selected at each indecision point. If it's of the form ``X/a;Y/b``, the X and Y are the selected pids while the a -and b are the return values of their simcalls. In the previouse +and b are the return values of their simcalls. In the previous example, ``1/3;1/4``, you can see from the full output that the actor 1 is doing MC_RANDOM simcalls, so the 3 and 4 simply denote the values that these simcall return. @@ -963,10 +963,16 @@ a ``MPI_Send()``, SMPI will automatically benchmark the duration of this code, and create an execution task within the simulator to take this into account. For that, the actual duration is measured on the host machine and then scaled to the power of the corresponding -simulated machine. The variable ``smpi/host-speed`` allows one to specify -the computational speed of the host machine (in flop/s) to use when -scaling the execution times. It defaults to 20000, but you really want -to adjust it to get accurate simulation results. +simulated machine. The variable ``smpi/host-speed`` allows one to +specify the computational speed of the host machine (in flop/s by +default) to use when scaling the execution times. + +The default value is ``smpi/host-speed=20kf`` (= 20,000 flop/s). This +is probably underestimated for most machines, leading SimGrid to +overestimate the amount of flops in the execution blocks that are +automatically injected in the simulator. As a result, the execution +time of the whole application will probably be overestimated until you +use a realistic value. When the code consists of numerous consecutive MPI calls, the previous mechanism feeds the simulation kernel with numerous tiny @@ -1384,7 +1390,7 @@ for each shared block. With the ``global`` algorithm, each call to SMPI_SHARED_MALLOC() returns a new address, but it only points to a shadow block: its memory area is mapped on a 1 MiB file on disk. If the returned block is of size -N MiB, then the same file is mapped N times to cover the whole bloc. +N MiB, then the same file is mapped N times to cover the whole block. 
At the end, no matter how many times you call SMPI_SHARED_MALLOC, this will only consume 1 MiB in memory. diff --git a/docs/source/Deploying_your_Application.rst b/docs/source/Deploying_your_Application.rst index 23dab0552b..2eb5d0fa76 100644 --- a/docs/source/Deploying_your_Application.rst +++ b/docs/source/Deploying_your_Application.rst @@ -20,7 +20,7 @@ study on your :ref:`simulated platform `, i.e. to specify which actor should be started on which host. You can do so directly in your program (as shown in :ref:`these examples `), or using an XML deployment file. Unless you have a good reason, you should keep your application apart -from the deployment as it will :ref:`ease your experimental campain afterward +from the deployment as it will :ref:`ease your experimental campaign afterward `. Deploying actors from XML is easy: it only involves 3 tags: :ref:`pf_tag_actor`, diff --git a/docs/source/Installing_SimGrid.rst b/docs/source/Installing_SimGrid.rst index 78e9c9f628..47c344749e 100644 --- a/docs/source/Installing_SimGrid.rst +++ b/docs/source/Installing_SimGrid.rst @@ -72,7 +72,7 @@ per year, numbered 3.24 or 3.25. Backward compatibility is ensured for one year: Code compiling without warning on 3.24 will still compile with 3.28, but maybe with some deprecation warnings. You should update your SimGrid installation at least once a year and fix those -deprecation warnings: the compatiblity wrappers are usually removed +deprecation warnings: the compatibility wrappers are usually removed after 4 versions. Another approach is to never update your SimGrid installation, but we don't provide any support to old versions. diff --git a/docs/source/Start_Your_Own_Project.rst b/docs/source/Start_Your_Own_Project.rst index 6297e2ee85..d24eea4304 100644 --- a/docs/source/Start_Your_Own_Project.rst +++ b/docs/source/Start_Your_Own_Project.rst @@ -192,6 +192,6 @@ Sometimes, the compilation only spits very few "undefined reference" errors. A possible cause is that the system selected an old version of the SimGrid library somewhere on your disk. -Dicover which version is used with ``ldd name-of-yoursimulator``. +Discover which version is used with ``ldd name-of-yoursimulator``. Once you've found the obsolete copy of SimGrid, just erase it, and recompile and relaunch your program. diff --git a/docs/source/Tutorial_Algorithms.rst b/docs/source/Tutorial_Algorithms.rst index cbb9a8c72c..2bb09ed1e7 100644 --- a/docs/source/Tutorial_Algorithms.rst +++ b/docs/source/Tutorial_Algorithms.rst @@ -211,6 +211,16 @@ informative messages. :append: $$$ :dedent: 2 +Each example included in the SimGrid distribution comes with a `tesh` +file that presents how to start the example once compiled, along with +the expected output. These files are used for the automatic testing of +the framework, but can be used to see the examples' output without +compiling them. See e.g. the file +`examples/s4u/app-masterworkers/s4u-app-masterworkers.tesh `_. +Lines starting with `$` are the commands to execute; +lines starting with `>` are the expected output of each command while +lines starting with `!` are configuration items for the test runner. + Improve it Yourself ------------------- diff --git a/docs/source/_ext/autodoxy.py b/docs/source/_ext/autodoxy.py index d5ccfd59cd..5811d2af70 100644 --- a/docs/source/_ext/autodoxy.py +++ b/docs/source/_ext/autodoxy.py @@ -358,6 +358,13 @@ class DoxygenClassDocumenter(DoxygenDocumenter): # Uncomment to view the generated rst for the class. 
# print('\n'.join(self.directive.result)) +autodoxy_requalified_identifiers = [] +def fix_namespaces(str): + for unqualified,fullyqualif in autodoxy_requalified_identifiers: + p = re.compile("(^| ){:s}".format(unqualified)) + str = p.sub(' {:s}'.format(fullyqualif), str) + return str + class DoxygenMethodDocumenter(DoxygenDocumenter): objtype = 'doxymethod' directivetype = 'function' @@ -450,7 +457,8 @@ class DoxygenMethodDocumenter(DoxygenDocumenter): rtype = rtype_el.text # print("rtype: {}".format(rtype)) - signame = (rtype and (rtype + ' ') or '') + self.klassname + "::"+ self.objname + signame = fix_namespaces((rtype and (rtype + ' ') or '') + self.klassname + "::"+ self.objname ) +# print("signame: '{}'".format(signame)) return self.format_template_name() + signame def format_template_name(self): @@ -462,7 +470,8 @@ class DoxygenMethodDocumenter(DoxygenDocumenter): return ret def format_signature(self): - args = self.object.find('argsstring').text + args = fix_namespaces(self.object.find('argsstring').text) +# print ("signature: {}".format(args)) return args def document_members(self, all_members=False): @@ -532,7 +541,7 @@ class DoxygenVariableDocumenter(DoxygenDocumenter): # print("rtype: {}".format(rtype)) signame = (rtype and (rtype + ' ') or '') + self.klassname + "::" + self.objname - return self.format_template_name() + signame + return fix_namespaces(self.format_template_name() + signame) def get_doc(self, encoding=None): # This method is called with 1 parameter in Sphinx 2.x and 2 parameters in Sphinx 1.x detaileddescription = self.object.find('detaileddescription') @@ -577,6 +586,9 @@ def set_doxygen_xml(app): for node in root: setup.DOXYGEN_ROOT.append(node) + if app.config.autodoxy_requalified_identifiers is not None: + global autodoxy_requalified_identifiers + autodoxy_requalified_identifiers = app.config.autodoxy_requalified_identifiers def get_doxygen_root(): """Get the root element of the doxygen XML document. @@ -599,6 +611,7 @@ def setup(app): app.add_autodocumenter(DoxygenMethodDocumenter) app.add_autodocumenter(DoxygenVariableDocumenter) app.add_config_value("doxygen_xml", "", True) + app.add_config_value("autodoxy_requalified_identifiers", [], True) # app.add_directive('autodoxysummary', DoxygenAutosummary) # app.add_directive('autodoxyenum', DoxygenAutoEnum) diff --git a/docs/source/_ext/javasphinx/.gitignore b/docs/source/_ext/javasphinx/.gitignore new file mode 100644 index 0000000000..03ecc2b774 --- /dev/null +++ b/docs/source/_ext/javasphinx/.gitignore @@ -0,0 +1,5 @@ +*.pyc +dist/ +*.egg-info/ +.vscode +.DS_Store \ No newline at end of file diff --git a/docs/source/_ext/javasphinx/LICENSE b/docs/source/_ext/javasphinx/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/docs/source/_ext/javasphinx/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/docs/source/_ext/javasphinx/MANIFEST.in b/docs/source/_ext/javasphinx/MANIFEST.in new file mode 100644 index 0000000000..9561fb1061 --- /dev/null +++ b/docs/source/_ext/javasphinx/MANIFEST.in @@ -0,0 +1 @@ +include README.rst diff --git a/docs/source/_ext/javasphinx/README.md b/docs/source/_ext/javasphinx/README.md new file mode 100644 index 0000000000..a523ad213b --- /dev/null +++ b/docs/source/_ext/javasphinx/README.md @@ -0,0 +1,12 @@ + +# javasphinx + +[![Documentation Status](https://readthedocs.org/projects/bronto-javasphinx/badge/?version=latest)](http://bronto-javasphinx.readthedocs.io/en/latest/?badge=latest) + +**This project is no longer maintained and should be used for historical purposes only.** + +javasphinx is an extension to the Sphinx documentation system which adds support for documenting Java projects. It includes a Java domain for writing documentation manually and a javasphinx-apidoc utility which will automatically generate API documentation from existing Javadoc markup. + +javasphinx is available in the Python Package Index (PyPi) under the name _javasphinx_ and can be installed using tools such as `pip` or `easy_install`. + +Documentation for javasphinx is available at http://bronto-javasphinx.readthedocs.io diff --git a/docs/source/_ext/javasphinx/doc/conf.py b/docs/source/_ext/javasphinx/doc/conf.py new file mode 100644 index 0000000000..de52125b06 --- /dev/null +++ b/docs/source/_ext/javasphinx/doc/conf.py @@ -0,0 +1,25 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +project = 'javasphinx' +version = '0.9.15' +release = version + +extensions = ['javasphinx'] + +master_doc = 'index' +copyright = u'2012-2017, Bronto Software Inc. and contributors' +primary_domain = 'rst' diff --git a/docs/source/_ext/javasphinx/doc/index.rst b/docs/source/_ext/javasphinx/doc/index.rst new file mode 100644 index 0000000000..8780a23eaf --- /dev/null +++ b/docs/source/_ext/javasphinx/doc/index.rst @@ -0,0 +1,219 @@ + +####################### +javasphinx User's Guide +####################### + +Welcome to the javasphinx user's guide. + +Introduction +============ + +javasphinx is a Sphinx_ extension that provides a Sphinx domain_ for documenting +Java projects and a ``javasphinx-apidoc`` command line tool for automatically +generating API documentation from existing Java source code and Javadoc +documentation. + +.. _Sphinx: http://sphinx-doc.org +.. _domain: http://sphinx-doc.org/domains.html + +Installing +========== + +javasphinx is available in the Python Package Index (PyPi) and can be installed +using tools such as ``pip`` or ``easy_install``, + +.. code-block:: sh + + $ pip install javasphinx + +or, + +.. code-block:: sh + + $ easy_install -U javasphinx + +Configuration +============= + +To enable javasphinx for your existing Sphinx configuration add ``'javasphinx'`` +to the list of extensions in your conf.py file. 
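For instance, a minimal ``conf.py`` fragment might look like the sketch below (the ``sys.path`` line is only an assumption for a vendored copy of the extension, such as the one added under ``docs/source/_ext``; it is unnecessary when javasphinx is installed from PyPI):

.. code-block:: python

    import os
    import sys

    # Assumed location of a vendored javasphinx checkout, relative to conf.py.
    sys.path.insert(0, os.path.abspath('_ext/javasphinx'))

    # 'javasphinx' registers the Java domain and the java:extdoc role.
    extensions = ['javasphinx']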
javasphinx can be configured to
+cross-link to external sources of documentation using the ``javadoc_url_map``
+option,
+
+.. code-block:: python
+
+ javadoc_url_map = {
+ 'com.netflix.curator' : ('http://netflix.github.com/curator/doc', 'javadoc'),
+ 'org.springframework' : ('http://static.springsource.org/spring/docs/3.1.x/javadoc-api/', 'javadoc'),
+ 'org.springframework.data.redis' : ('http://static.springsource.org/spring-data/data-redis/docs/current/api/', 'javadoc')
+ }
+
+Each key in the map should be a Java package. Each value is a tuple of the form
+``(base_url, doc_type)`` where ``base_url`` is the base URL of the documentation
+source, and ``doc_type`` is one of,
+
+``javadoc``
+ For documentation generated by the Javadoc tool *before* version 8.
+
+``javadoc8``
+ For documentation generated by the Javadoc tool in version 8 or later. This is
+ required due to changes in how method anchors are generated (see JDK-8144118_).
+
+``sphinx``
+ For external documentation generated by javasphinx.
+
+When comparing referenced types to the list of available packages the longest
+match will be used. Entries for ``java``, ``javax``, ``org.xml``, and
+``org.w3c`` packages pointing to http://docs.oracle.com/javase/8/docs/api are
+included automatically and do not need to be defined explicitly.
+
+.. _JDK-8144118: https://bugs.openjdk.java.net/browse/JDK-8144118
+
+Java domain
+===========
+
+Directives
+----------
+
+The Java domain uses the name **java** and provides the following directives,
+
+.. rst:directive:: .. java:type:: type-signature
+
+ Describe a Java type. The signature can represent either a class, interface,
+ enum, or annotation declaration.
+
+ Use the ``param`` field to document type parameters.
+
+ Example,
+
+ .. code-block:: rst
+
+ .. java:type:: public interface List<E> extends Collection<E>, Iterable<E>
+
+ An ordered collection (also known as a *sequence*)
+
+ :param E: type of item stored by the list
+
+ produces,
+
+ .. java:type:: public interface List<E> extends Collection<E>, Iterable<E>
+
+ An ordered collection (also known as a *sequence*)
+
+ :param E: type of item stored by the list
+
+.. rst:directive:: .. java:field:: field-signature
+
+ Describe a Java field.
+
+.. rst:directive:: .. java:method:: method-signature
+
+ Describe a Java method.
+
+ Use the ``param`` field to document parameters.
+
+ Use the ``throws`` field to document exceptions thrown by the method.
+
+ Use the ``return`` field to document the return type.
+
+.. rst:directive:: .. java:constructor:: constructor-signature
+
+ Describe a Java constructor.
+
+ Use the ``param`` field to document parameters.
+
+ Use the ``throws`` field to document exceptions thrown by the constructor.
+
+.. rst:directive:: .. java:package:: package
+
+ Provide package-level documentation and set the active package for the
+ types, methods, fields, constructors, and references that follow.
+
+ Use the ``:noindex:`` option if the directive is only being used to specify
+ the active package. Only one directive for a given package should exclude
+ ``:noindex:``.
+
+.. rst:directive:: .. java:import:: package type
+
+ Declare the given type as being provided by the given package. This
+ information helps javasphinx create cross references for types in type,
+ method, and field declarations. It also allows explicit cross references
+ (using the ``java:ref`` role) to exclude the package qualification.
+
+The method, constructor, field, and type directives all accept the following
+standard options,
+
+.. describe:: package
+
+ Specify the package the declaration is within. Can be used instead of, or to
+ override, a ``java:package`` directive.
+
+.. describe:: outertype
+
+ Specify the class/interface the documented object is contained within. This
+ option should be provided for any constructor, method, or field directive
+ that isn't nested within a corresponding type directive.
+
+Roles
+-----
+
+The following roles are provided,
+
+.. rst:role:: java:ref
+
+ This role can be used to create a cross reference to any object type within
+ the Java domain. Aliases for this role include ``java:meth``, ``java:type``,
+ ``java:field``, ``java:package``, and ``java:construct``.
+
+ An explicit title can be provided by using the standard ``title <target>``
+ syntax.
+
+.. rst:role:: java:extdoc
+
+ This role can be used to explicitly link to an externally documented
+ type. The reference must be fully qualified and supports an explicit title
+ using the ``title <target>`` syntax.
+
+ The ``java:ref`` role will also create external references as a fall-back if
+ it can't find a matching local declaration, so using this role is not strictly
+ necessary.
+
+javasphinx-apidoc
+=================
+
+The ``javasphinx-apidoc`` tool is the counterpart to the ``sphinx-apidoc`` tool
+within the Java domain. It can be used to generate reST source from existing
+Java source code which has been marked up with Javadoc-style comments. The
+generated reST is then processed alongside hand-written documentation by Sphinx.
+
+At minimum a source and destination directory must be provided. The input
+directory will be scanned for .java files and documentation will be generated
+for all non-private types and members. A separate output file will be generated
+for each type (including inner classes). Each file is put within a directory
+corresponding to its package (with periods replaced by directory separators) and
+with the basename of the file deriving from the type name. Inner types are
+placed in files with a basename using a hyphen to separate inner and outer
+types, e.g. ``OuterType-InnerType.rst``.
+
+By default ``javasphinx-apidoc`` will not overwrite existing files. Two options
+can change this behavior,
+
+.. option:: -f, --force
+
+ All existing output files will be rewritten. If a cache directory is
+ specified it will be rebuilt.
+
+.. option:: -u, --update
+
+ Updated source files will have their corresponding output files
+ updated. Unchanged files will be left alone. Most projects will want to use
+ this option.
+
+For larger projects it is recommended to use a cache directory. This can speed
+up subsequent runs by an order of magnitude or more. Specify a directory to
+store cached output using the :option:`-c` option,
+
+.. option:: -c, --cache-dir
+
+ Specify a directory to cache intermediate documentation representations. This
+ directory will be created if it does not already exist.
diff --git a/docs/source/_ext/javasphinx/javasphinx/__init__.py b/docs/source/_ext/javasphinx/javasphinx/__init__.py new file mode 100644 index 0000000000..c6b9cb9bf0 --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/__init__.py @@ -0,0 +1,24 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .domain import JavaDomain +from .extdoc import javadoc_role + +def setup(app): + app.add_domain(JavaDomain) + + app.add_config_value('javadoc_url_map', dict(), '') + app.add_role('java:extdoc', javadoc_role) diff --git a/docs/source/_ext/javasphinx/javasphinx/apidoc.py b/docs/source/_ext/javasphinx/javasphinx/apidoc.py new file mode 100755 index 0000000000..b0de46f567 --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/apidoc.py @@ -0,0 +1,352 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import print_function, unicode_literals + +try: + import cPickle as pickle +except: + import pickle + +import hashlib +import logging +import sys +import os +import os.path + +from optparse import OptionParser + +import javalang + +import javasphinx.compiler as compiler +import javasphinx.util as util + +def encode_output(s): + if isinstance(s, str): + return s + else: + return s.encode('utf-8') + +def find_source_files(input_path, excludes): + """ Get a list of filenames for all Java source files within the given + directory. + + """ + + java_files = [] + + input_path = os.path.normpath(os.path.abspath(input_path)) + + for dirpath, dirnames, filenames in os.walk(input_path): + if is_excluded(dirpath, excludes): + del dirnames[:] + continue + + for filename in filenames: + if filename.endswith(".java"): + java_files.append(os.path.join(dirpath, filename)) + + return java_files + +def write_toc(packages, opts): + doc = util.Document() + doc.add_heading(opts.toc_title, '=') + + toc = util.Directive('toctree') + toc.add_option('maxdepth', '2') + doc.add_object(toc) + + for package in sorted(packages.keys()): + toc.add_content("%s/package-index\n" % package.replace('.', '/')) + + filename = 'packages.' + opts.suffix + fullpath = os.path.join(opts.destdir, filename) + + if os.path.exists(fullpath) and not (opts.force or opts.update): + sys.stderr.write(fullpath + ' already exists. Use -f to overwrite.\n') + sys.exit(1) + + f = open(fullpath, 'w') + f.write(encode_output(doc.build())) + f.close() + +def write_documents(packages, documents, sources, opts): + package_contents = dict() + + # Write individual documents + for fullname, (package, name, document) in documents.items(): + if is_package_info_doc(name): + continue + + package_path = package.replace('.', os.sep) + filebasename = name.replace('.', '-') + filename = filebasename + '.' 
+ opts.suffix + dirpath = os.path.join(opts.destdir, package_path) + fullpath = os.path.join(dirpath, filename) + + if not os.path.exists(dirpath): + os.makedirs(dirpath) + elif os.path.exists(fullpath) and not (opts.force or opts.update): + sys.stderr.write(fullpath + ' already exists. Use -f to overwrite.\n') + sys.exit(1) + + # Add to package indexes + package_contents.setdefault(package, list()).append(filebasename) + + if opts.update and os.path.exists(fullpath): + # If the destination file is newer than the source file than skip + # writing it out + source_mod_time = os.stat(sources[fullname]).st_mtime + dest_mod_time = os.stat(fullpath).st_mtime + + if source_mod_time < dest_mod_time: + continue + + f = open(fullpath, 'w') + f.write(encode_output(document)) + f.close() + + # Write package-index for each package + for package, classes in package_contents.items(): + doc = util.Document() + doc.add_heading(package, '=') + + #Adds the package documentation (if any) + if packages[package] != '': + documentation = packages[package] + doc.add_line("\n%s" % documentation) + + doc.add_object(util.Directive('java:package', package)) + + toc = util.Directive('toctree') + toc.add_option('maxdepth', '1') + + classes.sort() + for filebasename in classes: + toc.add_content(filebasename + '\n') + doc.add_object(toc) + + package_path = package.replace('.', os.sep) + filename = 'package-index.' + opts.suffix + dirpath = os.path.join(opts.destdir, package_path) + fullpath = os.path.join(dirpath, filename) + + if not os.path.exists(dirpath): + os.makedirs(dirpath) + elif os.path.exists(fullpath) and not (opts.force or opts.update): + sys.stderr.write(fullpath + ' already exists. Use -f to overwrite.\n') + sys.exit(1) + + f = open(fullpath, 'w') + f.write(encode_output(doc.build())) + f.close() + +def get_newer(a, b): + if not os.path.exists(a): + return b + + if not os.path.exists(b): + return a + + a_mtime = int(os.stat(a).st_mtime) + b_mtime = int(os.stat(b).st_mtime) + + if a_mtime < b_mtime: + return b + + return a + +def format_syntax_error(e): + rest = "" + if e.at.position: + value = e.at.value + pos = e.at.position + rest = ' at %s line %d, character %d' % (value, pos[0], pos[1]) + return e.description + rest + +def generate_from_source_file(doc_compiler, source_file, cache_dir): + if cache_dir: + fingerprint = hashlib.md5(source_file.encode()).hexdigest() + cache_file = os.path.join(cache_dir, 'parsed-' + fingerprint + '.p') + + if get_newer(source_file, cache_file) == cache_file: + return pickle.load(open(cache_file, 'rb')) + else: + cache_file = None + + f = open(source_file) + source = f.read() + f.close() + + try: + ast = javalang.parse.parse(source) + except javalang.parser.JavaSyntaxError as e: + util.error('Syntax error in %s: %s', source_file, format_syntax_error(e)) + except Exception: + util.unexpected('Unexpected exception while parsing %s', source_file) + + documents = {} + try: + if source_file.endswith("package-info.java"): + if ast.package is not None: + documentation = doc_compiler.compile_docblock(ast.package) + documents[ast.package.name] = (ast.package.name, 'package-info', documentation) + else: + documents = doc_compiler.compile(ast) + except Exception: + util.unexpected('Unexpected exception while compiling %s', source_file) + + if cache_file: + dump_file = open(cache_file, 'wb') + pickle.dump(documents, dump_file) + dump_file.close() + + return documents + +def generate_documents(source_files, cache_dir, verbose, member_headers, parser): + documents = {} + sources = {} + 
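# One JavadocRestCompiler instance is reused for every source file; 'documents' maps
# fully qualified type names to (package, name, reST) tuples and 'sources' records
# which .java file each of those names came from.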
doc_compiler = compiler.JavadocRestCompiler(None, member_headers, parser) + + for source_file in source_files: + if verbose: + print('Processing', source_file) + + this_file_documents = generate_from_source_file(doc_compiler, source_file, cache_dir) + for fullname in this_file_documents: + sources[fullname] = source_file + + documents.update(this_file_documents) + + #Existing packages dict, where each key is a package name + #and each value is the package documentation (if any) + packages = {} + + #Gets the name of the package where the document was declared + #and adds it to the packages dict with no documentation. + #Package documentation, if any, will be collected from package-info.java files. + for package, name, _ in documents.values(): + packages[package] = "" + + #Gets packages documentation from package-info.java documents (if any). + for package, name, content in documents.values(): + if is_package_info_doc(name): + packages[package] = content + + return packages, documents, sources + +def normalize_excludes(rootpath, excludes): + f_excludes = [] + for exclude in excludes: + if not os.path.isabs(exclude) and not exclude.startswith(rootpath): + exclude = os.path.join(rootpath, exclude) + f_excludes.append(os.path.normpath(exclude) + os.path.sep) + return f_excludes + +def is_excluded(root, excludes): + sep = os.path.sep + if not root.endswith(sep): + root += sep + for exclude in excludes: + if root.startswith(exclude): + return True + return False + +def is_package_info_doc(document_name): + ''' Checks if the name of a document represents a package-info.java file. ''' + return document_name == 'package-info' + + +def main(argv=sys.argv): + logging.basicConfig(level=logging.WARN) + + parser = OptionParser( + usage="""\ +usage: %prog [options] -o [exclude_paths, ...] + +Look recursively in for Java sources files and create reST files +for all non-private classes, organized by package under . A package +index (package-index.) will be created for each package, and a top level +table of contents will be generated named packages.. + +Paths matching any of the given exclude_paths (interpreted as regular +expressions) will be skipped. 
+ +Note: By default this script will not overwrite already created files.""") + + parser.add_option('-o', '--output-dir', action='store', dest='destdir', + help='Directory to place all output', default='') + parser.add_option('-f', '--force', action='store_true', dest='force', + help='Overwrite all files') + parser.add_option('-c', '--cache-dir', action='store', dest='cache_dir', + help='Directory to stored cachable output') + parser.add_option('-u', '--update', action='store_true', dest='update', + help='Overwrite new and changed files', default=False) + parser.add_option('-T', '--no-toc', action='store_true', dest='notoc', + help='Don\'t create a table of contents file') + parser.add_option('-t', '--title', dest='toc_title', default='Javadoc', + help='Title to use on table of contents') + parser.add_option('--no-member-headers', action='store_false', default=True, dest='member_headers', + help='Don\'t generate headers for class members') + parser.add_option('-s', '--suffix', action='store', dest='suffix', + help='file suffix (default: rst)', default='rst') + parser.add_option('-I', '--include', action='append', dest='includes', + help='Additional input paths to scan', default=[]) + parser.add_option('-p', '--parser', dest='parser_lib', default='lxml', + help='Beautiful Soup---html parser library option.') + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + help='verbose output') + + (opts, args) = parser.parse_args(argv[1:]) + + if not args: + parser.error('A source path is required.') + + rootpath, excludes = args[0], args[1:] + + input_paths = opts.includes + input_paths.append(rootpath) + + if not opts.destdir: + parser.error('An output directory is required.') + + if opts.suffix.startswith('.'): + opts.suffix = opts.suffix[1:] + + for input_path in input_paths: + if not os.path.isdir(input_path): + sys.stderr.write('%s is not a directory.\n' % (input_path,)) + sys.exit(1) + + if not os.path.isdir(opts.destdir): + os.makedirs(opts.destdir) + + if opts.cache_dir and not os.path.isdir(opts.cache_dir): + os.makedirs(opts.cache_dir) + + excludes = normalize_excludes(rootpath, excludes) + source_files = [] + + for input_path in input_paths: + source_files.extend(find_source_files(input_path, excludes)) + + packages, documents, sources = generate_documents(source_files, opts.cache_dir, opts.verbose, + opts.member_headers, opts.parser_lib) + + write_documents(packages, documents, sources, opts) + + if not opts.notoc: + write_toc(packages, opts) diff --git a/docs/source/_ext/javasphinx/javasphinx/compiler.py b/docs/source/_ext/javasphinx/javasphinx/compiler.py new file mode 100644 index 0000000000..807d027025 --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/compiler.py @@ -0,0 +1,345 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import javalang + +import javasphinx.formatter as formatter +import javasphinx.util as util +import javasphinx.htmlrst as htmlrst + +class JavadocRestCompiler(object): + """ Javadoc to ReST compiler. Builds ReST documentation from a Java syntax + tree. """ + + def __init__(self, filter=None, member_headers=True, parser='lxml'): + if filter: + self.filter = filter + else: + self.filter = self.__default_filter + + self.converter = htmlrst.Converter(parser) + + self.member_headers = member_headers + + def __default_filter(self, node): + """Excludes private members and those tagged "@hide" / "@exclude" in their + docblocks. + + """ + + if not isinstance(node, javalang.tree.Declaration): + return False + + if 'private' in node.modifiers: + return False + + if isinstance(node, javalang.tree.Documented) and node.documentation: + doc = javalang.javadoc.parse(node.documentation) + if 'hide' in doc.tags or 'exclude' in doc.tags: + return False + + return True + + def __html_to_rst(self, s): + return self.converter.convert(s) + + def __output_doc(self, documented): + if not isinstance(documented, javalang.tree.Documented): + raise ValueError('node not documented') + + output = util.Document() + + if not documented.documentation: + return output + + doc = javalang.javadoc.parse(documented.documentation) + + if doc.description: + output.add(self.__html_to_rst(doc.description)) + output.clear() + + if doc.authors: + output.add_line(':author: %s' % (self.__html_to_rst(', '.join(doc.authors)),)) + + for name, value in doc.params: + output.add_line(':param %s: %s' % (name, self.__html_to_rst(value))) + + for exception in doc.throws: + description = doc.throws[exception] + output.add_line(':throws %s: %s' % (exception, self.__html_to_rst(description))) + + if doc.return_doc: + output.add_line(':return: %s' % (self.__html_to_rst(doc.return_doc),)) + + if doc.tags.get('see'): + output.clear() + + see_also = ', '.join(self.__output_see(see) for see in doc.tags['see']) + output.add_line('**See also:** %s' % (see_also,)) + + return output + + def __output_see(self, see): + """ Convert the argument to a @see tag to rest """ + + if see.startswith('... 
+ return self.__html_to_rst(see) + elif '"' in see: + # Plain text + return see + else: + # Type reference (default) + return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),) + + def compile_type(self, declaration): + signature = util.StringBuilder() + formatter.output_declaration(declaration, signature) + + doc = self.__output_doc(declaration) + + directive = util.Directive('java:type', signature.build()) + directive.add_content(doc) + + return directive + + def compile_enum_constant(self, enum, constant): + signature = util.StringBuilder() + + for annotation in constant.annotations: + formatter.output_annotation(annotation, signature) + + # All enum constants are public, static, and final + signature.append('public static final ') + signature.append(enum) + signature.append(' ') + signature.append(constant.name) + + doc = self.__output_doc(constant) + + directive = util.Directive('java:field', signature.build()) + directive.add_content(doc) + + return directive + + def compile_field(self, field): + signature = util.StringBuilder() + + for annotation in field.annotations: + formatter.output_annotation(annotation, signature) + + formatter.output_modifiers(field.modifiers, signature) + signature.append(' ') + + formatter.output_type(field.type, signature) + signature.append(' ') + signature.append(field.declarators[0].name) + + doc = self.__output_doc(field) + + directive = util.Directive('java:field', signature.build()) + directive.add_content(doc) + + return directive + + def compile_constructor(self, constructor): + signature = util.StringBuilder() + + for annotation in constructor.annotations: + formatter.output_annotation(annotation, signature) + + formatter.output_modifiers(constructor.modifiers, signature) + signature.append(' ') + + if constructor.type_parameters: + formatter.output_type_params(constructor.type_parameters, signature) + signature.append(' ') + + signature.append(constructor.name) + + signature.append('(') + formatter.output_list(formatter.output_formal_param, constructor.parameters, signature, ', ') + signature.append(')') + + if constructor.throws: + signature.append(' throws ') + formatter.output_list(formatter.output_exception, constructor.throws, signature, ', ') + + doc = self.__output_doc(constructor) + + directive = util.Directive('java:constructor', signature.build()) + directive.add_content(doc) + + return directive + + def compile_method(self, method): + signature = util.StringBuilder() + + for annotation in method.annotations: + formatter.output_annotation(annotation, signature) + + formatter.output_modifiers(method.modifiers, signature) + signature.append(' ') + + if method.type_parameters: + formatter.output_type_params(method.type_parameters, signature) + signature.append(' ') + + formatter.output_type(method.return_type, signature) + signature.append(' ') + + signature.append(method.name) + + signature.append('(') + formatter.output_list(formatter.output_formal_param, method.parameters, signature, ', ') + signature.append(')') + + if method.throws: + signature.append(' throws ') + formatter.output_list(formatter.output_exception, method.throws, signature, ', ') + + doc = self.__output_doc(method) + + directive = util.Directive('java:method', signature.build()) + directive.add_content(doc) + + return directive + + def compile_type_document(self, imports_block, package, name, declaration): + """ Compile a complete document, documenting a type and its members """ + + outer_type = name.rpartition('.')[0] + + document = util.Document() + 
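# Assemble the page: the import directives, a heading, a no-index java:package
# directive, the type signature, then its members (enum constants, fields,
# constructors, methods).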
document.add(imports_block) + document.add_heading(name, '=') + + method_summary = util.StringBuilder() + document.add_object(method_summary) + + package_dir = util.Directive('java:package', package) + package_dir.add_option('noindex') + document.add_object(package_dir) + + # Add type-level documentation + type_dir = self.compile_type(declaration) + if outer_type: + type_dir.add_option('outertype', outer_type) + document.add_object(type_dir) + + if isinstance(declaration, javalang.tree.EnumDeclaration): + enum_constants = list(declaration.body.constants) + enum_constants.sort(key=lambda c: c.name) + + document.add_heading('Enum Constants') + for enum_constant in enum_constants: + if self.member_headers: + document.add_heading(enum_constant.name, '^') + c = self.compile_enum_constant(name, enum_constant) + c.add_option('outertype', name) + document.add_object(c) + + fields = list(filter(self.filter, declaration.fields)) + if fields: + document.add_heading('Fields', '-') + fields.sort(key=lambda f: f.declarators[0].name) + for field in fields: + if self.member_headers: + document.add_heading(field.declarators[0].name, '^') + f = self.compile_field(field) + f.add_option('outertype', name) + document.add_object(f) + + constructors = list(filter(self.filter, declaration.constructors)) + if constructors: + document.add_heading('Constructors', '-') + constructors.sort(key=lambda c: c.name) + for constructor in constructors: + if self.member_headers: + document.add_heading(constructor.name, '^') + c = self.compile_constructor(constructor) + c.add_option('outertype', name) + document.add_object(c) + + methods = list(filter(self.filter, declaration.methods)) + if methods: + document.add_heading('Methods', '-') + methods.sort(key=lambda m: m.name) + for method in methods: + if self.member_headers: + document.add_heading(method.name, '^') + m = self.compile_method(method) + m.add_option('outertype', name) + document.add_object(m) + + return document + + def compile(self, ast): + """ Compile autodocs for the given Java syntax tree. Documents will be + returned documenting each separate type. """ + + documents = {} + + imports = util.StringBuilder() + for imp in ast.imports: + if imp.static or imp.wildcard: + continue + + package_parts = [] + cls_parts = [] + + for part in imp.path.split('.'): + if cls_parts or part[0].isupper(): + cls_parts.append(part) + else: + package_parts.append(part) + + + # If the import's final part wasn't capitalized, + # append it to the class parts anyway so sphinx doesn't complain. + if cls_parts == []: + cls_parts.append(package_parts.pop()) + + package = '.'.join(package_parts) + cls = '.'.join(cls_parts) + + imports.append(util.Directive('java:import', package + ' ' + cls).build()) + import_block = imports.build() + + if not ast.package: + raise ValueError('File must have package declaration') + + package = ast.package.name + type_declarations = [] + for path, node in ast.filter(javalang.tree.TypeDeclaration): + if not self.filter(node): + continue + + classes = [n.name for n in path if isinstance(n, javalang.tree.TypeDeclaration)] + classes.append(node.name) + + name = '.'.join(classes) + type_declarations.append((package, name, node)) + + for package, name, declaration in type_declarations: + full_name = package + '.' 
+ name + document = self.compile_type_document(import_block, package, name, declaration) + documents[full_name] = (package, name, document.build()) + return documents + + def compile_docblock(self, documented): + ''' Compiles a single, standalone docblock. ''' + return self.__output_doc(documented).build() diff --git a/docs/source/_ext/javasphinx/javasphinx/domain.py b/docs/source/_ext/javasphinx/javasphinx/domain.py new file mode 100644 index 0000000000..275f6c85cf --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/domain.py @@ -0,0 +1,594 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import re +import string + +from docutils import nodes +from docutils.parsers.rst import Directive, directives + +from sphinx import addnodes, version_info +from sphinx.roles import XRefRole +from sphinx.locale import _ +from sphinx.domains import Domain, ObjType +from sphinx.directives import ObjectDescription +from sphinx.util.nodes import make_refnode +from sphinx.util.docfields import Field, TypedField, GroupedField + +import javalang + +import javasphinx.extdoc as extdoc +import javasphinx.formatter as formatter +import javasphinx.util as util + +# Classes in java.lang. These are available without an import. 
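+# Bare references to these names are resolved against the java.lang package when building cross references.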
+java_dot_lang = set([ + 'AbstractMethodError', 'Appendable', 'ArithmeticException', + 'ArrayIndexOutOfBoundsException', 'ArrayStoreException', 'AssertionError', + 'AutoCloseable', 'Boolean', 'BootstrapMethodError', 'Byte', 'Character', + 'CharSequence', 'Class', 'ClassCastException', 'ClassCircularityError', + 'ClassFormatError', 'ClassLoader', 'ClassNotFoundException', 'ClassValue', + 'Cloneable', 'CloneNotSupportedException', 'Comparable', 'Compiler', + 'Deprecated', 'Double', 'Enum', 'EnumConstantNotPresentException', 'Error', + 'Exception', 'ExceptionInInitializerError', 'Float', 'IllegalAccessError', + 'IllegalAccessException', 'IllegalArgumentException', + 'IllegalMonitorStateException', 'IllegalStateException', + 'IllegalThreadStateException', 'IncompatibleClassChangeError', + 'IndexOutOfBoundsException', 'InheritableThreadLocal', 'InstantiationError', + 'InstantiationException', 'Integer', 'InternalError', 'InterruptedException', + 'Iterable', 'LinkageError', 'Long', 'Math', 'NegativeArraySizeException', + 'NoClassDefFoundError', 'NoSuchFieldError', 'NoSuchFieldException', + 'NoSuchMethodError', 'NoSuchMethodException', 'NullPointerException', 'Number', + 'NumberFormatException', 'Object', 'OutOfMemoryError', 'Override', 'Package', + 'Process', 'ProcessBuilder', 'Readable', 'ReflectiveOperationException', + 'Runnable', 'Runtime', 'RuntimeException', 'RuntimePermission', 'SafeVarargs', + 'SecurityException', 'SecurityManager', 'Short', 'StackOverflowError', + 'StackTraceElement', 'StrictMath', 'String', 'StringBuffer', 'StringBuilder', + 'StringIndexOutOfBoundsException', 'SuppressWarnings', 'System', 'Thread', + 'ThreadDeath', 'ThreadGroup', 'ThreadLocal', 'Throwable', + 'TypeNotPresentException', 'UnknownError', 'UnsatisfiedLinkError', + 'UnsupportedClassVersionError', 'UnsupportedOperationException', 'VerifyError', + 'VirtualMachineError', 'Void']) + +class JavaObject(ObjectDescription): + option_spec = { + 'noindex': directives.flag, + 'package': directives.unchanged, + 'outertype': directives.unchanged + } + + def _build_ref_node(self, target): + ref = addnodes.pending_xref('', refdomain='java', reftype='type', reftarget=target, modname=None, classname=None) + ref['java:outertype'] = self.get_type() + + package = self.env.temp_data.get('java:imports', dict()).get(target, None) + + if not package and target in java_dot_lang: + package = 'java.lang' + + if package: + ref['java:imported'] = True + ref['java:package'] = package + else: + ref['java:imported'] = False + ref['java:package'] = self.get_package() + + return ref + + def _build_type_node(self, typ): + if isinstance(typ, javalang.tree.ReferenceType): + if typ.dimensions: + dim = '[]' * len(typ.dimensions) + else: + dim = '' + + target = typ.name + parts = [] + + while typ: + ref_node = self._build_ref_node(target) + ref_node += nodes.Text(typ.name, typ.name) + parts.append(ref_node) + + if typ.arguments: + parts.append(nodes.Text('<', '<')) + + first = True + for type_arg in typ.arguments: + if first: + first = False + else: + parts.append(nodes.Text(', ', ', ')) + + if type_arg.pattern_type == '?': + parts.append(nodes.Text('?', '?')) + else: + if type_arg.pattern_type: + s = '? %s ' % (type_arg.pattern_type,) + parts.append(nodes.Text(s, s)) + parts.extend(self._build_type_node(type_arg.type)) + + parts.append(nodes.Text('>', '>')) + + typ = typ.sub_type + + if typ: + target = target + '.' 
+ typ.name + parts.append(nodes.Text('.', '.')) + elif dim: + parts.append(nodes.Text(dim, dim)) + + return parts + else: + type_repr = formatter.output_type(typ).build() + return [nodes.Text(type_repr, type_repr)] + + def _build_type_node_list(self, types): + parts = self._build_type_node(types[0]) + for typ in types[1:]: + parts.append(nodes.Text(', ', ', ')) + parts.extend(self._build_type_node(typ)) + return parts + + def handle_signature(self, sig, signode): + handle_name = 'handle_%s_signature' % (self.objtype,) + handle = getattr(self, handle_name, None) + + if handle: + return handle(sig, signode) + else: + raise NotImplementedError + + def get_index_text(self, package, type, name): + raise NotImplementedError + + def get_package(self): + return self.options.get('package', self.env.temp_data.get('java:package')) + + def get_type(self): + return self.options.get('outertype', '.'.join(self.env.temp_data.get('java:outertype', []))) + + def add_target_and_index(self, name, sig, signode): + package = self.get_package() + type = self.get_type(); + + fullname = '.'.join(filter(None, (package, type, name))) + basename = fullname.partition('(')[0] + + # note target + if fullname not in self.state.document.ids: + signode['names'].append(fullname) + signode['ids'].append(fullname) + signode['first'] = (not self.names) + self.state.document.note_explicit_target(signode) + + objects = self.env.domaindata['java']['objects'] + if fullname in objects: + self.state_machine.reporter.warning( + 'duplicate object description of %s, ' % fullname + + 'other instance in ' + self.env.doc2path(objects[fullname][0]) + + ', use :noindex: for one of them', + line=self.lineno) + + objects[fullname] = (self.env.docname, self.objtype, basename) + + indextext = self.get_index_text(package, type, name) + if indextext: + self.indexnode['entries'].append(_create_indexnode(indextext, fullname)) + + def before_content(self): + self.set_type = False + + if self.objtype == 'type' and self.names: + self.set_type = True + self.env.temp_data.setdefault('java:outertype', list()).append(self.names[0]) + + def after_content(self): + if self.set_type: + self.env.temp_data['java:outertype'].pop() + +class JavaMethod(JavaObject): + doc_field_types = [ + TypedField('parameter', label=_('Parameters'), + names=('param', 'parameter', 'arg', 'argument'), + typerolename='type', typenames=('type',)), + Field('returnvalue', label=_('Returns'), has_arg=False, + names=('returns', 'return')), + GroupedField('throws', names=('throws',), label=_('Throws'), rolename='type') + ] + + def handle_method_signature(self, sig, signode): + try: + member = javalang.parse.parse_member_signature(sig) + except javalang.parser.JavaSyntaxError: + raise self.error("syntax error in method signature") + + if not isinstance(member, javalang.tree.MethodDeclaration): + raise self.error("expected method declaration") + + mods = formatter.output_modifiers(member.modifiers).build() + signode += nodes.Text(mods + ' ', mods + ' ') + + if member.type_parameters: + type_params = formatter.output_type_params(member.type_parameters).build() + signode += nodes.Text(type_params, type_params) + signode += nodes.Text(' ', ' ') + + rnode = addnodes.desc_type('', '') + rnode += self._build_type_node(member.return_type) + + signode += rnode + signode += nodes.Text(' ', ' ') + signode += addnodes.desc_name(member.name, member.name) + + paramlist = addnodes.desc_parameterlist() + for parameter in member.parameters: + param = addnodes.desc_parameter('', '', noemph=True) + param += 
self._build_type_node(parameter.type) + + if parameter.varargs: + param += nodes.Text('...', '') + + param += nodes.emphasis(' ' + parameter.name, ' ' + parameter.name) + paramlist += param + signode += paramlist + + param_reprs = [formatter.output_type(param.type, with_generics=False).build() for param in member.parameters] + return member.name + '(' + ', '.join(param_reprs) + ')' + + def get_index_text(self, package, type, name): + return _('%s (Java method)' % (name,)) + +class JavaConstructor(JavaObject): + doc_field_types = [ + TypedField('parameter', label=_('Parameters'), + names=('param', 'parameter', 'arg', 'argument'), + typerolename='type', typenames=('type',)), + GroupedField('throws', names=('throws',), label=_('Throws')) + ] + + def handle_constructor_signature(self, sig, signode): + try: + member = javalang.parse.parse_constructor_signature(sig) + except javalang.parser.JavaSyntaxError: + raise self.error("syntax error in constructor signature") + + if not isinstance(member, javalang.tree.ConstructorDeclaration): + raise self.error("expected constructor declaration") + + mods = formatter.output_modifiers(member.modifiers).build() + signode += nodes.Text(mods + ' ', mods + ' ') + + signode += addnodes.desc_name(member.name, member.name) + + paramlist = addnodes.desc_parameterlist() + for parameter in member.parameters: + param = addnodes.desc_parameter('', '', noemph=True) + param += self._build_type_node(parameter.type) + + if parameter.varargs: + param += nodes.Text('...', '') + + param += nodes.emphasis(' ' + parameter.name, ' ' + parameter.name) + paramlist += param + signode += paramlist + + param_reprs = [formatter.output_type(param.type, with_generics=False).build() for param in member.parameters] + return '%s(%s)' % (member.name, ', '.join(param_reprs)) + + def get_index_text(self, package, type, name): + return _('%s (Java constructor)' % (name,)) + +class JavaType(JavaObject): + doc_field_types = [ + GroupedField('parameter', names=('param',), label=_('Parameters')) + ] + + declaration_type = None + + def handle_type_signature(self, sig, signode): + try: + member = javalang.parse.parse_type_signature(sig) + except javalang.parser.JavaSyntaxError: + raise self.error("syntax error in field signature") + + if isinstance(member, javalang.tree.ClassDeclaration): + self.declaration_type = 'class' + elif isinstance(member, javalang.tree.InterfaceDeclaration): + self.declaration_type = 'interface' + elif isinstance(member, javalang.tree.EnumDeclaration): + self.declaration_type = 'enum' + elif isinstance(member, javalang.tree.AnnotationDeclaration): + self.declaration_type = 'annotation' + else: + raise self.error("expected type declaration") + + mods = formatter.output_modifiers(member.modifiers).build() + signode += nodes.Text(mods + ' ', mods + ' ') + + if self.declaration_type == 'class': + signode += nodes.Text('class ', 'class ') + elif self.declaration_type == 'interface': + signode += nodes.Text('interface ', 'interface ') + elif self.declaration_type == 'enum': + signode += nodes.Text('enum ', 'enum ') + elif self.declaration_type == 'annotation': + signode += nodes.Text('@interface ', '@interface ') + + signode += addnodes.desc_name(member.name, member.name) + + if self.declaration_type in ('class', 'interface') and member.type_parameters: + type_params = formatter.output_type_params(member.type_parameters).build() + signode += nodes.Text(type_params, type_params) + + if self.declaration_type == 'class': + if member.extends: + extends = ' extends ' + signode += 
nodes.Text(extends, extends) + signode += self._build_type_node(member.extends) + if member.implements: + implements = ' implements ' + signode += nodes.Text(implements, implements) + signode += self._build_type_node_list(member.implements) + elif self.declaration_type == 'interface': + if member.extends: + extends = ' extends ' + signode += nodes.Text(extends, extends) + signode += self._build_type_node_list(member.extends) + elif self.declaration_type == 'enum': + if member.implements: + implements = ' implements ' + signode += nodes.Text(implements, implements) + signode += self._build_type_node_list(member.implements) + + return member.name + + def get_index_text(self, package, type, name): + return _('%s (Java %s)' % (name, self.declaration_type)) + +class JavaField(JavaObject): + def handle_field_signature(self, sig, signode): + try: + member = javalang.parse.parse_member_signature(sig) + except javalang.parser.JavaSyntaxError: + raise self.error("syntax error in field signature") + + if not isinstance(member, javalang.tree.FieldDeclaration): + raise self.error("expected field declaration") + + mods = formatter.output_modifiers(member.modifiers).build() + signode += nodes.Text(mods + ' ', mods + ' ') + + tnode = addnodes.desc_type('', '') + tnode += self._build_type_node(member.type) + + signode += tnode + signode += nodes.Text(' ', ' ') + + if len(member.declarators) > 1: + self.error('only one field may be documented at a time') + + declarator = member.declarators[0] + signode += addnodes.desc_name(declarator.name, declarator.name) + + dim = '[]' * len(declarator.dimensions) + signode += nodes.Text(dim) + + if declarator.initializer and isinstance(declarator.initializer, javalang.tree.Literal): + signode += nodes.Text(' = ' + declarator.initializer.value) + + return declarator.name + + def get_index_text(self, package, type, name): + return _('%s (Java field)' % (name,)) + +class JavaPackage(Directive): + """ + Directive to mark description of a new package. + """ + + has_content = False + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = { + 'noindex': directives.flag, + } + + def run(self): + env = self.state.document.settings.env + package = self.arguments[0].strip() + noindex = 'noindex' in self.options + env.temp_data['java:package'] = package + env.domaindata['java']['objects'][package] = (env.docname, 'package', package) + ret = [] + + if not noindex: + targetnode = nodes.target('', '', ids=['package-' + package], ismod=True) + self.state.document.note_explicit_target(targetnode) + + # the platform and synopsis aren't printed; in fact, they are only + # used in the modindex currently + ret.append(targetnode) + + indextext = _('%s (package)') % (package,) + inode = addnodes.index(entries=[_create_indexnode(indextext, 'package-' + package)]) + ret.append(inode) + + return ret + +class JavaImport(Directive): + """ + This directive is just to tell Sphinx the source of a referenced type. 
+ """ + + has_content = False + required_arguments = 2 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = {} + + def run(self): + env = self.state.document.settings.env + package, typename = self.arguments + + env.temp_data.setdefault('java:imports', dict())[typename] = package + return [] + +class JavaXRefRole(XRefRole): + def process_link(self, env, refnode, has_explicit_title, title, target): + refnode['java:outertype'] = '.'.join(env.temp_data.get('java:outertype', list())) + + target = target.lstrip('~') + + # Strip a method component from the target + basetype = target + if '(' in basetype: + basetype = basetype.partition('(')[0] + if '.' in basetype: + basetype = basetype.rpartition('.')[0] + + package = env.temp_data.get('java:imports', dict()).get(basetype, None) + + if package: + refnode['java:imported'] = True + refnode['java:package'] = package + else: + refnode['java:imported'] = False + refnode['java:package'] = env.temp_data.get('java:package') + + if not has_explicit_title: + # if the first character is a tilde, don't display the module/class + # parts of the contents + if title[0:1] == '~': + title = title.partition('(')[0] + title = title[1:] + dot = title.rfind('.') + if dot != -1: + title = title[dot+1:] + + return title, target + +class JavaDomain(Domain): + """Java language domain.""" + name = 'java' + label = 'Java' + + object_types = { + 'package': ObjType(_('package'), 'package', 'ref'), + 'type': ObjType(_('type'), 'type', 'ref'), + 'field': ObjType(_('field'), 'field', 'ref'), + 'constructor': ObjType(_('constructor'), 'construct', 'ref'), + 'method': ObjType(_('method'), 'meth', 'ref') + } + + directives = { + 'package': JavaPackage, + 'type': JavaType, + 'field': JavaField, + 'constructor': JavaConstructor, + 'method': JavaMethod, + 'import': JavaImport + } + + roles = { + 'package': JavaXRefRole(), + 'type': JavaXRefRole(), + 'field': JavaXRefRole(), + 'construct': JavaXRefRole(), + 'meth': JavaXRefRole(), + 'ref': JavaXRefRole(), + } + + initial_data = { + 'objects': {}, # fullname -> docname, objtype, basename + } + + def clear_doc(self, docname): + objects = dict(self.data['objects']) + + for fullname, (fn, _, _) in objects.items(): + if fn == docname: + del self.data['objects'][fullname] + + def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): + objects = self.data['objects'] + package = node.get('java:package') + imported = node.get('java:imported') + type_context = node.get('java:outertype') + + # Partial function to make building the response easier + make_ref = lambda fullname: make_refnode(builder, fromdocname, objects[fullname][0], fullname, contnode, fullname) + + # Check for fully qualified references + if target in objects: + return make_ref(target) + + # Try with package name prefixed + if package: + fullname = package + '.' + target + if fullname in objects: + return make_ref(fullname) + + # Try with package and type prefixed + if package and type_context: + fullname = package + '.' + type_context + '.' + target + if fullname in objects: + return make_ref(fullname) + + # Try to find a matching suffix + suffix = '.' 
+ target + basename_match = None + basename_suffix = suffix.partition('(')[0] + + for fullname, (_, _, basename) in objects.items(): + if fullname.endswith(suffix): + return make_ref(fullname) + elif basename.endswith(basename_suffix): + basename_match = fullname + + if basename_match: + return make_ref(basename_match) + + # Try creating an external documentation reference + ref = extdoc.get_javadoc_ref(self.env, target, target) + + if not ref and target in java_dot_lang: + fulltarget = 'java.lang.' + target + ref = extdoc.get_javadoc_ref(self.env, fulltarget, fulltarget) + + # If the target was imported try with the package prefixed + if not ref and imported: + fulltarget = package + '.' + target + ref = extdoc.get_javadoc_ref(self.env, fulltarget, fulltarget) + + if ref: + ref.append(contnode) + return ref + else: + return None + + def get_objects(self): + for refname, (docname, type, _) in self.data['objects'].items(): + yield (refname, refname, type, docname, refname, 1) + + +def _create_indexnode(indextext, fullname): + # See https://github.com/sphinx-doc/sphinx/issues/2673 + if version_info < (1, 4): + return ('single', indextext, fullname, '') + else: + return ('single', indextext, fullname, '', None) diff --git a/docs/source/_ext/javasphinx/javasphinx/extdoc.py b/docs/source/_ext/javasphinx/javasphinx/extdoc.py new file mode 100644 index 0000000000..1586bc21c0 --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/extdoc.py @@ -0,0 +1,124 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import re + +from docutils import nodes, utils +from sphinx.util.nodes import split_explicit_title + +def get_javadoc_ref(app, rawtext, text): + javadoc_url_map = app.config.javadoc_url_map + + # Add default Java SE sources + if not javadoc_url_map.get("java"): + javadoc_url_map["java"] = ("http://docs.oracle.com/javase/8/docs/api", 'javadoc8') + if not javadoc_url_map.get("javax"): + javadoc_url_map["javax"] = ("http://docs.oracle.com/javase/8/docs/api", 'javadoc8') + if not javadoc_url_map.get("org.xml"): + javadoc_url_map["org.xml"] = ("http://docs.oracle.com/javase/8/docs/api", 'javadoc8') + if not javadoc_url_map.get("org.w3c"): + javadoc_url_map["org.w3c"] = ("http://docs.oracle.com/javase/8/docs/api", 'javadoc8') + + source = None + package = '' + method = None + + if '(' in text: + # If the javadoc contains a line like this: + # {@link #sort(List)} + # there is no package so the text.rindex will fail + try: + split_point = text.rindex('.', 0, text.index('(')) + method = text[split_point + 1:] + text = text[:split_point] + except ValueError: + pass + + for pkg, (baseurl, ext_type) in javadoc_url_map.items(): + if text.startswith(pkg + '.') and len(pkg) > len(package): + source = baseurl, ext_type + package = pkg + + if not source: + return None + + baseurl, ext_type = source + + package_parts = [] + cls_parts = [] + + for part in text.split('.'): + if cls_parts or part[0].isupper(): + cls_parts.append(part) + else: + package_parts.append(part) + + package = '.'.join(package_parts) + cls = '.'.join(cls_parts) + + if not baseurl.endswith('/'): + baseurl = baseurl + '/' + + if ext_type == 'javadoc': + if not cls: + cls = 'package-summary' + source = baseurl + package.replace('.', '/') + '/' + cls + '.html' + if method: + source = source + '#' + method + elif ext_type == 'javadoc8': + if not cls: + cls = 'package-summary' + source = baseurl + package.replace('.', '/') + '/' + cls + '.html' + if method: + source = source + '#' + re.sub(r'[()]', '-', method) + elif ext_type == 'sphinx': + if not cls: + cls = 'package-index' + source = baseurl + package.replace('.', '/') + '/' + cls.replace('.', '-') + '.html' + if method: + source = source + '#' + package + '.' + cls + '.' + method + else: + raise ValueError('invalid target specifier ' + ext_type) + + title = '.'.join(filter(None, (package, cls, method))) + node = nodes.reference(rawtext, '') + node['refuri'] = source + node['reftitle'] = title + + return node + +def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + """ Role for linking to external Javadoc """ + + has_explicit_title, title, target = split_explicit_title(text) + title = utils.unescape(title) + target = utils.unescape(target) + + if not has_explicit_title: + target = target.lstrip('~') + + if title[0] == '~': + title = title[1:].rpartition('.')[2] + + app = inliner.document.settings.env.app + ref = get_javadoc_ref(app, rawtext, target) + + if not ref: + raise ValueError("no Javadoc source found for %s in javadoc_url_map" % (target,)) + + ref.append(nodes.Text(title, title)) + + return [ref], [] diff --git a/docs/source/_ext/javasphinx/javasphinx/formatter.py b/docs/source/_ext/javasphinx/javasphinx/formatter.py new file mode 100644 index 0000000000..51b4ce719b --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/formatter.py @@ -0,0 +1,163 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. 
and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +Convert Java syntax tree nodes to string representations. + +""" + +import javalang + +from .util import StringBuilder + +# The order for displaying modifiers +__modifiers_order = ('public', 'protected', 'private', 'static', 'abstract', 'final', + 'native', 'synchronized', 'transient', 'volatile', 'strictfp') + +def formatter(f): + def _f(node, output=None, **kwargs): + if output is None: + output = StringBuilder() + + f(node, output, **kwargs) + return output + return _f + +def output_list(f, items, output=None, sep=', '): + if items: + f(items[0], output) + for item in items[1:]: + output.append(sep) + f(item, output) + +@formatter +def output_annotation(annotation, output): + output.append('@') + output.append(annotation.name) + output.append(' ') + +@formatter +def output_type(type, output, with_generics=True): + if not type: + output.append('void') + return + + if type.dimensions: + dim = '[]' * len(type.dimensions) + else: + dim = '' + + if isinstance(type, javalang.tree.BasicType): + output.append(type.name) + else: + while type: + output.append(type.name) + + if with_generics: + output_type_args(type.arguments, output) + + type = type.sub_type + + if type: + output.append('.') + output.append(dim) + +@formatter +def output_exception(exception, output): + output.append(exception) + +@formatter +def output_type_arg(type_arg, output): + if type_arg.pattern_type == '?': + output.append('?') + else: + if type_arg.pattern_type: + output.append('? 
') + output.append(type_arg.pattern_type) + output.append(' ') + + output_type(type_arg.type, output) + +@formatter +def output_type_args(type_args, output): + if type_args: + output.append('<') + output_list(output_type_arg, type_args, output, ', ') + output.append('>') + +@formatter +def output_type_param(type_param, output): + output.append(type_param.name) + + if type_param.extends: + output.append(' extends ') + output_list(output_type, type_param.extends, output, ' & ') + +@formatter +def output_type_params(type_params, output): + if type_params: + output.append('<') + output_list(output_type_param, type_params, output, ', ') + output.append('>') + +@formatter +def output_declaration(declaration, output): + for annotation in declaration.annotations: + output_annotation(annotation, output) + + output_modifiers(declaration.modifiers, output) + output.append(' ') + + if isinstance(declaration, javalang.tree.ClassDeclaration): + output.append('class ') + elif isinstance(declaration, javalang.tree.EnumDeclaration): + output.append('enum ') + elif isinstance(declaration, javalang.tree.InterfaceDeclaration): + output.append('interface ') + elif isinstance(declaration, javalang.tree.AnnotationDeclaration): + output.append('@interface ') + + output.append(declaration.name) + + if isinstance(declaration, (javalang.tree.ClassDeclaration, javalang.tree.InterfaceDeclaration)): + output_type_params(declaration.type_parameters, output) + + if isinstance(declaration, javalang.tree.ClassDeclaration) and declaration.extends: + output.append(' extends ') + output_type(declaration.extends, output) + + if isinstance(declaration, javalang.tree.InterfaceDeclaration) and declaration.extends: + output.append(' extends ') + output_list(output_type, declaration.extends, output, ', ') + + if isinstance(declaration, (javalang.tree.ClassDeclaration, javalang.tree.EnumDeclaration)) and declaration.implements: + output.append(' implements ') + output_list(output_type, declaration.implements, output, ', ') + +@formatter +def output_formal_param(param, output): + output_type(param.type, output) + + if param.varargs: + output.append('...') + + output.append(' ') + output.append(param.name) + +@formatter +def output_modifiers(modifiers, output): + ordered_modifiers = [mod for mod in __modifiers_order if mod in modifiers] + output_list(lambda mod, output: output.append(mod), ordered_modifiers, output, ' ') diff --git a/docs/source/_ext/javasphinx/javasphinx/htmlrst.py b/docs/source/_ext/javasphinx/javasphinx/htmlrst.py new file mode 100644 index 0000000000..b34f1f205d --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/htmlrst.py @@ -0,0 +1,419 @@ +# +# Copyright 2013-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from __future__ import unicode_literals +from builtins import str + +import collections +import re + +from xml.sax.saxutils import escape as html_escape +from bs4 import BeautifulSoup + +Cell = collections.namedtuple('Cell', ['type', 'rowspan', 'colspan', 'contents']) + +class Converter(object): + def __init__(self, parser): + self._unknown_tags = set() + self._clear = '\n\n..\n\n' + + # Regular expressions + self._preprocess_anchors = re.compile(r'') + self._post_process_empty_lines = re.compile(r'^\s+$', re.MULTILINE) + self._post_process_compress_lines = re.compile(r'\n{3,}') + self._whitespace_with_newline = re.compile(r'[\s\n]+') + self._whitespace = re.compile(r'\s+') + self._html_tag = re.compile(r'<.*?>') + + self._preprocess_entity = re.compile(r'&(nbsp|lt|gt|amp)([^;]|[\n])') + self._parser = parser + + # -------------------------------------------------------------------------- + # ---- reST Utility Methods ---- + + def _unicode(self, s): + if isinstance(s, unicode): + return s + else: + return unicode(s, 'utf8') + + def _separate(self, s): + return u'\n\n' + s + u'\n\n' + + def _escape_inline(self, s): + return '\\ ' + s + '\\ ' + + def _inline(self, tag, s): + # Seems fishy if our inline markup spans lines. We will instead just return + # the string as is + if '\n' in s: + return s + + s = s.strip() + + if not s: + return s + + return self._escape_inline(tag + s.strip() + tag) + + def _role(self, role, s, label=None): + if label: + return self._escape_inline(':%s:`%s <%s>`' % (role, label, s)) + else: + return self._escape_inline(':%s:`%s`' % (role, s)) + + def _directive(self, directive, body=None): + header = '\n\n.. %s::\n\n' % (directive,) + + if body: + return header + self._left_justify(body, 3) + '\n\n' + else: + return header + '\n' + + def _hyperlink(self, target, label): + return self._escape_inline('`%s <%s>`_' % (label, target)) + + def _listing(self, marker, items): + items = [self._left_justify(item, len(marker) + 1) for item in items] + items = [marker + item[len(marker):] for item in items] + return self._separate('..') + self._separate('\n'.join(items)) + + def _left_justify(self, s, indent=0): + lines = [l.rstrip() for l in s.split('\n')] + indents = [len(l) - len(l.lstrip()) for l in lines if l] + + if not indents: + return s + + shift = indent - min(indents) + + if shift < 0: + return '\n'.join(l[-shift:] for l in lines) + else: + prefix = ' ' * shift + return '\n'.join(prefix + l for l in lines) + + def _compress_whitespace(self, s, replace=' ', newlines=True): + if newlines: + return self._whitespace_with_newline.sub(replace, s) + else: + return self._whitespace.sub(replace, s) + + # -------------------------------------------------------------------------- + # ---- DOM Tree Processing ---- + + def _process_table_cells(self, table): + """ Compile all the table cells. + + Returns a list of rows. The rows may have different lengths because of + column spans. 
+ + """ + + rows = [] + + for i, tr in enumerate(table.find_all('tr')): + row = [] + + for c in tr.contents: + cell_type = getattr(c, 'name', None) + + if cell_type not in ('td', 'th'): + continue + + rowspan = int(c.attrs.get('rowspan', 1)) + colspan = int(c.attrs.get('colspan', 1)) + contents = self._process_children(c).strip() + + if cell_type == 'th' and i > 0: + contents = self._inline('**', contents) + + row.append(Cell(cell_type, rowspan, colspan, contents)) + + rows.append(row) + + return rows + + def _process_table(self, node): + rows = self._process_table_cells(node) + + if not rows: + return '' + + table_num_columns = max(sum(c.colspan for c in row) for row in rows) + + normalized = [] + + for row in rows: + row_num_columns = sum(c.colspan for c in row) + + if row_num_columns < table_num_columns: + cell_type = row[-1].type if row else 'td' + row.append(Cell(cell_type, 1, table_num_columns - row_num_columns, '')) + + col_widths = [0] * table_num_columns + row_heights = [0] * len(rows) + + for i, row in enumerate(rows): + j = 0 + for cell in row: + current_w = sum(col_widths[j:j + cell.colspan]) + required_w = max(len(l) for l in cell.contents.split('\n')) + + if required_w > current_w: + additional = required_w - current_w + col_widths[j] += additional - (cell.colspan - 1) * (additional // cell.colspan) + for jj in range(j + 1, j + cell.colspan): + col_widths[jj] += (additional // cell.colspan) + + current_h = row_heights[i] + required_h = len(cell.contents.split('\n')) + + if required_h > current_h: + row_heights[i] = required_h + + j += cell.colspan + + row_sep = '+' + '+'.join('-' * (l + 2) for l in col_widths) + '+' + header_sep = '+' + '+'.join('=' * (l + 2) for l in col_widths) + '+' + lines = [row_sep] + + for i, row in enumerate(rows): + for y in range(0, row_heights[i]): + line = [] + j = 0 + for c in row: + w = sum(n + 3 for n in col_widths[j:j+c.colspan]) - 2 + h = row_heights[i] + + line.append('| ') + cell_lines = c.contents.split('\n') + content = cell_lines[y] if y < len(cell_lines) else '' + line.append(content.ljust(w)) + + j += c.colspan + + line.append('|') + lines.append(''.join(line)) + + if i == 0 and all(c.type == 'th' for c in row): + lines.append(header_sep) + else: + lines.append(row_sep) + + return self._separate('\n'.join(lines)) + + def _process_children(self, node): + parts = [] + is_newline = False + + for c in node.contents: + part = self._process(c) + + if is_newline: + part = part.lstrip() + + if part: + parts.append(part) + is_newline = part.endswith('\n') + + return ''.join(parts) + + def _process_text(self, node): + return ''.join(node.strings) + + def _process(self, node): + if isinstance(node, str): + return self._compress_whitespace(node) + + simple_tags = { + 'b' : lambda s: self._inline('**', s), + 'strong' : lambda s: self._inline('**', s), + 'i' : lambda s: self._inline('*', s), + 'em' : lambda s: self._inline('*', s), + 'tt' : lambda s: self._inline('``', s), + 'code' : lambda s: self._inline('``', s), + 'h1' : lambda s: self._inline('**', s), + 'h2' : lambda s: self._inline('**', s), + 'h3' : lambda s: self._inline('**', s), + 'h4' : lambda s: self._inline('**', s), + 'h5' : lambda s: self._inline('**', s), + 'h6' : lambda s: self._inline('**', s), + 'sub' : lambda s: self._role('sub', s), + 'sup' : lambda s: self._role('sup', s), + 'hr' : lambda s: self._separate('') # Transitions not allowed + } + + if node.name in simple_tags: + return simple_tags[node.name](self._process_text(node)) + + if node.name == 'p': + return 
self._separate(self._process_children(node).strip()) + + if node.name == 'pre': + return self._directive('parsed-literal', self._process_text(node)) + + if node.name == 'a': + if 'name' in node.attrs: + return self._separate('.. _' + node['name'] + ':') + elif 'href' in node.attrs: + target = node['href'] + label = self._compress_whitespace(self._process_text(node).strip('\n')) + + if target.startswith('#'): + return self._role('ref', target[1:], label) + elif target.startswith('@'): + return self._role('java:ref', target[1:], label) + else: + return self._hyperlink(target, label) + + if node.name == 'ul': + items = [self._process(n) for n in node.find_all('li', recursive=False)] + return self._listing('*', items) + + if node.name == 'ol': + items = [self._process(n) for n in node.find_all('li', recursive=False)] + return self._listing('#.', items) + + if node.name == 'li': + s = self._process_children(node) + s = s.strip() + + # If it's multiline clear the end to correcly support nested lists + if '\n' in s: + s = s + '\n\n' + + return s + + if node.name == 'table': + return self._process_table(node) + + self._unknown_tags.add(node.name) + + return self._process_children(node) + + # -------------------------------------------------------------------------- + # ---- HTML Preprocessing ---- + + def _preprocess_inline_javadoc_replace(self, tag, f, s): + parts = [] + + start = '{@' + tag + start_length = len(start) + + i = s.find(start) + j = 0 + + while i != -1: + parts.append(s[j:i]) + + # Find a closing bracket such that the brackets are balanced between + # them. This is necessary since code examples containing { and } are + # commonly wrapped in {@code ...} tags + + try: + j = s.find('}', i + start_length) + 1 + while s.count('{', i, j) != s.count('}', i, j): + j = s.index('}', j) + 1 + except ValueError: + raise ValueError('Unbalanced {} brackets in ' + tag + ' tag') + + parts.append(f(s[i + start_length:j - 1].strip())) + i = s.find(start, j) + + parts.append(s[j:]) + + return ''.join(parts) + + def _preprocess_replace_javadoc_link(self, s): + s = self._compress_whitespace(s) + + target = None + label = '' + + if ' ' not in s: + target = s + else: + i = s.find(' ') + + while s.count('(', 0, i) != s.count(')', 0, i): + i = s.find(' ', i + 1) + + if i == -1: + i = len(s) + break + + target = s[:i] + label = s[i:] + + if target[0] == '#': + target = target[1:] + + target = target.replace('#', '.').replace(' ', '').strip() + + # Strip HTML tags from the target + target = self._html_tag.sub('', target) + + label = label.strip() + + return '%s' % (target, label) + + def _preprocess_close_anchor_tags(self, s): + # Add closing tags to all anchors so they are better handled by the parser + return self._preprocess_anchors.sub(r'', s) + + def _preprocess_fix_entities(self, s): + return self._preprocess_entity.sub(r'&\1;\2', s) + + def _preprocess(self, s_html): + to_tag = lambda t: lambda m: '<%s>%s' % (t, html_escape(m), t) + s_html = self._preprocess_inline_javadoc_replace('code', to_tag('code'), s_html) + s_html = self._preprocess_inline_javadoc_replace('literal', to_tag('span'), s_html) + s_html = self._preprocess_inline_javadoc_replace('docRoot', lambda m: '', s_html) + s_html = self._preprocess_inline_javadoc_replace('linkplain', self._preprocess_replace_javadoc_link, s_html) + s_html = self._preprocess_inline_javadoc_replace('link', self._preprocess_replace_javadoc_link, s_html) + + # Make sure all anchor tags are closed + s_html = self._preprocess_close_anchor_tags(s_html) + + # Fix up 
some entitities without closing ; + s_html = self._preprocess_fix_entities(s_html) + + return s_html + + # -------------------------------------------------------------------------- + # ---- Conversion entry point ---- + + def convert(self, s_html): + if not isinstance(s_html, str): + s_html = str(s_html, 'utf8') + + s_html = self._preprocess(s_html) + + if not s_html.strip(): + return '' + + soup = BeautifulSoup(s_html, self._parser) + top = soup.html.body + + result = self._process_children(top) + + # Post processing + result = self._post_process_empty_lines.sub('', result) + result = self._post_process_compress_lines.sub('\n\n', result) + result = result.strip() + + return result diff --git a/docs/source/_ext/javasphinx/javasphinx/util.py b/docs/source/_ext/javasphinx/javasphinx/util.py new file mode 100644 index 0000000000..2de85d5499 --- /dev/null +++ b/docs/source/_ext/javasphinx/javasphinx/util.py @@ -0,0 +1,119 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import unicode_literals +from builtins import str + +import logging +import re +import sys + +class StringBuilder(list): + def build(self): + return str(self) + + def __str__(self): + return ''.join(self) + +class Directive(object): + + def __init__(self, type, argument=''): + self.type = type + self.argument = argument + + self.options = [] + self.content = [] + + def add_option(self, name, value=''): + self.options.append((name, value)) + + def add_content(self, o): + assert o is not None + self.content.append(o) + + def build(self): + doc = Document() + doc.add_line('.. 
%s:: %s' % (self.type, self.argument)) + + for name, value in self.options: + doc.add_line(' :%s: %s\n' % (name, value)) + + content = Document() + + for obj in self.content: + content.add_object(obj) + + doc.clear() + for line in content.build().splitlines(): + doc.add_line(' ' + line) + doc.clear() + + return doc.build() + +class Document(object): + remove_trailing_whitespace_re = re.compile('[ \t]+$', re.MULTILINE) + collapse_empty_lines_re = re.compile('\n' + '{3,}', re.DOTALL) + + def __init__(self): + self.content = [] + + def add_object(self, o): + assert o is not None + + self.content.append(o) + + def add(self, s): + self.add_object(s) + + def add_line(self, s): + self.add(s) + self.add('\n') + + def add_heading(self, s, t='-'): + self.add_line(s) + self.add_line(t * len(s)) + + def clear(self): + self.add('\n\n') + + def build(self): + output = StringBuilder() + + for obj in self.content: + if isinstance(obj, Directive): + output.append('\n\n') + output.append(obj.build()) + output.append('\n\n') + elif isinstance(obj, Document): + output.append(obj.build()) + else: + output.append(str(obj)) + + output.append('\n\n') + + output = str(output) + output = self.remove_trailing_whitespace_re.sub('', output) + output = self.collapse_empty_lines_re.sub('\n\n', output) + + return output + +def error(s, *args, **kwargs): + logging.error(s, *args, **kwargs) + sys.exit(1) + +def unexpected(s, *args, **kwargs): + logging.exception(s, *args, **kwargs) + sys.exit(1) diff --git a/docs/source/_ext/javasphinx/setup.py b/docs/source/_ext/javasphinx/setup.py new file mode 100644 index 0000000000..3e4f362cfa --- /dev/null +++ b/docs/source/_ext/javasphinx/setup.py @@ -0,0 +1,59 @@ +# +# Copyright 2012-2015 Bronto Software, Inc. and contributors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from setuptools import setup + +setup( + name = "javasphinx", + packages = ["javasphinx"], + version = "0.9.15", + author = "Chris Thunes", + author_email = "cthunes@brewtab.com", + url = "http://github.com/bronto/javasphinx", + description = "Sphinx extension for documenting Java projects", + license = "Apache 2.0", + classifiers = [ + "Programming Language :: Python", + "Development Status :: 4 - Beta", + "Operating System :: OS Independent", + "License :: OSI Approved :: Apache Software License", + "Intended Audience :: Developers", + "Topic :: Software Development :: Libraries" + ], + install_requires=[ + "javalang>=0.10.1", + "lxml", + "beautifulsoup4", + "future", + "docutils", + "sphinx" + ], + entry_points={ + 'console_scripts': [ + 'javasphinx-apidoc = javasphinx.apidoc:main' + ] + }, + long_description = """\ +========== +javasphinx +========== + +javasphinx is an extension to the Sphinx documentation system which adds support +for documenting Java projects. It includes a Java domain for writing +documentation manually and a javasphinx-apidoc utility which will automatically +generate API documentation from existing Javadoc markup. 
+""" +) diff --git a/docs/source/_ext/showfile.py b/docs/source/_ext/showfile.py index 14f4d0975a..7894d6d385 100644 --- a/docs/source/_ext/showfile.py +++ b/docs/source/_ext/showfile.py @@ -140,8 +140,8 @@ class ToggleDirective(Directive): return [par, node] def add_assets(app): - app.add_stylesheet(CSS_FILE) - app.add_javascript(JS_FILE) + app.add_css_file(CSS_FILE) + app.add_js_file(JS_FILE) def copy_assets(app, exception): diff --git a/docs/source/app_msg.rst b/docs/source/app_msg.rst index 432b50520d..f68fe2d38e 100644 --- a/docs/source/app_msg.rst +++ b/docs/source/app_msg.rst @@ -276,7 +276,7 @@ Network Zone (:cpp:class:`msg_file_t`) and associated functions. Java bindings ************* -This section describes jMSG, the Java API to Simgrid. This API mimicks +This section describes jMSG, the Java API to Simgrid. This API mimics :ref:`MSG `, which is a simple yet somehow realistic interface. The full reference documentation is provided at the end of this page. diff --git a/docs/source/app_s4u.rst b/docs/source/app_s4u.rst index fdb8c905ac..9e9be4ed83 100644 --- a/docs/source/app_s4u.rst +++ b/docs/source/app_s4u.rst @@ -247,8 +247,8 @@ at a given point of time). A big difference with TCP sockets or MPI communications is that communications do not start right away after a -:cpp:func:`Mailbox::put() `, but wait -for the corresponding :cpp:func:`Mailbox::get() `. +:cpp:func:`Mailbox::put() `, but wait +for the corresponding :cpp:func:`Mailbox::get() `. You can change this by :ref:`declaring a receiving actor `. A big difference with twitter hashtags is that SimGrid does not @@ -260,8 +260,8 @@ A big difference with the ZeroMQ queues is that you cannot filter on the data you want to get from the mailbox. To model such settings in SimGrid, you'd have one mailbox per potential topic, and subscribe to each topic individually with a -:cpp:func:`get_async() ` on each mailbox. -Then, use :cpp:func:`Comm::wait_any() ` +:cpp:func:`get_async() ` on each mailbox. +Then, use :cpp:func:`Comm::wait_any() ` to get the first message on any of the mailbox you are subscribed onto. The mailboxes are not located on the network, and you can access @@ -488,7 +488,7 @@ Querying info .. autodoxymethod:: simgrid::s4u::Actor::get_host .. autodoxymethod:: simgrid::s4u::Actor::set_host - .. autodoxymethod:: simgrid::s4u::Actor::get_refcount() + .. autodoxymethod:: simgrid::s4u::Actor::get_refcount .. autodoxymethod:: simgrid::s4u::Actor::get_impl .. group-tab:: Python @@ -521,7 +521,7 @@ Suspending and resuming actors .. autodoxymethod:: simgrid::s4u::Actor::suspend() .. autodoxymethod:: simgrid::s4u::Actor::resume() - .. autodoxymethod:: simgrid::s4u::Actor::is_suspended() + .. autodoxymethod:: simgrid::s4u::Actor::is_suspended .. group-tab:: Python @@ -533,7 +533,7 @@ Suspending and resuming actors .. autodoxymethod:: sg_actor_suspend(sg_actor_t actor) .. autodoxymethod:: sg_actor_resume(sg_actor_t actor) - .. autodoxymethod:: sg_actor_is_suspended(sg_actor_t actor) + .. autodoxymethod:: sg_actor_is_suspended(const_sg_actor_t actor) Specifying when actors should terminate --------------------------------------- @@ -545,7 +545,7 @@ Specifying when actors should terminate .. autodoxymethod:: simgrid::s4u::Actor::kill() .. autodoxymethod:: simgrid::s4u::Actor::kill_all() .. autodoxymethod:: simgrid::s4u::Actor::set_kill_time(double time) - .. autodoxymethod:: simgrid::s4u::Actor::get_kill_time() + .. autodoxymethod:: simgrid::s4u::Actor::get_kill_time .. autodoxymethod:: simgrid::s4u::Actor::restart() .. 
autodoxymethod:: simgrid::s4u::Actor::daemonize() @@ -601,7 +601,7 @@ Signals .. autodoxyvar:: simgrid::s4u::Actor::on_creation .. autodoxyvar:: simgrid::s4u::Actor::on_suspend - .. cpp:var:: xbt::signal Actor::on_host_change + .. cpp:var:: xbt::signal Actor::on_host_change Signal fired when an actor is migrated from one host to another. @@ -635,7 +635,7 @@ Querying info .. autodoxymethod:: simgrid::s4u::this_actor::is_maestro() .. autodoxymethod:: simgrid::s4u::this_actor::get_host() - .. autodoxymethod:: simgrid::s4u::this_actor::set_host(Host *new_host) + .. autodoxymethod:: simgrid::s4u::this_actor::set_host(simgrid::s4u::Host *new_host) .. group-tab:: Python @@ -709,8 +709,8 @@ the execution, or start an asynchronous activity. .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::this_actor::exec_async(double flops_amounts) - .. autodoxymethod:: simgrid::s4u::this_actor::exec_init(const std::vector< s4u::Host *> &hosts, const std::vector< double > &flops_amounts, const std::vector< double > &bytes_amounts) + .. autodoxymethod:: simgrid::s4u::this_actor::exec_async + .. autodoxymethod:: simgrid::s4u::this_actor::exec_init(const std::vector< s4u::Host * > &hosts, const std::vector< double > &flops_amounts, const std::vector< double > &bytes_amounts) .. autodoxymethod:: simgrid::s4u::this_actor::exec_init(double flops_amounts) .. autodoxymethod:: simgrid::s4u::this_actor::execute(double flop) .. autodoxymethod:: simgrid::s4u::this_actor::execute(double flop, double priority) @@ -763,10 +763,10 @@ Initialization .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, bool value) .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, double value) .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, int value) - .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, std::string value) + .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, const std::string &value) - .. autodoxymethod:: simgrid::s4u::Engine::load_deployment(const std::string &deploy) - .. autodoxymethod:: simgrid::s4u::Engine::load_platform(const std::string &platf) + .. autodoxymethod:: simgrid::s4u::Engine::load_deployment + .. autodoxymethod:: simgrid::s4u::Engine::load_platform .. autodoxymethod:: simgrid::s4u::Engine::register_actor(const std::string &name) .. autodoxymethod:: simgrid::s4u::Engine::register_actor(const std::string &name, F code) .. autodoxymethod:: simgrid::s4u::Engine::register_default(void(*code)(int, char **)) @@ -796,7 +796,7 @@ Run the simulation .. group-tab:: C++ .. autodoxymethod:: simgrid::s4u::Engine::get_clock() - .. autodoxymethod:: simgrid::s4u::Engine::run() + .. autodoxymethod:: simgrid::s4u::Engine::run .. group-tab:: Python @@ -815,9 +815,9 @@ Retrieving actors .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Engine::get_actor_count() - .. autodoxymethod:: simgrid::s4u::Engine::get_all_actors() - .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_actors(const std::function< bool(ActorPtr)> &filter) + .. autodoxymethod:: simgrid::s4u::Engine::get_actor_count + .. autodoxymethod:: simgrid::s4u::Engine::get_all_actors + .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_actors .. group-tab:: C @@ -830,11 +830,11 @@ Retrieving hosts .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Engine::get_all_hosts() - .. autodoxymethod:: simgrid::s4u::Engine::get_host_count() - .. 
autodoxymethod:: simgrid::s4u::Engine::get_filtered_hosts(const std::function< bool(Host *)> &filter) - .. autodoxymethod:: simgrid::s4u::Engine::host_by_name(const std::string &name) - .. autodoxymethod:: simgrid::s4u::Engine::host_by_name_or_null(const std::string &name) + .. autodoxymethod:: simgrid::s4u::Engine::get_all_hosts + .. autodoxymethod:: simgrid::s4u::Engine::get_host_count + .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_hosts + .. autodoxymethod:: simgrid::s4u::Engine::host_by_name + .. autodoxymethod:: simgrid::s4u::Engine::host_by_name_or_null .. group-tab:: Python @@ -851,11 +851,11 @@ Retrieving links .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Engine::get_all_links() - .. autodoxymethod:: simgrid::s4u::Engine::get_link_count() + .. autodoxymethod:: simgrid::s4u::Engine::get_all_links + .. autodoxymethod:: simgrid::s4u::Engine::get_link_count .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_links - .. autodoxymethod:: simgrid::s4u::Engine::link_by_name(const std::string &name) - .. autodoxymethod:: simgrid::s4u::Engine::link_by_name_or_null(const std::string &name) + .. autodoxymethod:: simgrid::s4u::Engine::link_by_name + .. autodoxymethod:: simgrid::s4u::Engine::link_by_name_or_null Interacting with the routing ---------------------------- @@ -864,12 +864,12 @@ Interacting with the routing .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Engine::get_all_netpoints() - .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_netzones() + .. autodoxymethod:: simgrid::s4u::Engine::get_all_netpoints + .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_netzones .. autodoxymethod:: simgrid::s4u::Engine::get_instance() - .. autodoxymethod:: simgrid::s4u::Engine::get_netzone_root() - .. autodoxymethod:: simgrid::s4u::Engine::netpoint_by_name_or_null(const std::string &name) - .. autodoxymethod:: simgrid::s4u::Engine::netzone_by_name_or_null(const std::string &name) + .. autodoxymethod:: simgrid::s4u::Engine::get_netzone_root + .. autodoxymethod:: simgrid::s4u::Engine::netpoint_by_name_or_null + .. autodoxymethod:: simgrid::s4u::Engine::netzone_by_name_or_null .. autodoxymethod:: simgrid::s4u::Engine::set_netzone_root(const NetZone *netzone) Signals @@ -924,8 +924,8 @@ Querying info .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Mailbox::get_cname() const - .. autodoxymethod:: simgrid::s4u::Mailbox::get_name() const + .. autodoxymethod:: simgrid::s4u::Mailbox::get_cname + .. autodoxymethod:: simgrid::s4u::Mailbox::get_name .. group-tab:: Python @@ -957,15 +957,15 @@ Receiving data .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Mailbox::empty() - .. autodoxymethod:: simgrid::s4u::Mailbox::front() + .. autodoxymethod:: simgrid::s4u::Mailbox::empty + .. autodoxymethod:: simgrid::s4u::Mailbox::front .. autodoxymethod:: simgrid::s4u::Mailbox::get() .. autodoxymethod:: simgrid::s4u::Mailbox::get(double timeout) .. autodoxymethod:: simgrid::s4u::Mailbox::get_async(void **data) .. autodoxymethod:: simgrid::s4u::Mailbox::get_init() .. autodoxymethod:: simgrid::s4u::Mailbox::iprobe(int type, bool(*match_fun)(void *, void *, kernel::activity::CommImpl *), void *data) - .. autodoxymethod:: simgrid::s4u::Mailbox::listen() - .. autodoxymethod:: simgrid::s4u::Mailbox::ready() + .. autodoxymethod:: simgrid::s4u::Mailbox::listen + .. autodoxymethod:: simgrid::s4u::Mailbox::ready .. group-tab:: Python @@ -984,7 +984,7 @@ See :ref:`s4u_receiving_actor`. .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Mailbox::get_receiver() + .. 
autodoxymethod:: simgrid::s4u::Mailbox::get_receiver .. autodoxymethod:: simgrid::s4u::Mailbox::set_receiver(ActorPtr actor) .. group-tab:: C @@ -1248,9 +1248,9 @@ Execution .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::Host::exec_async(double flops_amounts) - .. autodoxymethod:: simgrid::s4u::Host::execute(double flops) - .. autodoxymethod:: simgrid::s4u::Host::execute(double flops, double priority) + .. autodoxymethod:: simgrid::s4u::Host::exec_async + .. autodoxymethod:: simgrid::s4u::Host::execute(double flops) const + .. autodoxymethod:: simgrid::s4u::Host::execute(double flops, double priority) const Platform and routing -------------------- @@ -1261,8 +1261,8 @@ Platform and routing .. autodoxymethod:: simgrid::s4u::Host::get_englobing_zone() .. autodoxymethod:: simgrid::s4u::Host::get_netpoint() const - .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< Link *> &links, double *latency) const - .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< kernel::resource::LinkImpl *> &links, double *latency) const + .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< Link * > &links, double *latency) const + .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< kernel::resource::LinkImpl * > &links, double *latency) const .. autodoxymethod:: simgrid::s4u::Host::sendto(Host *dest, double byte_amount) .. autodoxymethod:: simgrid::s4u::Host::sendto_async(Host *dest, double byte_amount) @@ -1365,6 +1365,16 @@ Querying info .. autodoxymethod:: sg_link_latency(const_sg_link_t link) .. autodoxymethod:: sg_link_name(const_sg_link_t link) +Modifying characteristics +------------------------- + +.. tabs:: + + .. group-tab:: C++ + + .. autodoxymethod:: simgrid::s4u::Link::set_bandwidth(double value) + .. autodoxymethod:: simgrid::s4u::Link::set_latency(double value) + User data and properties ------------------------ @@ -1396,6 +1406,8 @@ On/Off Dynamic profiles ---------------- +See :ref:`howto_churn` for more details. + .. tabs:: .. group-tab:: C++ @@ -1404,6 +1416,15 @@ Dynamic profiles .. autodoxymethod:: simgrid::s4u::Link::set_latency_profile(kernel::profile::Profile *profile) .. autodoxymethod:: simgrid::s4u::Link::set_state_profile(kernel::profile::Profile *profile) +WIFI links +---------- + +.. tabs:: + + .. group-tab:: C++ + + .. autodoxymethod:: simgrid::s4u::Link::set_host_wifi_rate + Signals ------- @@ -1519,9 +1540,9 @@ Routing data .. group-tab:: C++ - .. autodoxymethod:: simgrid::s4u::NetZone::add_bypass_route(kernel::routing::NetPoint *src, kernel::routing::NetPoint *dst, kernel::routing::NetPoint *gw_src, kernel::routing::NetPoint *gw_dst, std::vector< kernel::resource::LinkImpl *> &link_list, bool symmetrical) + .. autodoxymethod:: simgrid::s4u::NetZone::add_bypass_route .. autodoxymethod:: simgrid::s4u::NetZone::add_component(kernel::routing::NetPoint *elm) - .. autodoxymethod:: simgrid::s4u::NetZone::add_route(kernel::routing::NetPoint *src, kernel::routing::NetPoint *dst, kernel::routing::NetPoint *gw_src, kernel::routing::NetPoint *gw_dst, std::vector< kernel::resource::LinkImpl *> &link_list, bool symmetrical) + .. autodoxymethod:: simgrid::s4u::NetZone::add_route .. autodoxymethod:: simgrid::s4u::NetZone::get_children() const .. autodoxymethod:: simgrid::s4u::NetZone::get_father() @@ -1708,6 +1729,17 @@ Activities life cycle .. autodoxymethod:: simgrid::s4u::Activity::wait_until(double time_limit) .. 
autodoxymethod:: simgrid::s4u::Activity::vetoable_start() +Suspending and resuming an activity +----------------------------------- + +.. tabs:: + + .. group-tab:: C++ + + .. autodoxymethod:: simgrid::s4u::Activity::suspend + .. autodoxymethod:: simgrid::s4u::Activity::resume + .. autodoxymethod:: simgrid::s4u::Activity::is_suspended + .. _API_s4u_Comm: ============= diff --git a/docs/source/application.rst b/docs/source/application.rst index a568dc4305..4e46704a50 100644 --- a/docs/source/application.rst +++ b/docs/source/application.rst @@ -26,7 +26,7 @@ to mix several interfaces in the same simulation. - In some cases, you may want to replay an execution trace in the simulator. This trace lists the events of your application or of your workload, and your application is decomposed as a list of event handlers that are - fired according to the trace. SimGrid comes with a build-in support + fired according to the trace. SimGrid comes with a built-in support for MPI traces (with solutions to import traces captured by several MPI profilers). You can reuse this mechanism for any kind of trace that you want to replay, for example to study how a P2P DHT overlay diff --git a/docs/source/conf.py b/docs/source/conf.py index 0ca9461b26..e06332da96 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -67,6 +67,57 @@ breathe_default_project = "simgrid" # Setup the autodoxy extension doxygen_xml = os.path.join(os.path.dirname(__file__), "..", "build", "xml") +autodoxy_requalified_identifiers = [ # The first element will be substituted into the second one if it's starting an element or preceded by a space + ("Activity", "simgrid::s4u::Activity"), + ("Actor", "simgrid::s4u::Actor"), + ("ActorPtr", "simgrid::s4u::ActorPtr"), + ("Barrier", "simgrid::s4u::Barrier"), + ("BarrierPtr", "simgrid::s4u::BarrierPtr"), + ("Comm", "simgrid::s4u::Comm"), + ("CommPtr", "simgrid::s4u::CommPtr"), + ("ConditionVariable", "simgrid::s4u::ConditionVariable"), + ("ConditionVariablePtr", "simgrid::s4u::ConditionVariablePtr"), + ("Disk", "simgrid::s4u::Disk"), + ("Exec", "simgrid::s4u::Exec"), + ("ExecPtr", "simgrid::s4u::ExecPtr"), + ("Host", "simgrid::s4u::Host"), + ("s4u::Host", "simgrid::s4u::Host"), + ("Engine", "simgrid::s4u::Engine"), + ("Io", "simgrid::s4u::Io"), + ("Link", "simgrid::s4u::Link"), + ("Mailbox", "simgrid::s4u::Mailbox"), + ("Mutex", "simgrid::s4u::Mutex"), + ("s4u::Mutex", "simgrid::s4u::Mutex"), + ("s4u::MutexPtr", "simgrid::s4u::MutexPtr"), + ("NetZone", "simgrid::s4u::NetZone"), + ("Semaphore", "simgrid::s4u::Semaphore"), + ("VirtualMachine", "simgrid::s4u::VirtualMachine"), + ] + +# Generate a warning for all a cross-reference (such as :func:`myfunc`) that cannot be found +nitpicky = True +nitpick_ignore = [ + ('cpp:identifier', 'boost'), + ('cpp:identifier', 'boost::intrusive_ptr'), + ('cpp:identifier', 'boost::intrusive_ptr'), + ('cpp:identifier', 'boost::intrusive_ptr'), + ('cpp:identifier', 'boost::intrusive_ptr'), + ('cpp:identifier', 'boost::intrusive_ptr'), + ('cpp:identifier', 'boost::intrusive_ptr'), + ('cpp:identifier', 'kernel'), + ('cpp:identifier', 'kernel::activity'), + ('cpp:identifier', 'kernel::profile'), + ('cpp:identifier', 'kernel::resource'), + ('cpp:identifier', 'kernel::routing'), + ('cpp:identifier', 'simgrid'), + ('cpp:identifier', 'simgrid::s4u'), + ('cpp:identifier', 'this_actor'), + ('cpp:identifier', 's4u'), + ('cpp:identifier', 'size_t'), + ('cpp:identifier', 'uint64_t'), + ('cpp:identifier', 'xbt'), + ('cpp:identifier', 'xbt::string'), +] # For cross-ref 
generation primary_domain = 'cpp' @@ -151,4 +202,3 @@ html_css_files = [ # -- Other options -nitpicky = True # Generate a warning for all a cross-reference (such as :func:`myfunc`) that cannot be found diff --git a/docs/source/ns3.rst b/docs/source/ns3.rst index c6b634b789..a2cb51c57b 100644 --- a/docs/source/ns3.rst +++ b/docs/source/ns3.rst @@ -8,6 +8,13 @@ You can use the well-known `ns-3 packet-level network simulator validity of your simulation. Just install ns-3 and recompile SimGrid accordingly. +The SimGrid/ns-3 binding only contains features that are common to both systems. +Not all ns-3 models are available from SimGrid (only the TCP and WiFi ones are), +while not all SimGrid platform files can be used in conjunction ns-3 (routes +must be of length 1). Also, the platform built in ns-3 from the SimGrid +description is very basic. + + Compiling the ns-3/SimGrid binding ********************************** @@ -18,27 +25,14 @@ SimGrid requires ns-3 version 3.26 or higher, and you probably want the most recent version of both SimGrid and ns-3. While the Debian package of SimGrid don't have the ns-3 bindings activated, you can still use the packaged version of ns-3 by grabbing the ``libns3-dev ns3`` packages. Alternatively, you can -install ns-3 from scratch as follows: - -.. code-block:: shell - - # Download the source - wget http://www.nsnam.org/release/ns-allinone-3.29.tar.bz2 - tar -xf ns-allinone-3.29.tar.bz2 - cd ns-allinone-3.29/ns-3.29/ - # Configure, build and install - ./waf configure --prefix="/opt/ns3" # or give another path if you prefer - ./waf - ./waf install - -For more information, please refer to the ns-3 documentation -(`official website `_). +install ns-3 from scratch (see the `ns-3 documentation `_). Enabling ns-3 in SimGrid ======================== SimGrid must be recompiled with the ``enable_ns3`` option activated in cmake. -Optionally, use ``NS3_HINT`` to hint cmake about where to find ns-3. +Optionally, use ``NS3_HINT`` to tell cmake where ns3 is installed on +your disk. .. code-block:: shell @@ -50,7 +44,9 @@ If your local copy defines the variable ``SIMGRID_HAVE_NS3`` to 1, then ns-3 was correctly detected. Otherwise, explore ``CMakeFiles/CMakeOutput.log`` and ``CMakeFiles/CMakeError.log`` to diagnose the problem. -Test your installation after compilation as follows: +Test that ns-3 was successfully integrated with the following (from your SimGrid +build directory). It will run all SimGrid tests that are related to the ns-3 +integration. If no test is run at all, you probably forgot to enable ns-3 in cmake. .. code-block:: shell @@ -69,181 +65,146 @@ version of either SimGrid or ns-3, try upgrading everything. Using ns-3 from SimGrid *********************** -The SimGrid/ns-3 binding only contains features that are common to both -systems. Also, the platform built in ns-3 from the -SimGrid description is very basic. - Platform files compatibility ============================ Any route longer than one will be ignored when using ns-3. They are harmless, but you still need to connect your hosts using one-hop routes. The best solution is to add routers to split your route. Here is an -example of invalid platform: - -.. code-block:: shell - - - - - - - - - - - - - - - - - - +example of an invalid platform: + +.. code-block:: xml + + + + + + + + + + + + + + + + + + This can be reformulated as follows to make it usable with the ns-3 binding. -There is no direct connection from alice to bob, but that's OK because -ns-3 automatically routes from point to point. - -.. 
code-block:: shell - - - - - - - - - - - - - - - - - - - - - - +There is no direct connection from alice to bob, but that's OK because ns-3 +automatically routes from point to point (using +``ns3::Ipv4GlobalRoutingHelper::PopulateRoutingTables``). + +.. code-block:: xml + + + + + + + + + + + + + + + + + + + + + + Once your platform is OK, just change the :ref:`network/model -`_ configuration option to "ns-3" as follows. The rest -is unchanged. +` configuration option to `ns-3` as follows. The other +options can be used as usual. .. code-block:: shell ./network-ns3 --cfg=network/model:ns-3 (other parameters) -Many other files from the ``examples/platform directory`` are usable with the -ns-3 model. +Many other files from the ``examples/platform`` directory are usable with the +ns-3 model, such as `examples/platforms/dogbone.xml `_. +Check the file `examples/s4u/network-ns3/network-ns3.tesh `_ +to see which ones are used in our regression tests. -Build a wifi-compatible platform -=================================== +WiFi platforms +-------------- -We describe here a simple platform allowing ns3 wifi communication -between two simgrid hosts. +In SimGrid, WiFi networks are modeled as regular links with a specific +attribute, and these links are then added to routes between hosts. The main +difference When using ns-3 WiFi networks is that the network performance is not +given by the link bandwidth and latency but by the access point WiFi +characteristics, and the distance between the access point and the hosts (called +station in the WiFi world). -First, here are the mandatory information necessary to create a -simgrid platform: +So, to declare a new WiFi network, simply declare a link with the ``WiFi`` +sharing policy as you would do in a pure SimGrid simulation (you must still +provide the ``bandwidth`` and ``latency`` attributes even if they are ignored, +because they are mandatory to the SimGrid XML parser). -.. code-block:: shell - - - - - -Then, we create our access point and station hosts: +.. code-block:: xml -.. code-block:: shell + - - +To declare that a given host is connected to this WiFi zone, use the +``wifi_link`` property of that host. The property value must be the link id that +you want to use as a WiFi zone. This is not needed when using pure SimGrid wifi, +only when using ns-3 wifi, because the wifi performance is :ref:`configured `. -We must specify that alice will be our access point. To do that we -simply add the property ``wifi_link`` to the host ``alice``: +.. code-block:: xml -.. code-block:: shell + + + - - - +To connect the station node to the access point node, simply create a route +between them: - +.. code-block:: xml -The value ``net0`` of this property defines the name of the wifi network -generated. To generate this wifi network we create a wifi link: + + + -.. code-block:: shell`` +.. _ns3_wifi_perf: - +WiFi network performance +^^^^^^^^^^^^^^^^^^^^^^^^ -The important information here are: - * The id of the link, ``net0``, must match the network name defined by the property ``wifi_link`` of the access point node - * The sharing policy must be set to ``WIFI`` -Note: bandwidth and latency are mandatory by simgrid to create a link but are NOT used to create a wifi network. Instead the -wifi network capabilities are defined by its MCS, NSS and distance from access point to station. 
Those properties are described in section :ref:`Optional access point node properties `_ +The performance of a wifi network is controlled by 3 property that can be added +to the an host connected to the wifi zone: -To connect the station node to the access point node, we -create a route between the hosts: + * ``wifi_mcs`` (`Modulation and Coding Scheme `_) + Roughly speaking, it defines the speed at which the access point is + exchanging data with all stations. It depends on its model and configuration, + and the possible values are listed for example on Wikipedia. + |br| By default, ``wifi_mcs=3``. + * ``wifi_nss`` (Number of Spatial Streams, or `number of antennas `_) + defines the amount of simultaneous data streams that the AP can sustain. + Not all value of MCS and NSS are valid nor compatible (cf. `802.11n standard `_). + |br| By default, ``wifi_nss=1``. + * ``wifi_distance`` is the distance from the station to the access point. Each + station can have a specific value. + |br| By default, ``wifi_distance=10``. -.. code-block:: shell +Here is an example of host changing all these values: - - - +.. code-block:: xml -Finally, we end the xml file with the missing closing tags: - -.. code-block:: shell - - - - -.. _optional_prop: - -Optional access point node properties --------------------------------------- - -The MCS (`Modulation and Coding Scheme `_) can be set with the property ``wifi_mcs``: - -.. code-block:: shell - - - - - - -Its default value is 3. - -The NSS (Number of Spatial Streams, also known as the `number of antennas `_) can be set with the property ``wifi_nss``: - -.. code-block:: shell - - - - - - -Its default value is 1. - -Note: not all value of MCS and NSS are valid nor compatible. Check `802.11n standard `_ for more information. - -Optional station node properties ---------------------------------- - -The distance in meter at which the station is placed from the access point can -be set with the property ``wifi_distance``. - -.. code-block:: shell - - - - - -Its default value is 10. + + + + + + Limitations =========== @@ -251,10 +212,12 @@ Limitations A ns-3 platform is automatically created from the provided SimGrid platform. However, there are some known caveats: - * The default values (e.g., TCP parameters) are the ns-3 default values. - * ns-3 networks are routed using the shortest path algorithm, using - ``ns3::Ipv4GlobalRoutingHelper::PopulateRoutingTables``. - + * The default values (e.g., TCP parameters) are the ns-3 default values. + * ns-3 networks are routed using the shortest path algorithm, using ``ns3::Ipv4GlobalRoutingHelper::PopulateRoutingTables``. + * End hosts cannot have more than one interface card. So, your SimGrid hosts + should be connected to the platform through only one link. Otherwise, your + SimGrid host will be considered as a router (FIXME: is it still true?). + Our goal is to keep the ns-3 plugin of SimGrid as easy (and hopefully readable) as possible. If the current state does not fit your needs, you should modify this plugin, and/or create your own plugin from the existing one. If you come up @@ -268,3 +231,7 @@ is sending data that is not routable in your platform. Make sure that you only use routes of length 1, and that any host is connected to the platform. Arguably, SimGrid could detect this situation and report it, but unfortunately, this is still to be done. + +.. |br| raw:: html + +
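Editorial note on the ns-3 WiFi hunk above: the XML snippets inside its ``code-block`` directives lost their markup in this copy of the patch. As a rough sketch of what that section describes (not the patch's own example), the block below shows a minimal WiFi platform: one link with the ``WIFI`` sharing policy, an access-point host tagged with the ``wifi_link`` property naming that link, a station host carrying the ``wifi_mcs``, ``wifi_nss`` and ``wifi_distance`` properties, and a one-hop route between the two hosts. Host speeds, bandwidth, latency, property values and the DOCTYPE header are illustrative placeholders and may need adjusting for your SimGrid version; per the section above, hosts connected to the zone may also need the ``wifi_link`` property when the ns-3 backend is used.

.. code-block:: xml

   <?xml version='1.0'?>
   <!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
   <platform version="4.1">
     <zone id="zone0" routing="Full">
       <!-- Access point: wifi_link names the WiFi network it manages (must match the link id) -->
       <host id="alice" speed="100Mf">
         <prop id="wifi_link" value="net0"/>
       </host>
       <!-- Station: optional performance properties; defaults are wifi_mcs=3, wifi_nss=1, wifi_distance=10 -->
       <host id="bob" speed="100Mf">
         <prop id="wifi_mcs" value="5"/>
         <prop id="wifi_nss" value="2"/>
         <prop id="wifi_distance" value="30"/>
       </host>
       <!-- bandwidth and latency are required by the XML parser but not used for WIFI links -->
       <link id="net0" bandwidth="54Mbps" latency="0us" sharing_policy="WIFI"/>
       <!-- one-hop route connecting the station to the access point, as required by the ns-3 binding -->
       <route src="alice" dst="bob">
         <link_ctn id="net0"/>
       </route>
     </zone>
   </platform>

Such a platform file is then run against the ns-3 backend as the section explains, e.g. ``./my-simulator platform.xml --cfg=network/model:ns-3`` (the binary name is a placeholder).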
diff --git a/docs/source/platform_howtos.rst b/docs/source/platform_howtos.rst index 2a135eaaf3..e7dc6206e9 100644 --- a/docs/source/platform_howtos.rst +++ b/docs/source/platform_howtos.rst @@ -288,8 +288,8 @@ on such a simple example: - When data is transferred from A to B, some TCP ACK messages travel in the opposite direction. To reflect the impact of this `cross-traffic`, SimGrid simulates a flow from B to A that represents an additional bandwidth - consumption of `0.05`. The route from B to A is implicity declared in the - platfrom file and uses the same link `link1` as if the two hosts were + consumption of `0.05`. The route from B to A is implicitly declared in the + platform file and uses the same link `link1` as if the two hosts were connected through a communication bus. The bandwidth share allocated to the flow from A to B is then the available bandwidth of `link1` (i.e., 97% of the nominal bandwidth of 1Mb/s) divided by 1.05 (i.e., the total consumption). diff --git a/examples/README.rst b/examples/README.rst index 7cc5e29be1..d1e5482a71 100644 --- a/examples/README.rst +++ b/examples/README.rst @@ -28,12 +28,12 @@ to simulate. Actors: the Active Entities =========================== -Starting and Stoping Actors ---------------------------- +Starting and Stopping Actors +---------------------------- - **Creating actors:** Most actors are started from the deployment XML file, because this - is a :ref:`better scientific habbit `, but you can + is a :ref:`better scientific habit `, but you can also create them directly from your code. .. tabs:: @@ -106,7 +106,7 @@ Starting and Stoping Actors See also :cpp:func:`sg_actor_kill`, :cpp:func:`sg_actor_kill_all`, :cpp:func:`sg_actor_exit`, :cpp:func:`sg_actor_on_exit`. - - **Controling the actor life cycle from the XML:** + - **Controlling the actor life cycle from the XML:** You can specify a start time and a kill time in the deployment file. .. tabs:: @@ -147,7 +147,7 @@ Starting and Stoping Actors - **Specify the stack size to use** The stack size can be specified by default on the command line, - globally by changing the configuration with :cpp:func:`simgrid::s4u::Engine::set_config(std::string)`, + globally by changing the configuration with :cpp:func:`simgrid::s4u::Engine::set_config`, or for a specific actor using :cpp:func:`simgrid::s4u::Actor::set_stacksize` before its start. .. tabs:: @@ -289,33 +289,48 @@ Communications on the Network .. tabs:: - .. example-tab:: examples/s4u/async-wait/s4u-async-wait.cpp + .. example-tab:: examples/s4u/comm-wait/s4u-comm-wait.cpp See also :cpp:func:`simgrid::s4u::Mailbox::put_async()` and :cpp:func:`simgrid::s4u::Comm::wait()`. - .. example-tab:: examples/python/async-wait/async-wait.py + .. example-tab:: examples/python/comm-wait/comm-wait.py See also :py:func:`simgrid.Mailbox.put_async()` and :py:func:`simgrid.Comm.wait()`. - .. example-tab:: examples/c/async-wait/async-wait.c + .. example-tab:: examples/c/comm-wait/comm-wait.c See also :cpp:func:`sg_mailbox_put_async()` and :cpp:func:`sg_comm__wait()`. + - **Suspending communications:** + The ``suspend()`` and ``resume()`` functions allow to block the + progression of a given communication for a while and then unblock it. + ``is_suspended()`` can be used to retrieve whether the activity is + currently blocked or not. + + .. tabs:: + + .. 
example-tab:: examples/s4u/comm-suspend/s4u-comm-suspend.cpp + + See also :cpp:func:`simgrid::s4u::Activity::suspend()` + :cpp:func:`simgrid::s4u::Activity::resume()` and + :cpp:func:`simgrid::s4u::Activity::is_suspended()`. + + - **Waiting for all communications in a set:** The ``wait_all()`` function is useful when you want to block until all activities in a given set have completed. .. tabs:: - .. example-tab:: examples/s4u/async-waitall/s4u-async-waitall.cpp + .. example-tab:: examples/s4u/comm-waitall/s4u-comm-waitall.cpp See also :cpp:func:`simgrid::s4u::Comm::wait_all()`. - .. example-tab:: examples/python/async-waitall/async-waitall.py + .. example-tab:: examples/python/comm-waitall/comm-waitall.py See also :py:func:`simgrid.Comm.wait_all()`. - .. example-tab:: examples/c/async-waitall/async-waitall.c + .. example-tab:: examples/c/comm-waitall/comm-waitall.c See also :cpp:func:`sg_comm_wait_all()`. @@ -326,15 +341,15 @@ Communications on the Network .. tabs:: - .. example-tab:: examples/s4u/async-waitany/s4u-async-waitany.cpp + .. example-tab:: examples/s4u/comm-waitany/s4u-comm-waitany.cpp See also :cpp:func:`simgrid::s4u::Comm::wait_any()`. - .. example-tab:: examples/python/async-waitany/async-waitany.py + .. example-tab:: examples/python/comm-waitany/comm-waitany.py See also :py:func:`simgrid.Comm.wait_any()`. - .. example-tab:: examples/c/async-waitany/async-waitany.c + .. example-tab:: examples/c/comm-waitany/comm-waitany.c See also :cpp:func:`sg_comm_wait_any`. @@ -679,7 +694,7 @@ options to see the task executions: - **Platform Tracing:** This program is a toy example just loading the platform, so that - you can play with the platform visualization. Recommanded options: + you can play with the platform visualization. Recommended options: ``--cfg=tracing:yes --cfg=tracing/categorized:yes`` .. tabs:: @@ -885,6 +900,35 @@ Model-Related Examples .. showfile:: examples/platforms/small_platform_one_link_routes.xml :language: xml + + - **wifi links** + + This demonstrates how to declare a wifi link in your platform and + how to use it in your simulation. The basics is to have a link + which sharing policy is set to `WIFI`. Such links can have more + than one bandwidth value (separated by commas), corresponding to + the several SNR level of your wifi link. + + In this case, SimGrid automatically switches to validated + performance models of wifi networks, where the time is shared + between users instead of the bandwidth for wired links (the + corresponding publication is currently being written). + + If your wifi link provides more than one SNR level, you can switch + the level of a given host using + :cpp:func:`simgrid::s4u::Link::set_host_wifi_rate`. By default, + the first level is used. + + .. tabs:: + + .. example-tab:: examples/s4u/network-wifi/s4u-network-wifi.cpp + + .. group-tab:: XML + + **Platform files:** + + .. 
showfile:: examples/platforms/wifi.xml + :language: xml ======================= Model-Checking Examples diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt index 34168e5ef2..ff0d3a4b82 100644 --- a/examples/c/CMakeLists.txt +++ b/examples/c/CMakeLists.txt @@ -5,7 +5,7 @@ foreach(x actor-create actor-daemon actor-exiting actor-join actor-kill actor-lifetime actor-migrate actor-stacksize actor-suspend actor-yield app-masterworker app-pingpong app-token-ring - async-wait async-waitall async-waitany + comm-wait comm-waitall comm-waitany cloud-capping cloud-masterworker cloud-migration cloud-simple dht-pastry exec-async exec-basic exec-dvfs exec-remote exec-waitany @@ -81,12 +81,12 @@ set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/actor-create/actor-cr ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/app-masterworker-multicore_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/app-masterworker-vivaldi_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/app-pingpong/app-pingpong_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait2_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait3_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait4_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-waitall/async-waitall_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-waitany/async-waitany_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait2_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait3_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait4_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitall/comm-waitall_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitany/comm-waitany_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/dht-kademlia/dht-kademlia_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/dht-pastry/dht-pastry_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/io-file-remote/io-file-remote_d.xml @@ -97,7 +97,7 @@ foreach(x actor-create actor-daemon actor-exiting actor-join actor-kill actor-lifetime actor-migrate actor-stacksize actor-suspend actor-yield app-bittorrent app-chainsend app-masterworker app-pingpong app-token-ring - async-wait async-waitall async-waitany + comm-wait comm-waitall comm-waitany cloud-capping cloud-masterworker cloud-migration cloud-simple dht-kademlia dht-pastry exec-async exec-basic exec-dvfs exec-remote exec-waitany diff --git a/examples/c/actor-create/actor-create.c b/examples/c/actor-create/actor-create.c index eda06b9f32..47e65cfcb0 100644 --- a/examples/c/actor-create/actor-create.c +++ b/examples/c/actor-create/actor-create.c @@ -7,7 +7,7 @@ * * The first step is to declare the code of your actors (what they do exactly does not matter to this example) and then * you ask SimGrid to start your actors. There is three ways of doing so: - * - Directly, by instantiating your actor as paramter to Actor::create() + * - Directly, by instantiating your actor as parameter to Actor::create() * - By first registering your actors before instantiating it; * - Through the deployment file. * diff --git a/examples/c/app-bittorrent/bittorrent-peer.c b/examples/c/app-bittorrent/bittorrent-peer.c index 82698bf9a7..408d3f136d 100644 --- a/examples/c/app-bittorrent/bittorrent-peer.c +++ b/examples/c/app-bittorrent/bittorrent-peer.c @@ -79,7 +79,7 @@ void peer(int argc, char* argv[]) xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments"); // Build peer object - peer_t peer = peer_init(xbt_str_parse_int(argv[1], "Invalid ID: %s"), argc == 4 ? 
1 : 0); + peer_t peer = peer_init((int)xbt_str_parse_int(argv[1], "Invalid ID: %s"), argc == 4 ? 1 : 0); // Retrieve deadline peer->deadline = xbt_str_parse_double(argv[2], "Invalid deadline: %s"); diff --git a/examples/c/app-bittorrent/tracker.c b/examples/c/app-bittorrent/tracker.c index dcee75fb51..09900f75e2 100644 --- a/examples/c/app-bittorrent/tracker.c +++ b/examples/c/app-bittorrent/tracker.c @@ -58,7 +58,7 @@ void tracker(int argc, char* argv[]) // Sending peers to the requesting peer tracker_answer_t ta = tracker_answer_new(TRACKER_QUERY_INTERVAL); int next_peer; - int peers_length = xbt_dynar_length(peers_list); + int peers_length = (int)xbt_dynar_length(peers_list); for (int i = 0; i < MAXIMUM_PEERS && i < peers_length; i++) { do { next_peer = xbt_dynar_get_as(peers_list, rand() % peers_length, int); diff --git a/examples/c/app-chainsend/broadcaster.c b/examples/c/app-chainsend/broadcaster.c index 9444caac0c..a5b8e6a20b 100644 --- a/examples/c/app-chainsend/broadcaster.c +++ b/examples/c/app-chainsend/broadcaster.c @@ -75,7 +75,7 @@ void broadcaster(int argc, char* argv[]) { XBT_DEBUG("broadcaster"); xbt_assert(argc > 2); - unsigned int host_count = xbt_str_parse_int(argv[1], "Invalid number of peers: %s"); + unsigned int host_count = (unsigned int)xbt_str_parse_int(argv[1], "Invalid number of peers: %s"); sg_mailbox_t* mailboxes = xbt_malloc(sizeof(sg_mailbox_t) * host_count); @@ -86,7 +86,7 @@ void broadcaster(int argc, char* argv[]) free(name); } - unsigned int piece_count = xbt_str_parse_int(argv[2], "Invalid number of pieces: %s"); + unsigned int piece_count = (unsigned int)xbt_str_parse_int(argv[2], "Invalid number of pieces: %s"); broadcaster_t bc = broadcaster_init(mailboxes, host_count, piece_count); diff --git a/examples/c/app-masterworker/app-masterworker.c b/examples/c/app-masterworker/app-masterworker.c index 1db37f7b5b..6137c8b918 100644 --- a/examples/c/app-masterworker/app-masterworker.c +++ b/examples/c/app-masterworker/app-masterworker.c @@ -23,7 +23,7 @@ static void master(int argc, char* argv[]) xbt_assert(argc == 5, "The master function expects 4 arguments from the XML deployment file"); long number_of_tasks = xbt_str_parse_int(argv[1], "Invalid amount of tasks: %s"); /* - Number of tasks */ double comp_size = xbt_str_parse_double(argv[2], "Invalid computational size: %s"); /* - Compute cost */ - double comm_size = xbt_str_parse_double(argv[3], "Invalid communication size: %s"); /* - Communication size */ + long comm_size = xbt_str_parse_int(argv[3], "Invalid communication size: %s"); /* - Communication size */ long workers_count = xbt_str_parse_int(argv[4], "Invalid amount of workers: %s"); /* - Number of workers */ XBT_INFO("Got %ld workers and %ld tasks to process", workers_count, number_of_tasks); diff --git a/examples/c/app-token-ring/app-token-ring.c b/examples/c/app-token-ring/app-token-ring.c index 1484d2b058..69c6fc177f 100644 --- a/examples/c/app-token-ring/app-token-ring.c +++ b/examples/c/app-token-ring/app-token-ring.c @@ -23,7 +23,7 @@ static void relay_runner(int argc, char* argv[]) xbt_assert(argc == 0, "The relay_runner function does not accept any parameter from the XML deployment file"); const char* name = sg_actor_self_get_name(); - int rank = xbt_str_parse_int(name, "Any actor of this example must have a numerical name, not %s"); + int rank = (int)xbt_str_parse_int(name, "Any actor of this example must have a numerical name, not %s"); sg_mailbox_t my_mailbox = sg_mailbox_by_name(name); diff --git 
a/examples/c/cloud-capping/cloud-capping.c b/examples/c/cloud-capping/cloud-capping.c index aafc0dc20b..bc3de023fd 100644 --- a/examples/c/cloud-capping/cloud-capping.c +++ b/examples/c/cloud-capping/cloud-capping.c @@ -20,7 +20,7 @@ static void worker_main(int argc, char* argv[]) { xbt_assert(argc == 4); double computation_amount = xbt_str_parse_double(argv[1], "Invalid computation amount: %s"); - int use_bound = xbt_str_parse_int(argv[2], "Second parameter (use_bound) should be 0 or 1 but is: %s"); + int use_bound = !!xbt_str_parse_int(argv[2], "Second parameter (use_bound) should be 0 or 1 but is: %s"); double bound = xbt_str_parse_double(argv[3], "Invalid bound: %s"); double clock_sta = simgrid_get_clock(); diff --git a/examples/c/cloud-masterworker/cloud-masterworker.c b/examples/c/cloud-masterworker/cloud-masterworker.c index c58c30dcca..1e54f2d514 100644 --- a/examples/c/cloud-masterworker/cloud-masterworker.c +++ b/examples/c/cloud-masterworker/cloud-masterworker.c @@ -21,7 +21,7 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(cloud_masterworker, "Messages specific for this exa #define FINALIZE 221297 /* a magic number to tell people to stop working */ const double comp_size = 10000000; -const double comm_size = 10000000; +const long comm_size = 10000000; static void send_tasks(int nb_workers) { diff --git a/examples/c/async-wait/async-wait.c b/examples/c/comm-wait/comm-wait.c similarity index 95% rename from examples/c/async-wait/async-wait.c rename to examples/c/comm-wait/comm-wait.c index 64ba4efd49..142dcf7b85 100644 --- a/examples/c/async-wait/async-wait.c +++ b/examples/c/comm-wait/comm-wait.c @@ -14,14 +14,14 @@ #include -XBT_LOG_NEW_DEFAULT_CATEGORY(async_wait, "Messages specific for this example"); +XBT_LOG_NEW_DEFAULT_CATEGORY(comm_wait, "Messages specific for this example"); /* Main function of the Sender actor */ static void sender(int argc, char* argv[]) { xbt_assert(argc == 5, "The sender function expects 4 arguments from the XML deployment file"); long messages_count = xbt_str_parse_int(argv[1], "Invalid amount of messages: %s"); /* - number of messages */ - double message_size = xbt_str_parse_double(argv[2], "Invalid message size: %s"); /* - communication cost */ + long message_size = xbt_str_parse_int(argv[2], "Invalid message size: %s"); /* - communication cost */ double sleep_start_time = xbt_str_parse_double(argv[3], "Invalid sleep start time: %s"); /* - start time */ double sleep_test_time = xbt_str_parse_double(argv[4], "Invalid test time: %s"); /* - test time */ diff --git a/examples/c/async-wait/async-wait.tesh b/examples/c/comm-wait/comm-wait.tesh similarity index 84% rename from examples/c/async-wait/async-wait.tesh rename to examples/c/comm-wait/comm-wait.tesh index 72cc48a377..9307cd9dd9 100644 --- a/examples/c/async-wait/async-wait.tesh +++ b/examples/c/comm-wait/comm-wait.tesh @@ -2,7 +2,7 @@ p Test1 sg_comm_test() with Sleep_sender > Sleep_receiver -$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) sleep_start_time : 5.000000 , sleep_test_time : 0.100000 > [ 0.000000] (2:receiver@Ruby) sleep_start_time : 1.000000 , sleep_test_time : 0.100000 > [ 1.000000] (2:receiver@Ruby) Wait for my first message @@ -17,7 +17,7 @@ $ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:= p 
Test2 sg_comm_test() with Sleep_sender < Sleep_receiver -$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait2_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait2_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) sleep_start_time : 1.000000 , sleep_test_time : 0.100000 > [ 0.000000] (2:receiver@Ruby) sleep_start_time : 5.000000 , sleep_test_time : 0.100000 > [ 1.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver' @@ -32,7 +32,7 @@ $ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:= p Test1 sg_comm_wait() with Sleep_sender > Sleep_receiver -$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait3_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait3_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) sleep_start_time : 5.000000 , sleep_test_time : 0.000000 > [ 0.000000] (2:receiver@Ruby) sleep_start_time : 1.000000 , sleep_test_time : 0.000000 > [ 1.000000] (2:receiver@Ruby) Wait for my first message @@ -47,7 +47,7 @@ $ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:= p Test2 sg_comm_wait() with Sleep_sender < Sleep_receiver -$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait4_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait4_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) sleep_start_time : 1.000000 , sleep_test_time : 0.000000 > [ 0.000000] (2:receiver@Ruby) sleep_start_time : 5.000000 , sleep_test_time : 0.000000 > [ 1.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver' diff --git a/examples/c/async-wait/async-wait2_d.xml b/examples/c/comm-wait/comm-wait2_d.xml similarity index 100% rename from examples/c/async-wait/async-wait2_d.xml rename to examples/c/comm-wait/comm-wait2_d.xml diff --git a/examples/c/async-wait/async-wait3_d.xml b/examples/c/comm-wait/comm-wait3_d.xml similarity index 100% rename from examples/c/async-wait/async-wait3_d.xml rename to examples/c/comm-wait/comm-wait3_d.xml diff --git a/examples/c/async-wait/async-wait4_d.xml b/examples/c/comm-wait/comm-wait4_d.xml similarity index 100% rename from examples/c/async-wait/async-wait4_d.xml rename to examples/c/comm-wait/comm-wait4_d.xml diff --git a/examples/c/async-wait/async-wait_d.xml b/examples/c/comm-wait/comm-wait_d.xml similarity index 100% rename from examples/c/async-wait/async-wait_d.xml rename to examples/c/comm-wait/comm-wait_d.xml diff --git a/examples/c/async-waitall/async-waitall.c b/examples/c/comm-waitall/comm-waitall.c similarity index 89% rename from examples/c/async-waitall/async-waitall.c rename to examples/c/comm-waitall/comm-waitall.c index f0755cf812..e676bc95c1 100644 --- a/examples/c/async-waitall/async-waitall.c +++ b/examples/c/comm-waitall/comm-waitall.c @@ -15,7 +15,7 @@ #include /* snprintf */ -XBT_LOG_NEW_DEFAULT_CATEGORY(msg_async_waitall, "Messages specific for this msg example"); +XBT_LOG_NEW_DEFAULT_CATEGORY(comm_waitall, "Messages specific for this msg example"); static void sender(int argc, char* argv[]) { @@ -23,6 +23,7 @@ static void sender(int argc, char* argv[]) long messages_count = xbt_str_parse_int(argv[1], "Invalid 
message count: %s"); long message_size = xbt_str_parse_int(argv[2], "Invalid message size: %s"); long receivers_count = xbt_str_parse_int(argv[3], "Invalid amount of receivers: %s"); + xbt_assert(receivers_count > 0); /* Array in which we store all ongoing communications */ sg_comm_t* pending_comms = xbt_malloc(sizeof(sg_comm_t) * (messages_count + receivers_count)); @@ -38,9 +39,9 @@ static void sender(int argc, char* argv[]) } /* Start dispatching all messages to receivers, in a round robin fashion */ - for (int i = 0; i < messages_count; i++) { + for (long i = 0; i < messages_count; i++) { char msg_content[80]; - snprintf(msg_content, 79, "Message %d", i); + snprintf(msg_content, 79, "Message %ld", i); sg_mailbox_t mbox = mboxes[i % receivers_count]; XBT_INFO("Send '%s' to '%s'", msg_content, sg_mailbox_get_name(mbox)); /* Create a communication representing the ongoing communication, and store it in pending_comms */ @@ -48,8 +49,8 @@ static void sender(int argc, char* argv[]) } /* Start sending messages to let the workers know that they should stop */ - for (int i = 0; i < receivers_count; i++) { - XBT_INFO("Send 'finalize' to 'receiver-%d'", i); + for (long i = 0; i < receivers_count; i++) { + XBT_INFO("Send 'finalize' to 'receiver-%ld'", i); char* end_msg = xbt_strdup("finalize"); sg_mailbox_t mbox = mboxes[i % receivers_count]; pending_comms[pending_comms_count++] = sg_mailbox_put_async(mbox, end_msg, 0); @@ -69,7 +70,7 @@ static void sender(int argc, char* argv[]) static void receiver(int argc, char* argv[]) { xbt_assert(argc == 2, "Expecting one parameter from the XML deployment file but got %d", argc); - int id = xbt_str_parse_int(argv[1], "ID should be numerical, not %s"); + int id = (int)xbt_str_parse_int(argv[1], "ID should be numerical, not %s"); char mailbox_name[80]; snprintf(mailbox_name, 79, "receiver-%d", id); sg_mailbox_t mbox = sg_mailbox_by_name(mailbox_name); diff --git a/examples/c/async-waitall/async-waitall.tesh b/examples/c/comm-waitall/comm-waitall.tesh similarity index 88% rename from examples/c/async-waitall/async-waitall.tesh rename to examples/c/comm-waitall/comm-waitall.tesh index ce51d6d049..326f543542 100644 --- a/examples/c/async-waitall/async-waitall.tesh +++ b/examples/c/comm-waitall/comm-waitall.tesh @@ -1,7 +1,7 @@ #!/usr/bin/env tesh ! 
output sort 19 -$ ${bindir:=.}/c-async-waitall ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/c-comm-waitall ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0' > [ 0.000000] (2:receiver@Ruby) Wait for my first message > [ 0.000000] (3:receiver@Perl) Wait for my first message diff --git a/examples/c/async-waitall/async-waitall_d.xml b/examples/c/comm-waitall/comm-waitall_d.xml similarity index 100% rename from examples/c/async-waitall/async-waitall_d.xml rename to examples/c/comm-waitall/comm-waitall_d.xml diff --git a/examples/c/async-waitany/async-waitany.c b/examples/c/comm-waitany/comm-waitany.c similarity index 91% rename from examples/c/async-waitany/async-waitany.c rename to examples/c/comm-waitany/comm-waitany.c index 14a0767470..9dd47e8503 100644 --- a/examples/c/async-waitany/async-waitany.c +++ b/examples/c/comm-waitany/comm-waitany.c @@ -14,7 +14,7 @@ #include /* snprintf */ -XBT_LOG_NEW_DEFAULT_CATEGORY(async_waitany, "Messages specific for this example"); +XBT_LOG_NEW_DEFAULT_CATEGORY(comm_waitany, "Messages specific for this example"); static void sender(int argc, char* argv[]) { @@ -22,6 +22,7 @@ static void sender(int argc, char* argv[]) long messages_count = xbt_str_parse_int(argv[1], "Invalid message count: %s"); long msg_size = xbt_str_parse_int(argv[2], "Invalid message size: %s"); long receivers_count = xbt_str_parse_int(argv[3], "Invalid amount of receivers: %s"); + xbt_assert(receivers_count > 0); /* Array in which we store all ongoing communications */ sg_comm_t* pending_comms = xbt_malloc(sizeof(sg_comm_t) * (messages_count + receivers_count)); @@ -37,9 +38,9 @@ static void sender(int argc, char* argv[]) } /* Start dispatching all messages to receivers, in a round robin fashion */ - for (int i = 0; i < messages_count; i++) { + for (long i = 0; i < messages_count; i++) { char msg_content[80]; - snprintf(msg_content, 79, "Message %d", i); + snprintf(msg_content, 79, "Message %ld", i); sg_mailbox_t mbox = mboxes[i % receivers_count]; XBT_INFO("Send '%s' to '%s'", msg_content, sg_mailbox_get_name(mbox)); @@ -47,8 +48,8 @@ static void sender(int argc, char* argv[]) pending_comms[pending_comms_count++] = sg_mailbox_put_async(mbox, xbt_strdup(msg_content), msg_size); } /* Start sending messages to let the workers know that they should stop */ - for (int i = 0; i < receivers_count; i++) { - XBT_INFO("Send 'finalize' to 'receiver-%d'", i); + for (long i = 0; i < receivers_count; i++) { + XBT_INFO("Send 'finalize' to 'receiver-%ld'", i); char* end_msg = xbt_strdup("finalize"); sg_mailbox_t mbox = mboxes[i % receivers_count]; pending_comms[pending_comms_count++] = sg_mailbox_put_async(mbox, end_msg, 0); @@ -82,7 +83,7 @@ static void sender(int argc, char* argv[]) static void receiver(int argc, char* argv[]) { xbt_assert(argc == 2, "Expecting one parameter from the XML deployment file but got %d", argc); - int id = xbt_str_parse_int(argv[1], "ID should be numerical, not %s"); + int id = (int)xbt_str_parse_int(argv[1], "ID should be numerical, not %s"); char mailbox_name[80]; snprintf(mailbox_name, 79, "receiver-%d", id); sg_mailbox_t mbox = sg_mailbox_by_name(mailbox_name); diff --git a/examples/c/async-waitany/async-waitany.tesh b/examples/c/comm-waitany/comm-waitany.tesh similarity index 88% rename from examples/c/async-waitany/async-waitany.tesh rename to 
examples/c/comm-waitany/comm-waitany.tesh index 9ecd94715d..3d19ecd623 100644 --- a/examples/c/async-waitany/async-waitany.tesh +++ b/examples/c/comm-waitany/comm-waitany.tesh @@ -1,7 +1,7 @@ #!/usr/bin/env tesh ! output sort 19 -$ ${bindir:=.}/c-async-waitany ${platfdir:=.}/small_platform.xml ${srcdir:=.}/async-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/c-comm-waitany ${platfdir:=.}/small_platform.xml ${srcdir:=.}/comm-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0' > [ 0.000000] (1:sender@Tremblay) Send 'Message 1' to 'receiver-1' > [ 0.000000] (1:sender@Tremblay) Send 'Message 2' to 'receiver-0' diff --git a/examples/c/async-waitany/async-waitany_d.xml b/examples/c/comm-waitany/comm-waitany_d.xml similarity index 100% rename from examples/c/async-waitany/async-waitany_d.xml rename to examples/c/comm-waitany/comm-waitany_d.xml diff --git a/examples/c/dht-kademlia/dht-kademlia.c b/examples/c/dht-kademlia/dht-kademlia.c index 887bc1c295..63130347fb 100644 --- a/examples/c/dht-kademlia/dht-kademlia.c +++ b/examples/c/dht-kademlia/dht-kademlia.c @@ -27,12 +27,12 @@ static void node(int argc, char* argv[]) double deadline; xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments"); /* Node initialization */ - unsigned int id = strtoul(argv[1], NULL, 0); + unsigned int id = (unsigned int)strtoul(argv[1], NULL, 0); node_t node = node_init(id); if (argc == 4) { XBT_INFO("Hi, I'm going to join the network with id %s", sg_mailbox_get_name(node->mailbox)); - unsigned int id_known = strtoul(argv[2], NULL, 0); + unsigned int id_known = (unsigned int)strtoul(argv[2], NULL, 0); join_success = join(node, id_known); deadline = strtod(argv[3], NULL); } else { diff --git a/examples/c/dht-kademlia/node.c b/examples/c/dht-kademlia/node.c index 13d287f4ff..79d228b4f6 100644 --- a/examples/c/dht-kademlia/node.c +++ b/examples/c/dht-kademlia/node.c @@ -320,7 +320,7 @@ unsigned int get_id_in_prefix(unsigned int id, unsigned int prefix) if (prefix == 0) { return 0; } else { - return (1U << ((unsigned int)(prefix - 1))) ^ id; + return (1U << (prefix - 1)) ^ id; } } diff --git a/examples/c/dht-pastry/dht-pastry.c b/examples/c/dht-pastry/dht-pastry.c index c6e5947b91..af12bf377c 100644 --- a/examples/c/dht-pastry/dht-pastry.c +++ b/examples/c/dht-pastry/dht-pastry.c @@ -13,7 +13,6 @@ #include "xbt/str.h" #include "xbt/sysdep.h" -#include #include XBT_LOG_NEW_DEFAULT_CATEGORY(pastry, "Messages specific for this example"); @@ -83,7 +82,7 @@ unsigned int domain_mask = 0; static int domain(unsigned int a, unsigned int level) { if (domain_mask == 0) - domain_mask = pow(2, DOMAIN_SIZE) - 1; + domain_mask = (1U << DOMAIN_SIZE) - 1; unsigned int shift = (LEVELS_COUNT - level - 1) * DOMAIN_SIZE; return (a >> shift) & domain_mask; } @@ -395,6 +394,7 @@ static void handle_message(node_t node, pastry_message_t message) /* Update routing table */ for (i = shl(node->id, message->state->id); i < LEVELS_COUNT; i++) { for (j = 0; j < LEVEL_SIZE; j++) { + // FIXME: this is a no-op! 
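// Aside on the FIXME above (not part of the committed hunk): the test below only fires when
// BOTH entries equal -1, so the assignment copies -1 over -1 and changes nothing.
// A plausible intent, stated here only as an assumption and not as the actual fix, would be
// to fill unknown local slots from the entries the sender does know:
//   if (node->routing_table[i][j] == -1 && message->state->routing_table[i][j] != -1)
//     node->routing_table[i][j] = message->state->routing_table[i][j];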
if (node->routing_table[i][j] == -1 && message->state->routing_table[i][j] == -1) node->routing_table[i][j] = message->state->routing_table[i][j]; } @@ -445,7 +445,7 @@ static void node(int argc, char* argv[]) double deadline; xbt_assert(argc == 3 || argc == 5, "Wrong number of arguments for this node"); s_node_t node = {0}; - node.id = xbt_str_parse_int(argv[1], "Invalid ID: %s"); + node.id = (int)xbt_str_parse_int(argv[1], "Invalid ID: %s"); node.known_id = -1; node.ready = -1; node.pending_messages = xbt_dynar_new(sizeof(pastry_message_t), NULL); @@ -472,7 +472,7 @@ static void node(int argc, char* argv[]) XBT_DEBUG("Create a new Pastry ring..."); join_success = 1; } else { - node.known_id = xbt_str_parse_int(argv[2], "Invalid known ID: %s"); + node.known_id = (int)xbt_str_parse_int(argv[2], "Invalid known ID: %s"); double sleep_time = xbt_str_parse_double(argv[3], "Invalid sleep time: %s"); deadline = xbt_str_parse_double(argv[4], "Invalid deadline: %s"); @@ -520,12 +520,12 @@ int main(int argc, char* argv[]) while (!strncmp(options[0], "-", 1)) { int length = strlen("-nb_bits="); if (!strncmp(options[0], "-nb_bits=", length) && strlen(options[0]) > length) { - nb_bits = xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s"); + nb_bits = (int)xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s"); XBT_DEBUG("Set nb_bits to %d", nb_bits); } else { length = strlen("-timeout="); if (!strncmp(options[0], "-timeout=", length) && strlen(options[0]) > length) { - timeout = xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s"); + timeout = (int)xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s"); XBT_DEBUG("Set timeout to %d", timeout); } else { xbt_die("Invalid pastry option '%s'", options[0]); diff --git a/examples/c/energy-exec-ptask/energy-exec-ptask.c b/examples/c/energy-exec-ptask/energy-exec-ptask.c index 7249e0146e..54ac87095c 100644 --- a/examples/c/energy-exec-ptask/energy-exec-ptask.c +++ b/examples/c/energy-exec-ptask/energy-exec-ptask.c @@ -17,7 +17,7 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(energy_exec_ptask, "Messages specific for this exam static void runner(int argc, char* argv[]) { /* Retrieve the list of all hosts as an array of hosts */ - int host_count = sg_host_count(); + int host_count = (int)sg_host_count(); sg_host_t* hosts = sg_host_list(); XBT_INFO("First, build a classical parallel task, with 1 Gflop to execute on each node, " diff --git a/examples/c/platform-failures/platform-failures.c b/examples/c/platform-failures/platform-failures.c index cdb4edf196..34e0eafe89 100644 --- a/examples/c/platform-failures/platform-failures.c +++ b/examples/c/platform-failures/platform-failures.c @@ -25,7 +25,7 @@ static void master(int argc, char* argv[]) xbt_assert(argc == 5); long number_of_tasks = xbt_str_parse_int(argv[1], "Invalid amount of tasks: %s"); double task_comp_size = xbt_str_parse_double(argv[2], "Invalid computational size: %s"); - double task_comm_size = xbt_str_parse_double(argv[3], "Invalid communication size: %s"); + long task_comm_size = xbt_str_parse_int(argv[3], "Invalid communication size: %s"); long workers_count = xbt_str_parse_int(argv[4], "Invalid amount of workers: %s"); XBT_INFO("Got %ld workers and %ld tasks to process", workers_count, number_of_tasks); diff --git a/examples/c/platform-failures/platform-failures.tesh b/examples/c/platform-failures/platform-failures.tesh index d70b2de9c0..5aa5c9ca16 100644 --- a/examples/c/platform-failures/platform-failures.tesh +++ 
b/examples/c/platform-failures/platform-failures.tesh @@ -210,7 +210,7 @@ $ ${bindir:=.}/c-platform-failures --log=xbt_cfg.thres:critical --log=no_loc ${p > [ 43.774742] (1:master@Tremblay) Goodbye now! > [ 43.774742] (0:maestro@) Simulation time 43.7747 -p NOT testing the mixure of failures and CpuTI: +p NOT testing the mixture of failures and CpuTI: p This test leads to a deadlock because of a bug somewhere in surf_solve. p We should debug this instead of ignoring the issue, but it's utterly p complex with such an integration test. One day, we will setup a set of diff --git a/examples/deprecated/java/async/waitall/Main.java b/examples/deprecated/java/async/waitall/Main.java index d183627a89..5035b0a8a9 100644 --- a/examples/deprecated/java/async/waitall/Main.java +++ b/examples/deprecated/java/async/waitall/Main.java @@ -5,7 +5,7 @@ package async.waitall; -/** This example demonstrates the use of the asynchrounous communications +/** This example demonstrates the use of the asynchronous communications * * Task.isend() and Task.irecv() are used to start the communications in non-blocking mode. * diff --git a/examples/deprecated/java/async/yield/Yielder.java b/examples/deprecated/java/async/yield/Yielder.java index 52a50379f4..afd88fbfca 100644 --- a/examples/deprecated/java/async/yield/Yielder.java +++ b/examples/deprecated/java/async/yield/Yielder.java @@ -17,7 +17,7 @@ public class Yielder extends Process { public void main(String[] args) { int yieldsCount = Integer.parseInt(args[0]); for (int i=0; i [0.000000] [java/INFO] Using regular java threads. > [900.000000] [java/INFO] Terminating the simulation... > [node-0.simgrid.org:dht.kademlia.Node:(1) 0.000000] [java/INFO] Hi, I'm going to create the network with the id 0! -> [node-0.simgrid.org:dht.kademlia.Node:(1) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded. +> [node-0.simgrid.org:dht.kademlia.Node:(1) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded. > [node-1.simgrid.org:dht.kademlia.Node:(2) 0.000000] [java/INFO] Hi, I'm going to join the network with the id 1! -> [node-1.simgrid.org:dht.kademlia.Node:(2) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded. +> [node-1.simgrid.org:dht.kademlia.Node:(2) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded. > [node-2.simgrid.org:dht.kademlia.Node:(3) 0.000000] [java/INFO] Hi, I'm going to join the network with the id 2! -> [node-2.simgrid.org:dht.kademlia.Node:(3) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded. +> [node-2.simgrid.org:dht.kademlia.Node:(3) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded. > [node-3.simgrid.org:dht.kademlia.Node:(4) 0.000000] [java/INFO] Hi, I'm going to join the network with the id 4! -> [node-3.simgrid.org:dht.kademlia.Node:(4) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded. +> [node-3.simgrid.org:dht.kademlia.Node:(4) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded. diff --git a/examples/deprecated/msg/README.doc b/examples/deprecated/msg/README.doc index 03505a57b2..a294843ed4 100644 --- a/examples/deprecated/msg/README.doc +++ b/examples/deprecated/msg/README.doc @@ -27,7 +27,7 @@ options to see the task executions: This example declares several tracing categories to that are used to classify its tasks. When the program is executed, the tracing mechanism registers the resource utilization of hosts - and links according to these categories. Recommanded options: + and links according to these categories. 
Recommended options: @verbatim --cfg=tracing:yes --cfg=tracing/categorized:yes --cfg=tracing/uncategorized:yes @endverbatim @@ -36,14 +36,14 @@ options to see the task executions: This is an augmented version of our basic master/worker example using several tracing features. It traces resource usage, sorted out in several categories; Trace marks and user variables are also - used. Recommanded options: + used. Recommended options: @verbatim --cfg=tracing/categorized:yes --cfg=tracing/uncategorized:yes @endverbatim - Process migration tracing. @ref examples/deprecated/msg/trace-process-migration/trace-process-migration.c \n This version is enhanced so that the process migrations can be - displayed as arrows in a Gantt-chart visualization. Recommanded + displayed as arrows in a Gantt-chart visualization. Recommended options to that extend: @verbatim -cfg=tracing:yes --cfg=tracing/msg/process:yes @endverbatim diff --git a/examples/deprecated/msg/trace-route-user-variables/trace-route-user-variables.c b/examples/deprecated/msg/trace-route-user-variables/trace-route-user-variables.c index a2f736a355..1a637cf9e7 100644 --- a/examples/deprecated/msg/trace-route-user-variables/trace-route-user-variables.c +++ b/examples/deprecated/msg/trace-route-user-variables/trace-route-user-variables.c @@ -16,7 +16,7 @@ static void create_and_execute_task (void) static int trace_fun(XBT_ATTRIB_UNUSED int argc, XBT_ATTRIB_UNUSED char* argv[]) { //Set initial values for the link user variables - //This example uses source and destination where source and destination are the name of hosts inthe platform file. + //This example uses source and destination where source and destination are the name of hosts in the platform file. //The functions will set/change the value of the variable for all links in the route between source and destination. 
//Set the Link_Capacity variable diff --git a/examples/platforms/energy_platform.xml b/examples/platforms/energy_platform.xml index 2da6ba54d0..eda19adbb6 100644 --- a/examples/platforms/energy_platform.xml +++ b/examples/platforms/energy_platform.xml @@ -3,7 +3,7 @@ - - + diff --git a/examples/platforms/wifi_decay_2STA.xml b/examples/platforms/wifi_decay_2STA.xml deleted file mode 100755 index fed5614afa..0000000000 --- a/examples/platforms/wifi_decay_2STA.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/examples/python/CMakeLists.txt b/examples/python/CMakeLists.txt index a62c35e5f8..fa32133393 100644 --- a/examples/python/CMakeLists.txt +++ b/examples/python/CMakeLists.txt @@ -1,5 +1,5 @@ foreach(example actor-create actor-daemon actor-join actor-kill actor-migrate actor-suspend actor-yield actor-lifetime - async-wait async-waitall async-waitany + comm-wait comm-waitall comm-waitany exec-async exec-basic exec-dvfs exec-remote) set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.tesh) set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.py) @@ -19,6 +19,6 @@ endforeach() set(examples_src ${examples_src} PARENT_SCOPE) set(tesh_files ${tesh_files} examples/python/actor-create/actor-create_d.xml examples/python/actor-lifetime/actor-lifetime_d.xml - examples/python/async-wait/async-wait_d.xml - examples/python/async-waitall/async-waitall_d.xml - examples/python/async-waitany/async-waitany_d.xml PARENT_SCOPE) + examples/python/comm-wait/comm-wait_d.xml + examples/python/comm-waitall/comm-waitall_d.xml + examples/python/comm-waitany/comm-waitany_d.xml PARENT_SCOPE) diff --git a/examples/python/async-wait/async-wait.py b/examples/python/comm-wait/comm-wait.py similarity index 100% rename from examples/python/async-wait/async-wait.py rename to examples/python/comm-wait/comm-wait.py diff --git a/examples/python/async-wait/async-wait.tesh b/examples/python/comm-wait/comm-wait.tesh similarity index 80% rename from examples/python/async-wait/async-wait.tesh rename to examples/python/comm-wait/comm-wait.tesh index d95c005b8c..7d7670fbbb 100644 --- a/examples/python/async-wait/async-wait.tesh +++ b/examples/python/comm-wait/comm-wait.tesh @@ -1,6 +1,6 @@ #!/usr/bin/env tesh -$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/async-wait.py ${platfdir}/small_platform_fatpipe.xml async-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/comm-wait.py ${platfdir}/small_platform_fatpipe.xml comm-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'Mailbox(receiver-0)' > [ 0.000000] (2:receiver@Ruby) Wait for my first message > [ 0.000000] (1:sender@Tremblay) Send 'Message 1' to 'Mailbox(receiver-0)' diff --git a/examples/python/async-wait/async-wait_d.xml b/examples/python/comm-wait/comm-wait_d.xml similarity index 100% rename from examples/python/async-wait/async-wait_d.xml rename to examples/python/comm-wait/comm-wait_d.xml diff --git a/examples/python/async-waitall/async-waitall.py b/examples/python/comm-waitall/comm-waitall.py similarity index 100% rename from examples/python/async-waitall/async-waitall.py rename to examples/python/comm-waitall/comm-waitall.py diff --git a/examples/python/async-waitall/async-waitall.tesh b/examples/python/comm-waitall/comm-waitall.tesh similarity index 86% rename from 
examples/python/async-waitall/async-waitall.tesh rename to examples/python/comm-waitall/comm-waitall.tesh index 5a6670471b..208f807c79 100644 --- a/examples/python/async-waitall/async-waitall.tesh +++ b/examples/python/comm-waitall/comm-waitall.tesh @@ -1,6 +1,6 @@ #!/usr/bin/env tesh -$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/async-waitall.py ${platfdir}/small_platform_fatpipe.xml async-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/comm-waitall.py ${platfdir}/small_platform_fatpipe.xml comm-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'Mailbox(receiver-0)' > [ 0.000000] (2:receiver@Ruby) Wait for my first message > [ 0.000000] (3:receiver@Perl) Wait for my first message diff --git a/examples/python/async-waitall/async-waitall_d.xml b/examples/python/comm-waitall/comm-waitall_d.xml similarity index 100% rename from examples/python/async-waitall/async-waitall_d.xml rename to examples/python/comm-waitall/comm-waitall_d.xml diff --git a/examples/python/async-waitany/async-waitany.py b/examples/python/comm-waitany/comm-waitany.py similarity index 100% rename from examples/python/async-waitany/async-waitany.py rename to examples/python/comm-waitany/comm-waitany.py diff --git a/examples/python/async-waitany/async-waitany.tesh b/examples/python/comm-waitany/comm-waitany.tesh similarity index 89% rename from examples/python/async-waitany/async-waitany.tesh rename to examples/python/comm-waitany/comm-waitany.tesh index 4a1381a547..cd6ebad5df 100644 --- a/examples/python/async-waitany/async-waitany.tesh +++ b/examples/python/comm-waitany/comm-waitany.tesh @@ -3,7 +3,7 @@ p Testing Comm.wait_any() ! output sort 19 -$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/async-waitany.py ${platfdir}/small_platform.xml async-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/comm-waitany.py ${platfdir}/small_platform.xml comm-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'Mailbox(receiver-0)' > [ 0.000000] (2:receiver@Fafard) Wait for my first message > [ 0.000000] (3:receiver@Jupiter) Wait for my first message diff --git a/examples/python/async-waitany/async-waitany_d.xml b/examples/python/comm-waitany/comm-waitany_d.xml similarity index 100% rename from examples/python/async-waitany/async-waitany_d.xml rename to examples/python/comm-waitany/comm-waitany_d.xml diff --git a/examples/s4u/CMakeLists.txt b/examples/s4u/CMakeLists.txt index fb66719917..21c1aa1c56 100644 --- a/examples/s4u/CMakeLists.txt +++ b/examples/s4u/CMakeLists.txt @@ -62,7 +62,7 @@ endif() foreach (example actor-create actor-daemon actor-exiting actor-join actor-kill actor-lifetime actor-migrate actor-suspend actor-yield actor-stacksize app-bittorrent app-chainsend app-pingpong app-token-ring - async-ready async-wait async-waitany async-waitall async-waituntil + comm-ready comm-suspend comm-wait comm-waitany comm-waitall comm-waituntil comm-dependent cloud-capping cloud-migration cloud-simple dht-chord dht-kademlia @@ -71,6 +71,7 @@ foreach (example actor-create actor-daemon actor-exiting actor-join actor-kill exec-async exec-basic exec-dvfs exec-ptask exec-remote exec-waitany exec-waitfor exec-dependent maestro-set mc-bugged1 mc-bugged1-liveness mc-bugged2 mc-electric-fence mc-failing-assert + network-wifi io-async io-file-system 
io-file-remote io-disk-raw io-dependent platform-failures platform-profile platform-properties plugin-hostload @@ -218,11 +219,12 @@ set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/actor-create/s4u-a ${CMAKE_CURRENT_SOURCE_DIR}/actor-yield/s4u-actor-yield_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/app-bittorrent/s4u-app-bittorrent_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworkers/s4u-app-masterworkers_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/s4u-async-wait_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-waitany/s4u-async-waitany_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-waitall/s4u-async-waitall_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-ready/s4u-async-ready_d.xml - ${CMAKE_CURRENT_SOURCE_DIR}/async-waituntil/s4u-async-waituntil_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-suspend/s4u-comm-suspend_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/s4u-comm-wait_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitany/s4u-comm-waitany_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitall/s4u-comm-waitall_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-ready/s4u-comm-ready_d.xml + ${CMAKE_CURRENT_SOURCE_DIR}/comm-waituntil/s4u-comm-waituntil_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u-dht-chord_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/dht-kademlia/s4u-dht-kademlia_d.xml ${CMAKE_CURRENT_SOURCE_DIR}/energy-boot/platform_boot.xml diff --git a/examples/s4u/actor-create/s4u-actor-create.cpp b/examples/s4u/actor-create/s4u-actor-create.cpp index bff3790308..ee1e6ef49a 100644 --- a/examples/s4u/actor-create/s4u-actor-create.cpp +++ b/examples/s4u/actor-create/s4u-actor-create.cpp @@ -75,7 +75,7 @@ public: msg = args[1]; mbox = args[2]; } - void operator()() /* This is the main code of the actor */ + void operator()() const /* This is the main code of the actor */ { XBT_INFO("Hello s4u, I have something to send"); simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(mbox); diff --git a/examples/s4u/actor-lifetime/s4u-actor-lifetime.cpp b/examples/s4u/actor-lifetime/s4u-actor-lifetime.cpp index 5a945a4305..5e3c614826 100644 --- a/examples/s4u/actor-lifetime/s4u-actor-lifetime.cpp +++ b/examples/s4u/actor-lifetime/s4u-actor-lifetime.cpp @@ -20,7 +20,7 @@ public: XBT_INFO("Exiting now (done sleeping or got killed)."); }); } - void operator()() + void operator()() const { XBT_INFO("Hello! I go to sleep."); simgrid::s4u::this_actor::sleep_for(10); diff --git a/examples/s4u/actor-stacksize/s4u-actor-stacksize.cpp b/examples/s4u/actor-stacksize/s4u-actor-stacksize.cpp index 3d9a77e5e5..e38cb2b172 100644 --- a/examples/s4u/actor-stacksize/s4u-actor-stacksize.cpp +++ b/examples/s4u/actor-stacksize/s4u-actor-stacksize.cpp @@ -24,12 +24,12 @@ int main(int argc, char* argv[]) // You can use set_config(string) to pass a size that will be parsed. That value will be used for any subsequent // actors - e.set_config("contexts/stack-size:16384"); + simgrid::s4u::Engine::set_config("contexts/stack-size:16384"); simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor); simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor); // You can use set_config(key, value) for the same effect. 
- e.set_config("contexts/stack-size", 32 * 1024); + simgrid::s4u::Engine::set_config("contexts/stack-size", 32 * 1024); simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor); simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor); diff --git a/examples/s4u/actor-yield/s4u-actor-yield.cpp b/examples/s4u/actor-yield/s4u-actor-yield.cpp index 6abfa67f5a..9ef0d79ec0 100644 --- a/examples/s4u/actor-yield/s4u-actor-yield.cpp +++ b/examples/s4u/actor-yield/s4u-actor-yield.cpp @@ -19,8 +19,8 @@ class yielder { long number_of_yields; public: - explicit yielder(std::vector args) { number_of_yields = std::stod(args[1]); } - void operator()() + explicit yielder(std::vector args) { number_of_yields = std::stol(args[1]); } + void operator()() const { for (int i = 0; i < number_of_yields; i++) simgrid::s4u::this_actor::yield(); diff --git a/examples/s4u/app-bittorrent/s4u-peer.cpp b/examples/s4u/app-bittorrent/s4u-peer.cpp index 994d7ac6f9..d653c2a0ee 100644 --- a/examples/s4u/app-bittorrent/s4u-peer.cpp +++ b/examples/s4u/app-bittorrent/s4u-peer.cpp @@ -150,7 +150,7 @@ void Peer::sendRequestTo(Connection* remote_peer, unsigned int piece) xbt_assert(remote_peer->hasPiece(piece)); int block_index = getFirstMissingBlockFrom(piece); if (block_index != -1) { - int block_length = std::min(BLOCKS_REQUESTED, PIECES_BLOCKS - block_index); + int block_length = static_cast(std::min(BLOCKS_REQUESTED, PIECES_BLOCKS - block_index)); XBT_DEBUG("Sending a REQUEST to %s for piece %u (%d,%d)", remote_peer->mailbox_->get_cname(), piece, block_index, block_length); remote_peer->mailbox_ @@ -159,7 +159,7 @@ void Peer::sendRequestTo(Connection* remote_peer, unsigned int piece) } } -std::string Peer::getStatus() +std::string Peer::getStatus() const { std::string res; for (unsigned i = 0; i < FILE_PIECES; i++) @@ -167,7 +167,7 @@ std::string Peer::getStatus() return res; } -bool Peer::hasFinished() +bool Peer::hasFinished() const { return bitfield_ == (1U << FILE_PIECES) - 1U; } @@ -193,7 +193,7 @@ void Peer::updatePiecesCountFromBitfield(unsigned int bitfield) pieces_count[i]++; } -unsigned int Peer::countPieces(unsigned int bitfield) +unsigned int Peer::countPieces(unsigned int bitfield) const { unsigned int count = 0U; unsigned int n = bitfield; @@ -204,7 +204,7 @@ unsigned int Peer::countPieces(unsigned int bitfield) return count; } -int Peer::nbInterestedPeers() +int Peer::nbInterestedPeers() const { int nb = 0; for (auto const& kv : connected_peers) @@ -553,7 +553,7 @@ void Peer::updateChokedPeers() do { // We choose a random peer to unchoke. 
std::unordered_map::iterator chosen_peer_it = connected_peers.begin(); - std::advance(chosen_peer_it, random.uniform_int(0, connected_peers.size() - 1)); + std::advance(chosen_peer_it, random.uniform_int(0, static_cast(connected_peers.size() - 1))); chosen_peer = &chosen_peer_it->second; if (not chosen_peer->interested || not chosen_peer->choked_upload) chosen_peer = nullptr; @@ -629,7 +629,7 @@ void Peer::updateBitfieldBlocks(int piece, int block_index, int block_length) bitfield_blocks |= (1ULL << static_cast(piece * PIECES_BLOCKS + i)); } -bool Peer::hasCompletedPiece(unsigned int piece) +bool Peer::hasCompletedPiece(unsigned int piece) const { for (unsigned int i = 0; i < PIECES_BLOCKS; i++) if (not(bitfield_blocks & 1ULL << (piece * PIECES_BLOCKS + i))) @@ -637,7 +637,7 @@ bool Peer::hasCompletedPiece(unsigned int piece) return true; } -int Peer::getFirstMissingBlockFrom(int piece) +int Peer::getFirstMissingBlockFrom(int piece) const { for (unsigned int i = 0; i < PIECES_BLOCKS; i++) if (not(bitfield_blocks & 1ULL << (piece * PIECES_BLOCKS + i))) @@ -646,7 +646,7 @@ int Peer::getFirstMissingBlockFrom(int piece) } /** Returns a piece that is partially downloaded and stored by the remote peer if any -1 otherwise. */ -int Peer::partiallyDownloadedPiece(const Connection* remote_peer) +int Peer::partiallyDownloadedPiece(const Connection* remote_peer) const { for (unsigned int i = 0; i < FILE_PIECES; i++) if (remotePeerHasMissingPiece(remote_peer, i) && isNotDownloadingPiece(i) && getFirstMissingBlockFrom(i) > 0) diff --git a/examples/s4u/app-bittorrent/s4u-peer.hpp b/examples/s4u/app-bittorrent/s4u-peer.hpp index 0175e26fb7..4a3c84cf7e 100644 --- a/examples/s4u/app-bittorrent/s4u-peer.hpp +++ b/examples/s4u/app-bittorrent/s4u-peer.hpp @@ -52,9 +52,9 @@ public: Peer& operator=(const Peer&) = delete; void operator()(); - std::string getStatus(); - bool hasFinished(); - int nbInterestedPeers(); + std::string getStatus() const; + bool hasFinished() const; + int nbInterestedPeers() const; bool isInterestedBy(const Connection* remote_peer) const; bool isInterestedByFree(const Connection* remote_peer) const; void updateActivePeersSet(Connection* remote_peer); @@ -62,19 +62,19 @@ public: void updateChokedPeers(); bool hasNotPiece(unsigned int piece) const { return not(bitfield_ & 1U << piece); } - bool remotePeerHasMissingPiece(const Connection* remote_peer, unsigned int piece) + bool remotePeerHasMissingPiece(const Connection* remote_peer, unsigned int piece) const { return hasNotPiece(piece) && remote_peer->hasPiece(piece); } - bool hasCompletedPiece(unsigned int piece); - unsigned int countPieces(unsigned int bitfield); + bool hasCompletedPiece(unsigned int piece) const; + unsigned int countPieces(unsigned int bitfield) const; /** Check that a piece is not currently being download by the peer. 
*/ bool isNotDownloadingPiece(unsigned int piece) const { return not(current_pieces & 1U << piece); } - int partiallyDownloadedPiece(const Connection* remote_peer); + int partiallyDownloadedPiece(const Connection* remote_peer) const; void updatePiecesCountFromBitfield(unsigned int bitfield); void removeCurrentPiece(Connection* remote_peer, unsigned int current_piece); void updateBitfieldBlocks(int piece, int block_index, int block_length); - int getFirstMissingBlockFrom(int piece); + int getFirstMissingBlockFrom(int piece) const; int selectPieceToDownload(const Connection* remote_peer); void requestNewPieceTo(Connection* remote_peer); diff --git a/examples/s4u/app-bittorrent/s4u-tracker.cpp b/examples/s4u/app-bittorrent/s4u-tracker.cpp index f9f70b8014..ee4a401941 100644 --- a/examples/s4u/app-bittorrent/s4u-tracker.cpp +++ b/examples/s4u/app-bittorrent/s4u-tracker.cpp @@ -46,7 +46,7 @@ void Tracker::operator()() // Sending back peers to the requesting peer TrackerAnswer* ta = new TrackerAnswer(TRACKER_QUERY_INTERVAL); std::set::iterator next_peer; - int nb_known_peers = known_peers.size(); + int nb_known_peers = static_cast(known_peers.size()); int max_tries = std::min(MAXIMUM_PEERS, nb_known_peers); int tried = 0; while (tried < max_tries) { diff --git a/examples/s4u/app-bittorrent/s4u-tracker.hpp b/examples/s4u/app-bittorrent/s4u-tracker.hpp index 8fc72ce6dd..acdb129379 100644 --- a/examples/s4u/app-bittorrent/s4u-tracker.hpp +++ b/examples/s4u/app-bittorrent/s4u-tracker.hpp @@ -17,8 +17,8 @@ class TrackerQuery { public: explicit TrackerQuery(int peer_id, simgrid::s4u::Mailbox* return_mailbox) : peer_id(peer_id), return_mailbox(return_mailbox){}; - int getPeerId() { return peer_id; } - simgrid::s4u::Mailbox* getReturnMailbox() { return return_mailbox; } + int getPeerId() const { return peer_id; } + simgrid::s4u::Mailbox* getReturnMailbox() const { return return_mailbox; } }; class TrackerAnswer { @@ -27,7 +27,7 @@ class TrackerAnswer { public: explicit TrackerAnswer(int /*interval*/) /*: interval(interval)*/ {} void addPeer(int peer) { peers.insert(peer); } - const std::set& getPeers() { return peers; } + const std::set& getPeers() const { return peers; } }; class Tracker { diff --git a/examples/s4u/app-masterworkers/s4u-app-masterworkers-class.cpp b/examples/s4u/app-masterworkers/s4u-app-masterworkers-class.cpp index d37a880166..9c83964326 100644 --- a/examples/s4u/app-masterworkers/s4u-app-masterworkers-class.cpp +++ b/examples/s4u/app-masterworkers/s4u-app-masterworkers-class.cpp @@ -12,9 +12,9 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_app_masterworker, "Messages specific for this s4u example"); class Master { - long tasks_count = 0; - double compute_cost = 0; - double communicate_cost = 0; + long tasks_count = 0; + double compute_cost = 0; + long communicate_cost = 0; std::vector workers; public: @@ -24,7 +24,7 @@ public: tasks_count = std::stol(args[1]); compute_cost = std::stod(args[2]); - communicate_cost = std::stod(args[3]); + communicate_cost = std::stol(args[3]); for (unsigned int i = 4; i < args.size(); i++) workers.push_back(simgrid::s4u::Mailbox::by_name(args[i])); diff --git a/examples/s4u/app-masterworkers/s4u-app-masterworkers-fun.cpp b/examples/s4u/app-masterworkers/s4u-app-masterworkers-fun.cpp index 08b60c2fcb..a348d98c74 100644 --- a/examples/s4u/app-masterworkers/s4u-app-masterworkers-fun.cpp +++ b/examples/s4u/app-masterworkers/s4u-app-masterworkers-fun.cpp @@ -16,9 +16,9 @@ static void master(std::vector args) { xbt_assert(args.size() > 4, "The master function expects 
at least 3 arguments"); - long tasks_count = std::stol(args[1]); - double compute_cost = std::stod(args[2]); - double communication_cost = std::stod(args[3]); + long tasks_count = std::stol(args[1]); + double compute_cost = std::stod(args[2]); + long communication_cost = std::stol(args[3]); std::vector workers; for (unsigned int i = 4; i < args.size(); i++) workers.push_back(simgrid::s4u::Mailbox::by_name(args[i])); diff --git a/examples/s4u/comm-dependent/s4u-comm-dependent.tesh b/examples/s4u/comm-dependent/s4u-comm-dependent.tesh index c5b4225ae6..94a4001c21 100644 --- a/examples/s4u/comm-dependent/s4u-comm-dependent.tesh +++ b/examples/s4u/comm-dependent/s4u-comm-dependent.tesh @@ -2,7 +2,7 @@ p Testing with default compound -$ ${bindir:=.}/s4u-comm-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:debug "--log=root.fmt:[%6.2r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-comm-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:verbose "--log=root.fmt:[%6.2r]%e(%i:%P@%h)%e%m%n" > [ 2.00] (1:sender@Tremblay) Remove a dependency from 'exec on sender' on 'comm to receiver' > [ 2.00] (1:sender@Tremblay) All dependencies are solved, let's start 'comm to receiver' > [ 3.07] (2:receiver@Jupiter) Remove a dependency from 'comm from sender' on 'exec on receiver' diff --git a/examples/s4u/async-ready/s4u-async-ready.cpp b/examples/s4u/comm-ready/s4u-comm-ready.cpp similarity index 73% rename from examples/s4u/async-ready/s4u-async-ready.cpp rename to examples/s4u/comm-ready/s4u-comm-ready.cpp index 68f1242148..85867a63f8 100644 --- a/examples/s4u/async-ready/s4u-async-ready.cpp +++ b/examples/s4u/comm-ready/s4u-comm-ready.cpp @@ -30,10 +30,11 @@ static void peer(int argc, char** argv) xbt_assert(argc == 5, "Expecting 4 parameters from the XML deployment file but got %d", argc); int my_id = std::stoi(argv[1]); /* - my id */ long messages_count = std::stol(argv[2]); /* - number of message */ - double msg_size = std::stol(argv[3]); /* - message size in bytes */ - long peers_count = std::stod(argv[4]); /* - number of peers */ + long msg_size = std::stol(argv[3]); /* - message size in bytes */ + long peers_count = std::stol(argv[4]); /* - number of peers */ - /* Set myself as the persistent receiver of my mailbox so that messages start flowing to me as soon as they are put into it */ + /* Set myself as the persistent receiver of my mailbox so that messages start flowing to me as soon as they are put + * into it */ simgrid::s4u::Mailbox* my_mbox = simgrid::s4u::Mailbox::by_name(std::string("peer-") + std::to_string(my_id)); my_mbox->set_receiver(simgrid::s4u::Actor::self()); @@ -43,10 +44,11 @@ static void peer(int argc, char** argv) for (int i = 0; i < messages_count; i++) { for (int peer_id = 0; peer_id < peers_count; peer_id++) { if (peer_id != my_id) { - std::string mboxName = std::string("peer-") + std::to_string(peer_id); - simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); - std::string msgName = std::string("Message ") + std::to_string(i) + std::string(" from peer ") + std::to_string(my_id); - std::string* payload = new std::string(msgName); // copy the data we send: + std::string mboxName = std::string("peer-") + std::to_string(peer_id); + simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); + std::string msgName = + std::string("Message ") + std::to_string(i) + std::string(" from peer ") + std::to_string(my_id); + std::string* payload = new std::string(msgName); // copy the data we send: // 'msgName' is not a stable storage location 
XBT_INFO("Send '%s' to '%s'", msgName.c_str(), mboxName.c_str()); /* Create a communication representing the ongoing communication */ @@ -58,23 +60,27 @@ static void peer(int argc, char** argv) /* Start sending messages to let peers know that they should stop */ for (int peer_id = 0; peer_id < peers_count; peer_id++) { if (peer_id != my_id) { - std::string mboxName = std::string("peer-") + std::to_string(peer_id); - simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); - std::string* payload = new std::string("finalize"); // Make a copy of the data we will send + std::string mboxName = std::string("peer-") + std::to_string(peer_id); + simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); + std::string* payload = new std::string("finalize"); // Make a copy of the data we will send pending_comms.push_back(mbox->put_async(payload, msg_size)); XBT_INFO("Send 'finalize' to 'peer-%d'", peer_id); } } XBT_INFO("Done dispatching all messages"); - /* Retrieve all the messages other peers have been sending to me until I receive all the corresponding "Finalize" messages */ - int pending_finalize_messages = peers_count - 1; + /* Retrieve all the messages other peers have been sending to me until I receive all the corresponding "Finalize" + * messages */ + long pending_finalize_messages = peers_count - 1; while (pending_finalize_messages > 0) { if (my_mbox->ready()) { - double start = simgrid::s4u::Engine::get_clock(); + double start = simgrid::s4u::Engine::get_clock(); const std::string* received = static_cast(my_mbox->get()); - double waiting_time = simgrid::s4u::Engine::get_clock() - start; - xbt_assert(waiting_time == 0, "Expecting the waiting time to be 0 because the communication was supposedly ready, but got %f instead", waiting_time); + double waiting_time = simgrid::s4u::Engine::get_clock() - start; + xbt_assert( + waiting_time == 0, + "Expecting the waiting time to be 0 because the communication was supposedly ready, but got %f instead", + waiting_time); XBT_INFO("I got a '%s'.", received->c_str()); if (*received == "finalize") { pending_finalize_messages--; @@ -92,8 +98,7 @@ static void peer(int argc, char** argv) XBT_INFO("Goodbye now!"); } - -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]); diff --git a/examples/s4u/async-ready/s4u-async-ready.tesh b/examples/s4u/comm-ready/s4u-comm-ready.tesh similarity index 97% rename from examples/s4u/async-ready/s4u-async-ready.tesh rename to examples/s4u/comm-ready/s4u-comm-ready.tesh index 9b6d9a8e69..96902d9b02 100644 --- a/examples/s4u/async-ready/s4u-async-ready.tesh +++ b/examples/s4u/comm-ready/s4u-comm-ready.tesh @@ -2,7 +2,7 @@ p Test1 Peer sending and receiving -$ ${bindir:=.}/s4u-async-ready ${platfdir}/small_platform_fatpipe.xml s4u-async-ready_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-comm-ready ${platfdir}/small_platform_fatpipe.xml s4u-comm-ready_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:peer@Tremblay) Send 'Message 0 from peer 0' to 'peer-1' > [ 0.000000] (2:peer@Ruby) Send 'Message 0 from peer 1' to 'peer-0' > [ 0.000000] (1:peer@Tremblay) Send 'Message 0 from peer 0' to 'peer-2' diff --git a/examples/s4u/async-ready/s4u-async-ready_d.xml b/examples/s4u/comm-ready/s4u-comm-ready_d.xml similarity index 100% rename from examples/s4u/async-ready/s4u-async-ready_d.xml rename to examples/s4u/comm-ready/s4u-comm-ready_d.xml diff --git 
a/examples/s4u/comm-suspend/s4u-comm-suspend.cpp b/examples/s4u/comm-suspend/s4u-comm-suspend.cpp new file mode 100644 index 0000000000..144a0293aa --- /dev/null +++ b/examples/s4u/comm-suspend/s4u-comm-suspend.cpp @@ -0,0 +1,71 @@ +/* Copyright (c) 2010-2020. The SimGrid Team. All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +/* This example shows how to suspend and resume an asynchronous communication. */ + +#include "simgrid/s4u.hpp" +#include +#include +#include + +XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_wait, "Messages specific for this s4u example"); + +static void sender(int argc, char**) +{ + xbt_assert(argc == 1, "Expecting no parameter from the XML deployment file but got %d", argc - 1); + + simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name("receiver"); + + // Copy the data we send: the 'msg_content' variable is not a stable storage location. + // It will be destroyed when this actor leaves the loop, ie before the receiver gets the data + std::string* payload = new std::string("Sent message"); + + /* Create a communication representing the ongoing communication and then */ + simgrid::s4u::CommPtr comm = mbox->put_init(payload, 13194230); + XBT_INFO("Suspend the communication before it starts (remaining: %.0f bytes) and wait a second.", + comm->get_remaining()); + simgrid::s4u::this_actor::sleep_for(1); + XBT_INFO("Now, start the communication (remaining: %.0f bytes) and wait another second.", comm->get_remaining()); + comm->start(); + simgrid::s4u::this_actor::sleep_for(1); + + XBT_INFO("There is still %.0f bytes to transfer in this communication. Suspend it for one second.", + comm->get_remaining()); + comm->suspend(); + XBT_INFO("Now there is %.0f bytes to transfer. Resume it and wait for its completion.", comm->get_remaining()); + comm->resume(); + comm->wait(); + XBT_INFO("There is %f bytes to transfer after the communication completion.", comm->get_remaining()); + XBT_INFO("Suspending a completed activity is a no-op."); + comm->suspend(); +} + +static void receiver(int, char**) +{ + simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name("receiver"); + + XBT_INFO("Wait for the message."); + void* payload = mbox->get(); + + const std::string* received = static_cast(payload); + XBT_INFO("I got '%s'.", received->c_str()); + + delete received; +} + +int main(int argc, char* argv[]) +{ + xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]); + + simgrid::s4u::Engine e(&argc, argv); + e.register_function("sender", &sender); + e.register_function("receiver", &receiver); + + e.load_platform(argv[1]); + e.load_deployment(argv[2]); + e.run(); + + return 0; +} diff --git a/examples/s4u/comm-suspend/s4u-comm-suspend.tesh b/examples/s4u/comm-suspend/s4u-comm-suspend.tesh new file mode 100644 index 0000000000..8d05012e06 --- /dev/null +++ b/examples/s4u/comm-suspend/s4u-comm-suspend.tesh @@ -0,0 +1,11 @@ +#!/usr/bin/env tesh + +$ ${bindir:=.}/s4u-comm-suspend ${platfdir}/small_platform.xml s4u-comm-suspend_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +> [ 0.000000] (1:sender@Tremblay) Suspend the communication before it starts (remaining: 13194230 bytes) and wait a second. +> [ 0.000000] (2:receiver@Jupiter) Wait for the message. +> [ 1.000000] (1:sender@Tremblay) Now, start the communication (remaining: 13194230 bytes) and wait another second. 
+> [ 2.000000] (1:sender@Tremblay) There is still 6660438 bytes to transfer in this communication. Suspend it for one second. +> [ 2.000000] (1:sender@Tremblay) Now there is 6660438 bytes to transfer. Resume it and wait for its completion. +> [ 3.000000] (2:receiver@Jupiter) I got 'Sent message'. +> [ 3.000000] (1:sender@Tremblay) There is 0.000000 bytes to transfer after the communication completion. +> [ 3.000000] (1:sender@Tremblay) Suspending a completed activity is a no-op. diff --git a/examples/s4u/comm-suspend/s4u-comm-suspend_d.xml b/examples/s4u/comm-suspend/s4u-comm-suspend_d.xml new file mode 100644 index 0000000000..3c2490089a --- /dev/null +++ b/examples/s4u/comm-suspend/s4u-comm-suspend_d.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/examples/s4u/async-wait/s4u-async-wait.cpp b/examples/s4u/comm-wait/s4u-comm-wait.cpp similarity index 95% rename from examples/s4u/async-wait/s4u-async-wait.cpp rename to examples/s4u/comm-wait/s4u-comm-wait.cpp index 37d6058293..1664f4b058 100644 --- a/examples/s4u/async-wait/s4u-async-wait.cpp +++ b/examples/s4u/comm-wait/s4u-comm-wait.cpp @@ -16,13 +16,13 @@ #include #include -XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_wait, "Messages specific for this s4u example"); +XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_wait, "Messages specific for this s4u example"); static void sender(int argc, char** argv) { xbt_assert(argc == 3, "Expecting 2 parameters from the XML deployment file but got %d", argc); long messages_count = std::stol(argv[1]); /* - number of messages */ - double msg_size = std::stod(argv[2]); /* - message size in bytes */ + long msg_size = std::stol(argv[2]); /* - message size in bytes */ double sleep_start_time = 5.0; double sleep_test_time = 0; @@ -87,7 +87,7 @@ static void receiver(int, char**) } } -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]); diff --git a/examples/s4u/async-wait/s4u-async-wait.tesh b/examples/s4u/comm-wait/s4u-comm-wait.tesh similarity index 85% rename from examples/s4u/async-wait/s4u-async-wait.tesh rename to examples/s4u/comm-wait/s4u-comm-wait.tesh index 83b5859a6d..d78bf4936c 100644 --- a/examples/s4u/async-wait/s4u-async-wait.tesh +++ b/examples/s4u/comm-wait/s4u-comm-wait.tesh @@ -2,7 +2,7 @@ p Test1 Sleep_sender > Sleep_receiver -$ ${bindir:=.}/s4u-async-wait ${platfdir}/small_platform_fatpipe.xml s4u-async-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-comm-wait ${platfdir}/small_platform_fatpipe.xml s4u-comm-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) sleep_start_time : 5.000000 , sleep_test_time : 0.000000 > [ 0.000000] (2:receiver@Ruby) sleep_start_time : 1.000000 , sleep_test_time : 0.100000 > [ 1.000000] (2:receiver@Ruby) Wait for my first message diff --git a/examples/s4u/async-wait/s4u-async-wait_d.xml b/examples/s4u/comm-wait/s4u-comm-wait_d.xml similarity index 100% rename from examples/s4u/async-wait/s4u-async-wait_d.xml rename to examples/s4u/comm-wait/s4u-comm-wait_d.xml diff --git a/examples/s4u/async-waitall/s4u-async-waitall.cpp b/examples/s4u/comm-waitall/s4u-comm-waitall.cpp similarity index 96% rename from examples/s4u/async-waitall/s4u-async-waitall.cpp rename to examples/s4u/comm-waitall/s4u-comm-waitall.cpp index 141d7498dc..34cb0f54e1 100644 --- a/examples/s4u/async-waitall/s4u-async-waitall.cpp +++ b/examples/s4u/comm-waitall/s4u-comm-waitall.cpp @@ -22,17 +22,17 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_waitall, "Messages 
specific for this s4u class Sender { long messages_count; /* - number of messages */ long receivers_count; /* - number of receivers */ - double msg_size; /* - message size in bytes */ + long msg_size; /* - message size in bytes */ public: explicit Sender(std::vector args) { xbt_assert(args.size() == 4, "Expecting 3 parameters from the XML deployment file but got %zu", args.size()); messages_count = std::stol(args[1]); - msg_size = std::stod(args[2]); + msg_size = std::stol(args[2]); receivers_count = std::stol(args[3]); } - void operator()() + void operator()() const { // sphinx-doc: init-begin (this line helps the doc to build; ignore it) /* Vector in which we store all ongoing communications */ @@ -98,7 +98,7 @@ public: } }; -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]); diff --git a/examples/s4u/async-waitall/s4u-async-waitall.tesh b/examples/s4u/comm-waitall/s4u-comm-waitall.tesh similarity index 88% rename from examples/s4u/async-waitall/s4u-async-waitall.tesh rename to examples/s4u/comm-waitall/s4u-comm-waitall.tesh index 04168e495d..5844e84ada 100644 --- a/examples/s4u/async-waitall/s4u-async-waitall.tesh +++ b/examples/s4u/comm-waitall/s4u-comm-waitall.tesh @@ -1,6 +1,6 @@ #!/usr/bin/env tesh -$ ${bindir:=.}/s4u-async-waitall ${platfdir}/small_platform_fatpipe.xml s4u-async-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-comm-waitall ${platfdir}/small_platform_fatpipe.xml s4u-comm-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0' > [ 0.000000] (2:receiver@Ruby) Wait for my first message > [ 0.000000] (3:receiver@Perl) Wait for my first message diff --git a/examples/s4u/async-waitall/s4u-async-waitall_d.xml b/examples/s4u/comm-waitall/s4u-comm-waitall_d.xml similarity index 100% rename from examples/s4u/async-waitall/s4u-async-waitall_d.xml rename to examples/s4u/comm-waitall/s4u-comm-waitall_d.xml diff --git a/examples/s4u/async-waitany/s4u-async-waitany.cpp b/examples/s4u/comm-waitany/s4u-comm-waitany.cpp similarity index 95% rename from examples/s4u/async-waitany/s4u-async-waitany.cpp rename to examples/s4u/comm-waitany/s4u-comm-waitany.cpp index ef890fe424..825de575fb 100644 --- a/examples/s4u/async-waitany/s4u-async-waitany.cpp +++ b/examples/s4u/comm-waitany/s4u-comm-waitany.cpp @@ -22,22 +22,22 @@ #include #include -XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_waitall, "Messages specific for this s4u example"); +XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_waitall, "Messages specific for this s4u example"); class Sender { long messages_count; /* - number of messages */ long receivers_count; /* - number of receivers */ - double msg_size; /* - message size in bytes */ + long msg_size; /* - message size in bytes */ public: explicit Sender(std::vector args) { xbt_assert(args.size() == 4, "Expecting 3 parameters from the XML deployment file but got %zu", args.size()); messages_count = std::stol(args[1]); - msg_size = std::stod(args[2]); + msg_size = std::stol(args[2]); receivers_count = std::stol(args[3]); } - void operator()() + void operator()() const { /* Vector in which we store all ongoing communications */ std::vector pending_comms; @@ -111,7 +111,7 @@ public: } }; -int main(int argc, char *argv[]) +int main(int argc, char* argv[]) { xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]); diff --git a/examples/s4u/async-waitany/s4u-async-waitany.tesh 
b/examples/s4u/comm-waitany/s4u-comm-waitany.tesh similarity index 91% rename from examples/s4u/async-waitany/s4u-async-waitany.tesh rename to examples/s4u/comm-waitany/s4u-comm-waitany.tesh index 5f3dee8dc2..0d56306d80 100644 --- a/examples/s4u/async-waitany/s4u-async-waitany.tesh +++ b/examples/s4u/comm-waitany/s4u-comm-waitany.tesh @@ -3,7 +3,7 @@ p Testing this_actor->wait_any() ! output sort 19 -$ ${bindir:=.}/s4u-async-waitany ${platfdir}/small_platform.xml s4u-async-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-comm-waitany ${platfdir}/small_platform.xml s4u-comm-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0' > [ 0.000000] (2:receiver@Fafard) Wait for my first message > [ 0.000000] (3:receiver@Jupiter) Wait for my first message diff --git a/examples/s4u/async-waitany/s4u-async-waitany_d.xml b/examples/s4u/comm-waitany/s4u-comm-waitany_d.xml similarity index 100% rename from examples/s4u/async-waitany/s4u-async-waitany_d.xml rename to examples/s4u/comm-waitany/s4u-comm-waitany_d.xml diff --git a/examples/s4u/async-waituntil/s4u-async-waituntil.cpp b/examples/s4u/comm-waituntil/s4u-comm-waituntil.cpp similarity index 75% rename from examples/s4u/async-waituntil/s4u-async-waituntil.cpp rename to examples/s4u/comm-waituntil/s4u-comm-waituntil.cpp index f2a7c83b9a..d887e37ecc 100644 --- a/examples/s4u/async-waituntil/s4u-async-waituntil.cpp +++ b/examples/s4u/comm-waituntil/s4u-comm-waituntil.cpp @@ -6,7 +6,7 @@ /* This example shows how to use simgrid::s4u::Activity::wait_until() and * simgrid::s4u::Activity::wait_for() on a given communication. * - * It is very similar to the async-wait example, but the sender initially + * It is very similar to the comm-wait example, but the sender initially * does some waits that are too short before doing an infinite wait. 
*/ @@ -15,23 +15,23 @@ #include #include -XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_waituntil, "Messages specific for this s4u example"); +XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_waituntil, "Messages specific for this s4u example"); static void sender(int argc, char** argv) { xbt_assert(argc == 4, "Expecting 3 parameters from the XML deployment file but got %d", argc); long messages_count = std::stol(argv[1]); /* - number of messages */ - double msg_size = std::stol(argv[2]); /* - message size in bytes */ - long receivers_count = std::stod(argv[3]); /* - number of receivers */ + long msg_size = std::stol(argv[2]); /* - message size in bytes */ + long receivers_count = std::stol(argv[3]); /* - number of receivers */ std::vector pending_comms; /* Start dispatching all messages to receivers, in a round robin fashion */ for (int i = 0; i < messages_count; i++) { - std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count); - simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); - std::string msgName = std::string("Message ") + std::to_string(i); - std::string* payload = new std::string(msgName); // copy the data we send: + std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count); + simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); + std::string msgName = std::string("Message ") + std::to_string(i); + std::string* payload = new std::string(msgName); // copy the data we send: // 'msgName' is not a stable storage location XBT_INFO("Send '%s' to '%s'", msgName.c_str(), mboxName.c_str()); @@ -43,9 +43,9 @@ static void sender(int argc, char** argv) /* Start sending messages to let the workers know that they should stop */ for (int i = 0; i < receivers_count; i++) { - std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count); - simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); - std::string* payload = new std::string("finalize"); // Make a copy of the data we will send + std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count); + simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName); + std::string* payload = new std::string("finalize"); // Make a copy of the data we will send simgrid::s4u::CommPtr comm = mbox->put_async(payload, 0); pending_comms.push_back(comm); diff --git a/examples/s4u/async-waituntil/s4u-async-waituntil.tesh b/examples/s4u/comm-waituntil/s4u-comm-waituntil.tesh similarity index 77% rename from examples/s4u/async-waituntil/s4u-async-waituntil.tesh rename to examples/s4u/comm-waituntil/s4u-comm-waituntil.tesh index c3f2bc508c..e5c43f7b44 100644 --- a/examples/s4u/async-waituntil/s4u-async-waituntil.tesh +++ b/examples/s4u/comm-waituntil/s4u-comm-waituntil.tesh @@ -2,7 +2,7 @@ p Test1 Sleep_sender > Sleep_receiver -$ ${bindir:=.}/s4u-async-waituntil ${platfdir}/small_platform_fatpipe.xml s4u-async-waituntil_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-comm-waituntil ${platfdir}/small_platform_fatpipe.xml s4u-comm-waituntil_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0' > [ 0.000000] (2:receiver@Ruby) Wait for my first message > [ 0.000000] (1:sender@Tremblay) Send 'Message 1' to 'receiver-0' diff --git a/examples/s4u/async-waituntil/s4u-async-waituntil_d.xml b/examples/s4u/comm-waituntil/s4u-comm-waituntil_d.xml similarity index 100% rename from examples/s4u/async-waituntil/s4u-async-waituntil_d.xml rename 
to examples/s4u/comm-waituntil/s4u-comm-waituntil_d.xml diff --git a/examples/s4u/dht-chord/s4u-dht-chord-node.cpp b/examples/s4u/dht-chord/s4u-dht-chord-node.cpp index fe914e77cc..fbfdec60f3 100644 --- a/examples/s4u/dht-chord/s4u-dht-chord-node.cpp +++ b/examples/s4u/dht-chord/s4u-dht-chord-node.cpp @@ -364,7 +364,7 @@ void Node::notify(int predecessor_candidate_id) } /* Notifies a remote node that its predecessor may have changed. */ -void Node::remoteNotify(int notify_id, int predecessor_candidate_id) +void Node::remoteNotify(int notify_id, int predecessor_candidate_id) const { ChordMessage* message = new ChordMessage(NOTIFY); message->request_id = predecessor_candidate_id; diff --git a/examples/s4u/dht-chord/s4u-dht-chord.cpp b/examples/s4u/dht-chord/s4u-dht-chord.cpp index c2128f12ec..98fec880e2 100644 --- a/examples/s4u/dht-chord/s4u-dht-chord.cpp +++ b/examples/s4u/dht-chord/s4u-dht-chord.cpp @@ -22,12 +22,12 @@ int main(int argc, char* argv[]) while (not strncmp(options[0], "-", 1)) { unsigned int length = strlen("-nb_bits="); if (not strncmp(options[0], "-nb_bits=", length) && strlen(options[0]) > length) { - nb_bits = xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s"); + nb_bits = static_cast(xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s")); XBT_DEBUG("Set nb_bits to %d", nb_bits); } else { length = strlen("-timeout="); if (not strncmp(options[0], "-timeout=", length) && strlen(options[0]) > length) { - timeout = xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s"); + timeout = static_cast(xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s")); XBT_DEBUG("Set timeout to %d", timeout); } else { xbt_die("Invalid chord option '%s'", options[0]); diff --git a/examples/s4u/dht-chord/s4u-dht-chord.hpp b/examples/s4u/dht-chord/s4u-dht-chord.hpp index bac34b5cac..4b9e85f8d6 100644 --- a/examples/s4u/dht-chord/s4u-dht-chord.hpp +++ b/examples/s4u/dht-chord/s4u-dht-chord.hpp @@ -81,7 +81,7 @@ public: int remoteFindSuccessor(int ask_to, int id); void notify(int predecessor_candidate_id); - void remoteNotify(int notify_id, int predecessor_candidate_id); + void remoteNotify(int notify_id, int predecessor_candidate_id) const; void stabilize(); void handleMessage(ChordMessage* message); diff --git a/examples/s4u/dht-kademlia/answer.cpp b/examples/s4u/dht-kademlia/answer.cpp index 6d5515cc76..8e3b0ab786 100644 --- a/examples/s4u/dht-kademlia/answer.cpp +++ b/examples/s4u/dht-kademlia/answer.cpp @@ -11,7 +11,7 @@ XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(kademlia_node); namespace kademlia { /** @brief Prints a answer_t, for debugging purposes */ -void Answer::print() +void Answer::print() const { XBT_INFO("Searching %08x, size %zu", destination_id_, nodes_.size()); unsigned int i = 0; diff --git a/examples/s4u/dht-kademlia/answer.hpp b/examples/s4u/dht-kademlia/answer.hpp index de5e1c9e83..2c11211797 100644 --- a/examples/s4u/dht-kademlia/answer.hpp +++ b/examples/s4u/dht-kademlia/answer.hpp @@ -23,7 +23,7 @@ public: unsigned int getDestinationId() const { return destination_id_; } size_t getSize() const { return nodes_.size(); } const std::vector>& getNodes() const { return nodes_; } - void print(); + void print() const; unsigned int merge(const Answer* a); void trim(); bool destinationFound() const; diff --git a/examples/s4u/dht-kademlia/node.cpp b/examples/s4u/dht-kademlia/node.cpp index d331cc361b..265cdc06d3 100644 --- a/examples/s4u/dht-kademlia/node.cpp +++ b/examples/s4u/dht-kademlia/node.cpp @@ -76,7 +76,7 @@ bool 
Node::join(unsigned int known_id) * @param id node we are querying * @param destination node we are trying to find. */ -void Node::sendFindNode(unsigned int id, unsigned int destination) +void Node::sendFindNode(unsigned int id, unsigned int destination) const { /* Gets the mailbox to send to */ simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(std::to_string(id)); @@ -94,7 +94,7 @@ void Node::sendFindNode(unsigned int id, unsigned int destination) * Sends to the best "KADEMLIA_ALPHA" nodes in the "node_list" array a "FIND_NODE" request, to ask them for their best * nodes */ -unsigned int Node::sendFindNodeToBest(const Answer* node_list) +unsigned int Node::sendFindNodeToBest(const Answer* node_list) const { unsigned int i = 0; unsigned int j = 0; @@ -281,7 +281,7 @@ void Node::handleFindNode(const Message* msg) msg->answer_to_->put_init(answer, 1)->detach(kademlia::destroy); } -void Node::displaySuccessRate() +void Node::displaySuccessRate() const { XBT_INFO("%u/%u FIND_NODE have succeeded", find_node_success, find_node_success + find_node_failed); } @@ -295,7 +295,7 @@ unsigned int get_id_in_prefix(unsigned int id, unsigned int prefix) if (prefix == 0) { return 0; } else { - return (1U << ((unsigned int)(prefix - 1))) ^ id; + return (1U << (prefix - 1)) ^ id; } } diff --git a/examples/s4u/dht-kademlia/node.hpp b/examples/s4u/dht-kademlia/node.hpp index ab64fa7252..f65106c77a 100644 --- a/examples/s4u/dht-kademlia/node.hpp +++ b/examples/s4u/dht-kademlia/node.hpp @@ -24,17 +24,17 @@ public: explicit Node(unsigned int node_id) : id_(node_id), table(node_id) {} Node(const Node&) = delete; Node& operator=(const Node&) = delete; - unsigned int getId() { return id_; } + unsigned int getId() const { return id_; } bool join(unsigned int known_id); - void sendFindNode(unsigned int id, unsigned int destination); - unsigned int sendFindNodeToBest(const Answer* node_list); + void sendFindNode(unsigned int id, unsigned int destination) const; + unsigned int sendFindNodeToBest(const Answer* node_list) const; void routingTableUpdate(unsigned int id); Answer* findClosest(unsigned int destination_id); bool findNode(unsigned int id_to_find, bool count_in_stats); void randomLookup(); void handleFindNode(const Message* msg); - void displaySuccessRate(); + void displaySuccessRate() const; }; } // namespace kademlia // identifier functions diff --git a/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp b/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp index c38206c99c..1912ef69ac 100644 --- a/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp +++ b/examples/s4u/dht-kademlia/s4u-dht-kademlia.cpp @@ -23,12 +23,12 @@ static void node(int argc, char* argv[]) double deadline; xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments"); /* Node initialization */ - unsigned int node_id = strtoul(argv[1], nullptr, 0); + unsigned int node_id = static_cast(strtoul(argv[1], nullptr, 0)); kademlia::Node node(node_id); if (argc == 4) { XBT_INFO("Hi, I'm going to join the network with id %u", node.getId()); - unsigned int known_id = strtoul(argv[2], NULL, 0); + unsigned int known_id = static_cast(strtoul(argv[2], NULL, 0)); join_success = node.join(known_id); deadline = std::stod(argv[3]); } else { diff --git a/examples/s4u/energy-exec-ptask/s4u-energy-exec-ptask.cpp b/examples/s4u/energy-exec-ptask/s4u-energy-exec-ptask.cpp index db14d380fc..1fe84f2919 100644 --- a/examples/s4u/energy-exec-ptask/s4u-energy-exec-ptask.cpp +++ b/examples/s4u/energy-exec-ptask/s4u-energy-exec-ptask.cpp @@ -134,7 +134,7 @@ int 
main(int argc, char* argv[]) { sg_host_energy_plugin_init(); simgrid::s4u::Engine e(&argc, argv); - e.set_config("host/model:ptask_L07"); + simgrid::s4u::Engine::set_config("host/model:ptask_L07"); xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s ../platforms/energy_platform.xml\n", argv[0], argv[0]); diff --git a/examples/s4u/energy-link/s4u-energy-link.cpp b/examples/s4u/energy-link/s4u-energy-link.cpp index cd0c5fbbc1..a44c445040 100644 --- a/examples/s4u/energy-link/s4u-energy-link.cpp +++ b/examples/s4u/energy-link/s4u-energy-link.cpp @@ -17,9 +17,9 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_app_energyconsumption, "Messages specific for t static void sender(std::vector<std::string> args) { xbt_assert(args.size() == 2, "The master function expects 2 arguments."); - int flow_amount = std::stoi(args.at(0)); - double comm_size = std::stod(args.at(1)); - XBT_INFO("Send %.0f bytes, in %d flows", comm_size, flow_amount); + int flow_amount = std::stoi(args.at(0)); + long comm_size = std::stol(args.at(1)); + XBT_INFO("Send %ld bytes, in %d flows", comm_size, flow_amount); simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(std::string("message")); @@ -28,7 +28,7 @@ static void sender(std::vector<std::string> args) if (flow_amount == 1) { /* - Send the task to the @ref worker */ - char* payload = bprintf("%f", comm_size); + char* payload = bprintf("%ld", comm_size); mailbox->put(payload, comm_size); } else { // Start all comms in parallel, and wait for all completions in one shot diff --git a/examples/s4u/engine-filtering/s4u-engine-filtering.cpp b/examples/s4u/engine-filtering/s4u-engine-filtering.cpp index 639e1edc88..d34e21f775 100644 --- a/examples/s4u/engine-filtering/s4u-engine-filtering.cpp +++ b/examples/s4u/engine-filtering/s4u-engine-filtering.cpp @@ -29,7 +29,7 @@ static bool filter_speed_more_than_50Mf(const simgrid::s4u::Host* host) */ class SingleCore { public: - bool operator()(const simgrid::s4u::Host* host) { return host->get_core_count() == 1; } + bool operator()(const simgrid::s4u::Host* host) const { return host->get_core_count() == 1; } }; /* This functor is a bit more complex, as it saves the current state when created. @@ -39,7 +39,7 @@ class FrequencyChanged { std::map host_list; public: - explicit FrequencyChanged(simgrid::s4u::Engine& e) + explicit FrequencyChanged(const simgrid::s4u::Engine& e) { std::vector<simgrid::s4u::Host*> list = e.get_all_hosts(); for (auto& host : list) { diff --git a/examples/s4u/exec-dependent/s4u-exec-dependent.tesh b/examples/s4u/exec-dependent/s4u-exec-dependent.tesh index dab01ae3df..f915f71456 100644 --- a/examples/s4u/exec-dependent/s4u-exec-dependent.tesh +++ b/examples/s4u/exec-dependent/s4u-exec-dependent.tesh @@ -1,7 +1,7 @@ #!/usr/bin/env tesh !
output sort -$ ${bindir:=.}/s4u-exec-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:debug "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-exec-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 2.000000] (1:worker@Fafard) Remove a dependency from 'parent 1' on 'child' > [ 2.000000] (1:worker@Fafard) Exec 'parent 1' is complete > [ 3.000000] (1:worker@Fafard) Remove a dependency from 'parent 2' on 'child' diff --git a/examples/s4u/exec-ptask/s4u-exec-ptask.tesh b/examples/s4u/exec-ptask/s4u-exec-ptask.tesh index fdf031d3c3..f4ee2b8376 100644 --- a/examples/s4u/exec-ptask/s4u-exec-ptask.tesh +++ b/examples/s4u/exec-ptask/s4u-exec-ptask.tesh @@ -1,6 +1,6 @@ #!/usr/bin/env tesh -$ ${bindir:=.}/s4u-exec-ptask ${platfdir}/energy_platform.xml --cfg=host/model:ptask_L07 --cfg=tracing:yes --cfg=tracing/uncategorized:yes --log=instr_resource.t:debug --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-exec-ptask ${platfdir}/energy_platform.xml --cfg=host/model:ptask_L07 --cfg=tracing:yes --cfg=tracing/uncategorized:yes --log=instr_resource.t:verbose --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'ptask_L07' > [ 0.000000] (0:maestro@) Configuration change: Set 'tracing' to 'yes' > [ 0.000000] (0:maestro@) Configuration change: Set 'tracing/uncategorized' to 'yes' diff --git a/examples/s4u/io-dependent/s4u-io-dependent.tesh b/examples/s4u/io-dependent/s4u-io-dependent.tesh index 93e86b6ac6..7a7e537d40 100644 --- a/examples/s4u/io-dependent/s4u-io-dependent.tesh +++ b/examples/s4u/io-dependent/s4u-io-dependent.tesh @@ -1,7 +1,7 @@ #!/usr/bin/env tesh ! output sort -$ ${bindir:=.}/s4u-io-dependent ${platfdir}/hosts_with_disks.xml --log=s4u_activity.t:debug "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +$ ${bindir:=.}/s4u-io-dependent ${platfdir}/hosts_with_disks.xml --log=s4u_activity.t:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" > [ 1.000000] (1:bob@bob) All dependencies are solved, let's start 'bob write' > [ 1.000000] (1:bob@bob) Remove a dependency from 'bob compute' on 'bob write' > [ 1.100000] (1:bob@bob) All dependencies are solved, let's start 'carl read' diff --git a/examples/s4u/io-file-system/s4u-io-file-system.cpp b/examples/s4u/io-file-system/s4u-io-file-system.cpp index 46f1e02851..bdc66291b2 100644 --- a/examples/s4u/io-file-system/s4u-io-file-system.cpp +++ b/examples/s4u/io-file-system/s4u-io-file-system.cpp @@ -13,7 +13,7 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "a sample log category"); class MyHost { public: - void show_info(std::vector const& disks) + void show_info(std::vector const& disks) const { XBT_INFO("Storage info on %s:", simgrid::s4u::Host::current()->get_cname()); @@ -24,7 +24,7 @@ public: } } - void operator()() + void operator()() const { std::vector const& disks = simgrid::s4u::Host::current()->get_disks(); diff --git a/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness-visited.tesh b/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness-visited.tesh index e88cc7e895..f8541d0d69 100644 --- a/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness-visited.tesh +++ b/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness-visited.tesh @@ -7,68 +7,68 @@ $ ${bindir:=.}/../../../bin/simgrid-mc ${bindir:=.}/s4u-mc-bugged1-liveness ${pl > [ 0.000000] (0:maestro@) Check the liveness property promela_bugged1_liveness > [ 0.000000] (2:client@Boivin) Ask the 
request > [ 0.000000] (3:client@Fafard) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request. > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1) > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request. > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1) > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. 
resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request. > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1) > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request. > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1) > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (3:client@Fafard) Propositions changed : r=1, cs=0 > [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1) > [ 0.000000] (2:client@Boivin) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it > [ 0.000000] (0:maestro@) Pair 58 already reached (equal to pair 46) ! > [ 0.000000] (0:maestro@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* diff --git a/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.cpp b/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.cpp index 6822f38147..175d561863 100644 --- a/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.cpp +++ b/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.cpp @@ -52,7 +52,7 @@ static void coordinator() simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name("coordinator"); - while (1) { + while (true) { m = static_cast(mbox->get()); if (m->kind == Message::Kind::REQUEST) { if (CS_used) { @@ -60,7 +60,7 @@ static void coordinator() requests.push(m->return_mailbox); } else { if (m->return_mailbox->get_name() != "1") { - XBT_INFO("CS idle. Grant immediatly"); + XBT_INFO("CS idle. 
Grant immediately"); m->return_mailbox->put(new Message(Message::Kind::GRANT, mbox), 1000); CS_used = 1; } @@ -87,11 +87,11 @@ static void coordinator() static void client(int id) { - int my_pid = simgrid::s4u::this_actor::get_pid(); + aid_t my_pid = simgrid::s4u::this_actor::get_pid(); simgrid::s4u::Mailbox* my_mailbox = simgrid::s4u::Mailbox::by_name(std::to_string(id)); - while (1) { + while (true) { XBT_INFO("Ask the request"); simgrid::s4u::Mailbox::by_name("coordinator")->put(new Message(Message::Kind::REQUEST, my_mailbox), 1000); @@ -117,7 +117,7 @@ static void client(int id) simgrid::s4u::Mailbox::by_name("coordinator")->put(new Message(Message::Kind::RELEASE, my_mailbox), 1000); - simgrid::s4u::this_actor::sleep_for(my_pid); + simgrid::s4u::this_actor::sleep_for(static_cast(my_pid)); if (id == 1) { cs = 0; diff --git a/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.tesh b/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.tesh index e11847d40e..6ac22b1aec 100644 --- a/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.tesh +++ b/examples/s4u/mc-bugged1-liveness/s4u-mc-bugged1-liveness.tesh @@ -8,11 +8,11 @@ $ ${bindir:=.}/../../../bin/simgrid-mc ${bindir:=.}/s4u-mc-bugged1-liveness ${pl > [ 0.000000] (2:client@Boivin) Ask the request > [ 0.000000] (3:client@Fafard) Ask the request > [ 0.000000] (2:client@Boivin) Propositions changed : r=1, cs=0 -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (3:client@Fafard) 2 got the answer. Sleep a bit and release it > [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle > [ 0.000000] (3:client@Fafard) Ask the request -> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly +> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately > [ 0.000000] (0:maestro@) Pair 22 already reached (equal to pair 10) ! > [ 0.000000] (0:maestro@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-* > [ 0.000000] (0:maestro@) | ACCEPTANCE CYCLE | diff --git a/examples/s4u/network-wifi/s4u-network-wifi.cpp b/examples/s4u/network-wifi/s4u-network-wifi.cpp new file mode 100644 index 0000000000..57c896a501 --- /dev/null +++ b/examples/s4u/network-wifi/s4u-network-wifi.cpp @@ -0,0 +1,49 @@ +/* Copyright (c) 2017-2020. The SimGrid Team. All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +#include "simgrid/s4u.hpp" + +/* This example demonstrates how to use wifi links in SimGrid. Most of the interesting things happen in the + * corresponding XML file. 
+ */ + +XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_network_wifi, "Messages specific for this s4u example"); + +static void sender(simgrid::s4u::Mailbox* mailbox, int data_size) +{ + XBT_INFO("Send a message to the other station."); + static char message[] = "message"; + mailbox->put(message, data_size); + XBT_INFO("Done."); +} +static void receiver(simgrid::s4u::Mailbox* mailbox) +{ + XBT_INFO("Wait for a message."); + mailbox->get(); + XBT_INFO("Done."); +} + +int main(int argc, char* argv[]) +{ + simgrid::s4u::Engine e(&argc, argv); + + xbt_assert(argc > 1, "Usage: %s platform_file\n\tExample: %s platform.xml deployment.xml\n", argv[0], argv[0]); + + e.load_platform(argv[1]); + + auto mailbox = simgrid::s4u::Mailbox::by_name("mailbox"); + auto station1 = simgrid::s4u::Host::by_name("Station 1"); + auto station2 = simgrid::s4u::Host::by_name("Station 2"); + simgrid::s4u::Actor::create("sender", station1, sender, mailbox, 1e7); + simgrid::s4u::Actor::create("receiver", station2, receiver, mailbox); + + auto ap = simgrid::s4u::Link::by_name("AP1"); + ap->set_host_wifi_rate(station1, 1); // The host "Station 1" uses the second level of bandwidths on that AP + ap->set_host_wifi_rate(station2, 0); // This is perfectly useless as level 0 is used by default + + e.run(); + + return 0; +} diff --git a/examples/s4u/network-wifi/s4u-network-wifi.tesh b/examples/s4u/network-wifi/s4u-network-wifi.tesh new file mode 100644 index 0000000000..b4e49d11f5 --- /dev/null +++ b/examples/s4u/network-wifi/s4u-network-wifi.tesh @@ -0,0 +1,7 @@ +#!/usr/bin/env tesh + +$ ${bindir:=.}/s4u-network-wifi ${platfdir}/wifi.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" +> [ 0.000000] (1:sender@Station 1) Send a message to the other station. +> [ 0.000000] (2:receiver@Station 2) Wait for a message. +> [ 3.888889] (1:sender@Station 1) Done. +> [ 3.888889] (2:receiver@Station 2) Done. 
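The wifi platform itself is not part of this diff: the example's comment points at the XML file, and the tesh above only references ${platfdir}/wifi.xml. As a rough sketch of what such a platform declares, an access point is a link whose sharing_policy is "WIFI" and whose bandwidth attribute lists one value per rate level, level 0 first, which is the index that set_host_wifi_rate() selects. The host and link names below match the code above, but the speed, bandwidth and latency values are placeholders, and the attribute layout is an assumption about the platform format; the shipped ${platfdir}/wifi.xml is the authoritative version.

<?xml version='1.0'?>
<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
<platform version="4.1">
  <zone id="world" routing="Full">
    <host id="Station 1" speed="100Mf"/>
    <host id="Station 2" speed="100Mf"/>
    <!-- Access point: one bandwidth per wifi rate level, level 0 first.
         Placeholder values; see ${platfdir}/wifi.xml for the real ones. -->
    <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps,36Mbps,24Mbps" latency="0ms"/>
    <route src="Station 1" dst="Station 2">
      <link_ctn id="AP1"/>
    </route>
  </zone>
</platform>

With a declaration of this shape, the set_host_wifi_rate(station1, 1) call in the example would make Station 1 communicate at the second bandwidth of the list, while Station 2 keeps the default level 0.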
diff --git a/examples/s4u/platform-failures/s4u-platform-failures.cpp b/examples/s4u/platform-failures/s4u-platform-failures.cpp index a8104a5deb..f55e2b0b31 100644 --- a/examples/s4u/platform-failures/s4u-platform-failures.cpp +++ b/examples/s4u/platform-failures/s4u-platform-failures.cpp @@ -29,7 +29,7 @@ static void master(int argc, char* argv[]) simgrid::s4u::Mailbox* mailbox; long number_of_tasks = xbt_str_parse_int(argv[1], "Invalid amount of tasks: %s"); double comp_size = xbt_str_parse_double(argv[2], "Invalid computational size: %s"); - double comm_size = xbt_str_parse_double(argv[3], "Invalid communication size: %s"); + long comm_size = xbt_str_parse_int(argv[3], "Invalid communication size: %s"); long workers_count = xbt_str_parse_int(argv[4], "Invalid amount of workers: %s"); XBT_INFO("Got %ld workers and %ld tasks to process", workers_count, number_of_tasks); @@ -74,7 +74,7 @@ static void worker(int argc, char* argv[]) xbt_assert(argc == 2, "Expecting one parameter"); long id = xbt_str_parse_int(argv[1], "Invalid argument %s"); simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(std::string("worker-") + std::to_string(id)); - while (1) { + while (true) { try { XBT_INFO("Waiting a message on %s", mailbox->get_cname()); const double* payload = static_cast(mailbox->get()); diff --git a/examples/s4u/replay-comm/s4u-replay-comm.cpp b/examples/s4u/replay-comm/s4u-replay-comm.cpp index 5f3071c803..d22c13c6d7 100644 --- a/examples/s4u/replay-comm/s4u-replay-comm.cpp +++ b/examples/s4u/replay-comm/s4u-replay-comm.cpp @@ -7,6 +7,7 @@ #include "xbt/replay.hpp" #include "xbt/str.h" #include +#include #include XBT_LOG_NEW_DEFAULT_CATEGORY(replay_comm, "Messages specific for this msg example"); @@ -35,7 +36,7 @@ public: simgrid::xbt::replay_runner(actor_name, trace_filename); } - void operator()() + void operator()() const { // Nothing to do here } @@ -52,11 +53,11 @@ public: static void send(simgrid::xbt::ReplayAction& action) { - double size = std::stod(action[3]); - std::string* payload = new std::string(action[3]); - double clock = simgrid::s4u::Engine::get_clock(); + uint64_t size = static_cast(std::stod(action[3])); + std::string* payload = new std::string(action[3]); + double clock = simgrid::s4u::Engine::get_clock(); simgrid::s4u::Mailbox* to = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_name() + "_" + action[2]); - ACT_DEBUG("Entering Send: %s (size: %g) -- Actor %s on mailbox %s", NAME.c_str(), size, + ACT_DEBUG("Entering Send: %s (size: %" PRIu64 ") -- Actor %s on mailbox %s", NAME.c_str(), size, simgrid::s4u::this_actor::get_cname(), to->get_cname()); to->put(payload, size); delete payload; diff --git a/examples/s4u/replay-io/s4u-replay-io.cpp b/examples/s4u/replay-io/s4u-replay-io.cpp index f5205e47d3..509995ccc5 100644 --- a/examples/s4u/replay-io/s4u-replay-io.cpp +++ b/examples/s4u/replay-io/s4u-replay-io.cpp @@ -44,7 +44,7 @@ public: simgrid::xbt::replay_runner(actor_name, nullptr); } - void operator()() + void operator()() const { // Nothing to do here } diff --git a/examples/smpi/NAS/dt.c b/examples/smpi/NAS/dt.c index c98a0d34a9..5a363cdcc4 100644 --- a/examples/smpi/NAS/dt.c +++ b/examples/smpi/NAS/dt.c @@ -673,7 +673,7 @@ int main(int argc,char **argv ){ if(timer_on != 0 && dg->numNodes+1>timers_tot){ timer_on=0; if(my_rank==0) - fprintf(stderr,"Not enough timers. Node timeing is off. \n"); + fprintf(stderr,"Not enough timers. Node timing is off. 
\n"); } if(dg->numNodes && dg->numNodes>comm_size){ if(my_rank==0){ diff --git a/examples/smpi/NAS/is.c b/examples/smpi/NAS/is.c index b68196515a..cccdbf044a 100644 --- a/examples/smpi/NAS/is.c +++ b/examples/smpi/NAS/is.c @@ -578,7 +578,7 @@ int main( int argc, char **argv ) 1220703125.00 ), /* Random number gen mult */ 1220703125.00 ); /* Random number gen mult */ -/* Do one interation for free (i.e., untimed) to guarantee initialization of +/* Do one iteration for free (i.e., untimed) to guarantee initialization of all data and code pages and respective tables */ rank(gd, 1 ); diff --git a/examples/smpi/gemm/gemm.tesh b/examples/smpi/gemm/gemm.tesh index 173f8bcd60..397860242a 100644 --- a/examples/smpi/gemm/gemm.tesh +++ b/examples/smpi/gemm/gemm.tesh @@ -4,13 +4,13 @@ p Test instrumentation of SMPI -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1 -np 8 ${bindir:=.}/smpi_gemm 1000 native +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1f -np 8 ${bindir:=.}/smpi_gemm 1000 native > You requested to use 8 ranks, but there is only 5 processes in your hostfile... > Matrix Size : 1000x1000 > Native mode > Performance= 227.29 GFlop/s, Time= 8.799 sec, Size= 2000000000 Ops -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1 -np 8 ${bindir:=.}/smpi_gemm 1000 sampling +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1f -np 8 ${bindir:=.}/smpi_gemm 1000 sampling > You requested to use 8 ranks, but there is only 5 processes in your hostfile... > Matrix Size : 1000x1000 > Sampling mode diff --git a/examples/smpi/mc/bugged1_liveness.c b/examples/smpi/mc/bugged1_liveness.c index 4c232a33b5..6d5d0aa51e 100644 --- a/examples/smpi/mc/bugged1_liveness.c +++ b/examples/smpi/mc/bugged1_liveness.c @@ -64,7 +64,7 @@ int main(int argc, char **argv){ xbt_dynar_push(requests, &recv_buff); }else{ if(recv_buff != size - 1){ - printf("CS idle. Grant immediatly.\n"); + printf("CS idle. Grant immediately.\n"); MPI_Send(&rank, 1, MPI_INT, recv_buff, GRANT_TAG, MPI_COMM_WORLD); CS_used = 1; } diff --git a/examples/smpi/mc/mutual_exclusion.c b/examples/smpi/mc/mutual_exclusion.c index 5481c91bee..e929a8d47c 100644 --- a/examples/smpi/mc/mutual_exclusion.c +++ b/examples/smpi/mc/mutual_exclusion.c @@ -43,7 +43,7 @@ int main(int argc, char **argv){ printf("CS already used. Queue the request.\n"); xbt_dynar_push(requests, &recv_buff); }else{ - printf("CS idle. Grant immediatly.\n"); + printf("CS idle. Grant immediately.\n"); MPI_Send(&rank, 1, MPI_INT, recv_buff, GRANT_TAG, MPI_COMM_WORLD); CS_used = 1; } diff --git a/examples/smpi/mc/non_deterministic.tesh b/examples/smpi/mc/non_deterministic.tesh index b702255a26..904088ba1d 100644 --- a/examples/smpi/mc/non_deterministic.tesh +++ b/examples/smpi/mc/non_deterministic.tesh @@ -1,7 +1,7 @@ #!/usr/bin/env tesh ! 
timeout 60 -$ ../../../smpi_script/bin/smpirun -wrapper ${bindir:=.}/../../../bin/simgrid-mc -hostfile ${srcdir:=.}/hostfile_non_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --log=xbt_cfg.thresh:warning --cfg=model-check/communications-determinism:1 --cfg=smpi/send-is-detached-thresh:0 --cfg=smpi/host-speed:1e9 ./smpi_non_deterministic +$ ../../../smpi_script/bin/smpirun -wrapper ${bindir:=.}/../../../bin/simgrid-mc -hostfile ${srcdir:=.}/hostfile_non_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --log=xbt_cfg.thresh:warning --cfg=model-check/communications-determinism:1 --cfg=smpi/send-is-detached-thresh:0 --cfg=smpi/host-speed:1Gf ./smpi_non_deterministic > [0.000000] [mc_global/INFO] Check communication determinism > [0.000000] [mc_comm_determinism/INFO] The communications pattern of the process 1 is different! (Different communication : 1) > [0.000000] [mc_comm_determinism/INFO] **************************************************** diff --git a/examples/smpi/mc/only_send_deterministic.tesh b/examples/smpi/mc/only_send_deterministic.tesh index 22ff676dde..43a7018471 100644 --- a/examples/smpi/mc/only_send_deterministic.tesh +++ b/examples/smpi/mc/only_send_deterministic.tesh @@ -1,7 +1,7 @@ #!/usr/bin/env tesh ! timeout 60 -$ ../../../smpi_script/bin/smpirun -wrapper "${bindir:=.}/../../../bin/simgrid-mc" --log=xbt_cfg.thresh:warning -hostfile ${srcdir:=.}/hostfile_only_send_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --cfg=model-check/communications-determinism:1 --cfg=smpi/buffering:zero --cfg=smpi/host-speed:1e9 ./smpi_only_send_deterministic +$ ../../../smpi_script/bin/smpirun -wrapper "${bindir:=.}/../../../bin/simgrid-mc" --log=xbt_cfg.thresh:warning -hostfile ${srcdir:=.}/hostfile_only_send_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --cfg=model-check/communications-determinism:1 --cfg=smpi/buffering:zero --cfg=smpi/host-speed:1Gf ./smpi_only_send_deterministic > [0.000000] [mc_comm_determinism/INFO] Check communication determinism > [0.000000] [mc_comm_determinism/INFO] ******************************************************* > [0.000000] [mc_comm_determinism/INFO] **** Only-send-deterministic communication pattern **** diff --git a/examples/smpi/replay/replay.cpp b/examples/smpi/replay/replay.cpp index 50b515e31f..9cb0c63fe0 100644 --- a/examples/smpi/replay/replay.cpp +++ b/examples/smpi/replay/replay.cpp @@ -29,7 +29,8 @@ static void overriding_send(simgrid::xbt::ReplayAction& args) int main(int argc, char* argv[]) { const char* instance_id = simgrid::s4u::Actor::self()->get_property("instance_id"); - const int rank = xbt_str_parse_int(simgrid::s4u::Actor::self()->get_property("rank"), "Cannot parse rank"); + const int rank = + static_cast(xbt_str_parse_int(simgrid::s4u::Actor::self()->get_property("rank"), "Cannot parse rank")); const char* trace_filename = argv[1]; double start_delay_flops = 0; diff --git a/examples/smpi/replay_multiple/README b/examples/smpi/replay_multiple/README index f56e3d75a7..5d4b8fd1fd 100644 --- a/examples/smpi/replay_multiple/README +++ b/examples/smpi/replay_multiple/README @@ -12,7 +12,7 @@ example : 2 smpi_replay.txt 32 125000000000 -This launchs an instance "2" that will replay file smpi_replay.txt with 32 processes, and each one of these processes +This launches an instance "2" that will replay file smpi_replay.txt with 32 processes, and each one of these processes will sleep for 125000000000 flops before the run. 
In order to be replayed, a deployment file must be generatedfrom this description file, and from the intended platform diff --git a/examples/smpi/replay_multiple/generate_multiple_deployment.sh b/examples/smpi/replay_multiple/generate_multiple_deployment.sh index 5de54b4a13..aab51a4e9f 100755 --- a/examples/smpi/replay_multiple/generate_multiple_deployment.sh +++ b/examples/smpi/replay_multiple/generate_multiple_deployment.sh @@ -80,47 +80,47 @@ HOSTFILETMP=0 if [ -z "${HOSTFILE}" ] ; then HOSTFILETMP=1 HOSTFILE="$(mktemp tmphostXXXXXX)" - perl -ne 'print "$1\n" if /.*.*/' ${PLATFORM} > ${HOSTFILE} + perl -ne 'print "$1\n" if /.*.*/' "${PLATFORM}" > "${HOSTFILE}" fi UNROLLEDHOSTFILETMP=0 #parse if our lines are terminated by :num_process -multiple_processes=$(grep -c ":" $HOSTFILE) +multiple_processes=$(grep -c ":" "$HOSTFILE") if [ "${multiple_processes}" -gt 0 ] ; then UNROLLEDHOSTFILETMP=1 UNROLLEDHOSTFILE="$(mktemp tmphostXXXXXX)" - perl -ne ' do{ for ( 1 .. $2 ) { print "$1\n" } } if /(.*?):(\d+).*/' ${HOSTFILE} > ${UNROLLEDHOSTFILE} + perl -ne ' do{ for ( 1 .. $2 ) { print "$1\n" } } if /(.*?):(\d+).*/' "${HOSTFILE}" > "${UNROLLEDHOSTFILE}" if [ ${HOSTFILETMP} = 1 ] ; then - rm ${HOSTFILE} + rm "${HOSTFILE}" HOSTFILETMP=0 fi HOSTFILE=$UNROLLEDHOSTFILE fi # Don't use wc -l to compute it to avoid issues with trailing \n at EOF -hostfile_procs=$(grep -c "[a-zA-Z0-9]" $HOSTFILE) -if [ ${hostfile_procs} = 0 ] ; then - echo "[$(basename $0)] ** error: the hostfile '${HOSTFILE}' is empty. Aborting." >&2 +hostfile_procs=$(grep -c "[a-zA-Z0-9]" "$HOSTFILE") +if [ "${hostfile_procs}" = 0 ] ; then + echo "[$(basename "$0")] ** error: the hostfile '${HOSTFILE}' is empty. Aborting." >&2 exit 1 fi ##-------------------------------- DEFAULT APPLICATION -------------------------------------- -APPLICATIONTMP=$(echo ${PROC_ARGS}|cut -d' ' -f2 -s) +APPLICATIONTMP=$(echo "${PROC_ARGS}"|cut -d' ' -f2 -s) -cat > ${APPLICATIONTMP} < "${APPLICATIONTMP}" < APPLICATIONHEAD ##---- cache hostnames of hostfile--------------- -if [ -n "${HOSTFILE}" ] && [ -f ${HOSTFILE} ]; then - hostnames=$(tr '\n\r' ' ' < ${HOSTFILE}) - NUMHOSTS=$(wc -l < ${HOSTFILE}) +if [ -n "${HOSTFILE}" ] && [ -f "${HOSTFILE}" ]; then + hostnames=$(tr '\n\r' ' ' < "${HOSTFILE}") + NUMHOSTS=$(wc -l < "${HOSTFILE}") fi -DESCRIPTIONFILE=$(echo $PROC_ARGS|cut -d' ' -f1) +DESCRIPTIONFILE=$(echo "$PROC_ARGS"|cut -d' ' -f1) if [ -n "${DESCRIPTIONFILE}" ] && [ -f "${DESCRIPTIONFILE}" ]; then IFS_OLD=$IFS @@ -168,45 +168,46 @@ if [ -n "${DESCRIPTIONFILE}" ] && [ -f "${DESCRIPTIONFILE}" ]; then if [ -n "${HOSTFILE}" ]; then j=$(( NUMPROCS % NUMHOSTS + 1)) fi - hostname=$(echo $hostnames|cut -d' ' -f$j) + hostname=$(echo "$hostnames"|cut -d' ' -f$j) if [ -z "${hostname}" ]; then host="host"$($j) else host="${hostname}" fi - - echo " " >> ${APPLICATIONTMP} - echo " " >> ${APPLICATIONTMP} - echo " " >> ${APPLICATIONTMP} - echo " " >> ${APPLICATIONTMP} + { + echo " " + echo " " + echo " " + echo " " - echo " " >> ${APPLICATIONTMP} - echo " " >> ${APPLICATIONTMP} + echo " " + echo " " + } >> "${APPLICATIONTMP}" NUMPROCS=$(( NUMPROCS + 1)) done # return IFS back to newline for "for" loop IFS_OLD=$IFS IFS=$(printf '\n_'); IFS=${IFS%_} # protect trailing \n - done < ${DESCRIPTIONFILE} + done < "${DESCRIPTIONFILE}" # return delimiter to previous value IFS=$IFS_OLD IFS_OLD= else - printf "File not found: %s\n", ${DESCRIPTIONFILE} >&2 + printf "File not found: %s\n", "${DESCRIPTIONFILE}" >&2 exit 1 fi -cat >> ${APPLICATIONTMP} <> "${APPLICATIONTMP}" < 
APPLICATIONFOOT ##-------------------------------- end DEFAULT APPLICATION -------------------------------------- if [ ${HOSTFILETMP} = 1 ] ; then - rm ${HOSTFILE} + rm "${HOSTFILE}" fi if [ ${UNROLLEDHOSTFILETMP} = 1 ] ; then - rm ${UNROLLEDHOSTFILE} + rm "${UNROLLEDHOSTFILE}" fi exit 0 diff --git a/examples/smpi/replay_multiple/replay_multiple.c b/examples/smpi/replay_multiple/replay_multiple.c index 7ad646ea34..847873b8ff 100644 --- a/examples/smpi/replay_multiple/replay_multiple.c +++ b/examples/smpi/replay_multiple/replay_multiple.c @@ -16,7 +16,7 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example") static void smpi_replay(int argc, char* argv[]) { const char* instance_id = argv[1]; - int rank = xbt_str_parse_int(argv[2], "Cannot parse rank '%s'"); + int rank = (int)xbt_str_parse_int(argv[2], "Cannot parse rank '%s'"); const char* trace_filename = argv[3]; double start_delay_flops = 0; @@ -52,7 +52,7 @@ int main(int argc, char *argv[]){ const char** line_char= xbt_dynar_to_array(elems); instance_id = line_char[0]; - int instance_size = xbt_str_parse_int(line_char[2], "Invalid size: %s"); + int instance_size = (int)xbt_str_parse_int(line_char[2], "Invalid size: %s"); XBT_INFO("Initializing instance %s of size %d", instance_id, instance_size); SMPI_app_instance_register(instance_id, smpi_replay,instance_size); diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll1.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll1.tesh index 4137fe645d..9452b3e3ab 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll1.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll1.tesh @@ -6,8 +6,8 @@ $ mkfile ${srcdir:=.}/workload_coll1 ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='alone', file='coll.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone') > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone') @@ -22,8 +22,8 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir p Workload with one coll job (with noise) ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='alone', file='coll.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone') > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll2_st_sr_noise.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll2_st_sr_noise.tesh index b648673f76..81c99ffa0f 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll2_st_sr_noise.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_coll2_st_sr_noise.tesh @@ -7,8 +7,8 @@ $ mkfile ${srcdir:=.}/workload_coll2_same_time_and_resources ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_coll2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_coll2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='coll.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='coll.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty1.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty1.tesh index 452cacd3cf..deb4038700 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty1.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty1.tesh @@ -6,8 +6,8 @@ $ mkfile ${srcdir:=.}/workload_empty1 ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='alone', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone') > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone') @@ -23,8 +23,8 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir: p Workload with one empty job (with noise) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='alone', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone') > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty2.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty2.tesh index eb0d0a8e14..171cbee54f 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty2.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_empty2.tesh @@ -7,8 +7,8 @@ $ mkfile ${srcdir:=.}/workload_empty2 ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='2,3' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -35,8 +35,8 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir: p Workload with two empty jobs (not at the same time, not on the same resources, with noise) ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='2,3' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -71,8 +71,8 @@ $ mkfile ${srcdir:=.}/workload_empty2_same_resources ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -99,8 +99,8 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir: p Workload with two empty jobs (not at the same time, but on the same resources, with noise) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -135,8 +135,8 @@ $ mkfile ${srcdir:=.}/workload_empty2_same_time ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='2,3' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -162,8 +162,8 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir: p Workload with two empty jobs (at the same time but not on the same resources, with noise) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='2,3' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -198,8 +198,8 @@ $ mkfile ${srcdir:=.}/workload_empty2_same_time_and_resources ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') @@ -226,8 +226,8 @@ p Workload with two empty jobs (at the same time and on the same resources, with ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed1.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed1.tesh index ff4169334a..b27648cd9c 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed1.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed1.tesh @@ -6,8 +6,8 @@ $ mkfile ${srcdir:=.}/workload_mixed1 ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='alone', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone') > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone') @@ -22,8 +22,8 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir p Workload with one mixed job (with noise) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='alone', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone') > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2.tesh index 3ad735de8f..5c3b546290 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2.tesh @@ -6,9 +6,9 @@ $ mkfile ${srcdir:=.}/workload_mixed2 ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0') -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='2,3' > [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0') @@ -32,9 +32,9 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir: p Workload with two mixed jobs (not at the same time, not on the same resources, with noise) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0') -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='2,3' > [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0') @@ -66,9 +66,9 @@ $ mkfile ${srcdir:=.}/workload_mixed2_same_resources ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0') -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='0,1' > [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0') @@ -92,9 +92,9 @@ $ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir: p Workload with two mixed jobs (not at the same time, but on the same resources, with noise) ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 > [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0') -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='0,1' > [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st.tesh index 196afde075..379b6eaf2c 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st.tesh @@ -1,8 +1,8 @@ p Workload with two mixed jobs (at the same time but not on the same resources) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='2,3' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_noise.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_noise.tesh index e0f89cc5ff..9b32acdbd0 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_noise.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_noise.tesh @@ -1,8 +1,8 @@ p Workload with two mixed jobs (at the same time but not on the same resources) ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='2,3' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr.tesh index 2f242fc1ef..9efc0099d4 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr.tesh @@ -1,8 +1,8 @@ p Workload with two mixed jobs (at the same time and on the same resources) ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr_noise.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr_noise.tesh index 1f3c5b5c25..4d9b15c778 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr_noise.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_mixed2_st_sr_noise.tesh @@ -1,8 +1,8 @@ p Workload with two mixed jobs (at the same time and on the same resources) ! timeout 120 ! 
output sort 25 -$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='0,1' > [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0') diff --git a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_nojob.tesh b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_nojob.tesh index 255620844f..62827fd4ec 100644 --- a/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_nojob.tesh +++ b/examples/smpi/replay_multiple_manual_deploy/replay_multiple_manual_nojob.tesh @@ -5,14 +5,14 @@ $ mkfile ${srcdir:=.}/workload_nojob ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 0.000000] (maestro@) Simulation finished! Final time: 0 ! timeout 120 ! output sort 25 -$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 -> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100' +$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13 +> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f' > [ 7.000000] (maestro@) Simulation finished! 
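Note on the tesh updates around this point: they all apply the same change, giving the bare value in --cfg=smpi/host-speed:100 (or :1) an explicit unit suffix, 100f / 1f, where 'f' denotes flops per second, and updating the expected log line to echo the suffixed value. Below is a minimal, hypothetical S4U main() showing the programmatic equivalent; it assumes a SimGrid build with SMPI enabled (so that the smpi/host-speed option is registered) and uses Engine::set_config, the setter that this same patch turns into a static method further down. This is an illustrative sketch, not part of the patch.

#include <simgrid/s4u.hpp>

int main(int argc, char* argv[])
{
  simgrid::s4u::Engine e(&argc, argv);

  // Programmatic equivalent of the command-line flag used in the tesh files:
  //   --cfg=smpi/host-speed:100f   ('f' = flops per second)
  simgrid::s4u::Engine::set_config("smpi/host-speed:100f");

  e.load_platform(argv[1]); // e.g. small_platform_with_routers.xml
  e.run();
  return 0;
}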
Final time: 7 $ rm -f ${srcdir:=.}/workload_nojob diff --git a/examples/smpi/smpi_s4u_masterslave/CMakeLists.txt b/examples/smpi/smpi_s4u_masterslave/CMakeLists.txt deleted file mode 100644 index 41e4dcf3a5..0000000000 --- a/examples/smpi/smpi_s4u_masterslave/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -if(enable_smpi) - include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi") - - add_executable (masterslave_mailbox_smpi EXCLUDE_FROM_ALL masterslave_mailbox_smpi.cpp) - target_link_libraries(masterslave_mailbox_smpi simgrid) - ADD_TESH_FACTORIES(smpi-s4u-masterslave "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterslave --cd ${CMAKE_BINARY_DIR}/examples/smpi/smpi_s4u_masterslave ${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh) - add_dependencies(tests masterslave_mailbox_smpi) -endif() - -set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/s4u_smpi.tesh PARENT_SCOPE) -set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/deployment_masterslave_mailbox_smpi.xml PARENT_SCOPE) -set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/masterslave_mailbox_smpi.cpp PARENT_SCOPE) diff --git a/examples/smpi/smpi_s4u_masterworker/CMakeLists.txt b/examples/smpi/smpi_s4u_masterworker/CMakeLists.txt new file mode 100644 index 0000000000..5ffc2c32e8 --- /dev/null +++ b/examples/smpi/smpi_s4u_masterworker/CMakeLists.txt @@ -0,0 +1,12 @@ +if(enable_smpi) + include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi") + + add_executable (masterworker_mailbox_smpi EXCLUDE_FROM_ALL masterworker_mailbox_smpi.cpp) + target_link_libraries(masterworker_mailbox_smpi simgrid) + ADD_TESH_FACTORIES(smpi-s4u-masterworker "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterworker --cd ${CMAKE_BINARY_DIR}/examples/smpi/smpi_s4u_masterworker ${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh) + add_dependencies(tests masterworker_mailbox_smpi) +endif() + +set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/s4u_smpi.tesh PARENT_SCOPE) +set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/deployment_masterworker_mailbox_smpi.xml PARENT_SCOPE) +set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/masterworker_mailbox_smpi.cpp PARENT_SCOPE) diff --git a/examples/smpi/smpi_s4u_masterslave/deployment_masterslave_mailbox_smpi.xml b/examples/smpi/smpi_s4u_masterworker/deployment_masterworker_mailbox_smpi.xml similarity index 100% rename from examples/smpi/smpi_s4u_masterslave/deployment_masterslave_mailbox_smpi.xml rename to examples/smpi/smpi_s4u_masterworker/deployment_masterworker_mailbox_smpi.xml diff --git a/examples/smpi/smpi_s4u_masterslave/masterslave_mailbox_smpi.cpp b/examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi.cpp similarity index 96% rename from examples/smpi/smpi_s4u_masterslave/masterslave_mailbox_smpi.cpp rename to examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi.cpp index a786889f33..6ed9f330e9 100644 --- a/examples/smpi/smpi_s4u_masterslave/masterslave_mailbox_smpi.cpp +++ b/examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi.cpp @@ -14,9 +14,9 @@ static void master(std::vector args) { xbt_assert(args.size() > 4, "The master function expects at least 3 arguments"); - long tasks_count = std::stol(args[1]); - double compute_cost = std::stod(args[2]); - double communication_cost = std::stod(args[3]); + long tasks_count = std::stol(args[1]); + double compute_cost = std::stod(args[2]); + long 
communication_cost = std::stol(args[3]); std::vector workers; for (unsigned int i = 4; i < args.size(); i++) workers.push_back(simgrid::s4u::Mailbox::by_name(args[i])); diff --git a/examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh b/examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh similarity index 94% rename from examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh rename to examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh index ff3932f456..a4198498bb 100644 --- a/examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh +++ b/examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh @@ -1,5 +1,5 @@ p Test the use of SMPI+MSG in the same file, as well as several different SMPI instances at the same time -$ ./masterslave_mailbox_smpi ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/deployment_masterslave_mailbox_smpi.xml --log=smpi.:info --cfg=smpi/simulate-computation:no +$ ./masterworker_mailbox_smpi ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/deployment_masterworker_mailbox_smpi.xml --log=smpi.:info --cfg=smpi/simulate-computation:no > [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no' > [0.000000] [smpi_config/INFO] You did not set the power of the host running the simulation. The timings will certainly not be accurate. Use the option "--cfg=smpi/host-speed:" to set its value. Check https://simgrid.org/doc/latest/Configuring_SimGrid.html#automatic-benchmarking-of-smpi-code for more information. > [Tremblay:master:(1) 0.000000] [msg_test/INFO] Got 2 workers and 20 tasks to process diff --git a/examples/smpi/trace/trace.tesh b/examples/smpi/trace/trace.tesh index ad4d92f4b6..2103ff5d56 100644 --- a/examples/smpi/trace/trace.tesh +++ b/examples/smpi/trace/trace.tesh @@ -1345,10 +1345,10 @@ $ tail -n +3 ${bindir:=.}/smpi_trace.trace $ rm -f ${bindir:=.}/smpi_trace.trace -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-resource -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1 -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-resource -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1f -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning $ rm -f ${bindir:=.}/smpi_trace.trace -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace --cfg=tracing/smpi/display-sizes:yes --cfg=tracing/smpi/computing:yes --cfg=tracing/smpi/internals:yes -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1 -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace --cfg=tracing/smpi/display-sizes:yes --cfg=tracing/smpi/computing:yes --cfg=tracing/smpi/internals:yes -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1f -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning $ rm -f ${bindir:=.}/smpi_trace.trace diff --git a/examples/smpi/trace_call_location/trace_call_location.tesh 
b/examples/smpi/trace_call_location/trace_call_location.tesh index d75dfc2ed9..cfa3993aae 100644 --- a/examples/smpi/trace_call_location/trace_call_location.tesh +++ b/examples/smpi/trace_call_location/trace_call_location.tesh @@ -4,22 +4,22 @@ p Test SMPI with call-location tracing. This means that the binary must have p already been compiled with the -trace-call-location switch. -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/trace-call-use-absolute-path:1 --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/trace-call-use-absolute-path:1 --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning $ grep -q "12 0.* 2 1 7 .*trace_call_location\.c\" 14$" ${bindir:=.}/smpi_trace.trace $ rm -f ${bindir:=.}/smpi_trace.trace #the same, but with trace-ti -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti -trace-file ${bindir:=.}/smpi_trace.txt --cfg=tracing/smpi/sleeping:1 -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/cpu-threshold:0 +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti -trace-file ${bindir:=.}/smpi_trace.txt --cfg=tracing/smpi/sleeping:1 -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/cpu-threshold:0 #Test replaying the trace, without altering the time. #We disable computation, but leave the sleep. -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no > [Fafard:2:(3) 1000.007967] [smpi_replay/INFO] Simulation time 1000.007967 #Test replaying the trace, altering the time to have the sleep much faster (1 instead of 1000). 
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/comp-adjustment-file:${srcdir:=.}/trace_call_location/adjust -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/comp-adjustment-file:${srcdir:=.}/trace_call_location/adjust -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no > [Fafard:2:(3) 1.007967] [smpi_replay/INFO] Simulation time 1.007967 $ rm -f ${bindir:=.}/smpi_trace.tx* diff --git a/include/simgrid/actor.h b/include/simgrid/actor.h index 5885acca42..3640e2631b 100644 --- a/include/simgrid/actor.h +++ b/include/simgrid/actor.h @@ -44,7 +44,7 @@ XBT_PUBLIC const char* sg_actor_get_property_value(const_sg_actor_t actor, const XBT_PUBLIC xbt_dict_t sg_actor_get_properties(const_sg_actor_t actor); XBT_PUBLIC void sg_actor_suspend(sg_actor_t actor); XBT_PUBLIC void sg_actor_resume(sg_actor_t actor); -XBT_PUBLIC int sg_actor_is_suspended(sg_actor_t actor); +XBT_PUBLIC int sg_actor_is_suspended(const_sg_actor_t actor); XBT_PUBLIC sg_actor_t sg_actor_restart(sg_actor_t actor); XBT_PUBLIC void sg_actor_set_auto_restart(sg_actor_t actor, int auto_restart); XBT_PUBLIC void sg_actor_daemonize(sg_actor_t actor); diff --git a/include/simgrid/jedule/jedule_platform.hpp b/include/simgrid/jedule/jedule_platform.hpp index 9079e9e9ad..8097776b16 100644 --- a/include/simgrid/jedule/jedule_platform.hpp +++ b/include/simgrid/jedule/jedule_platform.hpp @@ -30,9 +30,9 @@ public: const char* get_cname() const { return name.c_str(); } void set_parent(Container* parent) { parent_ = parent; } - bool has_children() { return not children_.empty(); } + bool has_children() const { return not children_.empty(); } int get_child_position(const Container* child) const; - unsigned int get_id_by_name(const char* name) { return name2id.at(name); } + unsigned int get_id_by_name(const char* name) const { return name2id.at(name); } void add_child(Container* child); void add_resources(std::vector hosts); diff --git a/include/simgrid/kernel/future.hpp b/include/simgrid/kernel/future.hpp index 4891f133c3..906d702937 100644 --- a/include/simgrid/kernel/future.hpp +++ b/include/simgrid/kernel/future.hpp @@ -48,7 +48,7 @@ public: FutureStateBase(FutureStateBase const&) = delete; FutureStateBase& operator=(FutureStateBase const&) = delete; - XBT_PUBLIC void schedule(simgrid::xbt::Task&& job); + XBT_PUBLIC void schedule(simgrid::xbt::Task&& job) const; void set_exception(std::exception_ptr exception) { @@ -522,13 +522,13 @@ public: future_get_ = true; return Future(state_); } - void set_value() + void set_value() const { if (state_ == nullptr) throw std::future_error(std::future_errc::no_state); state_->set_value(); } - void set_exception(std::exception_ptr exception) + void set_exception(std::exception_ptr exception) const { if (state_ == nullptr) throw std::future_error(std::future_errc::no_state); diff --git a/include/simgrid/kernel/resource/Action.hpp b/include/simgrid/kernel/resource/Action.hpp index a8f2177fe4..a74f944faf 100644 --- a/include/simgrid/kernel/resource/Action.hpp +++ b/include/simgrid/kernel/resource/Action.hpp @@ 
-31,7 +31,7 @@ class XBT_PUBLIC ActionHeap : public heap_type { public: enum class Type { - latency = 100, /* this is a heap entry to warn us when the latency is payed */ + latency = 100, /* this is a heap entry to warn us when the latency is paid */ max_duration, /* this is a heap entry to warn us when the max_duration limit (timeout) is reached */ normal, /* this is a normal heap entry stating the date to finish transmitting */ unset diff --git a/include/simgrid/kernel/resource/Model.hpp b/include/simgrid/kernel/resource/Model.hpp index 63c2542029..ac5ccd72ef 100644 --- a/include/simgrid/kernel/resource/Model.hpp +++ b/include/simgrid/kernel/resource/Model.hpp @@ -34,7 +34,7 @@ public: virtual ~Model(); - bool is_update_lazy() { return update_algorithm_ == UpdateAlgo::LAZY; } + bool is_update_lazy() const { return update_algorithm_ == UpdateAlgo::LAZY; } /** @brief Get the set of [actions](@ref Action) in *inited* state */ Action::StateSet* get_inited_action_set() { return &inited_action_set_; } diff --git a/include/simgrid/kernel/routing/ClusterZone.hpp b/include/simgrid/kernel/routing/ClusterZone.hpp index e8e72c1253..f6ab794dfc 100644 --- a/include/simgrid/kernel/routing/ClusterZone.hpp +++ b/include/simgrid/kernel/routing/ClusterZone.hpp @@ -83,9 +83,12 @@ public: /* The pair is {link_up, link_down} */ std::unordered_map> private_links_; - unsigned int node_pos(int id) { return id * num_links_per_node_; } - unsigned int node_pos_with_loopback(int id) { return node_pos(id) + (has_loopback_ ? 1 : 0); } - unsigned int node_pos_with_loopback_limiter(int id) { return node_pos_with_loopback(id) + (has_limiter_ ? 1 : 0); } + unsigned int node_pos(int id) const { return id * num_links_per_node_; } + unsigned int node_pos_with_loopback(int id) const { return node_pos(id) + (has_loopback_ ? 1 : 0); } + unsigned int node_pos_with_loopback_limiter(int id) const + { + return node_pos_with_loopback(id) + (has_limiter_ ? 
1 : 0); + } void* loopback_ = nullptr; kernel::resource::LinkImpl* backbone_ = nullptr; diff --git a/include/simgrid/kernel/routing/DragonflyZone.hpp b/include/simgrid/kernel/routing/DragonflyZone.hpp index adde19cf44..133fa77267 100644 --- a/include/simgrid/kernel/routing/DragonflyZone.hpp +++ b/include/simgrid/kernel/routing/DragonflyZone.hpp @@ -65,12 +65,13 @@ public: void parse_specific_arguments(ClusterCreationArgs* cluster) override; void seal() override; - void rankId_to_coords(int rank_id, unsigned int coords[4]); + void rankId_to_coords(int rank_id, unsigned int coords[4]) const; private: void generate_routers(); void generate_links(); - void create_link(const std::string& id, int numlinks, resource::LinkImpl** linkup, resource::LinkImpl** linkdown); + void create_link(const std::string& id, int numlinks, resource::LinkImpl** linkup, + resource::LinkImpl** linkdown) const; simgrid::s4u::Link::SharingPolicy sharing_policy_; double bw_ = 0; diff --git a/include/simgrid/kernel/routing/FatTreeZone.hpp b/include/simgrid/kernel/routing/FatTreeZone.hpp index ebf200b77f..683bd1fca7 100644 --- a/include/simgrid/kernel/routing/FatTreeZone.hpp +++ b/include/simgrid/kernel/routing/FatTreeZone.hpp @@ -138,8 +138,8 @@ private: void generate_labels(); void generate_switches(); int connect_node_to_parents(FatTreeNode* node); - bool are_related(FatTreeNode* parent, FatTreeNode* child); - bool is_in_sub_tree(FatTreeNode* root, FatTreeNode* node); + bool are_related(FatTreeNode* parent, FatTreeNode* child) const; + bool is_in_sub_tree(FatTreeNode* root, FatTreeNode* node) const; }; } // namespace routing } // namespace kernel diff --git a/include/simgrid/kernel/routing/NetPoint.hpp b/include/simgrid/kernel/routing/NetPoint.hpp index f73bfd582e..d413928519 100644 --- a/include/simgrid/kernel/routing/NetPoint.hpp +++ b/include/simgrid/kernel/routing/NetPoint.hpp @@ -32,7 +32,7 @@ public: ~NetPoint() = default; // Our rank in the vertices_ array of the netzone that contains us. 
- unsigned int id() { return id_; } + unsigned int id() const { return id_; } const std::string& get_name() const { return name_; } const char* get_cname() const { return name_.c_str(); } /** @brief the NetZone in which this NetPoint is included */ diff --git a/include/simgrid/kernel/routing/NetZoneImpl.hpp b/include/simgrid/kernel/routing/NetZoneImpl.hpp index 585bc78682..0c7b96f90c 100644 --- a/include/simgrid/kernel/routing/NetZoneImpl.hpp +++ b/include/simgrid/kernel/routing/NetZoneImpl.hpp @@ -81,7 +81,7 @@ protected: * * @param src where from * @param dst where to - * @param into Container into which the traversed links and gateway informations should be pushed + * @param into Container into which the traversed links and gateway information should be pushed * @param latency Accumulator in which the latencies should be added (caller must set it to 0) */ virtual void get_local_route(NetPoint* src, NetPoint* dst, RouteCreationArgs* into, double* latency) = 0; diff --git a/include/simgrid/kernel/routing/RoutedZone.hpp b/include/simgrid/kernel/routing/RoutedZone.hpp index 27ff684ba0..d338fc5200 100644 --- a/include/simgrid/kernel/routing/RoutedZone.hpp +++ b/include/simgrid/kernel/routing/RoutedZone.hpp @@ -59,9 +59,9 @@ protected: virtual RouteCreationArgs* new_extended_route(RoutingMode hierarchy, NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoint* gw_dst, std::vector& link_list, bool symmetrical, bool change_order); - void get_route_check_params(NetPoint* src, NetPoint* dst); + void get_route_check_params(NetPoint* src, NetPoint* dst) const; void add_route_check_params(NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoint* gw_dst, - const std::vector& link_list, bool symmetrical); + const std::vector& link_list, bool symmetrical) const; }; } // namespace routing } // namespace kernel diff --git a/include/simgrid/msg.h b/include/simgrid/msg.h index 64cc2cb723..33ab45c106 100644 --- a/include/simgrid/msg.h +++ b/include/simgrid/msg.h @@ -204,7 +204,7 @@ XBT_PUBLIC const char* MSG_process_get_property_value(const_sg_actor_t process, XBT_PUBLIC void MSG_process_suspend(msg_process_t process); XBT_PUBLIC void MSG_process_resume(msg_process_t process); -XBT_PUBLIC int MSG_process_is_suspended(msg_process_t process); +XBT_PUBLIC int MSG_process_is_suspended(const_sg_actor_t process); XBT_PUBLIC void MSG_process_restart(msg_process_t process); /** @brief Sets the "auto-restart" flag of the process. 
* diff --git a/include/simgrid/plugins/file_system.h b/include/simgrid/plugins/file_system.h index 31e356e4ba..73fa2ffad8 100644 --- a/include/simgrid/plugins/file_system.h +++ b/include/simgrid/plugins/file_system.h @@ -29,14 +29,14 @@ XBT_PUBLIC sg_size_t sg_file_read(sg_file_t fd, sg_size_t size); XBT_PUBLIC sg_size_t sg_file_write(sg_file_t fd, sg_size_t size); XBT_PUBLIC void sg_file_close(const_sg_file_t fd); -XBT_PUBLIC const char* sg_file_get_name(sg_file_t fd); -XBT_PUBLIC sg_size_t sg_file_get_size(sg_file_t fd); -XBT_PUBLIC void sg_file_dump(sg_file_t fd); +XBT_PUBLIC const char* sg_file_get_name(const_sg_file_t fd); +XBT_PUBLIC sg_size_t sg_file_get_size(const_sg_file_t fd); +XBT_PUBLIC void sg_file_dump(const_sg_file_t fd); XBT_PUBLIC void* sg_file_get_data(const_sg_file_t fd); XBT_PUBLIC void sg_file_set_data(sg_file_t fd, void* data); XBT_PUBLIC void sg_file_seek(sg_file_t fd, sg_offset_t offset, int origin); -XBT_PUBLIC sg_size_t sg_file_tell(sg_file_t fd); -XBT_PUBLIC void sg_file_move(sg_file_t fd, const char* fullpath); +XBT_PUBLIC sg_size_t sg_file_tell(const_sg_file_t fd); +XBT_PUBLIC void sg_file_move(const_sg_file_t fd, const char* fullpath); XBT_PUBLIC void sg_file_unlink(sg_file_t fd); XBT_PUBLIC int sg_file_rcopy(sg_file_t file, sg_host_t host, const char* fullpath); XBT_PUBLIC int sg_file_rmove(sg_file_t file, sg_host_t host, const char* fullpath); @@ -122,7 +122,7 @@ public: ~File(); /** Retrieves the path to the file */ - const char* get_path() { return fullpath_.c_str(); } + const char* get_path() const { return fullpath_.c_str(); } /** Simulates a local read action. Returns the size of data actually read */ sg_size_t read(sg_size_t size); @@ -135,18 +135,18 @@ public: /** Retrieves the previously stored data */ XBT_ATTRIB_DEPRECATED_v329("Please use get_data()") void* get_userdata() { return get_data(); } - sg_size_t size(); + sg_size_t size() const; void seek(sg_offset_t pos); /** Sets the file head to the given position. */ void seek(sg_offset_t pos, int origin); /** Sets the file head to the given position from a given origin. */ - sg_size_t tell(); /** Retrieves the current file position */ + sg_size_t tell() const; /** Retrieves the current file position */ /** Rename a file. 
WARNING: It is forbidden to move the file to another mount point */ - void move(const std::string& fullpath); + void move(const std::string& fullpath) const; int remote_copy(sg_host_t host, const char* fullpath); int remote_move(sg_host_t host, const char* fullpath); - int unlink(); /** Remove a file from the contents of a disk */ - void dump(); + int unlink() const; /** Remove a file from the contents of a disk */ + void dump() const; }; class XBT_PUBLIC FileSystemDiskExt { @@ -163,7 +163,7 @@ public: FileSystemDiskExt& operator=(const FileSystemDiskExt&) = delete; std::map* parse_content(const std::string& filename); std::map* get_content() const { return content_.get(); } - const char* get_mount_point() { return mount_point_.c_str(); } + const char* get_mount_point() const { return mount_point_.c_str(); } const char* get_mount_point(s4u::Host* remote_host) { return remote_mount_points_[remote_host].c_str(); } void add_remote_mount(Host* host, const std::string& mount_point) { @@ -187,8 +187,8 @@ public: FileSystemStorageExt& operator=(const FileSystemStorageExt&) = delete; std::map* parse_content(const std::string& filename); std::map* get_content() { return content_.get(); } - sg_size_t get_size() { return size_; } - sg_size_t get_used_size() { return used_size_; } + sg_size_t get_size() const { return size_; } + sg_size_t get_used_size() const { return used_size_; } void decr_used_size(sg_size_t size); void incr_used_size(sg_size_t size); }; diff --git a/include/simgrid/s4u/Activity.hpp b/include/simgrid/s4u/Activity.hpp index 2572d61eee..fbf8993242 100644 --- a/include/simgrid/s4u/Activity.hpp +++ b/include/simgrid/s4u/Activity.hpp @@ -37,7 +37,7 @@ protected: { while (not successors_.empty()) { ActivityPtr b = successors_.back(); - XBT_CDEBUG(s4u_activity, "Remove a dependency from '%s' on '%s'", get_cname(), b->get_cname()); + XBT_CVERB(s4u_activity, "Remove a dependency from '%s' on '%s'", get_cname(), b->get_cname()); b->dependencies_.erase(this); if (b->dependencies_.empty()) { b->vetoable_start(); @@ -57,7 +57,7 @@ public: { state_ = State::STARTING; if (dependencies_.empty()) { - XBT_CDEBUG(s4u_activity, "All dependencies are solved, let's start '%s'", get_cname()); + XBT_CVERB(s4u_activity, "All dependencies are solved, let's start '%s'", get_cname()); start(); } } @@ -67,21 +67,19 @@ public: Activity& operator=(Activity const&) = delete; #endif - enum class State { INITED = 0, STARTING, STARTED, CANCELED, - // ERRORED, // FIXME: state has never been used - FINISHED }; + enum class State { INITED = 0, STARTING, STARTED, CANCELED, FINISHED }; /** Starts a previously created activity. * * This function is optional: you can call wait() even if you didn't call start() */ virtual Activity* start() = 0; - /** Blocks until the activity is terminated */ + /** Blocks the current actor until the activity is terminated */ virtual Activity* wait() = 0; - /** Blocks until the activity is terminated, or until the timeout is elapsed + /** Blocks the current actor until the activity is terminated, or until the timeout is elapsed\n * Raises: timeout exception.*/ virtual Activity* wait_for(double timeout) = 0; - /** Blocks until the activity is terminated, or until the time limit is reached + /** Blocks the current actor until the activity is terminated, or until the time limit is reached\n * Raises: timeout exception. */ void wait_until(double time_limit); @@ -93,6 +91,13 @@ public: /** Tests whether the given activity is terminated yet. 
*/ virtual bool test(); + /** Blocks the progression of this activity until it gets resumed */ + virtual Activity* suspend(); + /** Unblock the progression of this activity if it was suspended previously */ + virtual Activity* resume(); + /** Whether or not the progression of this activity is blocked */ + bool is_suspended() const { return suspended_; } + virtual const char* get_cname() const = 0; virtual const std::string& get_name() const = 0; @@ -127,6 +132,7 @@ private: kernel::activity::ActivityImplPtr pimpl_ = nullptr; Activity::State state_ = Activity::State::INITED; double remains_ = 0; + bool suspended_ = false; std::vector successors_; std::set dependencies_; std::atomic_int_fast32_t refcount_{0}; diff --git a/include/simgrid/s4u/Actor.hpp b/include/simgrid/s4u/Actor.hpp index 959673bcbb..10538605d7 100644 --- a/include/simgrid/s4u/Actor.hpp +++ b/include/simgrid/s4u/Actor.hpp @@ -63,7 +63,7 @@ public: friend XBT_PUBLIC void intrusive_ptr_release(const Actor* actor); #endif /** Retrieve the amount of references on that object. Useful to debug the automatic refcounting */ - int get_refcount(); + int get_refcount() const; // ***** Actor creation ***** /** Retrieve a reference to myself */ @@ -105,7 +105,7 @@ public: static ActorPtr create(const std::string& name, s4u::Host* host, const std::function& code); /** Create an actor, but don't start it yet. * - * This is usefull to set some properties or extension before actually starting it */ + * This is useful to set some properties or extension before actually starting it */ static ActorPtr init(const std::string& name, s4u::Host* host); ActorPtr set_stacksize(unsigned stacksize); /** Start a previously initialized actor */ @@ -157,7 +157,7 @@ public: void resume(); /** Returns true if the actor is suspended. */ - bool is_suspended(); + bool is_suspended() const; /** If set to true, the actor will automatically restart when its host reboots */ void set_auto_restart(bool autorestart); @@ -178,7 +178,7 @@ public: /** Sets the time at which that actor should be killed */ void set_kill_time(double time); /** Retrieves the time at which that actor will be killed (or -1 if not set) */ - double get_kill_time(); + double get_kill_time() const; /** @brief Moves the actor to another host * diff --git a/include/simgrid/s4u/Engine.hpp b/include/simgrid/s4u/Engine.hpp index 7294e60ea8..249000775f 100644 --- a/include/simgrid/s4u/Engine.hpp +++ b/include/simgrid/s4u/Engine.hpp @@ -41,14 +41,14 @@ public: static void shutdown(); /** Run the simulation after initialization */ - void run(); + void run() const; /** @brief Retrieve the simulation time (in seconds) */ static double get_clock(); /** @brief Retrieve the engine singleton */ static s4u::Engine* get_instance(); - void load_platform(const std::string& platf); + void load_platform(const std::string& platf) const; XBT_ATTRIB_DEPRECATED_v330("Please change the return code of your actors to void") void register_function( const std::string& name, int (*code)(int, char**)); @@ -80,7 +80,7 @@ public: register_function(name, std::move(code_factory)); } - void load_deployment(const std::string& deploy); + void load_deployment(const std::string& deploy) const; protected: #ifndef DOXYGEN @@ -103,44 +103,44 @@ protected: public: /** Returns the amount of hosts existing in the platform. */ - size_t get_host_count(); + size_t get_host_count() const; /** Returns a vector of all hosts found in the platform. 
* * The order is generally different from the creation/declaration order in the XML platform because we use a hash * table internally. */ - std::vector get_all_hosts(); - std::vector get_filtered_hosts(const std::function& filter); - Host* host_by_name(const std::string& name); - Host* host_by_name_or_null(const std::string& name); + std::vector get_all_hosts() const; + std::vector get_filtered_hosts(const std::function& filter) const; + Host* host_by_name(const std::string& name) const; + Host* host_by_name_or_null(const std::string& name) const; - size_t get_link_count(); - std::vector get_all_links(); - std::vector get_filtered_links(const std::function& filter); - Link* link_by_name(const std::string& name); - Link* link_by_name_or_null(const std::string& name); + size_t get_link_count() const; + std::vector get_all_links() const; + std::vector get_filtered_links(const std::function& filter) const; + Link* link_by_name(const std::string& name) const; + Link* link_by_name_or_null(const std::string& name) const; - size_t get_actor_count(); - std::vector get_all_actors(); - std::vector get_filtered_actors(const std::function& filter); + size_t get_actor_count() const; + std::vector get_all_actors() const; + std::vector get_filtered_actors(const std::function& filter) const; #ifndef DOXYGEN - size_t get_storage_count(); - std::vector get_all_storages(); - Storage* storage_by_name(const std::string& name); - Storage* storage_by_name_or_null(const std::string& name); + size_t get_storage_count() const; + std::vector get_all_storages() const; + Storage* storage_by_name(const std::string& name) const; + Storage* storage_by_name_or_null(const std::string& name) const; #endif - std::vector get_all_netpoints(); - kernel::routing::NetPoint* netpoint_by_name_or_null(const std::string& name); + std::vector get_all_netpoints() const; + kernel::routing::NetPoint* netpoint_by_name_or_null(const std::string& name) const; - NetZone* get_netzone_root(); + NetZone* get_netzone_root() const; void set_netzone_root(const NetZone* netzone); - NetZone* netzone_by_name_or_null(const std::string& name); + NetZone* netzone_by_name_or_null(const std::string& name) const; /** @brief Retrieves all netzones of the type indicated by the template argument */ - template std::vector get_filtered_netzones() + template std::vector get_filtered_netzones() const { static_assert(std::is_base_of::value, "Filtering netzones is only possible for subclasses of kernel::routing::NetZoneImpl"); @@ -159,13 +159,13 @@ public: * @endrst * * Example: - * e->set_config("host/model:ptask_L07"); + * simgrid::s4u::Engine::set_config("host/model:ptask_L07"); */ - void set_config(const std::string& str); - void set_config(const std::string& name, int value); - void set_config(const std::string& name, bool value); - void set_config(const std::string& name, double value); - void set_config(const std::string& name, const std::string& value); + static void set_config(const std::string& str); + static void set_config(const std::string& name, int value); + static void set_config(const std::string& name, bool value); + static void set_config(const std::string& name, double value); + static void set_config(const std::string& name, const std::string& value); /** Callback fired when the platform is created (ie, the xml file parsed), * right before the actual simulation starts. 
*/ diff --git a/include/simgrid/s4u/Exec.hpp b/include/simgrid/s4u/Exec.hpp index 7e1e8ef1da..e3894ac668 100644 --- a/include/simgrid/s4u/Exec.hpp +++ b/include/simgrid/s4u/Exec.hpp @@ -19,7 +19,7 @@ namespace s4u { * @rst * Most of them are created with :cpp:func:`simgrid::s4u::this_actor::exec_init()` or * :cpp:func:`simgrid::s4u::Host::execute()`, and represent a classical (sequential) execution. This can be used to - * simulate some computation occuring in another thread when the calling actor is not blocked during the execution. + * simulate some computation occurring in another thread when the calling actor is not blocked during the execution. * * You can also use :cpp:func:`simgrid::s4u::this_actor::parallel_execute()` to create *parallel* executions. These * objects represent distributed computations involving computations on several hosts and communications between them. diff --git a/include/simgrid/s4u/Host.hpp b/include/simgrid/s4u/Host.hpp index 254caf7ac8..1e7bd8ac53 100644 --- a/include/simgrid/s4u/Host.hpp +++ b/include/simgrid/s4u/Host.hpp @@ -188,12 +188,12 @@ public: * It is not a problem if the actor is not located on the called host. * The actor will not be migrated in this case. Such remote execution are easy in simulation. */ - void execute(double flops); + void execute(double flops) const; /** Start an asynchronous computation on that host (possibly remote) */ - ExecPtr exec_async(double flops_amounts); + ExecPtr exec_async(double flops_amounts) const; /** Block the calling actor on an execution located on the called host (with explicit priority) */ - void execute(double flops, double priority); + void execute(double flops, double priority) const; private: xbt::string name_{"noname"}; diff --git a/include/simgrid/s4u/Link.hpp b/include/simgrid/s4u/Link.hpp index 89f4f792d4..11212f9526 100644 --- a/include/simgrid/s4u/Link.hpp +++ b/include/simgrid/s4u/Link.hpp @@ -52,18 +52,33 @@ public: /** @brief Retrieves the name of that link as a C string */ const char* get_cname() const; - /** @brief Get the bandwidth in bytes per second of current Link */ + /** Get the bandwidth of the current Link (in bytes per second) */ double get_bandwidth() const; + /** Set the bandwidth of the current Link (in bytes per second) */ void set_bandwidth(double value); - /** @brief Get the latency in seconds of current Link */ + /** Get the latency of the current Link (in seconds) */ double get_latency() const; + /** Set the latency of the current Link (in seconds) */ void set_latency(double value); /** @brief Describes how the link is shared between flows */ SharingPolicy get_sharing_policy() const; - /** @brief Returns the current load (in flops per second) */ + /** @brief Set the level of communication speed of the given host on this wifi link. + * + * The bandwidth of a wifi link for a given host depends on its SNR (signal to noise ratio), + * which ultimately depends on the distance between the host and the station and the material between them. + * + * This is modeled in SimGrid by providing several bandwidths to wifi links, one per SNR level (just provide + * comma-separated values in the XML file). By default, the first level in the list is used, but you can use the + * current function to specify that a given host uses another level of bandwidth. This can be used to take the + * location of hosts into account, or even to model mobility in your SimGrid simulation. 
+ * + * Note that this function asserts that the link is actually a wifi link */ + void set_host_wifi_rate(const s4u::Host* host, int level) const; + + /** @brief Returns the current load (in bytes per second) */ double get_usage() const; /** @brief Check if the Link is used (at least one flow uses the link) */ diff --git a/include/simgrid/s4u/Mailbox.hpp b/include/simgrid/s4u/Mailbox.hpp index f60c07019a..35a9db76fc 100644 --- a/include/simgrid/s4u/Mailbox.hpp +++ b/include/simgrid/s4u/Mailbox.hpp @@ -40,19 +40,19 @@ public: static Mailbox* by_name(const std::string& name); /** Returns whether the mailbox contains queued communications */ - bool empty(); + bool empty() const; /** Check if there is a communication going on in a mailbox. */ - bool listen(); + bool listen() const; /** Look if there is a communication going on in a mailbox and return the PID of the sender actor */ - aid_t listen_from(); + aid_t listen_from() const; /** Check if there is a communication ready to be consumed from a mailbox. */ - bool ready(); + bool ready() const; /** Gets the first element in the queue (without dequeuing it), or nullptr if none is there */ - kernel::activity::CommImplPtr front(); + kernel::activity::CommImplPtr front() const; /** Declare that the specified actor is a permanent receiver on that mailbox * @@ -75,7 +75,7 @@ public: void set_receiver(ActorPtr actor); /** Return the actor declared as permanent receiver, or nullptr if none **/ - ActorPtr get_receiver(); + ActorPtr get_receiver() const; /** Creates (but don't start) a data transmission to that mailbox */ CommPtr put_init(); diff --git a/include/simgrid/s4u/VirtualMachine.hpp b/include/simgrid/s4u/VirtualMachine.hpp index 689afdd918..e8a9f40bf5 100644 --- a/include/simgrid/s4u/VirtualMachine.hpp +++ b/include/simgrid/s4u/VirtualMachine.hpp @@ -49,10 +49,10 @@ public: void destroy() override; Host* get_pm() const; - void set_pm(Host* pm); + VirtualMachine* set_pm(Host* pm); size_t get_ramsize() const; - void set_ramsize(size_t ramsize); - void set_bound(double bound); + VirtualMachine* set_ramsize(size_t ramsize); + VirtualMachine* set_bound(double bound); VirtualMachine::state get_state(); static xbt::signal on_start; diff --git a/include/simgrid/simix.hpp b/include/simgrid/simix.hpp index 115fdff27c..324062ca3f 100644 --- a/include/simgrid/simix.hpp +++ b/include/simgrid/simix.hpp @@ -109,7 +109,7 @@ public: Timer(double date, simgrid::xbt::Task&& callback) : date(date), callback(std::move(callback)) {} simgrid::xbt::Task callback; - double get_date() { return date; } + double get_date() const { return date; } void remove(); template static inline Timer* set(double date, F callback) diff --git a/include/smpi/mpif.h.in b/include/smpi/mpif.h.in index 8c25bb42e3..6042a102bb 100644 --- a/include/smpi/mpif.h.in +++ b/include/smpi/mpif.h.in @@ -290,4 +290,9 @@ double precision MPI_WTIME double precision MPI_WTICK + + external smpi_execute_flops + external smpi_execute_flops_benched + external smpi_execute + external smpi_execute_benched @MODULE_MPIF_OUT@ diff --git a/include/smpi/smpi.h b/include/smpi/smpi.h index 8832ecd220..ec4b56d2e2 100644 --- a/include/smpi/smpi.h +++ b/include/smpi/smpi.h @@ -309,7 +309,7 @@ XBT_PUBLIC_DATA const MPI_Datatype MPI_COUNT; #define MPI_LOGICAL MPI_LONG #endif -#define MPI_Fint int +typedef int MPI_Fint; #define MPI_COMPLEX MPI_C_FLOAT_COMPLEX #define MPI_DOUBLE_COMPLEX MPI_C_DOUBLE_COMPLEX diff --git a/include/smpi/smpi_extended_traces.h b/include/smpi/smpi_extended_traces.h index 1f5ede2c11..1b91ec7f6d 
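The VirtualMachine setters above now return the VM itself, so calls can be chained. A sketch under the assumption that the usual (name, physical host, core amount) constructor is used; names and sizes are made up:

    #include <simgrid/s4u.hpp>

    static void boot_vm(simgrid::s4u::Host* pm)
    {
      auto* vm = new simgrid::s4u::VirtualMachine("vm0", pm, 2); // 2 virtual cores on pm
      vm->set_ramsize(1024ul * 1024 * 1024) // 1 GiB of RAM
        ->set_bound(1e8);                   // cap the VM's computing speed at 1e8 flop/s
      vm->start();
    }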
100644 --- a/include/smpi/smpi_extended_traces.h +++ b/include/smpi/smpi_extended_traces.h @@ -276,6 +276,8 @@ #define MPI_Win_get_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Win_get_errhandler(__VA_ARGS__)) #define MPI_Win_create_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Win_create_errhandler(__VA_ARGS__)) #define MPI_Win_call_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Win_call_errhandler(__VA_ARGS__)) +#define MPI_Type_get_contents(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_get_contents(__VA_ARGS__)) +#define MPI_Type_get_envelope(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_get_envelope(__VA_ARGS__)) #define MPI_File_call_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_File_call_errhandler(__VA_ARGS__)) #define MPI_File_create_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_File_create_errhandler(__VA_ARGS__)) #define MPI_File_set_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_File_set_errhandler(__VA_ARGS__)) @@ -304,7 +306,6 @@ #define MPI_Irsend(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Irsend(__VA_ARGS__)) #define MPI_Get_elements(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Get_elements(__VA_ARGS__)) #define MPI_Pcontrol(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Pcontrol(__VA_ARGS__)) -#define MPI_Type_get_contents(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_get_contents(__VA_ARGS__)) #define MPI_Type_create_darray(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_create_darray(__VA_ARGS__)) #define MPI_Pack_external_size(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Pack_external_size(__VA_ARGS__)) #define MPI_Pack_external(...) 
(smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Pack_external(__VA_ARGS__)) diff --git a/include/smpi/smpi_extended_traces_fortran.h b/include/smpi/smpi_extended_traces_fortran.h index b1d58c80f1..0f587f6fcf 100644 --- a/include/smpi/smpi_extended_traces_fortran.h +++ b/include/smpi/smpi_extended_traces_fortran.h @@ -551,6 +551,10 @@ #define MPI_WIN_CREATE_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Win_create_errhandler #define mpi_win_call_errhandler smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Win_call_errhandler #define MPI_WIN_CALL_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Win_call_errhandler +#define mpi_type_get_contents smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents +#define MPI_TYPE_GET_CONTENTS smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents +#define mpi_type_get_envelope smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_envelope +#define MPI_TYPE_GET_ENVELOPE smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_envelope #define mpi_file_call_errhandler smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_File_call_errhandler #define MPI_FILE_CALL_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_File_call_errhandler #define mpi_file_create_errhandler smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_File_create_errhandler @@ -607,8 +611,6 @@ #define MPI_GET_ELEMENTS smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Get_elements #define mpi_pcontrol smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Pcontrol #define MPI_PCONTROL smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Pcontrol -#define mpi_type_get_contents smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents -#define MPI_TYPE_GET_CONTENTS smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents #define mpi_type_create_darray smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_create_darray #define MPI_TYPE_CREATE_DARRAY smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_create_darray #define mpi_pack_external_size smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Pack_external_size diff --git a/include/xbt/file.hpp b/include/xbt/file.hpp index e98178f7f8..b12800f8a8 100644 --- a/include/xbt/file.hpp +++ b/include/xbt/file.hpp @@ -24,9 +24,9 @@ public: /** @brief Returns the full path name */ const std::string& get_name() const { return path_; } /** @brief Returns the directory component of a path (reimplementation of POSIX dirname) */ - std::string get_dir_name(); + std::string get_dir_name() const; /** @brief Returns the file component of a path (reimplementation of POSIX basename) */ - std::string get_base_name(); + std::string get_base_name() const; private: std::string path_; diff --git a/include/xbt/log.h b/include/xbt/log.h index 4f09803292..6264c59aa5 100644 --- a/include/xbt/log.h +++ b/include/xbt/log.h @@ -46,7 +46,7 @@ typedef enum { xbt_log_priority_trace = 1, /**< enter and return of some functions */ xbt_log_priority_debug = 2, /**< crufty output */ xbt_log_priority_verbose = 3, /**< verbose output for the user wanting more */ - xbt_log_priority_info = 4, /**< output about the regular functionning */ + xbt_log_priority_info = 4, /**< output about the regular functioning */ xbt_log_priority_warning = 5, /**< minor issue encountered */ xbt_log_priority_error = 6, /**< issue encountered */ xbt_log_priority_critical = 7, /**< 
major issue encountered */ diff --git a/include/xbt/log.hpp b/include/xbt/log.hpp index 4553724d6b..dc1153501e 100644 --- a/include/xbt/log.hpp +++ b/include/xbt/log.hpp @@ -12,7 +12,7 @@ namespace simgrid { namespace xbt { -/** Display informations about an exception +/** Display information about an exception * * We display: the exception type, name, attached backtraces (if any) and * the nested exception (if any). diff --git a/include/xbt/parse_units.hpp b/include/xbt/parse_units.hpp new file mode 100644 index 0000000000..7350c241eb --- /dev/null +++ b/include/xbt/parse_units.hpp @@ -0,0 +1,23 @@ +/* Copyright (c) 2007-2020. The SimGrid Team. + * All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +#ifndef SIMGRID_XBT_PARSE_UNITS_HPP +#define SIMGRID_XBT_PARSE_UNITS_HPP + +double xbt_parse_get_time(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name); +double surf_parse_get_size(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name); +double xbt_parse_get_bandwidth(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name); +std::vector xbt_parse_get_bandwidths(const std::string& filename, int lineno, const char* string, + const char* entity_kind, const std::string& name); +double xbt_parse_get_speed(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name); +std::vector xbt_parse_get_all_speeds(const std::string& filename, int lineno, char* speeds, + const char* entity_kind, const std::string& id); + +#endif diff --git a/include/xbt/random.hpp b/include/xbt/random.hpp index d4c612f653..7d4e4ef30b 100644 --- a/include/xbt/random.hpp +++ b/include/xbt/random.hpp @@ -45,7 +45,7 @@ public: /** * @brief Write the state of the Mersenne-Twister RNG to a file */ - bool write_state(const std::string& filename); + bool write_state(const std::string& filename) const; /** * @brief Draws an integer number uniformly in range [min, max] (min and max included) diff --git a/sonar-project.properties b/sonar-project.properties index 66edad6df0..e6e7f492ea 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -14,7 +14,7 @@ sonar.sources=src,examples,include,teshsuite # Disable some rules on some files -sonar.issue.ignore.multicriteria=j1,j2,j3,j4,j5,jni1,jni2,c1,c2a,c2b,c3,c5a,c5b,c6a,c6b,c7,c8a,c8b,c9,c10a,c10b,c10c,cex1a,cex1b,cex2a,cex2b,cex3,cex4,f1,p1,s1,s2,s3,s4,s5 +sonar.issue.ignore.multicriteria=j1,j2,j3,j4,j5,jni1,jni2,c1,c2a,c2b,c3,c4,c5a,c5b,c6a,c6b,c7,c8a,c8b,c9,c10a,c10b,c10c,cex1a,cex1b,cex2a,cex2b,cex3,cex4,f1,p1,s1,s2,s3,s4,s5 # The Object.finalize() method should not be overridden # But we need to clean the native memory with JNI @@ -69,6 +69,11 @@ sonar.issue.ignore.multicriteria.c2b.resourceKey=**/* sonar.issue.ignore.multicriteria.c3.ruleKey=c:PPMacroName sonar.issue.ignore.multicriteria.c3.resourceKey=include/smpi/smpi_extended_traces.h +# Concise syntax should be used for concatenatable namespaces +# This is C++17, and we still support C++11 +sonar.issue.ignore.multicriteria.c4.ruleKey=cpp:S5812 +sonar.issue.ignore.multicriteria.c4.resourceKey=**/* + # Replace alternative operator "not" with "!" 
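The new xbt/parse_units.hpp helpers above take the source location explicitly, so error messages can point at the offending platform file and line. A hypothetical call; file name, line number and unit strings are illustrative:

    #include <xbt/parse_units.hpp>

    static void parse_example()
    {
      // Parse a bandwidth and a latency for some link "link0", reporting errors
      // as coming from line 42 of my_platform.lua:
      double bw  = xbt_parse_get_bandwidth("my_platform.lua", 42, "125MBps", "bandwidth of link", "link0");
      double lat = xbt_parse_get_time("my_platform.lua", 42, "50us", "latency of link", "link0");
      (void)bw;
      (void)lat;
    }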
# I like it better, so please leave me alone sonar.issue.ignore.multicriteria.c5a.ruleKey=cpp:S3659 diff --git a/src/bindings/java/org/simgrid/NativeLib.java b/src/bindings/java/org/simgrid/NativeLib.java index 846d304600..3e2322d47a 100644 --- a/src/bindings/java/org/simgrid/NativeLib.java +++ b/src/bindings/java/org/simgrid/NativeLib.java @@ -14,7 +14,7 @@ import java.util.stream.Stream; /** Helper class loading the native functions of SimGrid that we use for downcalls * - * Almost all org.simgrid.msg.* classes contain a static bloc (thus executed when the class is loaded) + * Almost all org.simgrid.msg.* classes contain a static block (thus executed when the class is loaded) * containing a call to this. */ public final class NativeLib { diff --git a/src/bindings/java/org/simgrid/msg/Process.java b/src/bindings/java/org/simgrid/msg/Process.java index 5eeb9525bf..d8f1f3bdfd 100644 --- a/src/bindings/java/org/simgrid/msg/Process.java +++ b/src/bindings/java/org/simgrid/msg/Process.java @@ -14,12 +14,12 @@ import java.util.Arrays; * declared in the deployment file (XML format). * To create your own process you must inherit your own process from this * class and override the method "main()". For example if you want to use - * a process named Slave proceed as it : + * a process named Worker proceed as it : * * (1) import the class Process of the package simgrid.msg * import simgrid.msg.Process; * - * public class Slave extends simgrid.msg.Process { + * public class Worker extends simgrid.msg.Process { * * (2) Override the method function * @@ -30,9 +30,9 @@ import java.util.Arrays; * \endverbatim * } * The name of your process must be declared in the deployment file of your simulation. - * For the example, for the previous process Slave this file must contains a line : - * <process host="Maxims" function="Slave"/>, where Maxims is the host of the process - * Slave. All the process of your simulation are automatically launched and managed by Msg. + * For the example, for the previous process Worker this file must contains a line : + * <process host="Maxims" function="Worker"/>, where Maxims is the host of the process + * Worker. All the process of your simulation are automatically launched and managed by Msg. * A process use tasks to simulate communications or computations with another process. * For more information see Task. For more information on host concept * see Host. diff --git a/src/bindings/java/org/simgrid/msg/Task.java b/src/bindings/java/org/simgrid/msg/Task.java index 5b1bf6685c..18e060b1e8 100644 --- a/src/bindings/java/org/simgrid/msg/Task.java +++ b/src/bindings/java/org/simgrid/msg/Task.java @@ -43,7 +43,7 @@ public class Task { * If 0, then it cannot be executed with the execute() method. * This value has to be ≥ 0. * - * @param bytesAmount A value of amount of data (in bytes) needed to transfert this task. + * @param bytesAmount A value of amount of data (in bytes) needed to transfer this task. * If 0, then it cannot be transferred with the get() and put() methods. * This value has to be ≥ 0. */ @@ -93,7 +93,7 @@ public class Task { * @param flopsAmount A value of the processing amount (in flop) needed * to process the task. If 0, then it cannot be executed * with the execute() method. This value has to be >= 0. - * @param bytesAmount A value of amount of data (in bytes) needed to transfert + * @param bytesAmount A value of amount of data (in bytes) needed to transfer * this task. If 0, then it cannot be transferred this task. 
* If 0, then it cannot be transferred with the get() and put() * methods. This value has to be >= 0. diff --git a/src/bindings/java/org/simgrid/msg/VM.java b/src/bindings/java/org/simgrid/msg/VM.java index 1ca044548f..b1fbf650e9 100644 --- a/src/bindings/java/org/simgrid/msg/VM.java +++ b/src/bindings/java/org/simgrid/msg/VM.java @@ -131,7 +131,7 @@ public class VM extends Host { Msg.info("Migration of VM "+this.getName()+" to "+destination.getName()+" is impossible ("+e.getMessage()+")"); throw new HostFailureException(e.getMessage()); } - // If the migration correcly returned, then we should change the currentHost value. + // If the migration correctly returned, then we should change the currentHost value. this.currentHost = destination; } private native void nativeMigration(Host destination) throws MsgException; diff --git a/src/bindings/lua/lua_host.cpp b/src/bindings/lua/lua_host.cpp index 918973120e..59361fe515 100644 --- a/src/bindings/lua/lua_host.cpp +++ b/src/bindings/lua/lua_host.cpp @@ -101,7 +101,7 @@ static int l_host_number(lua_State * L) */ static int l_host_at(lua_State * L) { - int index = luaL_checkinteger(L, 1); + lua_Integer index = luaL_checkinteger(L, 1); std::vector hosts = simgrid::s4u::Engine::get_instance()->get_all_hosts(); sg_host_t host = hosts[index - 1]; // lua indexing start by 1 (lua[1] <=> C[0]) lua_newtable(L); /* create a table, put the userdata on top of it */ diff --git a/src/bindings/lua/lua_platf.cpp b/src/bindings/lua/lua_platf.cpp index 239ea855ea..b29dcde512 100644 --- a/src/bindings/lua/lua_platf.cpp +++ b/src/bindings/lua/lua_platf.cpp @@ -11,6 +11,7 @@ #include "src/surf/network_interface.hpp" #include "src/surf/surf_private.hpp" #include "src/surf/xml/platf_private.hpp" +#include "xbt/parse_units.hpp" #include #include @@ -79,6 +80,9 @@ int console_close(lua_State*) int console_add_backbone(lua_State *L) { simgrid::kernel::routing::LinkCreationArgs link; + lua_Debug ar; + lua_getstack(L, 1, &ar); + lua_getinfo(L, "Sl", &ar); link.properties = nullptr; @@ -94,14 +98,16 @@ int console_add_backbone(lua_State *L) { type = lua_gettable(L, -2); lua_ensure(type == LUA_TSTRING || type == LUA_TNUMBER, "Attribute 'bandwidth' must be specified for backbone and must either be a string (in the right format; see docs) or a number."); - link.bandwidths.push_back(surf_parse_get_bandwidth(lua_tostring(L, -1), "bandwidth of backbone", link.id.c_str())); + link.bandwidths.push_back(xbt_parse_get_bandwidth(ar.short_src, ar.currentline, lua_tostring(L, -1), + "bandwidth of backbone", link.id.c_str())); lua_pop(L, 1); lua_pushstring(L, "lat"); type = lua_gettable(L, -2); lua_ensure(type == LUA_TSTRING || type == LUA_TNUMBER, "Attribute 'lat' must be specified for backbone and must either be a string (in the right format; see docs) or a number."); - link.latency = surf_parse_get_time(lua_tostring(L, -1), "latency of backbone", link.id.c_str()); + link.latency = + xbt_parse_get_time(ar.short_src, ar.currentline, lua_tostring(L, -1), "latency of backbone", link.id.c_str()); lua_pop(L, 1); lua_pushstring(L, "sharing_policy"); @@ -151,6 +157,9 @@ int console_add_host___link(lua_State *L) { int console_add_host(lua_State *L) { simgrid::kernel::routing::HostCreationArgs host; int type; + lua_Debug ar; + lua_getstack(L, 1, &ar); + lua_getinfo(L, "Sl", &ar); // we get values from the table passed as argument lua_ensure(lua_istable(L, -1), @@ -170,9 +179,10 @@ int console_add_host(lua_State *L) { lua_ensure(type == LUA_TSTRING || type == LUA_TNUMBER, "Attribute 'speed' must 
be specified for host and must either be a string (in the correct format; check documentation) or a number."); if (type == LUA_TNUMBER) - host.speed_per_pstate.push_back(lua_tointeger(L, -1)); + host.speed_per_pstate.push_back(lua_tonumber(L, -1)); else // LUA_TSTRING - host.speed_per_pstate.push_back(surf_parse_get_speed(lua_tostring(L, -1), "speed of host", host.id)); + host.speed_per_pstate.push_back( + xbt_parse_get_speed(ar.short_src, ar.currentline, lua_tostring(L, -1), "speed of host", host.id)); lua_pop(L, 1); // get core @@ -181,7 +191,7 @@ int console_add_host(lua_State *L) { if (not lua_isnumber(L, -1)) host.core_amount = 1; // Default value else - host.core_amount = lua_tonumber(L, -1); + host.core_amount = static_cast(lua_tointeger(L, -1)); if (host.core_amount == 0) host.core_amount = 1; lua_pop(L, 1); @@ -209,6 +219,9 @@ int console_add_host(lua_State *L) { int console_add_link(lua_State *L) { simgrid::kernel::routing::LinkCreationArgs link; + lua_Debug ar; + lua_getstack(L, 1, &ar); + lua_getinfo(L, "Sl", &ar); const char* policy; @@ -230,7 +243,8 @@ int console_add_link(lua_State *L) { if (type == LUA_TNUMBER) link.bandwidths.push_back(lua_tonumber(L, -1)); else // LUA_TSTRING - link.bandwidths.push_back(surf_parse_get_bandwidth(lua_tostring(L, -1), "bandwidth of link", link.id.c_str())); + link.bandwidths.push_back(xbt_parse_get_bandwidth(ar.short_src, ar.currentline, lua_tostring(L, -1), + "bandwidth of link", link.id.c_str())); lua_pop(L, 1); //get latency value @@ -241,7 +255,8 @@ int console_add_link(lua_State *L) { if (type == LUA_TNUMBER) link.latency = lua_tonumber(L, -1); else // LUA_TSTRING - link.latency = surf_parse_get_time(lua_tostring(L, -1), "latency of link", link.id.c_str()); + link.latency = + xbt_parse_get_time(ar.short_src, ar.currentline, lua_tostring(L, -1), "latency of link", link.id.c_str()); lua_pop(L, 1); /*Optional Arguments */ diff --git a/src/bindings/lua/simgrid_lua.hpp b/src/bindings/lua/simgrid_lua.hpp index 7ffcd4aee0..b6e7c2756f 100644 --- a/src/bindings/lua/simgrid_lua.hpp +++ b/src/bindings/lua/simgrid_lua.hpp @@ -9,7 +9,7 @@ #include /* ********************************************************************************* */ -/* Plaftorm functions */ +/* Platform functions */ /* ********************************************************************************* */ extern "C" { diff --git a/src/bindings/python/simgrid_python.cpp b/src/bindings/python/simgrid_python.cpp index 66b1c1f88e..f900c97a91 100644 --- a/src/bindings/python/simgrid_python.cpp +++ b/src/bindings/python/simgrid_python.cpp @@ -152,7 +152,7 @@ PYBIND11_MODULE(simgrid, m) py::class_(m, "Engine", "Simulation Engine") .def(py::init([](std::vector args) { static char noarg[] = {'\0'}; - int argc = args.size(); + int argc = static_cast(args.size()); std::unique_ptr argv(new char*[argc + 1]); for (int i = 0; i != argc; ++i) argv[i] = args[i].empty() ? 
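The lua_platf.cpp hunks above all follow the same pattern: fetch the Lua caller's location, then hand it to the new parse helpers so that a malformed value is reported against the platform script rather than the C++ code. Annotated restatement of that idiom as it appears inside console_add_link(), where L and link come from the surrounding function:

    lua_Debug ar;
    lua_getstack(L, 1, &ar);   // stack level 1 = the Lua chunk that called this C function
    lua_getinfo(L, "Sl", &ar); // "S" fills ar.short_src (script name), "l" fills ar.currentline
    link.latency = xbt_parse_get_time(ar.short_src, ar.currentline, lua_tostring(L, -1),
                                      "latency of link", link.id.c_str());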
noarg : &args[i].front(); @@ -199,7 +199,7 @@ PYBIND11_MODULE(simgrid, m) /* Class Host */ py::class_>(m, "Host", "Simulated host") .def("by_name", &Host::by_name, "Retrieves a host from its name, or die") - .def("get_pstate_count", &Host::get_pstate_count, "Retrieve the cound of defined pstate levels") + .def("get_pstate_count", &Host::get_pstate_count, "Retrieve the count of defined pstate levels") .def("get_pstate_speed", &Host::get_pstate_speed, "Retrieve the maximal speed at the given pstate") .def_property( "pstate", &Host::get_pstate, diff --git a/src/include/xbt/parmap.hpp b/src/include/xbt/parmap.hpp index 58b59cd8ea..295ad660f6 100644 --- a/src/include/xbt/parmap.hpp +++ b/src/include/xbt/parmap.hpp @@ -294,7 +294,7 @@ template void Parmap::worker_main(ThreadData* data) XBT_CDEBUG(xbt_parmap, "New worker thread created"); /* Worker's main loop */ - while (1) { + while (true) { round++; // New scheduling round parmap.synchro->worker_wait(round); if (parmap.destroying) diff --git a/src/instr/instr_config.cpp b/src/instr/instr_config.cpp index f9fa1b4241..f1c9d17394 100644 --- a/src/instr/instr_config.cpp +++ b/src/instr/instr_config.cpp @@ -172,7 +172,7 @@ static void print_line(const char* option, const char* desc, const char* longdes { std::string str = std::string("--cfg=") + option + " "; - int len = str.size(); + int len = static_cast(str.size()); XBT_HELP("%s%*.*s %s", str.c_str(), 30 - len, 30 - len, "", desc); if (longdesc != nullptr) { XBT_HELP("%s\n", longdesc); @@ -225,7 +225,7 @@ xbt::signal PajeEvent::on_destruction; xbt::signal StateEvent::on_destruction; xbt::signal EntityValue::on_creation; -static void on_container_creation_paje(Container& c) +static void on_container_creation_paje(const Container& c) { double timestamp = SIMIX_get_clock(); std::stringstream stream; @@ -244,7 +244,7 @@ static void on_container_creation_paje(Container& c) tracing_file << stream.str() << std::endl; } -static void on_container_destruction_paje(Container& c) +static void on_container_destruction_paje(const Container& c) { // trace my destruction, but not if user requests so or if the container is root if (not trace_disable_destroy && &c != Container::get_root()) { @@ -297,7 +297,7 @@ static void on_container_destruction_ti(Container& c) } } -static void on_entity_value_creation(EntityValue& value) +static void on_entity_value_creation(const EntityValue& value) { std::stringstream stream; XBT_DEBUG("%s: event_type=%u", __func__, PAJE_DefineEntityValue); @@ -317,19 +317,19 @@ static void on_event_creation(PajeEvent& event) event.stream_ << event.get_type()->get_id() << " " << event.get_container()->get_id(); } -static void on_event_destruction(PajeEvent& event) +static void on_event_destruction(const PajeEvent& event) { XBT_DEBUG("Dump %s", event.stream_.str().c_str()); tracing_file << event.stream_.str() << std::endl; } -static void on_state_event_destruction(StateEvent& event) +static void on_state_event_destruction(const StateEvent& event) { if (event.has_extra()) *tracing_files.at(event.get_container()) << event.stream_.str() << std::endl; } -static void on_type_creation(Type& type, e_event_type event_type) +static void on_type_creation(const Type& type, e_event_type event_type) { if (event_type == PAJE_DefineLinkType) return; // this kind of type has to be handled differently @@ -344,7 +344,7 @@ static void on_type_creation(Type& type, e_event_type event_type) tracing_file << stream.str() << std::endl; } -static void on_link_type_creation(Type& type, Type& source, Type& dest) 
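The tracing callbacks above are free functions hooked onto xbt::signal instances and now take their argument as const. A user-level analogue using the public s4u signal, not the tracing code itself; the log category name is made up:

    #include <simgrid/s4u.hpp>
    #include <xbt/log.h>

    XBT_LOG_NEW_DEFAULT_CATEGORY(my_hooks, "sketch of signal usage");

    static void install_hooks()
    {
      simgrid::s4u::Host::on_creation.connect([](simgrid::s4u::Host const& host) {
        XBT_INFO("Host %s enters the simulation", host.get_cname());
      });
    }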
+static void on_link_type_creation(const Type& type, const Type& source, const Type& dest) { std::stringstream stream; XBT_DEBUG("%s: event_type=%u, timestamp=%.*f", __func__, PAJE_DefineLinkType, trace_precision, 0.); diff --git a/src/instr/instr_paje_events.hpp b/src/instr/instr_paje_events.hpp index 744ada9b4e..f26a206ec4 100644 --- a/src/instr/instr_paje_events.hpp +++ b/src/instr/instr_paje_events.hpp @@ -81,7 +81,7 @@ public: static xbt::signal on_destruction; StateEvent(Container* container, Type* type, e_event_type event_type, EntityValue* value, TIData* extra); ~StateEvent() { on_destruction(*this); } - bool has_extra() { return extra_ != nullptr; } + bool has_extra() const { return extra_ != nullptr; } void print() override; }; diff --git a/src/instr/instr_paje_types.hpp b/src/instr/instr_paje_types.hpp index 2ce289b7d7..e23ddc2d68 100644 --- a/src/instr/instr_paje_types.hpp +++ b/src/instr/instr_paje_types.hpp @@ -34,13 +34,13 @@ public: Type* father); virtual ~Type() = default; - long long int get_id() { return id_; } + long long int get_id() const { return id_; } const std::string& get_name() const { return name_; } - const char* get_cname() { return name_.c_str(); } + const char* get_cname() const { return name_.c_str(); } const std::string& get_color() const { return color_; } Type* get_father() const { return father_; } - const std::map>& get_children() { return children_; } - bool is_colored() { return not color_.empty(); } + const std::map>& get_children() const { return children_; } + bool is_colored() const { return not color_.empty(); } Type* by_name(const std::string& name); LinkType* by_name_or_create(const std::string& name, Type* source, Type* dest); diff --git a/src/instr/instr_platform.cpp b/src/instr/instr_platform.cpp index b2d214c92a..f0f6442bf6 100644 --- a/src/instr/instr_platform.cpp +++ b/src/instr/instr_platform.cpp @@ -53,8 +53,8 @@ static container_t lowestCommonAncestor(const simgrid::instr::Container* a1, con // find the lowest ancestor p = nullptr; - int i = ancestors_a1.size() - 1; - int j = ancestors_a2.size() - 1; + int i = static_cast(ancestors_a1.size()) - 1; + int j = static_cast(ancestors_a2.size()) - 1; while (i >= 0 && j >= 0) { container_t a1p = ancestors_a1.at(i); const simgrid::instr::Container* a2p = ancestors_a2.at(j); @@ -286,7 +286,8 @@ static void on_netzone_creation(s4u::NetZone const& netzone) } if (TRACE_needs_platform()) { - NetZoneContainer* container = new NetZoneContainer(id, currentContainer.size(), currentContainer.back()); + unsigned level = static_cast(currentContainer.size()); + NetZoneContainer* container = new NetZoneContainer(id, level, currentContainer.back()); currentContainer.push_back(container); } } @@ -342,9 +343,9 @@ static void on_host_creation(s4u::Host const& host) static void on_action_state_change(kernel::resource::Action const& action, kernel::resource::Action::State /* previous */) { - int n = action.get_variable()->get_number_of_constraint(); + unsigned n = static_cast(action.get_variable()->get_number_of_constraint()); - for (int i = 0; i < n; i++) { + for (unsigned i = 0; i < n; i++) { double value = action.get_variable()->get_value() * action.get_variable()->get_constraint_weight(i); /* Beware of composite actions: ptasks put links and cpus together. 
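For reference, the ancestor-walk idea used by lowestCommonAncestor() above, restated as a generic hedged sketch (Node and its parent() accessor are hypothetical, not SimGrid types): collect each node's chain of ancestors, then scan both chains from the root end and keep the last element they share.

    #include <vector>

    template <class Node> const Node* lowest_common_ancestor_sketch(const Node* a, const Node* b)
    {
      auto chain = [](const Node* n) {
        std::vector<const Node*> up;
        for (; n != nullptr; n = n->parent()) // parent() assumed to return nullptr at the root
          up.push_back(n);
        return up; // [n, ..., root]
      };
      std::vector<const Node*> pa = chain(a);
      std::vector<const Node*> pb = chain(b);
      const Node* common = nullptr;
      auto ia = pa.rbegin();
      auto ib = pb.rbegin();
      while (ia != pa.rend() && ib != pb.rend() && *ia == *ib) { // walk down from the root
        common = *ia;
        ++ia;
        ++ib;
      }
      return common; // nullptr if a and b are in disjoint trees
    }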
Extra pb: we cannot dynamic_cast from void* */ kernel::resource::Resource* resource = action.get_variable()->get_constraint(i)->get_id(); diff --git a/src/instr/instr_resource_utilization.cpp b/src/instr/instr_resource_utilization.cpp index 8ab07eb7c4..1e52e6919a 100644 --- a/src/instr/instr_resource_utilization.cpp +++ b/src/instr/instr_resource_utilization.cpp @@ -16,12 +16,12 @@ void resource_set_utilization(const char* type, const char* name, const char* re { // only trace resource utilization if resource is known by tracing mechanism container_t container = Container::by_name_or_null(resource); - if (not container || not value) + if (container == nullptr || value == 0.0) return; // trace uncategorized resource utilization if (TRACE_uncategorized()){ - XBT_DEBUG("UNCAT %s [%f - %f] %s %s %f", type, now, now + delta, resource, name, value); + XBT_VERB("UNCAT %s [%f - %f] %s %s %f", type, now, now + delta, resource, name, value); container->get_variable(name)->instr_event(now, delta, resource, value); } diff --git a/src/instr/instr_smpi.hpp b/src/instr/instr_smpi.hpp index aac3353d67..910f702f83 100644 --- a/src/instr/instr_smpi.hpp +++ b/src/instr/instr_smpi.hpp @@ -33,7 +33,7 @@ public: std::string previous_filename; int previous_linenumber; - std::string get_composed_key() + std::string get_composed_key() const { return previous_filename + ':' + std::to_string(previous_linenumber) + ':' + filename + ':' + std::to_string(linenumber); diff --git a/src/instr/jedule/jedule_platform.cpp b/src/instr/jedule/jedule_platform.cpp index c53f3dd5f2..523d5a80dd 100644 --- a/src/instr/jedule/jedule_platform.cpp +++ b/src/instr/jedule/jedule_platform.cpp @@ -69,7 +69,7 @@ int Container::get_child_position(const Container* child) const { auto it = std::find_if(begin(children_), end(children_), [&child](const std::unique_ptr& c) { return c.get() == child; }); - return it == end(children_) ? -1 : std::distance(begin(children_), it); + return it == end(children_) ? 
-1 : static_cast(std::distance(begin(children_), it)); } std::vector Container::get_hierarchy() @@ -96,7 +96,7 @@ std::string Container::get_hierarchy_as_string() std::vector heir_list = this->get_hierarchy(); - unsigned int length = heir_list.size(); + unsigned int length = static_cast(heir_list.size()); unsigned int i = 0; for (auto const& id : heir_list) { output += std::to_string(id); @@ -113,7 +113,7 @@ void Container::print_resources(FILE* jed_file) unsigned int i=0; xbt_assert(not this->resource_list.empty()); - unsigned int res_nb = this->resource_list.size(); + unsigned int res_nb = static_cast(this->resource_list.size()); std::string resid = this->get_hierarchy_as_string(); fprintf(jed_file, " & subset_list, st for (auto const& host_name : hostgroup) { xbt_assert( host_name != nullptr ); - jed_container_t parent_cont = host2_simgrid_parent_container.at(host_name); + const simgrid::jedule::Container* parent_cont = host2_simgrid_parent_container.at(host_name); unsigned int id = parent_cont->get_id_by_name(host_name); id_list.push_back(id); } - unsigned int nb_ids = id_list.size(); + unsigned int nb_ids = static_cast(id_list.size()); std::sort(id_list.begin(), id_list.end()); if( nb_ids > 0 ) { diff --git a/src/kernel/EngineImpl.cpp b/src/kernel/EngineImpl.cpp index 28f993c4b7..8cf08b7afe 100644 --- a/src/kernel/EngineImpl.cpp +++ b/src/kernel/EngineImpl.cpp @@ -39,7 +39,7 @@ EngineImpl::~EngineImpl() kv.second->destroy(); } -void EngineImpl::load_deployment(const std::string& file) +void EngineImpl::load_deployment(const std::string& file) const { sg_platf_exit(); sg_platf_init(); diff --git a/src/kernel/EngineImpl.hpp b/src/kernel/EngineImpl.hpp index d730edebbd..2e385c759b 100644 --- a/src/kernel/EngineImpl.hpp +++ b/src/kernel/EngineImpl.hpp @@ -34,7 +34,7 @@ public: EngineImpl& operator=(const EngineImpl&) = delete; virtual ~EngineImpl(); - void load_deployment(const std::string& file); + void load_deployment(const std::string& file) const; void register_function(const std::string& name, const actor::ActorCodeFactory& code); void register_default(const actor::ActorCodeFactory& code); @@ -53,4 +53,4 @@ public: } // namespace kernel } // namespace simgrid -#endif \ No newline at end of file +#endif diff --git a/src/kernel/activity/ActivityImpl.hpp b/src/kernel/activity/ActivityImpl.hpp index fcda3304ff..28c5c843f8 100644 --- a/src/kernel/activity/ActivityImpl.hpp +++ b/src/kernel/activity/ActivityImpl.hpp @@ -54,8 +54,8 @@ protected: } public: - const std::string& get_name() { return name_; } - const char* get_cname() { return name_.c_str(); } + const std::string& get_name() const { return name_; } + const char* get_cname() const { return name_.c_str(); } bool test(); void wait_for(actor::ActorImpl* issuer, double timeout); @@ -100,7 +100,7 @@ public: tracing_category_ = category; return static_cast(*this); } - const std::string& get_tracing_category() { return tracing_category_; } + const std::string& get_tracing_category() const { return tracing_category_; } }; } // namespace activity diff --git a/src/kernel/activity/CommImpl.cpp b/src/kernel/activity/CommImpl.cpp index d5c1ff3884..63cb6a505a 100644 --- a/src/kernel/activity/CommImpl.cpp +++ b/src/kernel/activity/CommImpl.cpp @@ -25,7 +25,7 @@ XBT_PRIVATE void simcall_HANDLER_comm_send(smx_simcall_t simcall, smx_actor_t sr void* data, double timeout) { simgrid::kernel::activity::ActivityImplPtr comm = simcall_HANDLER_comm_isend( - simcall, src, mbox, task_size, rate, src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, 
data, 0); + simcall, src, mbox, task_size, rate, src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, data, false); SIMCALL_SET_MC_VALUE(*simcall, 0); simcall_HANDLER_comm_wait(simcall, static_cast(comm.get()), timeout); } diff --git a/src/kernel/activity/MutexImpl.hpp b/src/kernel/activity/MutexImpl.hpp index ba85ad0f9e..9a311b5bd6 100644 --- a/src/kernel/activity/MutexImpl.hpp +++ b/src/kernel/activity/MutexImpl.hpp @@ -30,7 +30,7 @@ public: void lock(actor::ActorImpl* issuer); bool try_lock(actor::ActorImpl* issuer); void unlock(actor::ActorImpl* issuer); - bool is_locked() { return locked_; } + bool is_locked() const { return locked_; } MutexImpl* ref(); void unref(); diff --git a/src/kernel/activity/SemaphoreImpl.hpp b/src/kernel/activity/SemaphoreImpl.hpp index 361c7cf217..c340db827e 100644 --- a/src/kernel/activity/SemaphoreImpl.hpp +++ b/src/kernel/activity/SemaphoreImpl.hpp @@ -30,11 +30,11 @@ public: void acquire(actor::ActorImpl* issuer, double timeout); void release(); - bool would_block() { return (value_ == 0); } + bool would_block() const { return (value_ == 0); } void remove_sleeping_actor(actor::ActorImpl& actor) { xbt::intrusive_erase(sleeping_, actor); } - unsigned int get_capacity() { return value_; } - bool is_used() { return not sleeping_.empty(); } + unsigned int get_capacity() const { return value_; } + bool is_used() const { return not sleeping_.empty(); } friend void intrusive_ptr_add_ref(SemaphoreImpl* sem) { diff --git a/src/kernel/actor/ActorImpl.cpp b/src/kernel/actor/ActorImpl.cpp index 4e0162f49e..0c9d9a5d6a 100644 --- a/src/kernel/actor/ActorImpl.cpp +++ b/src/kernel/actor/ActorImpl.cpp @@ -227,7 +227,7 @@ void ActorImpl::exit() this->throw_exception(std::make_exception_ptr(ForcefulKillException(host_->is_on() ? "exited" : "host failed"))); } -void ActorImpl::kill(ActorImpl* actor) +void ActorImpl::kill(ActorImpl* actor) const { xbt_assert(actor != simix_global->maestro_, "Killing maestro is a rather bad idea"); if (actor->finished_) { @@ -252,7 +252,7 @@ void ActorImpl::kill(ActorImpl* actor) } } -void ActorImpl::kill_all() +void ActorImpl::kill_all() const { for (auto const& kv : simix_global->process_list) if (kv.second != this) @@ -270,7 +270,7 @@ void ActorImpl::set_kill_time(double kill_time) }); } -double ActorImpl::get_kill_time() +double ActorImpl::get_kill_time() const { return kill_timer_ ? 
kill_timer_->get_date() : 0; } @@ -454,7 +454,7 @@ void ActorImpl::set_host(s4u::Host* dest) dest->pimpl_->add_actor(this); } -ActorImplPtr ActorImpl::init(const std::string& name, s4u::Host* host) +ActorImplPtr ActorImpl::init(const std::string& name, s4u::Host* host) const { ActorImpl* actor = new ActorImpl(xbt::string(name), host); actor->set_ppid(this->pid_); @@ -494,7 +494,8 @@ ActorImpl* ActorImpl::start(const ActorCode& code) } ActorImplPtr ActorImpl::create(const std::string& name, const ActorCode& code, void* data, s4u::Host* host, - const std::unordered_map* properties, ActorImpl* parent_actor) + const std::unordered_map* properties, + const ActorImpl* parent_actor) { XBT_DEBUG("Start actor %s@'%s'", name.c_str(), host->get_cname()); diff --git a/src/kernel/actor/ActorImpl.hpp b/src/kernel/actor/ActorImpl.hpp index 276eb214e4..6e9691f35f 100644 --- a/src/kernel/actor/ActorImpl.hpp +++ b/src/kernel/actor/ActorImpl.hpp @@ -37,7 +37,7 @@ public: ~ActorImpl(); static ActorImpl* self(); - double get_kill_time(); + double get_kill_time() const; void set_kill_time(double kill_time); boost::intrusive::list_member_hook<> host_actor_list_hook; /* simgrid::simix::Host::process_list */ boost::intrusive::list_member_hook<> smx_destroy_list_hook; /* simix_global->actors_to_destroy */ @@ -56,11 +56,11 @@ public: aid_t get_pid() const { return pid_; } aid_t get_ppid() const { return ppid_; } void set_ppid(aid_t ppid) { ppid_ = ppid; } - bool is_daemon() { return daemon_; } /** Whether this actor has been daemonized */ - bool has_to_auto_restart() { return auto_restart_; } + bool is_daemon() const { return daemon_; } /** Whether this actor has been daemonized */ + bool has_to_auto_restart() const { return auto_restart_; } void set_auto_restart(bool autorestart) { auto_restart_ = autorestart; } void set_stacksize(unsigned stacksize) { stacksize_ = stacksize; } - unsigned get_stacksize() { return stacksize_; } + unsigned get_stacksize() const { return stacksize_; } std::unique_ptr context_; /* the context (uctx/raw/thread) that executes the user function */ @@ -83,7 +83,7 @@ private: std::atomic_int_fast32_t refcount_{0}; public: - int get_refcount() { return refcount_; } + int get_refcount() const { return refcount_; } friend void intrusive_ptr_add_ref(ActorImpl* actor) { // This whole memory consistency semantic drives me nuts. 
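The daemon_ and auto_restart_ flags read by the const accessors above are set from the user-level s4u API; a minimal sketch, with placeholder actor name and code:

    #include <simgrid/s4u.hpp>

    static void my_service() { /* serve requests forever */ }

    static void deploy_service(simgrid::s4u::Host* host)
    {
      simgrid::s4u::ActorPtr actor = simgrid::s4u::Actor::create("service", host, my_service);
      actor->daemonize();            // the simulation may end while this actor is still running
      actor->set_auto_restart(true); // restart the actor when its host comes back up
    }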
@@ -117,22 +117,23 @@ public: s4u::ActorPtr iface() { return s4u::ActorPtr(&piface_); } s4u::Actor* ciface() { return &piface_; } - ActorImplPtr init(const std::string& name, s4u::Host* host); + ActorImplPtr init(const std::string& name, s4u::Host* host) const; ActorImpl* start(const ActorCode& code); static ActorImplPtr create(const std::string& name, const ActorCode& code, void* data, s4u::Host* host, - const std::unordered_map* properties, ActorImpl* parent_actor); + const std::unordered_map* properties, + const ActorImpl* parent_actor); static ActorImplPtr attach(const std::string& name, void* data, s4u::Host* host, const std::unordered_map* properties); static void detach(); void cleanup(); void exit(); - void kill(ActorImpl* actor); - void kill_all(); + void kill(ActorImpl* actor) const; + void kill_all() const; void yield(); void daemonize(); - bool is_suspended() { return suspended_; } + bool is_suspended() const { return suspended_; } s4u::Actor* restart(); void suspend(); void resume(); diff --git a/src/kernel/context/Context.hpp b/src/kernel/context/Context.hpp index 813856f1ef..7dc642024e 100644 --- a/src/kernel/context/Context.hpp +++ b/src/kernel/context/Context.hpp @@ -56,7 +56,7 @@ public: bool wannadie() const { return iwannadie_; } void set_wannadie(bool value = true) { iwannadie_ = value; } - void operator()() { code_(); } + void operator()() const { code_(); } bool has_code() const { return static_cast(code_); } actor::ActorImpl* get_actor() const { return this->actor_; } diff --git a/src/kernel/future.cpp b/src/kernel/future.cpp index 53ce0bd33a..9c190453df 100644 --- a/src/kernel/future.cpp +++ b/src/kernel/future.cpp @@ -13,7 +13,7 @@ namespace simgrid { namespace kernel { -void FutureStateBase::schedule(simgrid::xbt::Task&& job) +void FutureStateBase::schedule(simgrid::xbt::Task&& job) const { simix_global->tasks.push_back(std::move(job)); } diff --git a/src/kernel/lmm/maxmin.cpp b/src/kernel/lmm/maxmin.cpp index f506fafe6c..3dd1bb67f2 100644 --- a/src/kernel/lmm/maxmin.cpp +++ b/src/kernel/lmm/maxmin.cpp @@ -253,7 +253,7 @@ void System::expand(Constraint* cnst, Variable* var, double consumption_weight) elem.constraint = cnst; elem.variable = var; - if (var->sharing_penalty_) { + if (var->sharing_penalty_ != 0.0) { elem.constraint->enabled_element_set_.push_front(elem); elem.increase_concurrency(); } else @@ -283,7 +283,7 @@ void System::expand_add(Constraint* cnst, Variable* var, double value) std::find_if(begin(var->cnsts_), end(var->cnsts_), [&cnst](Element const& x) { return x.constraint == cnst; }); if (elem_it != end(var->cnsts_)) { Element& elem = *elem_it; - if (var->sharing_penalty_) + if (var->sharing_penalty_ != 0.0) elem.decrease_concurrency(); if (cnst->sharing_policy_ != s4u::Link::SharingPolicy::FATPIPE) @@ -292,7 +292,7 @@ void System::expand_add(Constraint* cnst, Variable* var, double value) elem.consumption_weight = std::max(elem.consumption_weight, value); // We need to check that increasing value of the element does not cross the concurrency limit - if (var->sharing_penalty_) { + if (var->sharing_penalty_ != 0.0) { if (cnst->get_concurrency_slack() < elem.get_concurrency()) { double penalty = var->sharing_penalty_; disable_var(var); @@ -514,7 +514,7 @@ template void System::lmm_solve(CnstList& cnst_list) cnst.usage_ = elem.consumption_weight / elem.variable->sharing_penalty_; elem.make_active(); - resource::Action* action = static_cast(elem.variable->id_); + resource::Action* action = elem.variable->id_; if (modified_set_ && not 
action->is_within_modified_set()) modified_set_->push_back(*action); } @@ -939,8 +939,8 @@ double Constraint::get_usage() const int Constraint::get_variable_amount() const { - return std::count_if(std::begin(enabled_element_set_), std::end(enabled_element_set_), - [](const Element& elem) { return elem.consumption_weight > 0; }); + return static_cast(std::count_if(std::begin(enabled_element_set_), std::end(enabled_element_set_), + [](const Element& elem) { return elem.consumption_weight > 0; })); } } // namespace lmm diff --git a/src/kernel/lmm/maxmin.hpp b/src/kernel/lmm/maxmin.hpp index d7de61e329..5aef06ad2b 100644 --- a/src/kernel/lmm/maxmin.hpp +++ b/src/kernel/lmm/maxmin.hpp @@ -313,7 +313,7 @@ public: Constraint* get_constraint(unsigned num) const { return num < cnsts_.size() ? cnsts_[num].constraint : nullptr; } /** - * @brief Get the weigth of the numth constraint associated to the variable + * @brief Get the weight of the numth constraint associated to the variable * @param num The rank of constraint we want to get * @return The numth constraint */ diff --git a/src/kernel/resource/DiskImpl.hpp b/src/kernel/resource/DiskImpl.hpp index 962dd9f999..11e4829e40 100644 --- a/src/kernel/resource/DiskImpl.hpp +++ b/src/kernel/resource/DiskImpl.hpp @@ -68,8 +68,8 @@ public: s4u::Host* get_host() const { return host_; } void set_host(s4u::Host* host) { host_ = host; } - double get_read_bandwidth() { return read_bw_; } - double get_write_bandwidth() { return write_bw_; } + double get_read_bandwidth() const { return read_bw_; } + double get_write_bandwidth() const { return write_bw_; } lmm::Constraint* get_read_constraint() const { return constraint_read_; } lmm::Constraint* get_write_constraint() const { return constraint_write_; } diff --git a/src/kernel/routing/DijkstraZone.cpp b/src/kernel/routing/DijkstraZone.cpp index b9d79f8eec..bb780822b1 100644 --- a/src/kernel/routing/DijkstraZone.cpp +++ b/src/kernel/routing/DijkstraZone.cpp @@ -118,7 +118,7 @@ void DijkstraZone::get_local_route(NetPoint* src, NetPoint* dst, RouteCreationAr for (auto const& link : e_route->link_list) { route->link_list.insert(route->link_list.begin(), link); if (lat) - *lat += static_cast(link)->get_latency(); + *lat += link->get_latency(); } } @@ -209,7 +209,7 @@ void DijkstraZone::get_local_route(NetPoint* src, NetPoint* dst, RouteCreationAr for (auto const& link : e_route->link_list) { route->link_list.insert(route->link_list.begin(), link); if (lat) - *lat += static_cast(link)->get_latency(); + *lat += link->get_latency(); } } @@ -227,10 +227,12 @@ void DijkstraZone::add_route(NetPoint* src, NetPoint* dst, NetPoint* gw_src, Net { add_route_check_params(src, dst, gw_src, gw_dst, link_list, symmetrical); - new_edge(src->id(), dst->id(), new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, 1)); + new_edge(src->id(), dst->id(), + new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, true)); if (symmetrical == true) - new_edge(dst->id(), src->id(), new_extended_route(hierarchy_, dst, src, gw_dst, gw_src, link_list, symmetrical, 0)); + new_edge(dst->id(), src->id(), + new_extended_route(hierarchy_, dst, src, gw_dst, gw_src, link_list, symmetrical, false)); } void DijkstraZone::new_edge(int src_id, int dst_id, RouteCreationArgs* route) diff --git a/src/kernel/routing/DragonflyZone.cpp b/src/kernel/routing/DragonflyZone.cpp index 01d1da37a7..be88274f2f 100644 --- a/src/kernel/routing/DragonflyZone.cpp +++ b/src/kernel/routing/DragonflyZone.cpp @@ -23,7 +23,7 @@ 
DragonflyZone::DragonflyZone(NetZoneImpl* father, const std::string& name, resou { } -void DragonflyZone::rankId_to_coords(int rankId, unsigned int coords[4]) +void DragonflyZone::rankId_to_coords(int rankId, unsigned int coords[4]) const { // coords : group, chassis, blade, node coords[0] = rankId / (num_chassis_per_group_ * num_blades_per_chassis_ * num_nodes_per_blade_); @@ -133,7 +133,7 @@ void DragonflyZone::generate_routers() } void DragonflyZone::create_link(const std::string& id, int numlinks, resource::LinkImpl** linkup, - resource::LinkImpl** linkdown) + resource::LinkImpl** linkdown) const { *linkup = nullptr; *linkdown = nullptr; diff --git a/src/kernel/routing/FatTreeZone.cpp b/src/kernel/routing/FatTreeZone.cpp index 820dfba65f..ab626648ea 100644 --- a/src/kernel/routing/FatTreeZone.cpp +++ b/src/kernel/routing/FatTreeZone.cpp @@ -38,7 +38,7 @@ FatTreeZone::~FatTreeZone() } } -bool FatTreeZone::is_in_sub_tree(FatTreeNode* root, FatTreeNode* node) +bool FatTreeZone::is_in_sub_tree(FatTreeNode* root, FatTreeNode* node) const { XBT_DEBUG("Is %d(%u,%u) in the sub tree of %d(%u,%u) ?", node->id, node->level, node->position, root->id, root->level, root->position); @@ -200,7 +200,7 @@ int FatTreeZone::connect_node_to_parents(FatTreeNode* node) return connectionsNumber; } -bool FatTreeZone::are_related(FatTreeNode* parent, FatTreeNode* child) +bool FatTreeZone::are_related(FatTreeNode* parent, FatTreeNode* child) const { std::stringstream msgBuffer; @@ -442,7 +442,7 @@ FatTreeNode::FatTreeNode(const ClusterCreationArgs* cluster, int id, int level, : id(id), level(level), position(position) { LinkCreationArgs linkTemplate; - if (cluster->limiter_link) { + if (cluster->limiter_link != 0.0) { linkTemplate.bandwidths.push_back(cluster->limiter_link); linkTemplate.latency = 0; linkTemplate.policy = s4u::Link::SharingPolicy::SHARED; @@ -450,7 +450,7 @@ FatTreeNode::FatTreeNode(const ClusterCreationArgs* cluster, int id, int level, sg_platf_new_link(&linkTemplate); this->limiter_link_ = s4u::Link::by_name(linkTemplate.id)->get_impl(); } - if (cluster->loopback_bw || cluster->loopback_lat) { + if (cluster->loopback_bw != 0.0 || cluster->loopback_lat != 0.0) { linkTemplate.bandwidths.push_back(cluster->loopback_bw); linkTemplate.latency = cluster->loopback_lat; linkTemplate.policy = s4u::Link::SharingPolicy::FATPIPE; diff --git a/src/kernel/routing/FloydZone.cpp b/src/kernel/routing/FloydZone.cpp index bdae1de452..7bfd8ba334 100644 --- a/src/kernel/routing/FloydZone.cpp +++ b/src/kernel/routing/FloydZone.cpp @@ -118,7 +118,7 @@ void FloydZone::add_route(NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoi dst->get_cname()); TO_FLOYD_LINK(src->id(), dst->id()) = - new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, 1); + new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, true); TO_FLOYD_PRED(src->id(), dst->id()) = src->id(); TO_FLOYD_COST(src->id(), dst->id()) = (TO_FLOYD_LINK(src->id(), dst->id()))->link_list.size(); @@ -146,7 +146,7 @@ void FloydZone::add_route(NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoi src->get_cname(), gw_dst->get_cname()); TO_FLOYD_LINK(dst->id(), src->id()) = - new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, 0); + new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, false); TO_FLOYD_PRED(dst->id(), src->id()) = dst->id(); TO_FLOYD_COST(dst->id(), src->id()) = (TO_FLOYD_LINK(dst->id(), src->id()))->link_list.size(); /* count of links, old 
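rankId_to_coords() above decomposes a flat rank into (group, chassis, blade, node); only the group line is visible in this hunk, so the remaining lines below are the usual mixed-radix decomposition, given as a hedged worked example with made-up dimensions (4 chassis per group, 5 blades per chassis, 2 nodes per blade):

    const unsigned chassis_per_group = 4, blades_per_chassis = 5, nodes_per_blade = 2;
    const int rank = 97;
    unsigned coords[4];
    coords[0] = rank / (chassis_per_group * blades_per_chassis * nodes_per_blade); // group   = 97 / 40 = 2
    coords[1] = (rank % (chassis_per_group * blades_per_chassis * nodes_per_blade))
                / (blades_per_chassis * nodes_per_blade);                          // chassis = 17 / 10 = 1
    coords[2] = (rank % (blades_per_chassis * nodes_per_blade)) / nodes_per_blade; // blade   =  7 /  2 = 3
    coords[3] = rank % nodes_per_blade;                                            // node    = 97 %  2 = 1
    // Consistency check: ((2 * 4 + 1) * 5 + 3) * 2 + 1 == 97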
model assume 1 */ diff --git a/src/kernel/routing/NetZoneImpl.cpp b/src/kernel/routing/NetZoneImpl.cpp index 5085d81410..0849a033dc 100644 --- a/src/kernel/routing/NetZoneImpl.cpp +++ b/src/kernel/routing/NetZoneImpl.cpp @@ -202,13 +202,13 @@ static void find_common_ancestors(NetPoint* src, NetPoint* dst, NetZoneImpl* current = src->get_englobing_zone(); while (current != nullptr) { path_src.push_back(current); - current = static_cast(current->get_father()); + current = current->get_father(); } std::vector path_dst; current = dst->get_englobing_zone(); while (current != nullptr) { path_dst.push_back(current); - current = static_cast(current->get_father()); + current = current->get_father(); } /* (3) find the common father. @@ -265,14 +265,14 @@ bool NetZoneImpl::get_bypass_route(NetPoint* src, NetPoint* dst, std::vector path_src; NetZoneImpl* current = src->get_englobing_zone(); while (current != nullptr) { - path_src.push_back(static_cast(current)); + path_src.push_back(current); current = current->father_; } std::vector path_dst; current = dst->get_englobing_zone(); while (current != nullptr) { - path_dst.push_back(static_cast(current)); + path_dst.push_back(current); current = current->father_; } diff --git a/src/kernel/routing/RoutedZone.cpp b/src/kernel/routing/RoutedZone.cpp index e9d4fb5ed0..b6b01f6ea5 100644 --- a/src/kernel/routing/RoutedZone.cpp +++ b/src/kernel/routing/RoutedZone.cpp @@ -143,7 +143,7 @@ RouteCreationArgs* RoutedZone::new_extended_route(RoutingMode hierarchy, NetPoin return result; } -void RoutedZone::get_route_check_params(NetPoint* src, NetPoint* dst) +void RoutedZone::get_route_check_params(NetPoint* src, NetPoint* dst) const { xbt_assert(src, "Cannot find a route from nullptr to %s", dst->get_cname()); xbt_assert(dst, "Cannot find a route from %s to nullptr", src->get_cname()); @@ -161,7 +161,7 @@ void RoutedZone::get_route_check_params(NetPoint* src, NetPoint* dst) src->get_cname(), dst->get_cname(), src_as->get_cname(), dst_as->get_cname(), get_cname()); } void RoutedZone::add_route_check_params(NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoint* gw_dst, - const std::vector& link_list, bool symmetrical) + const std::vector& link_list, bool symmetrical) const { const char* srcName = src->get_cname(); const char* dstName = dst->get_cname(); diff --git a/src/kernel/routing/TorusZone.cpp b/src/kernel/routing/TorusZone.cpp index bca85bdd66..574153e556 100644 --- a/src/kernel/routing/TorusZone.cpp +++ b/src/kernel/routing/TorusZone.cpp @@ -33,7 +33,7 @@ void TorusZone::create_links_for_node(ClusterCreationArgs* cluster, int id, int int current_dimension = dimensions_[j]; // which dimension are we currently in? // we need to iterate over all dimensions and create all links there // The other node the link connects - int neighbor_rank_id = ((static_cast(rank) / dim_product) % current_dimension == current_dimension - 1) + int neighbor_rank_id = ((rank / dim_product) % current_dimension == current_dimension - 1) ? rank - (current_dimension - 1) * dim_product : rank + dim_product; // name of neighbor is not right for non contiguous cluster radicals (as id != rank in this case) diff --git a/src/mc/AddressSpace.hpp b/src/mc/AddressSpace.hpp index 34b47a219e..27d7496dd0 100644 --- a/src/mc/AddressSpace.hpp +++ b/src/mc/AddressSpace.hpp @@ -92,7 +92,7 @@ public: /** The process of this address space * - * This is where we can get debug informations, memory layout, etc. + * This is where we can get debug information, memory layout, etc. 
*/ simgrid::mc::RemoteSimulation* get_remote_simulation() const { return remote_simulation_; } diff --git a/src/mc/ModelChecker.cpp b/src/mc/ModelChecker.cpp index f4de7fefea..884444cf3f 100644 --- a/src/mc/ModelChecker.cpp +++ b/src/mc/ModelChecker.cpp @@ -94,7 +94,7 @@ static const std::pair ignored_local_variables[] = { void ModelChecker::setup_ignore() { - RemoteSimulation& process = this->get_remote_simulation(); + const RemoteSimulation& process = this->get_remote_simulation(); for (std::pair const& var : ignored_local_variables) process.ignore_local_variable(var.first, var.second); @@ -105,7 +105,7 @@ void ModelChecker::setup_ignore() void ModelChecker::shutdown() { - XBT_DEBUG("Shuting down model-checker"); + XBT_DEBUG("Shutting down model-checker"); RemoteSimulation* process = &this->get_remote_simulation(); if (process->running()) { @@ -169,7 +169,7 @@ bool ModelChecker::handle_message(const char* buffer, ssize_t size) case MC_MESSAGE_IGNORE_HEAP: { s_mc_message_ignore_heap_t message; - xbt_assert(size == sizeof(message), "Broken messsage"); + xbt_assert(size == sizeof(message), "Broken message"); memcpy(&message, buffer, sizeof(message)); IgnoredHeapRegion region; @@ -184,7 +184,7 @@ bool ModelChecker::handle_message(const char* buffer, ssize_t size) case MC_MESSAGE_UNIGNORE_HEAP: { s_mc_message_ignore_memory_t message; - xbt_assert(size == sizeof(message), "Broken messsage"); + xbt_assert(size == sizeof(message), "Broken message"); memcpy(&message, buffer, sizeof(message)); get_remote_simulation().unignore_heap((void*)(std::uintptr_t)message.addr, message.size); break; @@ -193,7 +193,7 @@ bool ModelChecker::handle_message(const char* buffer, ssize_t size) case MC_MESSAGE_IGNORE_MEMORY: { s_mc_message_ignore_memory_t message; - xbt_assert(size == sizeof(message), "Broken messsage"); + xbt_assert(size == sizeof(message), "Broken message"); memcpy(&message, buffer, sizeof(message)); this->get_remote_simulation().ignore_region(message.addr, message.size); break; @@ -202,7 +202,7 @@ bool ModelChecker::handle_message(const char* buffer, ssize_t size) case MC_MESSAGE_STACK_REGION: { s_mc_message_stack_region_t message; - xbt_assert(size == sizeof(message), "Broken messsage"); + xbt_assert(size == sizeof(message), "Broken message"); memcpy(&message, buffer, sizeof(message)); this->get_remote_simulation().stack_areas().push_back(message.stack_region); } diff --git a/src/mc/Session.cpp b/src/mc/Session.cpp index 2ace38ce87..71b28fc38f 100644 --- a/src/mc/Session.cpp +++ b/src/mc/Session.cpp @@ -105,18 +105,18 @@ void Session::initialize() initial_snapshot_ = std::make_shared(0); } -void Session::execute(Transition const& transition) +void Session::execute(Transition const& transition) const { model_checker_->handle_simcall(transition); model_checker_->wait_for_requests(); } -void Session::restore_initial_state() +void Session::restore_initial_state() const { this->initial_snapshot_->restore(&model_checker_->get_remote_simulation()); } -void Session::log_state() +void Session::log_state() const { model_checker_->getChecker()->log_state(); @@ -141,7 +141,7 @@ void Session::close() } } -bool Session::actor_is_enabled(aid_t pid) +bool Session::actor_is_enabled(aid_t pid) const { s_mc_message_actor_enabled_t msg{MC_MESSAGE_ACTOR_ENABLED, pid}; model_checker_->channel().send(msg); diff --git a/src/mc/Session.hpp b/src/mc/Session.hpp index 9f07badd7e..7934ec0e58 100644 --- a/src/mc/Session.hpp +++ b/src/mc/Session.hpp @@ -45,11 +45,11 @@ public: void close(); void initialize(); - void 
execute(Transition const& transition); - void log_state(); + void execute(Transition const& transition) const; + void log_state() const; - void restore_initial_state(); - bool actor_is_enabled(aid_t pid); + void restore_initial_state() const; + bool actor_is_enabled(aid_t pid) const; }; // Temporary :) diff --git a/src/mc/checker/CommunicationDeterminismChecker.cpp b/src/mc/checker/CommunicationDeterminismChecker.cpp index 7d112063e6..89f2249a8d 100644 --- a/src/mc/checker/CommunicationDeterminismChecker.cpp +++ b/src/mc/checker/CommunicationDeterminismChecker.cpp @@ -127,12 +127,12 @@ void CommunicationDeterminismChecker::deterministic_comm_pattern(int process, co if (diff != NONE_DIFF) { if (comm->type == PatternCommunicationType::send) { - this->send_deterministic = 0; + this->send_deterministic = false; if (this->send_diff != nullptr) xbt_free(this->send_diff); this->send_diff = print_determinism_result(diff, process, comm, list.index_comm + 1); } else { - this->recv_deterministic = 0; + this->recv_deterministic = false; if (this->recv_diff != nullptr) xbt_free(this->recv_diff); this->recv_diff = print_determinism_result(diff, process, comm, list.index_comm + 1); @@ -185,9 +185,8 @@ void CommunicationDeterminismChecker::get_comm_pattern(smx_simcall_t request, e_ pattern->comm_addr = static_cast(simcall_comm_isend__getraw__result(request)); Remote temp_synchro; - mc_model_checker->get_remote_simulation().read( - temp_synchro, remote(static_cast(pattern->comm_addr))); - const kernel::activity::CommImpl* synchro = static_cast(temp_synchro.get_buffer()); + mc_model_checker->get_remote_simulation().read(temp_synchro, remote(pattern->comm_addr)); + const kernel::activity::CommImpl* synchro = temp_synchro.get_buffer(); char* remote_name = mc_model_checker->get_remote_simulation().read(RemotePtr( (uint64_t)(synchro->get_mailbox() ? 
&synchro->get_mailbox()->name_ : &synchro->mbox_cpy->name_))); @@ -233,8 +232,7 @@ void CommunicationDeterminismChecker::get_comm_pattern(smx_simcall_t request, e_ #endif Remote temp_comm; - mc_model_checker->get_remote_simulation().read( - temp_comm, remote(static_cast(pattern->comm_addr))); + mc_model_checker->get_remote_simulation().read(temp_comm, remote(pattern->comm_addr)); const kernel::activity::CommImpl* comm = temp_comm.get_buffer(); char* remote_name; diff --git a/src/mc/checker/LivenessChecker.cpp b/src/mc/checker/LivenessChecker.cpp index b04c743a4b..e22741d715 100644 --- a/src/mc/checker/LivenessChecker.cpp +++ b/src/mc/checker/LivenessChecker.cpp @@ -63,14 +63,14 @@ static bool evaluate_label(const xbt_automaton_exp_label* l, std::vector co case xbt_automaton_exp_label::AUT_ONE: return true; default: - xbt_die("Unexpected vaue for automaton"); + xbt_die("Unexpected value for automaton"); } } Pair::Pair(unsigned long expanded_pairs) : num(expanded_pairs) {} -std::shared_ptr> LivenessChecker::get_proposition_values() +std::shared_ptr> LivenessChecker::get_proposition_values() const { std::vector values; unsigned int cursor = 0; @@ -408,8 +408,8 @@ void LivenessChecker::run() // For each enabled transition in the property automaton, push a // (application_state, automaton_state) pair to the exploration stack: for (int i = xbt_dynar_length(current_pair->automaton_state->out) - 1; i >= 0; i--) { - const xbt_automaton_transition* transition_succ = (xbt_automaton_transition_t)xbt_dynar_get_as( - current_pair->automaton_state->out, i, xbt_automaton_transition_t); + const xbt_automaton_transition* transition_succ = + xbt_dynar_get_as(current_pair->automaton_state->out, i, xbt_automaton_transition_t); if (evaluate_label(transition_succ->label, *prop_values)) exploration_stack_.push_back(this->create_pair(current_pair.get(), transition_succ->dst, prop_values)); } diff --git a/src/mc/checker/LivenessChecker.hpp b/src/mc/checker/LivenessChecker.hpp index 3ce6200e80..d471da91bb 100644 --- a/src/mc/checker/LivenessChecker.hpp +++ b/src/mc/checker/LivenessChecker.hpp @@ -61,7 +61,7 @@ public: void log_state() override; private: - std::shared_ptr> get_proposition_values(); + std::shared_ptr> get_proposition_values() const; std::shared_ptr insert_acceptance_pair(Pair* pair); int insert_visited_pair(std::shared_ptr visited_pair, Pair* pair); void show_acceptance_cycle(std::size_t depth); diff --git a/src/mc/compare.cpp b/src/mc/compare.cpp index dd6b7b9527..70c2982251 100644 --- a/src/mc/compare.cpp +++ b/src/mc/compare.cpp @@ -252,7 +252,7 @@ static bool mmalloc_heap_differ(StateComparator& state, const Snapshot& snapshot continue; } - xbt_assert(heapinfo1->type >= 0, "Unkown mmalloc block type: %d", heapinfo1->type); + xbt_assert(heapinfo1->type >= 0, "Unknown mmalloc block type: %d", heapinfo1->type); void* addr_block1 = ((void*)(((ADDR2UINT(i1)) - 1) * BLOCKSIZE + (char*)state.std_heap_copy.heapbase)); @@ -359,7 +359,7 @@ static bool mmalloc_heap_differ(StateComparator& state, const Snapshot& snapshot continue; } - xbt_assert(heapinfo2b->type >= 0, "Unkown mmalloc block type: %d", heapinfo2b->type); + xbt_assert(heapinfo2b->type >= 0, "Unknown mmalloc block type: %d", heapinfo2b->type); for (size_t j2 = 0; j2 < (size_t)(BLOCKSIZE >> heapinfo2b->type); j2++) { if (i2 == i1 && j2 == j1) @@ -873,8 +873,8 @@ static bool heap_area_differ(StateComparator& state, const void* area1, const vo } else if ((heapinfo1->type > 0) && (heapinfo2->type > 0)) { /* Fragmented block */ // Fragment number: - 
ssize_t frag1 = ((uintptr_t)(ADDR2UINT(area1) % (BLOCKSIZE))) >> heapinfo1->type; - ssize_t frag2 = ((uintptr_t)(ADDR2UINT(area2) % (BLOCKSIZE))) >> heapinfo2->type; + ssize_t frag1 = (ADDR2UINT(area1) % BLOCKSIZE) >> heapinfo1->type; + ssize_t frag2 = (ADDR2UINT(area2) % BLOCKSIZE) >> heapinfo2->type; // Process address of the fragment_: void* real_addr_frag1 = (void*)((char*)real_addr_block1 + (frag1 << heapinfo1->type)); @@ -1147,8 +1147,7 @@ static bool global_variables_differ(simgrid::mc::StateComparator& state, // If the variable is not in this object, skip it: // We do not expect to find a pointer to something which is not reachable // by the global variables. - if ((char *) current_var.address < (char *) object_info->start_rw - || (char *) current_var.address > (char *) object_info->end_rw) + if ((char*)current_var.address < object_info->start_rw || (char*)current_var.address > object_info->end_rw) continue; const simgrid::mc::Type* bvariable_type = current_var.type; diff --git a/src/mc/inspect/DwarfExpression.hpp b/src/mc/inspect/DwarfExpression.hpp index c2058a988f..797f66143e 100644 --- a/src/mc/inspect/DwarfExpression.hpp +++ b/src/mc/inspect/DwarfExpression.hpp @@ -39,7 +39,7 @@ typedef std::vector DwarfExpression; /** Context of evaluation of a DWARF expression * * Some DWARF instructions need to read the CPU registers, - * the process memory, etc. All those informations are gathered in + * the process memory, etc. All those information are gathered in * the evaluation context. */ struct ExpressionContext { diff --git a/src/mc/inspect/LocationList.cpp b/src/mc/inspect/LocationList.cpp index 276ce897db..07c7c3d002 100644 --- a/src/mc/inspect/LocationList.cpp +++ b/src/mc/inspect/LocationList.cpp @@ -65,7 +65,7 @@ LocationList location_list(const simgrid::mc::ObjectInformation& info, Dwarf_Att { LocationList locations; std::ptrdiff_t offset = 0; - while (1) { + while (true) { Dwarf_Addr base; Dwarf_Addr start; Dwarf_Addr end; diff --git a/src/mc/inspect/ObjectInformation.hpp b/src/mc/inspect/ObjectInformation.hpp index 9903b530d8..92de05d3ef 100644 --- a/src/mc/inspect/ObjectInformation.hpp +++ b/src/mc/inspect/ObjectInformation.hpp @@ -51,7 +51,7 @@ class ObjectInformation { public: ObjectInformation() = default; - // Not copyable: + // Not copiable: ObjectInformation(ObjectInformation const&) = delete; ObjectInformation& operator=(ObjectInformation const&) = delete; @@ -156,7 +156,7 @@ public: XBT_PRIVATE std::shared_ptr createObjectInformation(std::vector const& maps, const char* name); -/** Augment the current module with informations about the other ones */ +/** Augment the current module with information about the other ones */ XBT_PRIVATE void postProcessObjectInformation(const simgrid::mc::RemoteSimulation* process, simgrid::mc::ObjectInformation* info); } // namespace mc diff --git a/src/mc/inspect/mc_dwarf.cpp b/src/mc/inspect/mc_dwarf.cpp index 22c8d6b1aa..f6f8601e95 100644 --- a/src/mc/inspect/mc_dwarf.cpp +++ b/src/mc/inspect/mc_dwarf.cpp @@ -59,7 +59,7 @@ static uint64_t MC_dwarf_array_element_count(Dwarf_Die* die, Dwarf_Die* unit); /** @brief Process a DIE * - * @param info the resulting object fot the library/binary file (output) + * @param info the resulting object for the library/binary file (output) * @param die the current DIE * @param unit the DIE of the compile unit of the current DIE * @param frame containing frame if any @@ -74,7 +74,7 @@ static void MC_dwarf_handle_type_die(simgrid::mc::ObjectInformation* info, Dwarf /** @brief Calls MC_dwarf_handle_die 
on all children of the given die * - * @param info the resulting object fot the library/binary file (output) + * @param info the resulting object for the library/binary file (output) * @param die the current DIE * @param unit the DIE of the compile unit of the current DIE * @param frame containing frame if any @@ -84,7 +84,7 @@ static void MC_dwarf_handle_children(simgrid::mc::ObjectInformation* info, Dwarf /** @brief Handle a variable (DW_TAG_variable or other) * - * @param info the resulting object fot the library/binary file (output) + * @param info the resulting object for the library/binary file (output) * @param die the current DIE * @param unit the DIE of the compile unit of the current DIE * @param frame containing frame if any @@ -790,7 +790,7 @@ static void MC_dwarf_handle_scope_die(simgrid::mc::ObjectInformation* info, Dwar // TODO, support DW_AT_ranges uint64_t low_pc = MC_dwarf_attr_integrate_addr(die, DW_AT_low_pc); - frame.range.begin() = low_pc ? (std::uint64_t)base + low_pc : 0; + frame.range.begin() = low_pc ? base + low_pc : 0; if (low_pc) { // DW_AT_high_pc: Dwarf_Attribute attr; @@ -955,7 +955,7 @@ static std::vector get_build_id(Elf* elf) memcmp((char*)data->d_buf + name_pos, "GNU", sizeof("GNU")) == 0) { XBT_DEBUG("Found GNU/NT_GNU_BUILD_ID note"); char* start = (char*)data->d_buf + desc_pos; - char* end = (char*)start + nhdr.n_descsz; + char* end = start + nhdr.n_descsz; return std::vector(start, end); } } @@ -1024,7 +1024,7 @@ static std::string find_by_build_id(std::vector id) return std::string(); } -/** @brief Populate the debugging informations of the given ELF object +/** @brief Populate the debugging information of the given ELF object * * Read the DWARf information of the EFFL object and populate the * lists of types, variables, functions. @@ -1058,7 +1058,7 @@ static void MC_load_dwarf(simgrid::mc::ObjectInformation* info) dwarf_end(dwarf); // If there was no DWARF in the file, try to find it in a separate file. 
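Note on the "separate debug file" fallback above: the NT_GNU_BUILD_ID route follows the convention from the GDB manual linked in the comment, where a build id of hex bytes b0 b1 ... bn maps to /usr/lib/debug/.build-id/b0/b1...bn.debug. A self-contained illustration of that mapping follows; the helper is not part of the patch, and the real find_by_build_id may probe additional search roots.

    // Illustration only: conventional lookup path for a GNU build id.
    #include <cstdio>
    #include <string>
    #include <vector>

    static std::string build_id_to_debug_path(const std::vector<unsigned char>& id)
    {
      std::string hex;
      for (unsigned char byte : id) {
        char buf[3];
        std::snprintf(buf, sizeof(buf), "%02x", byte);
        hex += buf;
      }
      // First byte becomes a directory, the remaining bytes the file name, plus ".debug".
      return "/usr/lib/debug/.build-id/" + hex.substr(0, 2) + "/" + hex.substr(2) + ".debug";
    }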
- // Different methods might be used to store the DWARF informations: + // Different methods might be used to store the DWARF information: // * GNU NT_GNU_BUILD_ID // * .gnu_debuglink // See https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html @@ -1190,7 +1190,7 @@ static void MC_post_process_types(simgrid::mc::ObjectInformation* info) namespace simgrid { namespace mc { -/** @brief Finds informations about a given shared object/executable */ +/** @brief Finds information about a given shared object/executable */ std::shared_ptr createObjectInformation(std::vector const& maps, const char* name) { std::shared_ptr result = std::make_shared(); diff --git a/src/mc/inspect/mc_unw.cpp b/src/mc/inspect/mc_unw.cpp index f11170b54f..a9c273f1e7 100644 --- a/src/mc/inspect/mc_unw.cpp +++ b/src/mc/inspect/mc_unw.cpp @@ -183,7 +183,7 @@ int UnwindContext::access_reg(unw_addr_space_t /*as*/, unw_regnum_t regnum, unw_ return 0; } -/** Find informations about a function (libunwind method) +/** Find information about a function (libunwind method) */ int UnwindContext::get_proc_name(unw_addr_space_t /*as*/, unw_word_t addr, char* bufp, size_t buf_len, unw_word_t* offp, void* arg) noexcept diff --git a/src/mc/inspect/mc_unw.hpp b/src/mc/inspect/mc_unw.hpp index d94c171afc..f6c291c4ef 100644 --- a/src/mc/inspect/mc_unw.hpp +++ b/src/mc/inspect/mc_unw.hpp @@ -10,7 +10,7 @@ * Libunwind implementation for the model-checker * * Libunwind provides an pluggable stack unwinding API: the way the current - * registers and memory is accessed, the way unwinding informations is found + * registers and memory is accessed, the way unwinding information is found * is pluggable. * * This component implements the libunwind API for he model-checker: diff --git a/src/mc/mc_base.cpp b/src/mc/mc_base.cpp index fcb4fa08e5..2a59567386 100644 --- a/src/mc/mc_base.cpp +++ b/src/mc/mc_base.cpp @@ -89,8 +89,7 @@ bool actor_is_enabled(smx_actor_t actor) case SIMCALL_COMM_WAIT: { /* FIXME: check also that src and dst processes are not suspended */ - const kernel::activity::CommImpl* act = - static_cast(simcall_comm_wait__getraw__comm(req)); + const kernel::activity::CommImpl* act = simcall_comm_wait__getraw__comm(req); if (act->src_timeout_ || act->dst_timeout_) { /* If it has a timeout it will be always be enabled (regardless of who declared the timeout), diff --git a/src/mc/mc_comm_pattern.cpp b/src/mc/mc_comm_pattern.cpp index bdbf88bb14..661d8301d2 100644 --- a/src/mc/mc_comm_pattern.cpp +++ b/src/mc/mc_comm_pattern.cpp @@ -65,7 +65,7 @@ void MC_handle_comm_pattern(e_mc_call_type_t call_type, smx_simcall_t req, int v { simgrid::mc::RemotePtr comm_addr{nullptr}; if (call_type == MC_CALL_TYPE_WAIT) - comm_addr = remote(static_cast(simcall_comm_wait__getraw__comm(req))); + comm_addr = remote(simcall_comm_wait__getraw__comm(req)); else { simgrid::kernel::activity::ActivityImpl* addr; diff --git a/src/mc/mc_hash.cpp b/src/mc/mc_hash.cpp index 7ae53cbba3..7b86d7f724 100644 --- a/src/mc/mc_hash.cpp +++ b/src/mc/mc_hash.cpp @@ -30,10 +30,7 @@ public: { state_ = (state_ << 5) + state_ + x; } - hash_type value() - { - return state_; - } + hash_type value() const { return state_; } }; } diff --git a/src/mc/mc_private.hpp b/src/mc/mc_private.hpp index 3feeed90bf..c89997cb0a 100644 --- a/src/mc/mc_private.hpp +++ b/src/mc/mc_private.hpp @@ -16,7 +16,7 @@ namespace simgrid { namespace mc { struct DerefAndCompareByActorsCountAndUsedHeap { - template bool operator()(X const& a, Y const& b) + template bool operator()(X const& a, Y 
const& b) const { return std::make_pair(a->actors_count, a->heap_bytes_used) < std::make_pair(b->actors_count, b->heap_bytes_used); } diff --git a/src/mc/mc_record.hpp b/src/mc/mc_record.hpp index 1cc96be99c..9bf1399606 100644 --- a/src/mc/mc_record.hpp +++ b/src/mc/mc_record.hpp @@ -5,7 +5,7 @@ /** \file mc_record.hpp * - * This file contains the MC replay/record functionnality. + * This file contains the MC replay/record functionality. * The recorded path is written in the log output and can be replayed with MC disabled * (even with an non-MC build) using `--cfg=model-check/replay:$replayPath`. * diff --git a/src/mc/mc_request.cpp b/src/mc/mc_request.cpp index 565aeec692..53428fdc89 100644 --- a/src/mc/mc_request.cpp +++ b/src/mc/mc_request.cpp @@ -22,9 +22,9 @@ static inline simgrid::kernel::activity::CommImpl* MC_get_comm(smx_simcall_t r) { switch (r->call_) { case SIMCALL_COMM_WAIT: - return static_cast(simcall_comm_wait__getraw__comm(r)); + return simcall_comm_wait__getraw__comm(r); case SIMCALL_COMM_TEST: - return static_cast(simcall_comm_test__getraw__comm(r)); + return simcall_comm_test__getraw__comm(r); default: return nullptr; } @@ -225,8 +225,7 @@ std::string simgrid::mc::request_to_string(smx_simcall_t req, int value, simgrid } case SIMCALL_COMM_WAIT: { - simgrid::kernel::activity::CommImpl* remote_act = - static_cast(simcall_comm_wait__getraw__comm(req)); + simgrid::kernel::activity::CommImpl* remote_act = simcall_comm_wait__getraw__comm(req); char* p; if (value == -1) { type = "WaitTimeout"; @@ -239,8 +238,7 @@ std::string simgrid::mc::request_to_string(smx_simcall_t req, int value, simgrid simgrid::mc::Remote temp_synchro; const simgrid::kernel::activity::CommImpl* act; if (use_remote_comm) { - mc_model_checker->get_remote_simulation().read( - temp_synchro, remote(static_cast(remote_act))); + mc_model_checker->get_remote_simulation().read(temp_synchro, remote(remote_act)); act = temp_synchro.get_buffer(); } else act = remote_act; @@ -260,13 +258,11 @@ std::string simgrid::mc::request_to_string(smx_simcall_t req, int value, simgrid } case SIMCALL_COMM_TEST: { - simgrid::kernel::activity::CommImpl* remote_act = - static_cast(simcall_comm_test__getraw__comm(req)); + simgrid::kernel::activity::CommImpl* remote_act = simcall_comm_test__getraw__comm(req); simgrid::mc::Remote temp_synchro; const simgrid::kernel::activity::CommImpl* act; if (use_remote_comm) { - mc_model_checker->get_remote_simulation().read( - temp_synchro, remote(static_cast(remote_act))); + mc_model_checker->get_remote_simulation().read(temp_synchro, remote(remote_act)); act = temp_synchro.get_buffer(); } else act = remote_act; diff --git a/src/mc/mc_state.cpp b/src/mc/mc_state.cpp index ec1e42ac48..d5e0c204c0 100644 --- a/src/mc/mc_state.cpp +++ b/src/mc/mc_state.cpp @@ -109,7 +109,7 @@ static inline smx_simcall_t MC_state_choose_request_for_process(simgrid::mc::Sta case SIMCALL_COMM_WAIT: { simgrid::mc::RemotePtr remote_act = - remote(static_cast(simcall_comm_wait__getraw__comm(&actor->simcall_))); + remote(simcall_comm_wait__getraw__comm(&actor->simcall_)); simgrid::mc::Remote temp_act; mc_model_checker->get_remote_simulation().read(temp_act, remote_act); const simgrid::kernel::activity::CommImpl* act = temp_act.get_buffer(); diff --git a/src/mc/remote/AppSide.cpp b/src/mc/remote/AppSide.cpp index f721a18d04..21fa56bb9e 100644 --- a/src/mc/remote/AppSide.cpp +++ b/src/mc/remote/AppSide.cpp @@ -66,7 +66,7 @@ AppSide* AppSide::initialize() return instance_.get(); } -void AppSide::handle_deadlock_check(const 
s_mc_message_t*) +void AppSide::handle_deadlock_check(const s_mc_message_t*) const { bool deadlock = false; if (not simix_global->process_list.empty()) { @@ -82,11 +82,11 @@ void AppSide::handle_deadlock_check(const s_mc_message_t*) s_mc_message_int_t answer{MC_MESSAGE_DEADLOCK_CHECK_REPLY, deadlock}; xbt_assert(channel_.send(answer) == 0, "Could not send response"); } -void AppSide::handle_continue(const s_mc_message_t*) +void AppSide::handle_continue(const s_mc_message_t*) const { /* Nothing to do */ } -void AppSide::handle_simcall(const s_mc_message_simcall_handle_t* message) +void AppSide::handle_simcall(const s_mc_message_simcall_handle_t* message) const { smx_actor_t process = SIMIX_process_from_PID(message->pid); xbt_assert(process != nullptr, "Invalid pid %lu", message->pid); @@ -95,7 +95,7 @@ void AppSide::handle_simcall(const s_mc_message_simcall_handle_t* message) xbt_die("Could not send MESSAGE_WAITING to model-checker"); } -void AppSide::handle_actor_enabled(const s_mc_message_actor_enabled_t* msg) +void AppSide::handle_actor_enabled(const s_mc_message_actor_enabled_t* msg) const { bool res = simgrid::mc::actor_is_enabled(SIMIX_process_from_PID(msg->aid)); s_mc_message_int_t answer{MC_MESSAGE_ACTOR_ENABLED_REPLY, res}; @@ -106,9 +106,9 @@ void AppSide::handle_actor_enabled(const s_mc_message_actor_enabled_t* msg) xbt_assert(received_size == sizeof(_type_), "Unexpected size for " _name_ " (%zd != %zu)", received_size, \ sizeof(_type_)) -void AppSide::handle_messages() +void AppSide::handle_messages() const { - while (1) { + while (true) { XBT_DEBUG("Waiting messages from model-checker"); char message_buffer[MC_MESSAGE_LENGTH]; @@ -145,33 +145,33 @@ void AppSide::handle_messages() } } -void AppSide::main_loop() +void AppSide::main_loop() const { - while (1) { + while (true) { simgrid::mc::wait_for_requests(); xbt_assert(channel_.send(MC_MESSAGE_WAITING) == 0, "Could not send WAITING message to model-checker"); this->handle_messages(); } } -void AppSide::report_assertion_failure() +void AppSide::report_assertion_failure() const { if (channel_.send(MC_MESSAGE_ASSERTION_FAILED)) xbt_die("Could not send assertion to model-checker"); this->handle_messages(); } -void AppSide::ignore_memory(void* addr, std::size_t size) +void AppSide::ignore_memory(void* addr, std::size_t size) const { s_mc_message_ignore_memory_t message; message.type = MC_MESSAGE_IGNORE_MEMORY; message.addr = (std::uintptr_t)addr; message.size = size; if (channel_.send(message)) - xbt_die("Could not send IGNORE_MEMORY mesage to model-checker"); + xbt_die("Could not send IGNORE_MEMORY message to model-checker"); } -void AppSide::ignore_heap(void* address, std::size_t size) +void AppSide::ignore_heap(void* address, std::size_t size) const { const s_xbt_mheap_t* heap = mmalloc_get_current_heap(); @@ -184,7 +184,7 @@ void AppSide::ignore_heap(void* address, std::size_t size) message.fragment = -1; heap->heapinfo[message.block].busy_block.ignore++; } else { - message.fragment = ((uintptr_t)(ADDR2UINT(address) % (BLOCKSIZE))) >> heap->heapinfo[message.block].type; + message.fragment = (ADDR2UINT(address) % BLOCKSIZE) >> heap->heapinfo[message.block].type; heap->heapinfo[message.block].busy_frag.ignore[message.fragment]++; } @@ -192,7 +192,7 @@ void AppSide::ignore_heap(void* address, std::size_t size) xbt_die("Could not send ignored region to MCer"); } -void AppSide::unignore_heap(void* address, std::size_t size) +void AppSide::unignore_heap(void* address, std::size_t size) const { s_mc_message_ignore_memory_t message; 
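Worked example for the fragment index computed in ignore_heap() above, assuming BLOCKSIZE is 4096 and that the heapinfo type field stores log2 of the fragment size (consistent with the BLOCKSIZE >> type loop bound seen in compare.cpp earlier in this patch):

    // Assumed values, for illustration only.
    // type = 7  ->  fragment size 1 << 7 = 128 bytes, 4096 / 128 = 32 fragments per block.
    constexpr int  type            = 7;
    constexpr long offset_in_block = 300;                     // ADDR2UINT(address) % BLOCKSIZE
    constexpr long fragment        = offset_in_block >> type; // 300 / 128 -> fragment 2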
message.type = MC_MESSAGE_UNIGNORE_HEAP; @@ -202,7 +202,7 @@ void AppSide::unignore_heap(void* address, std::size_t size) xbt_die("Could not send IGNORE_HEAP message to model-checker"); } -void AppSide::declare_symbol(const char* name, int* value) +void AppSide::declare_symbol(const char* name, int* value) const { s_mc_message_register_symbol_t message; message.type = MC_MESSAGE_REGISTER_SYMBOL; @@ -215,7 +215,7 @@ void AppSide::declare_symbol(const char* name, int* value) xbt_die("Could send REGISTER_SYMBOL message to model-checker"); } -void AppSide::declare_stack(void* stack, size_t size, ucontext_t* context) +void AppSide::declare_stack(void* stack, size_t size, ucontext_t* context) const { const s_xbt_mheap_t* heap = mmalloc_get_current_heap(); diff --git a/src/mc/remote/AppSide.hpp b/src/mc/remote/AppSide.hpp index b1cd172dfe..d159af90fa 100644 --- a/src/mc/remote/AppSide.hpp +++ b/src/mc/remote/AppSide.hpp @@ -27,25 +27,25 @@ private: public: AppSide(); explicit AppSide(int fd) : channel_(fd) {} - void handle_messages(); + void handle_messages() const; private: - void handle_deadlock_check(const s_mc_message_t* msg); - void handle_continue(const s_mc_message_t* msg); - void handle_simcall(const s_mc_message_simcall_handle_t* message); - void handle_actor_enabled(const s_mc_message_actor_enabled_t* msg); + void handle_deadlock_check(const s_mc_message_t* msg) const; + void handle_continue(const s_mc_message_t* msg) const; + void handle_simcall(const s_mc_message_simcall_handle_t* message) const; + void handle_actor_enabled(const s_mc_message_actor_enabled_t* msg) const; public: Channel const& get_channel() const { return channel_; } Channel& get_channel() { return channel_; } - XBT_ATTRIB_NORETURN void main_loop(); - void report_assertion_failure(); - void ignore_memory(void* addr, std::size_t size); - void ignore_heap(void* addr, std::size_t size); - void unignore_heap(void* addr, std::size_t size); - void declare_symbol(const char* name, int* value); + XBT_ATTRIB_NORETURN void main_loop() const; + void report_assertion_failure() const; + void ignore_memory(void* addr, std::size_t size) const; + void ignore_heap(void* addr, std::size_t size) const; + void unignore_heap(void* addr, std::size_t size) const; + void declare_symbol(const char* name, int* value) const; #if HAVE_UCONTEXT_H - void declare_stack(void* stack, size_t size, ucontext_t* context); + void declare_stack(void* stack, size_t size, ucontext_t* context) const; #endif // Singleton :/ diff --git a/src/mc/remote/RemoteSimulation.cpp b/src/mc/remote/RemoteSimulation.cpp index 290c765ba5..e56c9bd4c2 100644 --- a/src/mc/remote/RemoteSimulation.cpp +++ b/src/mc/remote/RemoteSimulation.cpp @@ -47,13 +47,19 @@ static const std::vector filtered_libraries = { "libboost_system", "libboost_thread", "libboost_timer", + "libbrotlicommon", + "libbrotlidec", "libbz2", "libc", "libc++", "libcdt", "libcgraph", + "libcom_err", + "libcrypt", "libcrypto", + "libcurl", "libcxxrt", + "libdebuginfod", "libdl", "libdw", "libelf", @@ -63,53 +69,48 @@ static const std::vector filtered_libraries = { "libflangrti", "libgcc_s", "libgfortran", + "libgssapi_krb5", + "libidn2", "libimf", "libintlc", "libirng", + "libk5crypto", + "libkeyutils", + "libkrb5", + "libkrb5support", /*odd behaviour on fedora rawhide ... 
remove these when fixed*/ + "liblber", + "libldap", "liblua5.1", "liblua5.3", "liblzma", "libm", + "libnghttp2", "libomp", "libpapi", "libpcre2", "libpfm", "libpgmath", + "libpsl", "libpthread", "libquadmath", + "libresolv", "librt", + "libsasl2", + "libselinux", + "libssh", + "libssh2", + "libssl", "libstdc++", "libsvml", "libtsan", /* gcc sanitizers */ "libubsan", /* gcc sanitizers */ + "libunistring", "libunwind", "libunwind-ptrace", "libunwind-x86", "libunwind-x86_64", "libz", - "libkrb5support", /*odd behaviour on fedora rawhide ... remove these when fixed*/ - "libkeyutils", - "libunistring", - "libbrotlidec", - "liblber", - "libldap", - "libcom_err", - "libk5crypto", - "libkrb5", - "libgssapi_krb5", - "libssl", - "libpsl", - "libssh", - "libssh2", - "libidn2", - "libnghttp2", - "libcurl", - "libdebuginfod", - "libbrotlicommon", - "libsasl2", - "libresolv", - "libcrypt", - "libselinux"}; + "libzstd"}; static bool is_simgrid_lib(const std::string& libname) { @@ -424,7 +425,7 @@ std::string RemoteSimulation::read_string(RemotePtr address) const std::vector res(128); off_t off = 0; - while (1) { + while (true) { ssize_t c = pread(this->memory_file, res.data() + off, res.size() - off, (off_t)address.address() + off); if (c == -1 && errno == EINTR) continue; @@ -454,13 +455,13 @@ void* RemoteSimulation::read_bytes(void* buffer, std::size_t size, RemotePtr address) +void RemoteSimulation::write_bytes(const void* buffer, size_t len, RemotePtr address) const { if (pwrite_whole(this->memory_file, buffer, len, (size_t)address.address()) < 0) xbt_die("Write to process %lli failed", (long long)this->pid_); } -void RemoteSimulation::clear_bytes(RemotePtr address, size_t len) +void RemoteSimulation::clear_bytes(RemotePtr address, size_t len) const { pthread_once(&zero_buffer_flag, zero_buffer_init); while (len) { @@ -575,7 +576,7 @@ void RemoteSimulation::unignore_heap(void* address, size_t size) } } -void RemoteSimulation::ignore_local_variable(const char* var_name, const char* frame_name) +void RemoteSimulation::ignore_local_variable(const char* var_name, const char* frame_name) const { if (frame_name != nullptr && strcmp(frame_name, "*") == 0) frame_name = nullptr; @@ -595,7 +596,7 @@ std::vector& RemoteSimulation::dead_actors() return smx_dead_actors_infos; } -void RemoteSimulation::dump_stack() +void RemoteSimulation::dump_stack() const { unw_addr_space_t as = unw_create_addr_space(&_UPT_accessors, BYTE_ORDER); if (as == nullptr) { diff --git a/src/mc/remote/RemoteSimulation.hpp b/src/mc/remote/RemoteSimulation.hpp index 55f3f97b6a..3bd73c2a29 100644 --- a/src/mc/remote/RemoteSimulation.hpp +++ b/src/mc/remote/RemoteSimulation.hpp @@ -102,8 +102,8 @@ public: using AddressSpace::read_string; // Write memory: - void write_bytes(const void* buffer, size_t len, RemotePtr address); - void clear_bytes(RemotePtr address, size_t len); + void write_bytes(const void* buffer, size_t len, RemotePtr address) const; + void clear_bytes(RemotePtr address, size_t len) const; // Debug information: std::shared_ptr find_object_info(RemotePtr addr) const; @@ -142,7 +142,7 @@ public: void terminate() { running_ = false; } - void ignore_global_variable(const char* name) + void ignore_global_variable(const char* name) const { for (std::shared_ptr const& info : this->object_infos) info->remove_global_variable(name); @@ -155,7 +155,7 @@ public: void ignore_heap(IgnoredHeapRegion const& region); void unignore_heap(void* address, size_t size); - void ignore_local_variable(const char* var_name, const char* frame_name); + 
void ignore_local_variable(const char* var_name, const char* frame_name) const; std::vector& actors(); std::vector& dead_actors(); @@ -185,7 +185,7 @@ public: return nullptr; } - void dump_stack(); + void dump_stack() const; private: void init_memory_map_info(); diff --git a/src/mc/sosp/PageStore.hpp b/src/mc/sosp/PageStore.hpp index f5511fe42d..702fc15d39 100644 --- a/src/mc/sosp/PageStore.hpp +++ b/src/mc/sosp/PageStore.hpp @@ -23,7 +23,7 @@ namespace mc { /** @brief Storage for snapshot memory pages * * The first (lower) layer of the per-page snapshot mechanism is a page store: - * its responsibility is to store immutable sharable reference-counted memory + * its responsibility is to store immutable shareable reference-counted memory * pages independently of the snapshotting logic. Snapshot management and * representation is handled to an higher layer. READMORE * @@ -146,16 +146,16 @@ public: // Debug/test methods /** @brief Get the number of references for a page */ - std::size_t get_ref(std::size_t pageno); + std::size_t get_ref(std::size_t pageno) const; /** @brief Get the number of used pages */ - std::size_t size(); + std::size_t size() const; /** @brief Get the capacity of the page store * * The capacity is expanded by a system call (mremap). * */ - std::size_t capacity(); + std::size_t capacity() const; }; XBT_ALWAYS_INLINE void PageStore::unref_page(std::size_t pageno) @@ -174,17 +174,17 @@ XBT_ALWAYS_INLINE void* PageStore::get_page(std::size_t pageno) const return (void*)simgrid::mc::mmu::join(pageno, (std::uintptr_t)this->memory_); } -XBT_ALWAYS_INLINE std::size_t PageStore::get_ref(std::size_t pageno) +XBT_ALWAYS_INLINE std::size_t PageStore::get_ref(std::size_t pageno) const { return this->page_counts_[pageno]; } -XBT_ALWAYS_INLINE std::size_t PageStore::size() +XBT_ALWAYS_INLINE std::size_t PageStore::size() const { return this->top_index_ - this->free_pages_.size(); } -XBT_ALWAYS_INLINE std::size_t PageStore::capacity() +XBT_ALWAYS_INLINE std::size_t PageStore::capacity() const { return this->capacity_; } diff --git a/src/mc/sosp/Region.cpp b/src/mc/sosp/Region.cpp index a770cd374f..653586c047 100644 --- a/src/mc/sosp/Region.cpp +++ b/src/mc/sosp/Region.cpp @@ -32,7 +32,7 @@ Region::Region(RegionType region_type, void* start_addr, size_t size) * * @param region Target region */ -void Region::restore() +void Region::restore() const { xbt_assert(((start().address()) & (xbt_pagesize - 1)) == 0, "Not at the beginning of a page"); xbt_assert(simgrid::mc::mmu::chunk_count(size()) == get_chunks().page_count()); diff --git a/src/mc/sosp/Region.hpp b/src/mc/sosp/Region.hpp index 0131713643..62d13c3f1a 100644 --- a/src/mc/sosp/Region.hpp +++ b/src/mc/sosp/Region.hpp @@ -60,7 +60,7 @@ public: bool contain(RemotePtr p) const { return p >= start() && p < end(); } /** @brief Restore a region from a snapshot */ - void restore(); + void restore() const; /** @brief Read memory that was snapshotted in this region * diff --git a/src/mc/sosp/Snapshot.cpp b/src/mc/sosp/Snapshot.cpp index 6646082f44..ccc1ac9728 100644 --- a/src/mc/sosp/Snapshot.cpp +++ b/src/mc/sosp/Snapshot.cpp @@ -104,7 +104,7 @@ static std::vector unwind_stack_frames(UnwindContext* stack_ // TODO, check condition check (unw_init_local==0 means end of frame) - while (1) { + while (true) { s_mc_stack_frame_t stack_frame; stack_frame.unw_cursor = c; @@ -273,7 +273,7 @@ Region* Snapshot::get_region(const void* addr, Region* hinted_region) const return get_region(addr); } -void Snapshot::restore(RemoteSimulation* process) +void 
Snapshot::restore(RemoteSimulation* process) const { XBT_DEBUG("Restore snapshot %i", num_state_); diff --git a/src/mc/sosp/Snapshot.hpp b/src/mc/sosp/Snapshot.hpp index d232d0b64a..ac6b434a26 100644 --- a/src/mc/sosp/Snapshot.hpp +++ b/src/mc/sosp/Snapshot.hpp @@ -74,7 +74,7 @@ public: ReadOptions options = ReadOptions::none()) const override; Region* get_region(const void* addr) const; Region* get_region(const void* addr, Region* hinted_region) const; - void restore(RemoteSimulation* get_remote_simulation); + void restore(RemoteSimulation* get_remote_simulation) const; // To be private int num_state_; diff --git a/src/msg/msg_legacy.cpp b/src/msg/msg_legacy.cpp index c76bad3dad..a255124bb4 100644 --- a/src/msg/msg_legacy.cpp +++ b/src/msg/msg_legacy.cpp @@ -96,7 +96,7 @@ void MSG_process_resume(sg_actor_t actor) { sg_actor_resume(actor); } -int MSG_process_is_suspended(sg_actor_t actor) +int MSG_process_is_suspended(const_sg_actor_t actor) { return sg_actor_is_suspended(actor); } diff --git a/src/plugins/dirty_page_tracking.cpp b/src/plugins/dirty_page_tracking.cpp index 1cd921a44f..db04237068 100644 --- a/src/plugins/dirty_page_tracking.cpp +++ b/src/plugins/dirty_page_tracking.cpp @@ -23,19 +23,19 @@ class DirtyPageTrackingExt { public: void start_tracking(); void stop_tracking() { dp_tracking_ = false; } - bool is_tracking() { return dp_tracking_; } + bool is_tracking() const { return dp_tracking_; } void track(kernel::activity::ExecImpl const* exec, double amount) { dp_objs_.insert({exec, amount}); } void untrack(kernel::activity::ExecImpl const* exec) { dp_objs_.erase(exec); } double get_stored_remains(kernel::activity::ExecImpl const* exec) { return dp_objs_.at(exec); } void update_dirty_page_count(double delta) { dp_updated_by_deleted_tasks_ += delta; } double computed_flops_lookup(); - double get_intensity() { return dp_intensity_; } + double get_intensity() const { return dp_intensity_; } void set_intensity(double intensity) { dp_intensity_ = intensity; } - double get_working_set_memory() { return working_set_memory_; } + sg_size_t get_working_set_memory() const { return working_set_memory_; } void set_working_set_memory(sg_size_t size) { working_set_memory_ = size; } void set_migration_speed(double speed) { mig_speed_ = speed; } - double get_migration_speed() { return mig_speed_; } - double get_max_downtime() { return max_downtime_; } + double get_migration_speed() const { return mig_speed_; } + double get_max_downtime() const { return max_downtime_; } static simgrid::xbt::Extension EXTENSION_ID; virtual ~DirtyPageTrackingExt() = default; diff --git a/src/plugins/file_system/s4u_FileSystem.cpp b/src/plugins/file_system/s4u_FileSystem.cpp index 88478db242..241189486b 100644 --- a/src/plugins/file_system/s4u_FileSystem.cpp +++ b/src/plugins/file_system/s4u_FileSystem.cpp @@ -9,6 +9,7 @@ #include "src/surf/HostImpl.hpp" #include "src/surf/xml/platf_private.hpp" #include "xbt/config.hpp" +#include "xbt/parse_units.hpp" #include #include @@ -142,7 +143,7 @@ File::~File() kernel::actor::simcall([this, desc_table] { desc_table->push_back(this->desc_id); }); } -void File::dump() +void File::dump() const { if (local_storage_) XBT_INFO("File Descriptor information:\n" @@ -296,7 +297,7 @@ sg_size_t File::write(sg_size_t size, bool write_inside) return 0; } -sg_size_t File::size() +sg_size_t File::size() const { return size_; } @@ -323,12 +324,12 @@ void File::seek(sg_offset_t offset, int origin) } } -sg_size_t File::tell() +sg_size_t File::tell() const { return current_position_; } -void 
File::move(const std::string& fullpath) +void File::move(const std::string& fullpath) const { /* Check if the new full path is on the same mount point */ if (fullpath.compare(0, mount_point_.length(), mount_point_) == 0) { @@ -354,7 +355,7 @@ void File::move(const std::string& fullpath) } } -int File::unlink() +int File::unlink() const { /* Check if the file is on local storage */ std::map* content = nullptr; @@ -483,8 +484,9 @@ int File::remote_move(sg_host_t host, const char* fullpath) FileSystemDiskExt::FileSystemDiskExt(const Disk* ptr) { const char* size_str = ptr->get_property("size"); + std::string dummyfile; if (size_str) - size_ = surf_parse_get_size(size_str, "disk size", ptr->get_name()); + size_ = surf_parse_get_size(dummyfile, -1, size_str, "disk size", ptr->get_name()); const char* current_mount_str = ptr->get_property("mount"); if (current_mount_str) @@ -700,7 +702,7 @@ void sg_file_close(const_sg_file_t fd) /** Retrieves the path to the file * @ingroup plugin_filesystem */ -const char* sg_file_get_name(sg_file_t fd) +const char* sg_file_get_name(const_sg_file_t fd) { xbt_assert((fd != nullptr), "Invalid file descriptor"); return fd->get_path(); @@ -709,12 +711,12 @@ const char* sg_file_get_name(sg_file_t fd) /** Retrieves the size of the file * @ingroup plugin_filesystem */ -sg_size_t sg_file_get_size(sg_file_t fd) +sg_size_t sg_file_get_size(const_sg_file_t fd) { return fd->size(); } -void sg_file_dump(sg_file_t fd) +void sg_file_dump(const_sg_file_t fd) { fd->dump(); } @@ -750,12 +752,12 @@ void sg_file_seek(sg_file_t fd, sg_offset_t offset, int origin) fd->seek(offset, origin); } -sg_size_t sg_file_tell(sg_file_t fd) +sg_size_t sg_file_tell(const_sg_file_t fd) { return fd->tell(); } -void sg_file_move(sg_file_t fd, const char* fullpath) +void sg_file_move(const_sg_file_t fd, const char* fullpath) { fd->move(fullpath); } diff --git a/src/plugins/host_dvfs.cpp b/src/plugins/host_dvfs.cpp index b46ed2d000..8082018af3 100644 --- a/src/plugins/host_dvfs.cpp +++ b/src/plugins/host_dvfs.cpp @@ -414,7 +414,7 @@ static void on_host_added(simgrid::s4u::Host& host) } }(); - while (1) { + while (true) { // Sleep *before* updating; important for startup (i.e., t = 0). 
// In the beginning, we want to go with the pstates specified in the platform file // (so we sleep first) diff --git a/src/plugins/host_energy.cpp b/src/plugins/host_energy.cpp index 0b1ed29b96..866e9f6c88 100644 --- a/src/plugins/host_energy.cpp +++ b/src/plugins/host_energy.cpp @@ -156,13 +156,13 @@ public: ~HostEnergy(); double get_current_watts_value(); - double get_current_watts_value(double cpu_load); + double get_current_watts_value(double cpu_load) const; double get_consumed_energy(); - double get_watt_idle_at(int pstate); - double get_watt_min_at(int pstate); - double get_watt_max_at(int pstate); - double get_power_range_slope_at(int pstate); - double get_last_update_time() { return last_updated_; } + double get_watt_idle_at(int pstate) const; + double get_watt_min_at(int pstate) const; + double get_watt_max_at(int pstate) const; + double get_power_range_slope_at(int pstate) const; + double get_last_update_time() const { return last_updated_; } void update(); }; @@ -228,28 +228,28 @@ HostEnergy::HostEnergy(simgrid::s4u::Host* ptr) : host_(ptr) HostEnergy::~HostEnergy() = default; -double HostEnergy::get_watt_idle_at(int pstate) +double HostEnergy::get_watt_idle_at(int pstate) const { xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s", host_->get_cname()); return power_range_watts_list_[pstate].idle_; } -double HostEnergy::get_watt_min_at(int pstate) +double HostEnergy::get_watt_min_at(int pstate) const { xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s", host_->get_cname()); return power_range_watts_list_[pstate].epsilon_; } -double HostEnergy::get_watt_max_at(int pstate) +double HostEnergy::get_watt_max_at(int pstate) const { xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s", host_->get_cname()); return power_range_watts_list_[pstate].max_; } -double HostEnergy::get_power_range_slope_at(int pstate) +double HostEnergy::get_power_range_slope_at(int pstate) const { xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s", host_->get_cname()); @@ -292,7 +292,7 @@ double HostEnergy::get_current_watts_value() * * Whether the host is ON or OFF is not taken into account. */ -double HostEnergy::get_current_watts_value(double cpu_load) +double HostEnergy::get_current_watts_value(double cpu_load) const { xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s", host_->get_cname()); diff --git a/src/plugins/host_load.cpp b/src/plugins/host_load.cpp index f881f1d235..c538bf335d 100644 --- a/src/plugins/host_load.cpp +++ b/src/plugins/host_load.cpp @@ -68,7 +68,7 @@ public: explicit HostLoad(simgrid::s4u::Host& ptr) = delete; explicit HostLoad(simgrid::s4u::Host&& ptr) = delete; - double get_current_load(); + double get_current_load() const; /** Get the the average load since last reset(), as a ratio * * That's the ratio (amount of flops that were actually computed) / (amount of flops that could have been computed at full speed) @@ -168,7 +168,7 @@ void HostLoad::update() * But still, if you call this function between the two events (in the simulator course), it * returns 0 although there is no time (in the simulated time) where this value is valid. 
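To make the ratio returned by get_current_load() concrete (the implementation right below divides current_flops_ by get_speed() * get_core_count()), here is a small numeric example; the figures are illustrative and current_flops_ is read as the aggregate flop rate currently delivered by the host:

    double speed_per_core = 1e9;  // host_->get_speed(): 1 Gflop/s per core
    int    core_count     = 4;    // host_->get_core_count()
    double current_flops  = 2e9;  // flop rate currently being served
    double load = current_flops / (speed_per_core * core_count); // 2e9 / 4e9 = 0.5, i.e. 50% busy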
*/ -double HostLoad::get_current_load() +double HostLoad::get_current_load() const { // We don't need to call update() here because it is called every time an action terminates or starts return current_flops_ / (host_->get_speed() * host_->get_core_count()); @@ -218,7 +218,7 @@ void sg_host_load_plugin_init() // If SimGrid is already initialized, we need to attach an extension to each existing host if (simgrid::s4u::Engine::is_initialized()) { - simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance(); + const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance(); for (auto& host : e->get_all_hosts()) { host->extension_set(new HostLoad(host)); } diff --git a/src/plugins/link_energy.cpp b/src/plugins/link_energy.cpp index 4f076c0687..3d68a3bbc2 100644 --- a/src/plugins/link_energy.cpp +++ b/src/plugins/link_energy.cpp @@ -55,7 +55,7 @@ public: void update(); private: - double get_power(); + double get_power() const; s4u::Link* link_{}; @@ -123,7 +123,7 @@ void LinkEnergy::init_watts_range_list() } } -double LinkEnergy::get_power() +double LinkEnergy::get_power() const { if (!inited_) return 0.0; diff --git a/src/plugins/vm/VirtualMachineImpl.cpp b/src/plugins/vm/VirtualMachineImpl.cpp index 2d79dcbbb6..1bb1944a84 100644 --- a/src/plugins/vm/VirtualMachineImpl.cpp +++ b/src/plugins/vm/VirtualMachineImpl.cpp @@ -139,6 +139,9 @@ double VMModel::next_occurring_event(double now) /* iterate for all virtual machines */ for (s4u::VirtualMachine* const& ws_vm : VirtualMachineImpl::allVms_) { + if (ws_vm->get_state() == s4u::VirtualMachine::state::SUSPENDED) // Ignore suspended VMs + continue; + const kernel::resource::Cpu* cpu = ws_vm->pimpl_cpu; // solved_value below is X1 in comment above: what this VM got in the sharing on the PM @@ -287,12 +290,14 @@ void VirtualMachineImpl::set_physical_host(s4u::Host* destination) /* update net_elm with that of the destination physical host */ piface_->set_netpoint(destination->get_netpoint()); + /* Adapt the speed, pstate and other physical characteristics to the one of our new physical CPU */ + piface_->pimpl_cpu->reset_vcpu(destination->pimpl_cpu); + physical_host_ = destination; /* Update vcpu's action for the new pm */ /* create a cpu action bound to the pm model at the destination. 
*/ - kernel::resource::CpuAction* new_cpu_action = - static_cast(destination->pimpl_cpu->execution_start(0, this->core_amount_)); + kernel::resource::CpuAction* new_cpu_action = destination->pimpl_cpu->execution_start(0, this->core_amount_); if (action_->get_remains_no_update() > 0) XBT_CRITICAL("FIXME: need copy the state(?), %f", action_->get_remains_no_update()); diff --git a/src/plugins/vm/VirtualMachineImpl.hpp b/src/plugins/vm/VirtualMachineImpl.hpp index a3c5464d4c..a12b01fcd8 100644 --- a/src/plugins/vm/VirtualMachineImpl.hpp +++ b/src/plugins/vm/VirtualMachineImpl.hpp @@ -48,7 +48,7 @@ public: /** @brief Change the physical host on which the given VM is running */ virtual void set_physical_host(s4u::Host* dest); /** @brief Get the physical host on which the given VM is running */ - s4u::Host* get_physical_host() { return physical_host_; } + s4u::Host* get_physical_host() const { return physical_host_; } sg_size_t get_ramsize() const { return ramsize_; } void set_ramsize(sg_size_t ramsize) { ramsize_ = ramsize; } @@ -56,8 +56,8 @@ public: s4u::VirtualMachine::state get_state() const { return vm_state_; } void set_state(s4u::VirtualMachine::state state) { vm_state_ = state; } - unsigned int get_core_amount() { return core_amount_; } - kernel::resource::Action* get_action() { return action_; } + unsigned int get_core_amount() const { return core_amount_; } + kernel::resource::Action* get_action() const { return action_; } virtual void set_bound(double bound); diff --git a/src/plugins/vm/VmLiveMigration.cpp b/src/plugins/vm/VmLiveMigration.cpp index c047a12298..0712e95848 100644 --- a/src/plugins/vm/VmLiveMigration.cpp +++ b/src/plugins/vm/VmLiveMigration.cpp @@ -115,7 +115,7 @@ sg_size_t MigrationTx::sendMigrationData(sg_size_t size, int stage, int stage2_r double clock_end = s4u::Engine::get_clock(); double duration = clock_end - clock_sta; - double actual_speed = size / duration; + double actual_speed = static_cast(size) / duration; if (stage == 2) XBT_DEBUG("mig-stage%d.%d: sent %llu duration %f actual_speed %f (target %f)", stage, stage2_round, size, duration, @@ -134,7 +134,7 @@ void MigrationTx::operator()() double host_speed = vm_->get_pm()->get_speed(); const sg_size_t ramsize = vm_->get_ramsize(); const double dp_rate = - host_speed ? (sg_vm_get_migration_speed(vm_) * sg_vm_get_dirty_page_intensity(vm_)) / host_speed : 1; + host_speed != 0.0 ? (sg_vm_get_migration_speed(vm_) * sg_vm_get_dirty_page_intensity(vm_)) / host_speed : 1; const sg_size_t dp_cap = sg_vm_get_working_set_memory(vm_); const double mig_speed = sg_vm_get_migration_speed(vm_); double max_downtime = sg_vm_get_max_downtime(vm_); diff --git a/src/plugins/vm/s4u_VirtualMachine.cpp b/src/plugins/vm/s4u_VirtualMachine.cpp index 4b1429dbbe..8c3963ad60 100644 --- a/src/plugins/vm/s4u_VirtualMachine.cpp +++ b/src/plugins/vm/s4u_VirtualMachine.cpp @@ -131,9 +131,10 @@ simgrid::s4u::Host* VirtualMachine::get_pm() const return pimpl_vm_->get_physical_host(); } -void VirtualMachine::set_pm(simgrid::s4u::Host* pm) +VirtualMachine* VirtualMachine::set_pm(simgrid::s4u::Host* pm) { kernel::actor::simcall([this, pm]() { pimpl_vm_->set_physical_host(pm); }); + return this; } VirtualMachine::state VirtualMachine::get_state() @@ -146,9 +147,10 @@ size_t VirtualMachine::get_ramsize() const return pimpl_vm_->get_ramsize(); } -void VirtualMachine::set_ramsize(size_t ramsize) +VirtualMachine* VirtualMachine::set_ramsize(size_t ramsize) { pimpl_vm_->set_ramsize(ramsize); + return this; } /** @brief Set a CPU bound for a given VM. 
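Note on the s4u_VirtualMachine.cpp hunks around this point: set_pm(), set_ramsize() and set_bound() now return the VirtualMachine itself, so the setters can be chained. A hedged usage sketch, where vm and target_pm stand for objects created elsewhere and the numeric values are placeholders:

    // Sketch only: configure a VM with the fluent setters introduced in this patch.
    #include <simgrid/s4u/VirtualMachine.hpp>

    static void configure_vm(simgrid::s4u::VirtualMachine* vm, simgrid::s4u::Host* target_pm)
    {
      vm->set_ramsize(1024ULL * 1024 * 1024) // 1 GiB of RAM to migrate
        ->set_bound(0.75e9);                 // cap the vCPU at 0.75 Gflop/s
      vm->set_pm(target_pm);                 // relocate the VM onto another physical host
    }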
* @ingroup msg_VMs @@ -176,9 +178,10 @@ void VirtualMachine::set_ramsize(size_t ramsize) * 2. Note that bound == 0 means no bound (i.e., unlimited). But, if a host has multiple CPU cores, the CPU share of a * computation task (or a VM) never exceeds the capacity of a CPU core. */ -void VirtualMachine::set_bound(double bound) +VirtualMachine* VirtualMachine::set_bound(double bound) { kernel::actor::simcall([this, bound]() { pimpl_vm_->set_bound(bound); }); + return this; } } // namespace simgrid diff --git a/src/s4u/s4u_Activity.cpp b/src/s4u/s4u_Activity.cpp index f1a24ae50d..18d27cf4c4 100644 --- a/src/s4u/s4u_Activity.cpp +++ b/src/s4u/s4u_Activity.cpp @@ -42,9 +42,35 @@ bool Activity::test() return false; } +Activity* Activity::suspend() +{ + if (suspended_) + return this; // Already suspended + suspended_ = true; + + if (state_ == State::STARTED) + pimpl_->suspend(); + + return this; +} + +Activity* Activity::resume() +{ + if (not suspended_) + return this; // nothing to restore when it's not suspended + + if (state_ == State::STARTED) + pimpl_->resume(); + + return this; +} + double Activity::get_remaining() const { - return remains_; + if (state_ == State::INITED || state_ == State::STARTING) + return remains_; + else + return pimpl_->get_remaining(); } Activity* Activity::set_remaining(double remains) diff --git a/src/s4u/s4u_Actor.cpp b/src/s4u/s4u_Actor.cpp index e6737e9342..b9cc2223c6 100644 --- a/src/s4u/s4u_Actor.cpp +++ b/src/s4u/s4u_Actor.cpp @@ -96,7 +96,7 @@ void intrusive_ptr_release(const Actor* actor) { intrusive_ptr_release(actor->pimpl_); } -int Actor::get_refcount() +int Actor::get_refcount() const { return pimpl_->get_refcount(); } @@ -234,7 +234,7 @@ void Actor::resume() s4u::Actor::on_resume(*this); } -bool Actor::is_suspended() +bool Actor::is_suspended() const { return pimpl_->is_suspended(); } @@ -245,7 +245,7 @@ void Actor::set_kill_time(double kill_time) } /** @brief Get the kill time of an actor(or 0 if unset). */ -double Actor::get_kill_time() +double Actor::get_kill_time() const { return pimpl_->get_kill_time(); } @@ -478,7 +478,7 @@ size_t sg_actor_count() sg_actor_t* sg_actor_list() { - simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance(); + const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance(); size_t actor_count = e->get_actor_count(); xbt_assert(actor_count > 0, "There is no actor!"); std::vector actors = e->get_all_actors(); @@ -624,7 +624,7 @@ void sg_actor_resume(sg_actor_t actor) * * This checks whether an actor is suspended or not by inspecting the task on which it was waiting for the completion. */ -int sg_actor_is_suspended(sg_actor_t actor) +int sg_actor_is_suspended(const_sg_actor_t actor) { return actor->is_suspended(); } diff --git a/src/s4u/s4u_Comm.cpp b/src/s4u/s4u_Comm.cpp index 4d5bb786b4..914138569f 100644 --- a/src/s4u/s4u_Comm.cpp +++ b/src/s4u/s4u_Comm.cpp @@ -138,6 +138,10 @@ Comm* Comm::start() } else { xbt_die("Cannot start a communication before specifying whether we are the sender or the receiver"); } + + if (suspended_) + pimpl_->suspend(); + state_ = State::STARTED; return this; } diff --git a/src/s4u/s4u_Engine.cpp b/src/s4u/s4u_Engine.cpp index c773d0e75f..89b3cedb5a 100644 --- a/src/s4u/s4u_Engine.cpp +++ b/src/s4u/s4u_Engine.cpp @@ -82,7 +82,7 @@ double Engine::get_clock() * See also: :ref:`platform`. 
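Note on the Activity::suspend()/resume() addition and the matching Comm::start() hunk above (Exec::start() and Io::start() receive the same treatment further down): suspending an activity that has not started yet only records the request, and start() applies it as soon as the implementation object exists. A usage sketch under that reading, reusing the exec_init() API that appears elsewhere in this patch:

    // Sketch only: start an execution in the suspended state, then release it.
    simgrid::s4u::ExecPtr exec = simgrid::s4u::this_actor::exec_init(1e9); // 1 Gflop
    exec->suspend(); // not started yet: only suspended_ is flipped
    exec->start();   // start() notices suspended_ and suspends the freshly created pimpl_
    exec->resume();  // progress actually begins here
    exec->wait();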
* \endrst */ -void Engine::load_platform(const std::string& platf) +void Engine::load_platform(const std::string& platf) const { double start = xbt_os_time(); parse_platform_file(platf); @@ -145,18 +145,18 @@ void Engine::register_function(const std::string& name, const kernel::actor::Act * See also: :ref:`deploy`. * \endrst */ -void Engine::load_deployment(const std::string& deploy) +void Engine::load_deployment(const std::string& deploy) const { pimpl->load_deployment(deploy); } /** Returns the amount of hosts in the platform */ -size_t Engine::get_host_count() +size_t Engine::get_host_count() const { return pimpl->hosts_.size(); } -std::vector Engine::get_all_hosts() +std::vector Engine::get_all_hosts() const { std::vector res; for (auto const& kv : pimpl->hosts_) @@ -164,7 +164,7 @@ std::vector Engine::get_all_hosts() return res; } -std::vector Engine::get_filtered_hosts(const std::function& filter) +std::vector Engine::get_filtered_hosts(const std::function& filter) const { std::vector hosts; for (auto const& kv : pimpl->hosts_) { @@ -189,7 +189,7 @@ void Engine::host_unregister(const std::string& name) * * @throw std::invalid_argument if the searched host does not exist. */ -Host* Engine::host_by_name(const std::string& name) +Host* Engine::host_by_name(const std::string& name) const { if (pimpl->hosts_.find(name) == pimpl->hosts_.end()) throw std::invalid_argument(std::string("Host not found: '") + name + std::string("'")); @@ -197,7 +197,7 @@ Host* Engine::host_by_name(const std::string& name) } /** @brief Find a host from its name (or nullptr if that host does not exist) */ -Host* Engine::host_by_name_or_null(const std::string& name) +Host* Engine::host_by_name_or_null(const std::string& name) const { auto host = pimpl->hosts_.find(name); return host == pimpl->hosts_.end() ? nullptr : host->second; @@ -207,7 +207,7 @@ Host* Engine::host_by_name_or_null(const std::string& name) * * @throw std::invalid_argument if the searched link does not exist. */ -Link* Engine::link_by_name(const std::string& name) +Link* Engine::link_by_name(const std::string& name) const { if (pimpl->links_.find(name) == pimpl->links_.end()) throw std::invalid_argument(std::string("Link not found: ") + name); @@ -216,7 +216,7 @@ Link* Engine::link_by_name(const std::string& name) } /** @brief Find an link from its name (or nullptr if that link does not exist) */ -Link* Engine::link_by_name_or_null(const std::string& name) +Link* Engine::link_by_name_or_null(const std::string& name) const { auto link = pimpl->links_.find(name); return link == pimpl->links_.end() ? nullptr : link->second->get_iface(); @@ -233,13 +233,13 @@ void Engine::link_unregister(const std::string& name) } /** @brief Returns the amount of storages in the platform */ -size_t Engine::get_storage_count() +size_t Engine::get_storage_count() const { return pimpl->storages_.size(); } /** @brief Returns the list of all storages found in the platform */ -std::vector Engine::get_all_storages() +std::vector Engine::get_all_storages() const { std::vector res; for (auto const& kv : pimpl->storages_) @@ -251,7 +251,7 @@ std::vector Engine::get_all_storages() * * @throw std::invalid_argument if the searched storage does not exist. 
*/ -Storage* Engine::storage_by_name(const std::string& name) +Storage* Engine::storage_by_name(const std::string& name) const { if (pimpl->storages_.find(name) == pimpl->storages_.end()) throw std::invalid_argument(std::string("Storage not found: ") + name); @@ -260,7 +260,7 @@ Storage* Engine::storage_by_name(const std::string& name) } /** @brief Find a storage from its name (or nullptr if that storage does not exist) */ -Storage* Engine::storage_by_name_or_null(const std::string& name) +Storage* Engine::storage_by_name_or_null(const std::string& name) const { auto storage = pimpl->storages_.find(name); return storage == pimpl->storages_.end() ? nullptr : storage->second->get_iface(); @@ -277,13 +277,13 @@ void Engine::storage_unregister(const std::string& name) } /** @brief Returns the amount of links in the platform */ -size_t Engine::get_link_count() +size_t Engine::get_link_count() const { return pimpl->links_.size(); } /** @brief Returns the list of all links found in the platform */ -std::vector Engine::get_all_links() +std::vector Engine::get_all_links() const { std::vector res; for (auto const& kv : pimpl->links_) @@ -291,7 +291,7 @@ std::vector Engine::get_all_links() return res; } -std::vector Engine::get_filtered_links(const std::function& filter) +std::vector Engine::get_filtered_links(const std::function& filter) const { std::vector filtered_list; for (auto const& kv : pimpl->links_) { @@ -302,12 +302,12 @@ std::vector Engine::get_filtered_links(const std::function& return filtered_list; } -size_t Engine::get_actor_count() +size_t Engine::get_actor_count() const { return simix_global->process_list.size(); } -std::vector Engine::get_all_actors() +std::vector Engine::get_all_actors() const { std::vector actor_list; for (auto const& kv : simix_global->process_list) { @@ -316,7 +316,7 @@ std::vector Engine::get_all_actors() return actor_list; } -std::vector Engine::get_filtered_actors(const std::function& filter) +std::vector Engine::get_filtered_actors(const std::function& filter) const { std::vector actor_list; for (auto const& kv : simix_global->process_list) { @@ -326,7 +326,7 @@ std::vector Engine::get_filtered_actors(const std::functionnetzone_root_->get_iface(); } @@ -366,19 +366,19 @@ static NetZone* netzone_by_name_recursive(NetZone* current, const std::string& n } /** @brief Retrieve the NetZone of the given name (or nullptr if not found) */ -NetZone* Engine::netzone_by_name_or_null(const std::string& name) +NetZone* Engine::netzone_by_name_or_null(const std::string& name) const { return netzone_by_name_recursive(get_netzone_root(), name); } /** @brief Retrieve the netpoint of the given name (or nullptr if not found) */ -kernel::routing::NetPoint* Engine::netpoint_by_name_or_null(const std::string& name) +kernel::routing::NetPoint* Engine::netpoint_by_name_or_null(const std::string& name) const { auto netp = pimpl->netpoints_.find(name); return netp == pimpl->netpoints_.end() ? 
nullptr : netp->second; } -std::vector Engine::get_all_netpoints() +std::vector Engine::get_all_netpoints() const { std::vector res; for (auto const& kv : pimpl->netpoints_) diff --git a/src/s4u/s4u_Exec.cpp b/src/s4u/s4u_Exec.cpp index b0c71750ba..7908468702 100644 --- a/src/s4u/s4u_Exec.cpp +++ b/src/s4u/s4u_Exec.cpp @@ -180,6 +180,10 @@ Exec* Exec::start() .set_flops_amount(flops_amounts_.front()) .start(); }); + + if (suspended_) + pimpl_->suspend(); + state_ = State::STARTED; on_start(*Actor::self(), *this); return this; diff --git a/src/s4u/s4u_Host.cpp b/src/s4u/s4u_Host.cpp index e5edd618a8..3dba929a30 100644 --- a/src/s4u/s4u_Host.cpp +++ b/src/s4u/s4u_Host.cpp @@ -294,17 +294,17 @@ std::unordered_map const& Host::get_mounted_storages() return *mounts_; } -ExecPtr Host::exec_async(double flops) +ExecPtr Host::exec_async(double flops) const { return this_actor::exec_init(flops); } -void Host::execute(double flops) +void Host::execute(double flops) const { execute(flops, 1.0 /* priority */); } -void Host::execute(double flops, double priority) +void Host::execute(double flops, double priority) const { this_actor::exec_init(flops)->set_priority(1 / priority)->start()->wait(); } @@ -319,7 +319,7 @@ size_t sg_host_count() } sg_host_t* sg_host_list() { - simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance(); + const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance(); size_t host_count = e->get_host_count(); xbt_assert(host_count > 0, "There is no host!"); std::vector hosts = e->get_all_hosts(); diff --git a/src/s4u/s4u_Io.cpp b/src/s4u/s4u_Io.cpp index cc9794aff4..4831daf91f 100644 --- a/src/s4u/s4u_Io.cpp +++ b/src/s4u/s4u_Io.cpp @@ -44,6 +44,10 @@ Io* Io::start() .start(); } }); + + if (suspended_) + pimpl_->suspend(); + state_ = State::STARTED; return this; } diff --git a/src/s4u/s4u_Link.cpp b/src/s4u/s4u_Link.cpp index bec2b9e757..3ac5de9d35 100644 --- a/src/s4u/s4u_Link.cpp +++ b/src/s4u/s4u_Link.cpp @@ -11,6 +11,7 @@ #include "simgrid/simix.hpp" #include "src/kernel/lmm/maxmin.hpp" #include "src/surf/network_interface.hpp" +#include "src/surf/network_wifi.hpp" #include "xbt/log.h" namespace simgrid { @@ -75,6 +76,15 @@ Link::SharingPolicy Link::get_sharing_policy() const return this->pimpl_->get_sharing_policy(); } +void Link::set_host_wifi_rate(const s4u::Host* host, int level) const +{ + xbt_assert(pimpl_->get_sharing_policy() == Link::SharingPolicy::WIFI, "Link %s does not seem to be a wifi link.", + get_cname()); + auto* wlink = dynamic_cast(pimpl_); + xbt_assert(wlink != nullptr, "Cannot convert link %s into a wifi link.", get_cname()); + wlink->set_host_rate(host, level); +} + double Link::get_usage() const { return this->pimpl_->get_constraint()->get_usage(); diff --git a/src/s4u/s4u_Mailbox.cpp b/src/s4u/s4u_Mailbox.cpp index abbf92e21a..72facec073 100644 --- a/src/s4u/s4u_Mailbox.cpp +++ b/src/s4u/s4u_Mailbox.cpp @@ -34,17 +34,17 @@ Mailbox* Mailbox::by_name(const std::string& name) return &mbox->piface_; } -bool Mailbox::empty() +bool Mailbox::empty() const { return pimpl_->comm_queue_.empty(); } -bool Mailbox::listen() +bool Mailbox::listen() const { return not this->empty() || (pimpl_->permanent_receiver_ && not pimpl_->done_comm_queue_.empty()); } -aid_t Mailbox::listen_from() +aid_t Mailbox::listen_from() const { kernel::activity::CommImplPtr comm = front(); if (comm && comm->src_actor_) @@ -53,7 +53,7 @@ aid_t Mailbox::listen_from() return -1; } -bool Mailbox::ready() +bool Mailbox::ready() const { bool comm_ready = false; if (not 
pimpl_->comm_queue_.empty()) { @@ -65,7 +65,7 @@ bool Mailbox::ready() return comm_ready; } -kernel::activity::CommImplPtr Mailbox::front() +kernel::activity::CommImplPtr Mailbox::front() const { return pimpl_->comm_queue_.empty() ? nullptr : pimpl_->comm_queue_.front(); } @@ -76,7 +76,7 @@ void Mailbox::set_receiver(ActorPtr actor) } /** @brief get the receiver (process associated to the mailbox) */ -ActorPtr Mailbox::get_receiver() +ActorPtr Mailbox::get_receiver() const { if (pimpl_->permanent_receiver_ == nullptr) return ActorPtr(); diff --git a/src/simdag/sd_dotloader.cpp b/src/simdag/sd_dotloader.cpp index 39792bb996..d9aaeafdd6 100644 --- a/src/simdag/sd_dotloader.cpp +++ b/src/simdag/sd_dotloader.cpp @@ -213,7 +213,7 @@ xbt_dynar_t SD_dotload_generic(const char* filename, bool sequential, bool sched if (previous_task && not SD_task_dependency_exists(previous_task, cur_task)) SD_task_dependency_add(previous_task, cur_task); - SD_task_schedulel(cur_task, 1, hosts[std::stod(elm.first)]); + SD_task_schedulel(cur_task, 1, hosts[std::stoi(elm.first)]); previous_task = cur_task; } } diff --git a/src/simdag/sd_task.cpp b/src/simdag/sd_task.cpp index 4bde0b8dc3..05a3afc6ec 100644 --- a/src/simdag/sd_task.cpp +++ b/src/simdag/sd_task.cpp @@ -40,7 +40,7 @@ SD_task_t SD_task_create(const char *name, void *data, double amount) task->state= SD_NOT_SCHEDULED; sd_global->initial_tasks.insert(task); - task->marked = 0; + task->marked = false; task->start_time = -1.0; task->finish_time = -1.0; task->surf_action = nullptr; @@ -371,7 +371,7 @@ xbt_dynar_t SD_task_get_children(const_SD_task_t task) */ int SD_task_get_workstation_count(const_SD_task_t task) { - return task->allocation->size(); + return static_cast(task->allocation->size()); } /** @@ -591,10 +591,10 @@ int SD_task_dependency_exists(const_SD_task_t src, SD_task_t dst) if (dst) { return (src->successors->find(dst) != src->successors->end() || src->outputs->find(dst) != src->outputs->end()); } else { - return src->successors->size() + src->outputs->size(); + return static_cast(src->successors->size() + src->outputs->size()); } } else { - return dst->predecessors->size() + dst->inputs->size(); + return static_cast(dst->predecessors->size() + dst->inputs->size()); } } @@ -916,7 +916,7 @@ void SD_task_schedulev(SD_task_t task, int count, const sg_host_t * list) /* Iterate over all inputs and outputs to say where I am located (and start them if runnable) */ for (auto const& input : *task->inputs) { - int src_nb = input->allocation->size(); + int src_nb = static_cast(input->allocation->size()); int dst_nb = count; if (input->allocation->empty()) XBT_VERB("Sender side of '%s' not scheduled. Set receiver side to '%s''s allocation", input->name, task->name); @@ -936,7 +936,7 @@ void SD_task_schedulev(SD_task_t task, int count, const sg_host_t * list) for (auto const& output : *task->outputs) { int src_nb = count; - int dst_nb = output->allocation->size(); + int dst_nb = static_cast(output->allocation->size()); if (output->allocation->empty()) XBT_VERB("Receiver side of '%s' not scheduled. 
Set sender side to '%s''s allocation", output->name, task->name); diff --git a/src/simix/libsmx.cpp b/src/simix/libsmx.cpp index 5830d3254e..86e74dac3b 100644 --- a/src/simix/libsmx.cpp +++ b/src/simix/libsmx.cpp @@ -97,8 +97,8 @@ void simcall_comm_send(smx_actor_t sender, smx_mailbox_t mbox, double task_size, /* the model-checker wants two separate simcalls */ simgrid::kernel::activity::ActivityImplPtr comm = nullptr; /* MC needs the comm to be set to nullptr during the simcall */ - comm = simcall_comm_isend(sender, mbox, task_size, rate, - src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, data, 0); + comm = simcall_comm_isend(sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, + data, false); simcall_comm_wait(comm.get(), timeout); comm = nullptr; } diff --git a/src/simix/popping_bodies.cpp b/src/simix/popping_bodies.cpp index d284a1603c..c659920331 100644 --- a/src/simix/popping_bodies.cpp +++ b/src/simix/popping_bodies.cpp @@ -41,133 +41,133 @@ inline static R simcall(e_smx_simcall_t call, T const&... t) inline static int simcall_BODY_execution_waitany_for(simgrid::kernel::activity::ExecImpl** execs, size_t count, double timeout) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_execution_waitany_for(&SIMIX_process_self()->simcall_, execs, count, timeout); return simcall(SIMCALL_EXECUTION_WAITANY_FOR, execs, count, timeout); } inline static void simcall_BODY_comm_send(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, unsigned char* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_send(&SIMIX_process_self()->simcall_, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, copy_data_fun, data, timeout); return simcall(SIMCALL_COMM_SEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, copy_data_fun, data, timeout); } inline static boost::intrusive_ptr simcall_BODY_comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, unsigned char* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data, bool detached) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_isend(&SIMIX_process_self()->simcall_, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun, data, detached); return simcall, smx_actor_t, smx_mailbox_t, double, double, unsigned char*, size_t, simix_match_func_t, simix_clean_func_t, simix_copy_data_func_t, void*, bool>(SIMCALL_COMM_ISEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun, data, detached); } inline static void simcall_BODY_comm_recv(smx_actor_t receiver, smx_mailbox_t mbox, unsigned char* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout, double rate) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if 
(false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_recv(&SIMIX_process_self()->simcall_, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, timeout, rate); return simcall(SIMCALL_COMM_RECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, timeout, rate); } inline static boost::intrusive_ptr simcall_BODY_comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, unsigned char* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_irecv(&SIMIX_process_self()->simcall_, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate); return simcall, smx_actor_t, smx_mailbox_t, unsigned char*, size_t*, simix_match_func_t, simix_copy_data_func_t, void*, double>(SIMCALL_COMM_IRECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate); } inline static int simcall_BODY_comm_waitany(simgrid::kernel::activity::CommImpl** comms, size_t count, double timeout) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_waitany(&SIMIX_process_self()->simcall_, comms, count, timeout); return simcall(SIMCALL_COMM_WAITANY, comms, count, timeout); } inline static void simcall_BODY_comm_wait(simgrid::kernel::activity::CommImpl* comm, double timeout) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_wait(&SIMIX_process_self()->simcall_, comm, timeout); return simcall(SIMCALL_COMM_WAIT, comm, timeout); } inline static bool simcall_BODY_comm_test(simgrid::kernel::activity::CommImpl* comm) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_test(&SIMIX_process_self()->simcall_, comm); return simcall(SIMCALL_COMM_TEST, comm); } inline static int simcall_BODY_comm_testany(simgrid::kernel::activity::CommImpl** comms, size_t count) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_comm_testany(&SIMIX_process_self()->simcall_, comms, count); return simcall(SIMCALL_COMM_TESTANY, comms, count); } inline static void simcall_BODY_mutex_lock(smx_mutex_t mutex) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_mutex_lock(&SIMIX_process_self()->simcall_, mutex); return simcall(SIMCALL_MUTEX_LOCK, mutex); } inline static int simcall_BODY_mutex_trylock(smx_mutex_t mutex) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_mutex_trylock(&SIMIX_process_self()->simcall_, mutex); return simcall(SIMCALL_MUTEX_TRYLOCK, mutex); } inline static void 
simcall_BODY_mutex_unlock(smx_mutex_t mutex) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_mutex_unlock(&SIMIX_process_self()->simcall_, mutex); return simcall(SIMCALL_MUTEX_UNLOCK, mutex); } inline static void simcall_BODY_cond_wait(smx_cond_t cond, smx_mutex_t mutex) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_cond_wait(&SIMIX_process_self()->simcall_, cond, mutex); return simcall(SIMCALL_COND_WAIT, cond, mutex); } inline static int simcall_BODY_cond_wait_timeout(smx_cond_t cond, smx_mutex_t mutex, double timeout) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_cond_wait_timeout(&SIMIX_process_self()->simcall_, cond, mutex, timeout); return simcall(SIMCALL_COND_WAIT_TIMEOUT, cond, mutex, timeout); } inline static void simcall_BODY_sem_acquire(smx_sem_t sem) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_sem_acquire(&SIMIX_process_self()->simcall_, sem); return simcall(SIMCALL_SEM_ACQUIRE, sem); } inline static int simcall_BODY_sem_acquire_timeout(smx_sem_t sem, double timeout) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_sem_acquire_timeout(&SIMIX_process_self()->simcall_, sem, timeout); return simcall(SIMCALL_SEM_ACQUIRE_TIMEOUT, sem, timeout); } inline static int simcall_BODY_mc_random(int min, int max) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ simcall_HANDLER_mc_random(&SIMIX_process_self()->simcall_, min, max); return simcall(SIMCALL_MC_RANDOM, min, max); } inline static void simcall_BODY_run_kernel(std::function const* code) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ SIMIX_run_kernel(code); return simcall const*>(SIMCALL_RUN_KERNEL, code); } inline static void simcall_BODY_run_blocking(std::function const* code) { - if (0) /* Go to that function to follow the code flow through the simcall barrier */ + if (false) /* Go to that function to follow the code flow through the simcall barrier */ SIMIX_run_blocking(code); return simcall const*>(SIMCALL_RUN_BLOCKING, code); } diff --git a/src/simix/popping_private.hpp b/src/simix/popping_private.hpp index 70e6d28f92..83e7c00935 100644 --- a/src/simix/popping_private.hpp +++ b/src/simix/popping_private.hpp @@ -48,8 +48,8 @@ struct s_smx_simcall { smx_timer_t timeout_cb_ = nullptr; // Callback to timeouts simgrid::mc::SimcallInspector* inspector_ = nullptr; // makes that simcall observable by the MC int mc_value_ = 0; - u_smx_scalar args_[11] = {{0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}}; - u_smx_scalar result_ = {0}; + u_smx_scalar args_[11] = {}; + u_smx_scalar result_ = {}; }; #define SIMCALL_SET_MC_VALUE(simcall, value) 
((simcall).mc_value_ = (value)) diff --git a/src/simix/simcalls.py b/src/simix/simcalls.py index 36fa58dc89..5b0f872197 100755 --- a/src/simix/simcalls.py +++ b/src/simix/simcalls.py @@ -130,17 +130,17 @@ class Simcall(object): def case(self): res = [] indent = ' ' - args = ["simgrid::simix::unmarshal<%s>(simcall.args_[%d])" % (arg.rettype(), i) + args = ["simgrid::simix::unmarshal<%s>(simcall_.args_[%d])" % (arg.rettype(), i) for i, arg in enumerate(self.args)] res.append(indent + 'case SIMCALL_%s:' % (self.name.upper())) if self.need_handler: - call = "simcall_HANDLER_%s(&simcall%s%s)" % (self.name, + call = "simcall_HANDLER_%s(&simcall_%s%s)" % (self.name, ", " if args else "", ', '.join(args)) else: call = "SIMIX_%s(%s)" % (self.name, ', '.join(args)) if self.call_kind == 'Func': - res.append(indent + " simgrid::simix::marshal<%s>(simcall.result_, %s);" % (self.res.rettype(), call)) + res.append(indent + " simgrid::simix::marshal<%s>(simcall_.result_, %s);" % (self.res.rettype(), call)) else: res.append(indent + " " + call + ";") if self.call_kind != 'Blck': @@ -156,10 +156,10 @@ class Simcall(object): self.name, ', '.join('%s %s' % (arg.rettype(), arg.name) for arg in self.args))) res.append('{') - res.append(' if (0) /* Go to that function to follow the code flow through the simcall barrier */') + res.append(' if (false) /* Go to that function to follow the code flow through the simcall barrier */') if self.need_handler: res.append(' simcall_HANDLER_%s(%s);' % (self.name, - ', '.join(["&SIMIX_process_self()->simcall"] + [arg.name for arg in self.args]))) + ', '.join(["&SIMIX_process_self()->simcall_"] + [arg.name for arg in self.args]))) else: res.append(' SIMIX_%s(%s);' % (self.name, ', '.join(arg.name for arg in self.args))) @@ -341,12 +341,12 @@ if __name__ == '__main__': fd.write( 'void simgrid::kernel::actor::ActorImpl::simcall_handle(int value) {\n') fd.write( - ' XBT_DEBUG("Handling simcall %p: %s", &simcall, SIMIX_simcall_name(simcall.call_));\n') - fd.write(' SIMCALL_SET_MC_VALUE(simcall, value);\n') + ' XBT_DEBUG("Handling simcall %p: %s", &simcall_, SIMIX_simcall_name(simcall_.call_));\n') + fd.write(' SIMCALL_SET_MC_VALUE(simcall_, value);\n') fd.write( ' if (context_->wannadie())\n') fd.write(' return;\n') - fd.write(' switch (simcall.call_) {\n') + fd.write(' switch (simcall_.call_) {\n') handle(fd, Simcall.case, simcalls, simcalls_dict) @@ -382,15 +382,15 @@ template inline static R simcall(e_smx_simcall_t call, T const&... 
t) { smx_actor_t self = SIMIX_process_self(); - simgrid::simix::marshal(&self->simcall, call, t...); + simgrid::simix::marshal(&self->simcall_, call, t...); if (self != simix_global->maestro_) { - XBT_DEBUG("Yield process '%s' on simcall %s (%d)", self->get_cname(), SIMIX_simcall_name(self->simcall.call_), - (int)self->simcall.call_); + XBT_DEBUG("Yield process '%s' on simcall %s (%d)", self->get_cname(), SIMIX_simcall_name(self->simcall_.call_), + (int)self->simcall_.call_); self->yield(); } else { self->simcall_handle(0); } - return simgrid::simix::unmarshal(self->simcall.result_); + return simgrid::simix::unmarshal(self->simcall_.result_); } ''') handle(fd, Simcall.body, simcalls, simcalls_dict) diff --git a/src/simix/smx_global.cpp b/src/simix/smx_global.cpp index d8a18c0749..8ae0f08757 100644 --- a/src/simix/smx_global.cpp +++ b/src/simix/smx_global.cpp @@ -198,7 +198,7 @@ void Global::run_all_actors() } /** Wake up all actors waiting for a Surf action to finish */ -void Global::wake_all_waiting_actors() +void Global::wake_all_waiting_actors() const { for (auto const& model : all_existing_models) { kernel::resource::Action* action; @@ -220,7 +220,7 @@ void Global::wake_all_waiting_actors() } } -void Global::display_all_actor_status() +void Global::display_all_actor_status() const { XBT_INFO("%zu actors are still running, waiting for something.", process_list.size()); /* List the actors and their state */ diff --git a/src/simix/smx_private.hpp b/src/simix/smx_private.hpp index 54fdcac886..492d7cf403 100644 --- a/src/simix/smx_private.hpp +++ b/src/simix/smx_private.hpp @@ -30,8 +30,8 @@ public: */ void empty_trash(); void run_all_actors(); - void wake_all_waiting_actors(); - void display_all_actor_status(); + void wake_all_waiting_actors() const; + void display_all_actor_status() const; smx_context_factory_t context_factory = nullptr; std::vector actors_to_run; diff --git a/src/smpi/bindings/smpi_f77.cpp b/src/smpi/bindings/smpi_f77.cpp index 809002d7c8..4d108c298c 100644 --- a/src/smpi/bindings/smpi_f77.cpp +++ b/src/smpi/bindings/smpi_f77.cpp @@ -217,7 +217,8 @@ void mpi_win_get_name_(int* win, char* name, int* len, int* ierr) void mpi_win_allocate_(MPI_Aint* size, int* disp_unit, int* info, int* comm, void* base, int* win, int* ierr) { MPI_Win tmp; - *ierr = MPI_Win_allocate( *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),static_cast(base),&tmp); + *ierr = + MPI_Win_allocate(*size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm), base, &tmp); if(*ierr == MPI_SUCCESS) { *win = tmp->add_f(); } diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp index 41894787d9..66346e8e01 100644 --- a/src/smpi/bindings/smpi_pmpi.cpp +++ b/src/smpi/bindings/smpi_pmpi.cpp @@ -232,7 +232,7 @@ int PMPI_Keyval_free(int* keyval) { MPI_Errhandler PMPI_Errhandler_f2c(MPI_Fint errhan){ if(errhan==-1) return MPI_ERRHANDLER_NULL; - return static_cast(simgrid::smpi::Errhandler::f2c(errhan)); + return simgrid::smpi::Errhandler::f2c(errhan); } MPI_Fint PMPI_Errhandler_c2f(MPI_Errhandler errhan){ diff --git a/src/smpi/bindings/smpi_pmpi_comm.cpp b/src/smpi/bindings/smpi_pmpi_comm.cpp index 85dd53aa74..4e24a76a6c 100644 --- a/src/smpi/bindings/smpi_pmpi_comm.cpp +++ b/src/smpi/bindings/smpi_pmpi_comm.cpp @@ -157,7 +157,7 @@ int PMPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int, MPI_Comm* comm_o MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){ if(comm==-1) return MPI_COMM_NULL; - return 
static_cast(simgrid::smpi::Comm::f2c(comm)); + return simgrid::smpi::Comm::f2c(comm); } MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){ diff --git a/src/smpi/bindings/smpi_pmpi_info.cpp b/src/smpi/bindings/smpi_pmpi_info.cpp index d96a263052..b8a21443c2 100644 --- a/src/smpi/bindings/smpi_pmpi_info.cpp +++ b/src/smpi/bindings/smpi_pmpi_info.cpp @@ -81,7 +81,7 @@ int PMPI_Info_get_valuelen( MPI_Info info, const char *key, int *valuelen, int * MPI_Info PMPI_Info_f2c(MPI_Fint info){ if(info==-1) return MPI_INFO_NULL; - return static_cast(simgrid::smpi::Info::f2c(info)); + return simgrid::smpi::Info::f2c(info); } MPI_Fint PMPI_Info_c2f(MPI_Info info){ diff --git a/src/smpi/bindings/smpi_pmpi_op.cpp b/src/smpi/bindings/smpi_pmpi_op.cpp index 5dedee4a9b..8c14f06d5b 100644 --- a/src/smpi/bindings/smpi_pmpi_op.cpp +++ b/src/smpi/bindings/smpi_pmpi_op.cpp @@ -37,7 +37,7 @@ int PMPI_Op_commutative(MPI_Op op, int* commute){ MPI_Op PMPI_Op_f2c(MPI_Fint op){ if(op==-1) return MPI_OP_NULL; - return static_cast(simgrid::smpi::Op::f2c(op)); + return simgrid::smpi::Op::f2c(op); } MPI_Fint PMPI_Op_c2f(MPI_Op op){ diff --git a/src/smpi/bindings/smpi_pmpi_request.cpp b/src/smpi/bindings/smpi_pmpi_request.cpp index a5c3141a47..ddc0a152eb 100644 --- a/src/smpi/bindings/smpi_pmpi_request.cpp +++ b/src/smpi/bindings/smpi_pmpi_request.cpp @@ -138,7 +138,7 @@ int PMPI_Startall(int count, MPI_Request * requests) TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("Startall")); if (not TRACE_smpi_view_internals()) for (int i = 0; i < count; i++) { - MPI_Request req = requests[i]; + const simgrid::smpi::Request* req = requests[i]; if (req->flags() & MPI_REQ_SEND) TRACE_smpi_send(my_proc_id, my_proc_id, getPid(req->comm(), req->dst()), req->tag(), req->size()); } @@ -147,7 +147,7 @@ int PMPI_Startall(int count, MPI_Request * requests) if (not TRACE_smpi_view_internals()) for (int i = 0; i < count; i++) { - MPI_Request req = requests[i]; + const simgrid::smpi::Request* req = requests[i]; if (req->flags() & MPI_REQ_RECV) TRACE_smpi_recv(getPid(req->comm(), req->src()), my_proc_id, req->tag()); } @@ -588,10 +588,9 @@ int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* statu } // TODO: cheinrich: Move declaration to other file? Rename this function - it's used for PMPI_Wait*? 
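The PMPI_*_f2c hunks above drop the static_cast around the Errhandler/Comm/Info/Op f2c calls; presumably those helpers already return the public handle type (the MPI_* handles being pointers to the corresponding smpi classes), so the cast converted a value to its own type. A generic illustration with invented names, not the SimGrid headers:

    // When a lookup already returns the handle type, a static_cast to that same
    // type is redundant and can simply be dropped.
    struct Comm { int id; };
    using HandleType = Comm*;                  // handle == pointer to the impl class

    static Comm table[4];
    static Comm* f2c_like(int id) { return &table[id]; }

    HandleType lookup(int id)
    {
      return f2c_like(id);                     // no static_cast<HandleType>(...) needed
    }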
-static void trace_smpi_recv_helper(MPI_Request* request, MPI_Status* status); static void trace_smpi_recv_helper(MPI_Request* request, MPI_Status* status) { - MPI_Request req = *request; + const simgrid::smpi::Request* req = *request; if (req != MPI_REQUEST_NULL) { // Received requests become null int src_traced = req->src(); // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) @@ -620,7 +619,7 @@ int PMPI_Wait(MPI_Request * request, MPI_Status * status) MPI_Request savedreq = *request; if (savedreq != MPI_REQUEST_NULL && not(savedreq->flags() & MPI_REQ_FINISHED) && not(savedreq->flags() & MPI_REQ_GENERALIZED)) - savedreq->ref();//don't erase te handle in Request::wait, we'll need it later + savedreq->ref();//don't erase the handle in Request::wait, we'll need it later else savedreq = MPI_REQUEST_NULL; @@ -788,7 +787,7 @@ int PMPI_Request_get_status( MPI_Request request, int *flag, MPI_Status *status) MPI_Request PMPI_Request_f2c(MPI_Fint request){ if(request==-1) return MPI_REQUEST_NULL; - return static_cast(simgrid::smpi::Request::f2c(request)); + return simgrid::smpi::Request::f2c(request); } MPI_Fint PMPI_Request_c2f(MPI_Request request) { diff --git a/src/smpi/bindings/smpi_pmpi_topo.cpp b/src/smpi/bindings/smpi_pmpi_topo.cpp index c45f88bd33..7e774e6f9f 100644 --- a/src/smpi/bindings/smpi_pmpi_topo.cpp +++ b/src/smpi/bindings/smpi_pmpi_topo.cpp @@ -91,7 +91,7 @@ int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) { CHECK_COMM(1) CHECK_NULL(1, MPI_ERR_TOPOLOGY, comm->topo()) CHECK_NULL(2, MPI_ERR_ARG, ndims) - MPIR_Cart_Topology topo = static_cast(comm->topo().get()); + const simgrid::smpi::Topo_Cart* topo = static_cast(comm->topo().get()); if (topo==nullptr) { return MPI_ERR_ARG; } diff --git a/src/smpi/bindings/smpi_pmpi_win.cpp b/src/smpi/bindings/smpi_pmpi_win.cpp index 45b784428d..42f1b92681 100644 --- a/src/smpi/bindings/smpi_pmpi_win.cpp +++ b/src/smpi/bindings/smpi_pmpi_win.cpp @@ -669,7 +669,7 @@ int PMPI_Win_free_keyval(int* keyval) { MPI_Win PMPI_Win_f2c(MPI_Fint win){ if(win==-1) return MPI_WIN_NULL; - return static_cast(simgrid::smpi::Win::f2c(win)); + return simgrid::smpi::Win::f2c(win); } MPI_Fint PMPI_Win_c2f(MPI_Win win){ diff --git a/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp b/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp index 77b8723318..116e152d83 100644 --- a/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp +++ b/src/smpi/colls/allgather/allgather-NTSLR-NB.cpp @@ -28,7 +28,7 @@ allgather__NTSLR_NB(const void *sbuf, int scount, MPI_Datatype stype, MPI_Request* rrequest_array = new MPI_Request[size]; MPI_Request* srequest_array = new MPI_Request[size]; - // irregular case use default MPI fucntions + // irregular case use default MPI functions if (scount * sextent != rcount * rextent) { XBT_WARN("MPI_allgather_NTSLR_NB use default MPI_allgather."); allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm); diff --git a/src/smpi/colls/allgather/allgather-NTSLR.cpp b/src/smpi/colls/allgather/allgather-NTSLR.cpp index b2ded1c833..80428f0d91 100644 --- a/src/smpi/colls/allgather/allgather-NTSLR.cpp +++ b/src/smpi/colls/allgather/allgather-NTSLR.cpp @@ -26,7 +26,7 @@ allgather__NTSLR(const void *sbuf, int scount, MPI_Datatype stype, rextent = rtype->get_extent(); sextent = stype->get_extent(); - // irregular case use default MPI fucntions + // irregular case use default MPI functions if (scount * sextent != rcount * rextent) { XBT_WARN("MPI_allgather_NTSLR use default MPI_allgather."); allgather__default(sbuf, scount, stype, rbuf, 
rcount, rtype, comm); diff --git a/src/smpi/colls/allgather/allgather-ompi-neighborexchange.cpp b/src/smpi/colls/allgather/allgather-ompi-neighborexchange.cpp index 75bb2f397b..8a0a2b7d1e 100644 --- a/src/smpi/colls/allgather/allgather-ompi-neighborexchange.cpp +++ b/src/smpi/colls/allgather/allgather-ompi-neighborexchange.cpp @@ -137,7 +137,7 @@ allgather__ompi_neighborexchange(const void *sbuf, int scount, - Rest of the steps: update recv_data_from according to offset, and exchange two blocks with appropriate neighbor. - the send location becomes previous receve location. + the send location becomes previous receive location. */ tmprecv = (char*)rbuf + neighbor[0] * rcount * rext; tmpsend = (char*)rbuf + rank * rcount * rext; diff --git a/src/smpi/colls/allgatherv/allgatherv-ompi-neighborexchange.cpp b/src/smpi/colls/allgatherv/allgatherv-ompi-neighborexchange.cpp index 38c47dfb57..2d07775677 100644 --- a/src/smpi/colls/allgatherv/allgatherv-ompi-neighborexchange.cpp +++ b/src/smpi/colls/allgatherv/allgatherv-ompi-neighborexchange.cpp @@ -141,7 +141,7 @@ allgatherv__ompi_neighborexchange(const void *sbuf, int scount, - Rest of the steps: update recv_data_from according to offset, and exchange two blocks with appropriate neighbor. - the send location becomes previous receve location. + the send location becomes previous receive location. Note, we need to create indexed datatype to send and receive these blocks properly. */ diff --git a/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp b/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp index 1ab40644b9..23d02679b4 100644 --- a/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp +++ b/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp @@ -164,7 +164,7 @@ int allreduce__mvapich2_two_level(const void *sendbuf, } } - /* Broadcasting the mesage from leader to the rest */ + /* Broadcasting the message from leader to the rest */ /* Note: shared memory broadcast could improve the performance */ mpi_errno = colls::bcast(recvbuf, count, datatype, 0, shmem_comm); diff --git a/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp b/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp index c864a0785d..47d62c6755 100644 --- a/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp +++ b/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp @@ -88,7 +88,7 @@ * [02b] [12b] [22b] * * COMPUTATION PHASE 0 (a) - * Step 0: rank r sends block ra to rank (r+1) and receives bloc (r-1)a + * Step 0: rank r sends block ra to rank (r+1) and receives block (r-1)a * from rank (r-1) [with wraparound]. * # 0 1 2 * [00a] [00a+10a] [20a] @@ -98,7 +98,7 @@ * [22a+02a] [12a] [22a] * [02b] [12b] [22b] * - * Step 1: rank r sends block (r-1)a to rank (r+1) and receives bloc + * Step 1: rank r sends block (r-1)a to rank (r+1) and receives block * (r-2)a from rank (r-1) [with wraparound]. * # 0 1 2 * [00a] [00a+10a] [00a+10a+20a] @@ -109,7 +109,7 @@ * [02b] [12b] [22b] * * COMPUTATION PHASE 1 (b) - * Step 0: rank r sends block rb to rank (r+1) and receives bloc (r-1)b + * Step 0: rank r sends block rb to rank (r+1) and receives block (r-1)b * from rank (r-1) [with wraparound]. * # 0 1 2 * [00a] [00a+10a] [20a] @@ -119,7 +119,7 @@ * [22a+02a] [12a] [22a] * [22b+02b] [12b] [22b] * - * Step 1: rank r sends block (r-1)b to rank (r+1) and receives bloc + * Step 1: rank r sends block (r-1)b to rank (r+1) and receives block * (r-2)b from rank (r-1) [with wraparound]. 
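The block indices spelled out in the comment above follow a simple rule: at step k of the reduce-scatter phase, rank r forwards the block it has just finished accumulating, (r - k) mod size, and receives the next one, (r - k - 1) mod size, from its left neighbour; the a/b halves in the diagram come from splitting each block so the two phases can be pipelined. A rough sketch of that indexing only (invented helper, not the SimGrid implementation, which also segments the blocks and posts the actual sends and receives):

    #include <cstdio>

    void ring_reduce_scatter_schedule(int rank, int size)
    {
      for (int step = 0; step < size - 1; ++step) {
        int send_block = (rank - step + size) % size;      // block accumulated so far
        int recv_block = (rank - step - 1 + size) % size;  // block to accumulate next
        std::printf("step %d: rank %d sends block %d to %d and receives block %d from %d\n",
                    step, rank, send_block, (rank + 1) % size,
                    recv_block, (rank - 1 + size) % size);
      }
    }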
* # 0 1 2 * [00a] [00a+10a] [00a+10a+20a] diff --git a/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp b/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp index 15c1446f4c..d573d6a153 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp @@ -30,7 +30,7 @@ int allreduce_smp_binomial_pipeline_segment_size = 4096; */ /* -This fucntion performs all-reduce operation as follow. ** in a pipeline fashion ** +This function performs all-reduce operation as follow. ** in a pipeline fashion ** 1) binomial_tree reduce inside each SMP node 2) binomial_tree reduce intra-communication between root of each SMP node 3) binomial_tree bcast intra-communication between root of each SMP node diff --git a/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp b/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp index 8c2c410716..de9695848e 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp @@ -20,7 +20,7 @@ //#include /* -This fucntion performs all-reduce operation as follow. +This function performs all-reduce operation as follow. 1) binomial_tree reduce inside each SMP node 2) binomial_tree reduce intra-communication between root of each SMP node 3) binomial_tree bcast intra-communication between root of each SMP node diff --git a/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp b/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp index 489e719f80..a3a33cb280 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp @@ -20,7 +20,7 @@ //#include /* -This fucntion performs all-reduce operation as follow. +This function performs all-reduce operation as follow. 1) binomial_tree reduce inside each SMP node 2) Recursive doubling intra-communication between root of each SMP node 3) binomial_tree bcast inside each SMP node diff --git a/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp b/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp index 1bf3f35c85..77ef310f1b 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp @@ -8,7 +8,7 @@ //#include /* -This fucntion performs all-reduce operation as follow. +This function performs all-reduce operation as follow. 1) binomial_tree reduce inside each SMP node 2) reduce-scatter -inter between root of each SMP node 3) allgather - inter between root of each SMP node diff --git a/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp b/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp index 7e3c0c14d1..722b2efc17 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp @@ -12,7 +12,7 @@ /* -This fucntion performs all-reduce operation as follow. +This function performs all-reduce operation as follow. 1) binomial_tree reduce inside each SMP node 2) reduce-scatter -inter between root of each SMP node 3) allgather - inter between root of each SMP node diff --git a/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp b/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp index 38f89eb25e..b6e50ce4d3 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp @@ -7,7 +7,7 @@ #include "../colls_private.hpp" /* -This fucntion performs all-reduce operation as follow. +This function performs all-reduce operation as follow. 
1) binomial_tree reduce inside each SMP node 2) reduce-scatter -inter between root of each SMP node 3) allgather - inter between root of each SMP node diff --git a/src/smpi/colls/barrier/barrier-ompi.cpp b/src/smpi/colls/barrier/barrier-ompi.cpp index f5ac1dc187..396b2747e2 100644 --- a/src/smpi/colls/barrier/barrier-ompi.cpp +++ b/src/smpi/colls/barrier/barrier-ompi.cpp @@ -24,7 +24,7 @@ #include "../colls_private.hpp" /* - * Barrier is ment to be a synchronous operation, as some BTLs can mark + * Barrier is meant to be a synchronous operation, as some BTLs can mark * a request done before its passed to the NIC and progress might not be made * elsewhere we cannot allow a process to exit the barrier until its last * [round of] sends are completed. diff --git a/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp b/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp index 24736d4687..e91c7680cc 100644 --- a/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp +++ b/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp @@ -72,7 +72,7 @@ int bcast__ompi_split_bintree( void* buffer, int segindex, i, lr, pair; int segcount[2]; /* Number ompi_request_wait_allof elements sent with each segment */ uint32_t counts[2]; - int num_segments[2]; /* Number of segmenets */ + int num_segments[2]; /* Number of segments */ int sendcount[2]; /* the same like segcount, except for the last segment */ size_t realsegsize[2]; char *tmpbuf[2]; @@ -211,7 +211,7 @@ int bcast__ompi_split_bintree( void* buffer, Request::send(tmpbuf[lr], segcount[lr], datatype, tree->tree_next[i], COLL_TAG_BCAST, comm); } /* end of for each child */ - /* upate the base request */ + /* update the base request */ base_req = new_req; /* go to the next buffer (ie. the one corresponding to the next recv) */ tmpbuf[lr] += realsegsize[lr]; diff --git a/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp b/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp index 7148af816d..646d3b17af 100644 --- a/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp +++ b/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp @@ -114,7 +114,7 @@ int bcast__scatter_LR_allgather(void *buff, int count, } // This process is responsible for all processes that have bits - // set from the LSB upto (but not including) mask. Because of + // set from the LSB up to (but not including) mask. Because of // the "not including", we start by shifting mask back down // one. diff --git a/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp b/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp index 85cb9813d8..40cae7638d 100644 --- a/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp +++ b/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp @@ -72,7 +72,7 @@ static int scatter_for_bcast( } /* This process is responsible for all processes that have bits - set from the LSB upto (but not including) mask. Because of + set from the LSB up to (but not including) mask. Because of the "not including", we start by shifting mask back down one. 
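The sentence corrected above ("bits set from the LSB up to (but not including) mask") describes the usual binomial scatter bookkeeping: a process climbs the mask to its lowest set bit to learn which sub-range of ranks it is responsible for, then walks the mask back down, handing the upper half of that range to a peer at each step. A hedged sketch of just that mask walk (invented names; the real scatter_for_bcast also computes byte counts and performs the sends and receives):

    #include <cstdio>

    // relative_rank is the rank rotated so that the broadcast root is 0.
    void show_scatter_schedule(int relative_rank, int size)
    {
      int mask = 1;
      while (mask < size && (relative_rank & mask) == 0)
        mask <<= 1;                      // stop at relative_rank's lowest set bit
      // responsibility: every relative rank in [relative_rank, relative_rank + mask)
      mask >>= 1;                        // "shift mask back down one"
      while (mask > 0) {
        int dst = relative_rank + mask;  // peer taking the upper half of the range
        if (dst < size)
          std::printf("relative rank %d hands a sub-range to %d\n", relative_rank, dst);
        mask >>= 1;
      }
    }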
*/ diff --git a/src/smpi/colls/gather/gather-ompi.cpp b/src/smpi/colls/gather/gather-ompi.cpp index 3aaaab58b1..f1fca998f6 100644 --- a/src/smpi/colls/gather/gather-ompi.cpp +++ b/src/smpi/colls/gather/gather-ompi.cpp @@ -104,7 +104,7 @@ int gather__ompi_binomial(const void* sbuf, int scount, MPI_Datatype sdtype, voi } else if (!(vrank % 2)) { /* other non-leaf nodes, allocate temp buffer for data received from * children, the most we need is half of the total data elements due - * to the property of binimoal tree */ + * to the property of binomial tree */ tempbuf = smpi_get_tmp_sendbuffer(strue_extent + (scount * size - 1) * sextent); if (NULL == tempbuf) { err = MPI_ERR_OTHER; diff --git a/src/smpi/colls/reduce/reduce-ompi.cpp b/src/smpi/colls/reduce/reduce-ompi.cpp index 0caa56c09f..ccdd68e07a 100644 --- a/src/smpi/colls/reduce/reduce-ompi.cpp +++ b/src/smpi/colls/reduce/reduce-ompi.cpp @@ -235,7 +235,7 @@ int smpi_coll_tuned_ompi_reduce_generic(const void* sendbuf, void* recvbuf, int */ else { - /* If the number of segments is less than a maximum number of oustanding + /* If the number of segments is less than a maximum number of outstanding requests or there is no limit on the maximum number of outstanding requests, we send data to the parent using blocking send */ if ((0 == max_outstanding_reqs) || diff --git a/src/smpi/colls/reduce/reduce-rab.cpp b/src/smpi/colls/reduce/reduce-rab.cpp index 06059579c0..c1e5cf7e02 100644 --- a/src/smpi/colls/reduce/reduce-rab.cpp +++ b/src/smpi/colls/reduce/reduce-rab.cpp @@ -378,8 +378,8 @@ Benchmark results on CRAY T3E otherwise the new protocol is used (see variable Ldb). 3) These lines show the bandwidth (= buffer length / execution time) for both protocols. - 4) This line shows that the limit is choosen well if the ratio is - between 0.95 (loosing 5% for buffer length near and >=limit) + 4) This line shows that the limit is chosen well if the ratio is + between 0.95 (losing 5% for buffer length near and >=limit) and 1.10 (not gaining 10% for buffer length near and _smpi_cfg_host_speed; +extern XBT_PRIVATE simgrid::config::Flag _smpi_cfg_host_speed_string; extern XBT_PRIVATE simgrid::config::Flag _smpi_cfg_simulate_computation; extern XBT_PRIVATE simgrid::config::Flag _smpi_cfg_shared_malloc_string; extern XBT_PRIVATE simgrid::config::Flag _smpi_cfg_cpu_thresh; diff --git a/src/smpi/include/smpi_datatype.hpp b/src/smpi/include/smpi_datatype.hpp index 48316eefce..bf01abb7dc 100644 --- a/src/smpi/include/smpi_datatype.hpp +++ b/src/smpi/include/smpi_datatype.hpp @@ -9,6 +9,7 @@ #include "smpi_f2c.hpp" #include "smpi_keyvals.hpp" #include +#include constexpr unsigned DT_FLAG_DESTROYED = 0x0001; /**< user destroyed but some other layers still have a reference */ constexpr unsigned DT_FLAG_COMMITED = 0x0002; /**< ready to be used for a send/recv operation */ @@ -20,7 +21,7 @@ constexpr unsigned DT_FLAG_PREDEFINED = 0x0040; /**< cannot be removed: initial constexpr unsigned DT_FLAG_NO_GAPS = 0x0080; /**< no gaps around the datatype */ constexpr unsigned DT_FLAG_DATA = 0x0100; /**< data or control structure */ constexpr unsigned DT_FLAG_ONE_SIDED = 0x0200; /**< datatype can be used for one sided operations */ -constexpr unsigned DT_FLAG_UNAVAILABLE = 0x0400; /**< datatypes unavailable on the build (OS or compiler dependant) */ +constexpr unsigned DT_FLAG_UNAVAILABLE = 0x0400; /**< datatypes unavailable on the build (OS or compiler dependent) */ constexpr unsigned DT_FLAG_DERIVED = 0x0800; /**< is the datatype derived ? 
*/ /* * We should make the difference here between the predefined contiguous and non contiguous @@ -78,19 +79,13 @@ namespace smpi{ class Datatype_contents { public: int combiner_; - int number_of_integers_; - int* integers_; - int number_of_addresses_; - MPI_Aint* addresses_; - int number_of_datatypes_; - MPI_Datatype* datatypes_; + std::vector integers_; + std::vector addresses_; + std::vector datatypes_; Datatype_contents(int combiner, int number_of_integers, const int* integers, int number_of_addresses, const MPI_Aint* addresses, int number_of_datatypes, const MPI_Datatype* datatypes); - Datatype_contents(const Datatype_contents&) = delete; - Datatype_contents& operator=(const Datatype_contents&) = delete; - ~Datatype_contents(); }; class Datatype : public F2C, public Keyval{ @@ -120,26 +115,26 @@ public: Datatype& operator=(const Datatype&) = delete; virtual ~Datatype(); - char* name() { return name_; } - size_t size() { return size_; } - MPI_Aint lb() { return lb_; } - MPI_Aint ub() { return ub_; } - int flags() { return flags_; } - int refcount() { return refcount_; } + char* name() const { return name_; } + size_t size() const { return size_; } + MPI_Aint lb() const { return lb_; } + MPI_Aint ub() const { return ub_; } + int flags() const { return flags_; } + int refcount() const { return refcount_; } void ref(); static void unref(MPI_Datatype datatype); void commit(); int copy_attrs(Datatype* datatype); - bool is_valid(); - bool is_basic(); + bool is_valid() const; + bool is_basic() const; static const char* encode(const Datatype* dt) { return dt->id.c_str(); } static MPI_Datatype decode(const std::string& datatype_id); - bool is_replayable(); + bool is_replayable() const; void addflag(int flag); - int extent(MPI_Aint* lb, MPI_Aint* extent); - MPI_Aint get_extent() { return ub_ - lb_; }; - void get_name(char* name, int* length); + int extent(MPI_Aint* lb, MPI_Aint* extent) const; + MPI_Aint get_extent() const { return ub_ - lb_; }; + void get_name(char* name, int* length) const; void set_name(const char* name); static int copy(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount, MPI_Datatype recvtype); @@ -151,11 +146,9 @@ public: static int keyval_free(int* keyval); int pack(const void* inbuf, int incount, void* outbuf, int outcount, int* position, const Comm* comm); int unpack(const void* inbuf, int insize, int* position, void* outbuf, int outcount, const Comm* comm); - int get_contents(int max_integers, int max_addresses, - int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses, - MPI_Datatype *array_of_datatypes); - int get_envelope(int* num_integers, int* num_addresses, - int* num_datatypes, int* combiner); + int get_contents(int max_integers, int max_addresses, int max_datatypes, int* array_of_integers, + MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes) const; + int get_envelope(int* num_integers, int* num_addresses, int* num_datatypes, int* combiner) const; static int create_contiguous(int count, MPI_Datatype old_type, MPI_Aint lb, MPI_Datatype* new_type); static int create_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type); static int create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type); diff --git a/src/smpi/include/smpi_datatype_derived.hpp b/src/smpi/include/smpi_datatype_derived.hpp index 707b47c464..d749e2fc75 100644 --- a/src/smpi/include/smpi_datatype_derived.hpp +++ 
b/src/smpi/include/smpi_datatype_derived.hpp @@ -33,7 +33,6 @@ public: MPI_Aint block_stride_; MPI_Datatype old_type_; -public: Type_Hvector(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, int block_length, MPI_Aint block_stride, MPI_Datatype old_type); Type_Hvector(const Type_Hvector&) = delete; @@ -58,7 +57,6 @@ public: MPI_Aint* block_indices_; MPI_Datatype old_type_; -public: Type_Hindexed(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, const int* block_lengths, const MPI_Aint* block_indices, MPI_Datatype old_type); Type_Hindexed(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, const int* block_lengths, const int* block_indices, diff --git a/src/smpi/include/smpi_errhandler.hpp b/src/smpi/include/smpi_errhandler.hpp index 30309016ed..cc47cdeab5 100644 --- a/src/smpi/include/smpi_errhandler.hpp +++ b/src/smpi/include/smpi_errhandler.hpp @@ -25,9 +25,9 @@ class Errhandler: public F2C { explicit Errhandler(MPI_File_errhandler_fn *function):file_func_(function){}; explicit Errhandler(MPI_Win_errhandler_fn *function):win_func_(function){}; void ref(); - void call(MPI_Comm comm, int errorcode); - void call(MPI_Win win, int errorcode); - void call(MPI_File file, int errorcode); + void call(MPI_Comm comm, int errorcode) const; + void call(MPI_Win win, int errorcode) const; + void call(MPI_File file, int errorcode) const; static void unref(Errhandler* errhandler); static Errhandler* f2c(int id); }; diff --git a/src/smpi/include/smpi_f2c.hpp b/src/smpi/include/smpi_f2c.hpp index de22e285e3..d0889f1986 100644 --- a/src/smpi/include/smpi_f2c.hpp +++ b/src/smpi/include/smpi_f2c.hpp @@ -1,4 +1,4 @@ -/* Handle Fortan - C conversion for MPI Types*/ +/* Handle Fortran - C conversion for MPI Types*/ /* Copyright (c) 2010-2020. The SimGrid Team. * All rights reserved. 
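The Datatype_contents change in smpi_datatype.hpp above replaces three manually managed arrays (and their separate length fields, the destructor, and the deleted copy operations) with std::vector members. A minimal sketch of the same simplification on a hypothetical type:

    // Owning std::vector members follow the rule of zero: no hand-written
    // destructor or delete[] is needed, and the compiler-generated copy and move
    // operations are already correct.
    #include <utility>
    #include <vector>

    struct Contents {
      int combiner;
      std::vector<int> integers;
      std::vector<long long> addresses;   // stands in for MPI_Aint here
      Contents(int comb, std::vector<int> ints, std::vector<long long> addrs)
          : combiner(comb), integers(std::move(ints)), addresses(std::move(addrs)) {}
    };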
*/ diff --git a/src/smpi/include/smpi_file.hpp b/src/smpi/include/smpi_file.hpp index ddd0cffa8f..25b917112f 100644 --- a/src/smpi/include/smpi_file.hpp +++ b/src/smpi/include/smpi_file.hpp @@ -37,25 +37,26 @@ class File : public F2C{ File(const File&) = delete; File& operator=(const File&) = delete; ~File(); - int size(); - int get_position(MPI_Offset* offset); - int get_position_shared(MPI_Offset* offset); - int flags(); - MPI_Comm comm(); + int size() const; + int get_position(MPI_Offset* offset) const; + int get_position_shared(MPI_Offset* offset) const; + int flags() const; + MPI_Comm comm() const; int sync(); int seek(MPI_Offset offset, int whence); int seek_shared(MPI_Offset offset, int whence); int set_view(MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char* datarep, const Info* info); - int get_view(MPI_Offset *disp, MPI_Datatype *etype, MPI_Datatype *filetype, char *datarep); + int get_view(MPI_Offset* disp, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const; MPI_Info info(); void set_info( MPI_Info info); - static int read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status); - static int read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status); - static int read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status); - static int write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status); - static int write_shared(MPI_File fh, const void *buf, int count,MPI_Datatype datatype, MPI_Status *status); - static int write_ordered(MPI_File fh, const void *buf, int count,MPI_Datatype datatype, MPI_Status *status); - template int op_all(void *buf, int count,MPI_Datatype datatype, MPI_Status *status); + static int read(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status); + static int read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status); + static int read_ordered(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status); + static int write(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status); + static int write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status); + static int write_ordered(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status); + template + int op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status); static int close(MPI_File *fh); static int del(const char* filename, const Info* info); MPI_Errhandler errhandler(); @@ -63,141 +64,145 @@ class File : public F2C{ static File* f2c(int id); }; - /* Read_all, Write_all : loosely based on */ - /* @article{Thakur:1996:ETM:245875.245879,*/ - /* author = {Thakur, Rajeev and Choudhary, Alok},*/ - /* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/ - /* journal = {Sci. Program.},*/ - /* issue_date = {Winter 1996},*/ - /* pages = {301--317},*/ - /* }*/ - template - int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){ - //get min and max offsets from everyone. 
- int size = comm_->size(); - int rank = comm_-> rank(); - MPI_Offset min_offset = file_->tell(); - MPI_Offset max_offset = min_offset + count * datatype->get_extent();//cheating, as we don't care about exact data location, we can skip extent - MPI_Offset* min_offsets = new MPI_Offset[size]; - MPI_Offset* max_offsets = new MPI_Offset[size]; - simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_); - simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_); - MPI_Offset min=min_offset; - MPI_Offset max=max_offset; - MPI_Offset tot= 0; - int empty=1; - for(int i=0;imax) - max=max_offsets[i]; - } - - XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max); - if(empty==1){ - delete[] min_offsets; - delete[] max_offsets; - status->count=0; - return MPI_SUCCESS; - } - MPI_Offset total = max-min; - if(total==tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){ - delete[] min_offsets; - delete[] max_offsets; - //contiguous. Just have each proc perform its read - if(status != MPI_STATUS_IGNORE) - status->count=count * datatype->size(); - return T(this,buf,count,datatype, status); - } +/* Read_all, Write_all : loosely based on */ +/* @article{Thakur:1996:ETM:245875.245879,*/ +/* author = {Thakur, Rajeev and Choudhary, Alok},*/ +/* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/ +/* journal = {Sci. Program.},*/ +/* issue_date = {Winter 1996},*/ +/* pages = {301--317},*/ +/* }*/ +template +int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status) +{ + // get min and max offsets from everyone. + int size = comm_->size(); + int rank = comm_->rank(); + MPI_Offset min_offset = file_->tell(); + MPI_Offset max_offset = + min_offset + + count * datatype->get_extent(); // cheating, as we don't care about exact data location, we can skip extent + MPI_Offset* min_offsets = new MPI_Offset[size]; + MPI_Offset* max_offsets = new MPI_Offset[size]; + simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_); + simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_); + MPI_Offset min = min_offset; + MPI_Offset max = max_offset; + MPI_Offset tot = 0; + int empty = 1; + for (int i = 0; i < size; i++) { + if (min_offsets[i] != max_offsets[i]) + empty = 0; + tot += (max_offsets[i] - min_offsets[i]); + if (min_offsets[i] < min) + min = min_offsets[i]; + if (max_offsets[i] > max) + max = max_offsets[i]; + } - //Interleaved case : How much do I need to read, and whom to send it ? 
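The op_all template being re-indented here implements the extended two-phase method cited in the header comment: ranks first allgather their minimum and maximum offsets, each rank then reads one contiguous slice of the combined range, and the pieces are redistributed with alltoallv. A simplified, hypothetical sketch of the slicing decision only (invented names, and not the exact arithmetic used in this file, which also merges the per-rank ranges and builds the alltoallv counts and displacements):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Slice { std::int64_t begin, end; };   // half-open byte range [begin, end)

    // Assumes one entry per rank in mins/maxs, gathered beforehand.
    Slice slice_for_rank(const std::vector<std::int64_t>& mins,
                         const std::vector<std::int64_t>& maxs, int rank)
    {
      std::int64_t lo    = *std::min_element(mins.begin(), mins.end());
      std::int64_t hi    = *std::max_element(maxs.begin(), maxs.end());
      std::int64_t chunk = (hi - lo + 1) / static_cast<std::int64_t>(mins.size());
      Slice s{lo + chunk * rank, lo + chunk * (rank + 1)};
      if (rank == static_cast<int>(mins.size()) - 1)
        s.end = hi + 1;                          // last rank picks up the remainder
      return s;                                  // this rank reads exactly one slice
    }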
- MPI_Offset my_chunk_start=(max-min+1)/size*rank; - MPI_Offset my_chunk_end=((max-min+1)/size*(rank+1)); - XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end); - int* send_sizes = new int[size]; - int* recv_sizes = new int[size]; - int* send_disps = new int[size]; - int* recv_disps = new int[size]; - int total_sent=0; - for(int i=0;irecv as we use recv buffer - if((my_chunk_start>=min_offsets[i] && my_chunk_start < max_offsets[i])|| - ((my_chunk_end<=max_offsets[i]) && my_chunk_end> min_offsets[i])){ - send_sizes[i]=(std::min(max_offsets[i]-1, my_chunk_end-1)-std::max(min_offsets[i], my_chunk_start)); - // store min and max offset to actually read - min_offset=std::min(min_offset, min_offsets[i]); - total_sent+=send_sizes[i]; - XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i); - } + XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, + max); + if (empty == 1) { + delete[] min_offsets; + delete[] max_offsets; + status->count = 0; + return MPI_SUCCESS; + } + MPI_Offset total = max - min; + if (total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)) { + delete[] min_offsets; + delete[] max_offsets; + // contiguous. Just have each proc perform its read + if (status != MPI_STATUS_IGNORE) + status->count = count * datatype->size(); + return T(this, buf, count, datatype, status); + } + + // Interleaved case : How much do I need to read, and whom to send it ? + MPI_Offset my_chunk_start = (max - min + 1) / size * rank; + MPI_Offset my_chunk_end = ((max - min + 1) / size * (rank + 1)); + XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end); + int* send_sizes = new int[size]; + int* recv_sizes = new int[size]; + int* send_disps = new int[size]; + int* recv_disps = new int[size]; + int total_sent = 0; + for (int i = 0; i < size; i++) { + send_sizes[i] = 0; + send_disps[i] = 0; // cheat to avoid issues when send>recv as we use recv buffer + if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) || + ((my_chunk_end <= max_offsets[i]) && my_chunk_end > min_offsets[i])) { + send_sizes[i] = (std::min(max_offsets[i] - 1, my_chunk_end - 1) - std::max(min_offsets[i], my_chunk_start)); + // store min and max offset to actually read + min_offset = std::min(min_offset, min_offsets[i]); + total_sent += send_sizes[i]; + XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i); } - min_offset=std::max(min_offset, my_chunk_start); + } + min_offset = std::max(min_offset, my_chunk_start); - //merge the ranges of every process - std::vector> ranges; - for(int i=0; i> chunks; - chunks.push_back(ranges[0]); + // merge the ranges of every process + std::vector> ranges; + for (int i = 0; i < size; ++i) + ranges.push_back(std::make_pair(min_offsets[i], max_offsets[i])); + std::sort(ranges.begin(), ranges.end()); + std::vector> chunks; + chunks.push_back(ranges[0]); - unsigned int nchunks=0; - unsigned int i=1; - while(i < ranges.size()){ - if(ranges[i].second>chunks[nchunks].second){ - // else range included - ignore - if(ranges[i].first>chunks[nchunks].second){ - //new disjoint range - chunks.push_back(ranges[i]); - nchunks++; - } else { - //merge ranges - chunks[nchunks].second=ranges[i].second; - } + unsigned int nchunks = 0; + unsigned int i = 1; + while (i < ranges.size()) { + if (ranges[i].second > chunks[nchunks].second) { + // else range included - ignore + if (ranges[i].first > chunks[nchunks].second) { + // new disjoint 
range + chunks.push_back(ranges[i]); + nchunks++; + } else { + // merge ranges + chunks[nchunks].second = ranges[i].second; } - i++; - } - //what do I need to read ? - MPI_Offset totreads=0; - for(i=0; i my_chunk_end) - continue; - else - totreads += (std::min(chunks[i].second, my_chunk_end-1)-std::max(chunks[i].first, my_chunk_start)); } - XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads); + i++; + } + // what do I need to read ? + MPI_Offset totreads = 0; + for (i = 0; i < chunks.size(); i++) { + if (chunks[i].second < my_chunk_start) + continue; + else if (chunks[i].first > my_chunk_end) + continue; + else + totreads += (std::min(chunks[i].second, my_chunk_end - 1) - std::max(chunks[i].first, my_chunk_start)); + } + XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads); - unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent); + unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent); - if(totreads>0){ - seek(min_offset, MPI_SEEK_SET); - T(this,sendbuf,totreads/datatype->size(),datatype, status); - } - simgrid::smpi::colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_); - int total_recv=0; - for(int i=0;icount=count * datatype->size(); - smpi_free_tmp_buffer(sendbuf); - delete[] send_sizes; - delete[] recv_sizes; - delete[] send_disps; - delete[] recv_disps; - delete[] min_offsets; - delete[] max_offsets; - return MPI_SUCCESS; + if (totreads > 0) { + seek(min_offset, MPI_SEEK_SET); + T(this, sendbuf, totreads / datatype->size(), datatype, status); + } + simgrid::smpi::colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_); + int total_recv = 0; + for (int i = 0; i < size; i++) { + recv_disps[i] = total_recv; + total_recv += recv_sizes[i]; } + // Set buf value to avoid copying dumb data + simgrid::smpi::colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE, buf, recv_sizes, recv_disps, MPI_BYTE, + comm_); + if (status != MPI_STATUS_IGNORE) + status->count = count * datatype->size(); + smpi_free_tmp_buffer(sendbuf); + delete[] send_sizes; + delete[] recv_sizes; + delete[] send_disps; + delete[] recv_disps; + delete[] min_offsets; + delete[] max_offsets; + return MPI_SUCCESS; +} } } diff --git a/src/smpi/include/smpi_group.hpp b/src/smpi/include/smpi_group.hpp index 889dfa628d..67e1291183 100644 --- a/src/smpi/include/smpi_group.hpp +++ b/src/smpi/include/smpi_group.hpp @@ -29,7 +29,7 @@ class Group : public F2C{ public: Group() = default; explicit Group(int size) : size_(size), rank_to_actor_map_(size, nullptr), index_to_rank_map_(size, MPI_UNDEFINED) {} - explicit Group(Group* origin); + explicit Group(const Group* origin); void set_mapping(s4u::Actor* actor, int rank); int rank(int index); @@ -37,7 +37,7 @@ public: int rank(s4u::Actor* process); void ref(); static void unref(MPI_Group group); - int size() { return size_; } + int size() const { return size_; } int compare(MPI_Group group2); int incl(int n, const int* ranks, MPI_Group* newgroup); int excl(int n, const int* ranks, MPI_Group* newgroup); diff --git a/src/smpi/include/smpi_info.hpp b/src/smpi/include/smpi_info.hpp index 280afa5410..99664b56e1 100644 --- a/src/smpi/include/smpi_info.hpp +++ b/src/smpi/include/smpi_info.hpp @@ -26,11 +26,11 @@ public: void ref(); static void unref(MPI_Info info); void set(const char* key, const char* value) { map_[key] = value; } - int get(const char* key, int valuelen, char* value, int* flag); + int get(const char* key, int valuelen, char* value, int* flag) const; int remove(const char* key); - int 
get_nkeys(int* nkeys); - int get_nthkey(int n, char* key); - int get_valuelen(const char* key, int* valuelen, int* flag); + int get_nkeys(int* nkeys) const; + int get_nthkey(int n, char* key) const; + int get_valuelen(const char* key, int* valuelen, int* flag) const; static Info* f2c(int id); }; diff --git a/src/smpi/include/smpi_op.hpp b/src/smpi/include/smpi_op.hpp index 403b230aa8..1b458848fe 100644 --- a/src/smpi/include/smpi_op.hpp +++ b/src/smpi/include/smpi_op.hpp @@ -21,11 +21,11 @@ class Op : public F2C{ public: Op(MPI_User_function* function, bool commutative, bool predefined=false) : func_(function), is_commutative_(commutative), predefined_(predefined) {} - bool is_commutative() { return is_commutative_; } - bool is_fortran_op() { return is_fortran_op_; } + bool is_commutative() const { return is_commutative_; } + bool is_fortran_op() const { return is_fortran_op_; } // tell that we were created from fortran, so we need to translate the type to fortran when called void set_fortran_op() { is_fortran_op_ = true; } - void apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype); + void apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype) const; static Op* f2c(int id); void ref(); static void unref(MPI_Op* op); diff --git a/src/smpi/include/smpi_request.hpp b/src/smpi/include/smpi_request.hpp index 925702e6e4..091219edfa 100644 --- a/src/smpi/include/smpi_request.hpp +++ b/src/smpi/include/smpi_request.hpp @@ -56,23 +56,23 @@ class Request : public F2C { public: Request() = default; Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op = MPI_REPLACE); - MPI_Comm comm() { return comm_; } - size_t size() { return size_; } - size_t real_size() { return real_size_; } - int src() { return src_; } - int dst() { return dst_; } - int tag() { return tag_; } - int flags() { return flags_; } - bool detached() { return detached_; } - MPI_Datatype type() { return old_type_; } - void print_request(const char* message); + MPI_Comm comm() const { return comm_; } + size_t size() const { return size_; } + size_t real_size() const { return real_size_; } + int src() const { return src_; } + int dst() const { return dst_; } + int tag() const { return tag_; } + int flags() const { return flags_; } + bool detached() const { return detached_; } + MPI_Datatype type() const { return old_type_; } + void print_request(const char* message) const; void start(); void cancel(); void init_buffer(int count); void ref(); void set_nbc_requests(MPI_Request* reqs, int size); - int get_nbc_requests_size(); - MPI_Request* get_nbc_requests(); + int get_nbc_requests_size() const; + MPI_Request* get_nbc_requests() const; static void finish_wait(MPI_Request* request, MPI_Status* status); static void unref(MPI_Request* request); static int wait(MPI_Request* req, MPI_Status* status); diff --git a/src/smpi/include/smpi_status.hpp b/src/smpi/include/smpi_status.hpp index 31a7a1d450..c906afd8d0 100644 --- a/src/smpi/include/smpi_status.hpp +++ b/src/smpi/include/smpi_status.hpp @@ -20,7 +20,7 @@ static void empty(MPI_Status * status); static int cancelled (const MPI_Status * status); static void set_cancelled (MPI_Status * status, int flag); static void set_elements(MPI_Status* status, const Datatype*, int count); -static int get_count(const MPI_Status * status, MPI_Datatype datatype); +static int get_count(const MPI_Status* status, const Datatype* datatype); }; diff --git a/src/smpi/include/smpi_topo.hpp 
b/src/smpi/include/smpi_topo.hpp index 58c7576d4f..e7354c6fa8 100644 --- a/src/smpi/include/smpi_topo.hpp +++ b/src/smpi/include/smpi_topo.hpp @@ -39,7 +39,7 @@ public: int get(int maxdims, int* dims, int* periods, int* coords); int rank(const int* coords, int* rank); int shift(int direction, int disp, int* rank_source, int* rank_dest); - int dim_get(int* ndims); + int dim_get(int* ndims) const; static int Dims_create(int nnodes, int ndims, int dims[]); }; diff --git a/src/smpi/include/smpi_win.hpp b/src/smpi/include/smpi_win.hpp index 84a3f82b53..b50573c837 100644 --- a/src/smpi/include/smpi_win.hpp +++ b/src/smpi/include/smpi_win.hpp @@ -53,20 +53,20 @@ public: ~Win(); int attach (void *base, MPI_Aint size); int detach (const void *base); - void get_name( char* name, int* length); + void get_name(char* name, int* length) const; void get_group( MPI_Group* group); void set_name(const char* name); - int rank(); - int dynamic(); + int rank() const; + int dynamic() const; int start(MPI_Group group, int assert); int post(MPI_Group group, int assert); int complete(); MPI_Info info(); void set_info( MPI_Info info); int wait(); - MPI_Aint size(); - void* base(); - int disp_unit(); + MPI_Aint size() const; + void* base() const; + int disp_unit() const; int fence(int assert); int put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request=nullptr); @@ -91,7 +91,7 @@ public: int flush_local_all(); int finish_comms(); int finish_comms(int rank); - int shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr); + int shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const; MPI_Errhandler errhandler(); void set_errhandler( MPI_Errhandler errhandler); }; diff --git a/src/smpi/internals/smpi_actor.cpp b/src/smpi/internals/smpi_actor.cpp index c29d78792e..c6f71647ba 100644 --- a/src/smpi/internals/smpi_actor.cpp +++ b/src/smpi/internals/smpi_actor.cpp @@ -74,19 +74,19 @@ void ActorExt::finalize() } /** @brief Check if a process is finalized */ -int ActorExt::finalized() +int ActorExt::finalized() const { return (state_ == SmpiProcessState::FINALIZED); } /** @brief Check if a process is partially initialized already */ -int ActorExt::initializing() +int ActorExt::initializing() const { return (state_ == SmpiProcessState::INITIALIZING); } /** @brief Check if a process is initialized */ -int ActorExt::initialized() +int ActorExt::initialized() const { // TODO cheinrich: Check if we still need this. This should be a global condition, not for a // single process ... ? @@ -106,7 +106,7 @@ void ActorExt::set_replaying(bool value) replaying_ = value; } -bool ActorExt::replaying() +bool ActorExt::replaying() const { return replaying_; } @@ -131,23 +131,23 @@ void ActorExt::set_privatized_region(smpi_privatization_region_t region) privatized_region_ = region; } -smpi_privatization_region_t ActorExt::privatized_region() +smpi_privatization_region_t ActorExt::privatized_region() const { return privatized_region_; } -MPI_Comm ActorExt::comm_world() +MPI_Comm ActorExt::comm_world() const { return comm_world_ == nullptr ? 
MPI_COMM_NULL : *comm_world_; } -s4u::MutexPtr ActorExt::mailboxes_mutex() +s4u::MutexPtr ActorExt::mailboxes_mutex() const { return mailboxes_mutex_; } #if HAVE_PAPI -int ActorExt::papi_event_set() +int ActorExt::papi_event_set() const { return papi_event_set_; } @@ -168,7 +168,7 @@ void ActorExt::simulated_start() simulated_ = SIMIX_get_clock(); } -double ActorExt::simulated_elapsed() +double ActorExt::simulated_elapsed() const { return SIMIX_get_clock() - simulated_; } @@ -205,7 +205,7 @@ void ActorExt::set_sampling(int s) sampling_ = s; } -int ActorExt::sampling() +int ActorExt::sampling() const { return sampling_; } @@ -243,7 +243,7 @@ void ActorExt::init() XBT_DEBUG("<%ld> SMPI process has been initialized: %p", ext->actor_->get_pid(), ext->actor_); } -int ActorExt::get_optind() +int ActorExt::get_optind() const { return optind_; } diff --git a/src/smpi/internals/smpi_bench.cpp b/src/smpi/internals/smpi_bench.cpp index c28f90a63c..0ef90facfd 100644 --- a/src/smpi/internals/smpi_bench.cpp +++ b/src/smpi/internals/smpi_bench.cpp @@ -103,7 +103,7 @@ void smpi_bench_begin() double smpi_adjust_comp_speed(){ double speedup=1; if (smpi_cfg_comp_adjustment_file()[0] != '\0') { - smpi_trace_call_location_t* loc = smpi_process()->call_location(); + const smpi_trace_call_location_t* loc = smpi_process()->call_location(); std::string key = loc->get_composed_key(); std::unordered_map::const_iterator it = location2speedup.find(key); if (it != location2speedup.end()) { @@ -283,7 +283,7 @@ unsigned long long smpi_rastro_timestamp () unsigned long long sec = static_cast(now); unsigned long long pre = (now - sec) * smpi_rastro_resolution(); smpi_bench_begin(); - return static_cast(sec) * smpi_rastro_resolution() + pre; + return sec * smpi_rastro_resolution() + pre; } /* ****************************** Functions related to the SMPI_SAMPLE_ macros ************************************/ diff --git a/src/smpi/internals/smpi_config.cpp b/src/smpi/internals/smpi_config.cpp index 7bd5640a18..f24680ceba 100644 --- a/src/smpi/internals/smpi_config.cpp +++ b/src/smpi/internals/smpi_config.cpp @@ -2,12 +2,14 @@ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#include "mc/mc.h" +#include "smpi_config.hpp" #include "include/xbt/config.hpp" +#include "mc/mc.h" #include "private.hpp" #include "smpi_coll.hpp" -#include "smpi_config.hpp" #include "src/simix/smx_private.hpp" +#include "xbt/parse_units.hpp" + #include /* DBL_MAX */ #include /* trim */ #include @@ -31,13 +33,19 @@ constexpr bool HAVE_WORKING_MMAP = true; bool _smpi_options_initialized=false; SharedMallocType _smpi_cfg_shared_malloc = SharedMallocType::GLOBAL; SmpiPrivStrategies _smpi_cfg_privatization = SmpiPrivStrategies::NONE; +double _smpi_cfg_host_speed; XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_config, smpi, "Logging specific to SMPI (config)"); -simgrid::config::Flag _smpi_cfg_host_speed{ - "smpi/host-speed", "Speed of the host running the simulation (in flop/s). " - "Used to bench the operations.", 20000.0, - [](const double& val) { xbt_assert(val > 0.0, "Invalid value (%f) for 'smpi/host-speed': it must be positive.", val); }}; +simgrid::config::Flag _smpi_cfg_host_speed_string{ + "smpi/host-speed", + "Speed of the host running the simulation (in flop/s). 
" + "Used to bench the operations.", + "20000f", [](const std::string& str) { + _smpi_cfg_host_speed = xbt_parse_get_speed("smpi/host-speed", 1, str.c_str(), "option", "smpi/host-speed"); + xbt_assert(_smpi_cfg_host_speed > 0.0, "Invalid value (%s) for 'smpi/host-speed': it must be positive.", + _smpi_cfg_host_speed_string.get().c_str()); + }}; simgrid::config::Flag _smpi_cfg_simulate_computation{ "smpi/simulate-computation", "Whether the computational part of the simulated application should be simulated.", diff --git a/src/smpi/internals/smpi_deployment.cpp b/src/smpi/internals/smpi_deployment.cpp index 7d3d3872a8..6eb504a9be 100644 --- a/src/smpi/internals/smpi_deployment.cpp +++ b/src/smpi/internals/smpi_deployment.cpp @@ -23,7 +23,7 @@ public: Instance(int max_no_processes, MPI_Comm comm) : size_(max_no_processes), comm_world_(comm) { MPI_Group group = new simgrid::smpi::Group(size_); - comm_world_ = new simgrid::smpi::Comm(group, nullptr, 0, -1); + comm_world_ = new simgrid::smpi::Comm(group, nullptr, false, -1); // FIXME : using MPI_Attr_put with MPI_UNIVERSE_SIZE is forbidden and we make it a no-op (which triggers a warning // as MPI_ERR_ARG is returned). Directly calling Comm::attr_put breaks for now, as MPI_UNIVERSE_SIZE,is <0 // instance.comm_world->attr_put(MPI_UNIVERSE_SIZE, reinterpret_cast(instance.size)); diff --git a/src/smpi/internals/smpi_global.cpp b/src/smpi/internals/smpi_global.cpp index c3e32fa864..c2ff4c60a6 100644 --- a/src/smpi/internals/smpi_global.cpp +++ b/src/smpi/internals/smpi_global.cpp @@ -196,7 +196,7 @@ void smpi_comm_copy_buffer_callback(simgrid::kernel::activity::CommImpl* comm, v (static_cast(buff) < smpi_data_exe_start + smpi_data_exe_size)) { XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !"); smpi_switch_data_segment(comm->src_actor_->iface()); - tmpbuff = static_cast(xbt_malloc(buff_size)); + tmpbuff = xbt_malloc(buff_size); memcpy_private(tmpbuff, buff, private_blocks); } diff --git a/src/smpi/internals/smpi_memory.cpp b/src/smpi/internals/smpi_memory.cpp index a5f00a7da5..394dbb4ee0 100644 --- a/src/smpi/internals/smpi_memory.cpp +++ b/src/smpi/internals/smpi_memory.cpp @@ -192,7 +192,7 @@ void smpi_switch_data_segment(simgrid::s4u::ActorPtr actor) #if HAVE_PRIVATIZATION // FIXME, cross-process support (mmap across process when necessary) XBT_DEBUG("Switching data frame to the one of process %ld", actor->get_pid()); - simgrid::smpi::ActorExt* process = smpi_process_remote(actor); + const simgrid::smpi::ActorExt* process = smpi_process_remote(actor); int current = process->privatized_region()->file_descriptor; const void* tmp = mmap(TOPAGE(smpi_data_exe_start), smpi_data_exe_size, PROT_RW, MAP_FIXED | MAP_SHARED, current, 0); if (tmp != TOPAGE(smpi_data_exe_start)) diff --git a/src/smpi/internals/smpi_replay.cpp b/src/smpi/internals/smpi_replay.cpp index d05719e85d..d05763b69a 100644 --- a/src/smpi/internals/smpi_replay.cpp +++ b/src/smpi/internals/smpi_replay.cpp @@ -90,17 +90,14 @@ private: public: RequestStorage() {} - int size() - { - return store.size(); - } + int size() const { return store.size(); } req_storage_t& get_store() { return store; } - void get_requests(std::vector& vec) + void get_requests(std::vector& vec) const { for (auto const& pair : store) { auto& req = pair.second; @@ -118,7 +115,7 @@ public: return (it == store.end()) ? 
MPI_REQUEST_NULL : it->second; } - void remove(MPI_Request req) + void remove(const Request* req) { if (req == MPI_REQUEST_NULL) return; @@ -475,7 +472,7 @@ void RecvAction::kernel(simgrid::xbt::ReplayAction&) arg_size = status.count; } - bool is_recv = false; // Help analyzers understanding that status is not used unintialized + bool is_recv = false; // Help analyzers understanding that status is not used uninitialized if (get_name() == "recv") { is_recv = true; Request::recv(nullptr, arg_size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status); diff --git a/src/smpi/internals/smpi_shared.cpp b/src/smpi/internals/smpi_shared.cpp index c510fc5d63..b2653ab587 100644 --- a/src/smpi/internals/smpi_shared.cpp +++ b/src/smpi/internals/smpi_shared.cpp @@ -142,8 +142,8 @@ static void *smpi_shared_malloc_local(size_t size, const char *file, int line) } // Align functions, from http://stackoverflow.com/questions/4840410/how-to-align-a-pointer-in-c -#define ALIGN_UP(n, align) (((n) + (align)-1) & -(align)) -#define ALIGN_DOWN(n, align) ((n) & -(align)) +#define ALIGN_UP(n, align) (((int64_t)(n) + (int64_t)(align) - 1) & -(int64_t)(align)) +#define ALIGN_DOWN(n, align) ((int64_t)(n) & -(int64_t)(align)) constexpr unsigned PAGE_SIZE = 0x1000; constexpr unsigned HUGE_PAGE_SIZE = 1U << 21; @@ -184,7 +184,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int "to allow big allocations.\n", size >> 20); if(use_huge_page) - mem = (void*)ALIGN_UP((int64_t)allocated_ptr, HUGE_PAGE_SIZE); + mem = (void*)ALIGN_UP(allocated_ptr, HUGE_PAGE_SIZE); else mem = allocated_ptr; @@ -235,8 +235,8 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int if(i_block < nb_shared_blocks-1) xbt_assert(stop_offset < shared_block_offsets[2*i_block+2], "stop_offset (%zu) should be lower than its successor start offset (%zu)", stop_offset, shared_block_offsets[2*i_block+2]); - size_t start_block_offset = ALIGN_UP((int64_t)start_offset, smpi_shared_malloc_blocksize); - size_t stop_block_offset = ALIGN_DOWN((int64_t)stop_offset, smpi_shared_malloc_blocksize); + size_t start_block_offset = ALIGN_UP(start_offset, smpi_shared_malloc_blocksize); + size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize); for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) { XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset); void* pos = (void*)((unsigned long)mem + offset); @@ -248,8 +248,8 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int "and that the directory you are passing is mounted correctly (mount /path/to/huge -t hugetlbfs -o rw,mode=0777).", strerror(errno)); } - size_t low_page_start_offset = ALIGN_UP((int64_t)start_offset, PAGE_SIZE); - size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN((int64_t)stop_offset, (int64_t)PAGE_SIZE); + size_t low_page_start_offset = ALIGN_UP(start_offset, PAGE_SIZE); + size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? 
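
The ALIGN_UP/ALIGN_DOWN rework in smpi_shared.cpp above moves the int64_t casts into the macros so every call site computes in 64 bits. An equivalent, illustrative formulation as constexpr functions (assuming power-of-two alignments, as PAGE_SIZE and HUGE_PAGE_SIZE are; not code from this patch):

    #include <cstdint>

    // Round n down/up to a multiple of 'align' (align must be a power of two).
    // Doing the arithmetic in int64_t avoids the 32-bit truncation that a plain
    // 'n & -(unsigned)align' would apply to pointers and large sizes.
    constexpr int64_t align_down(int64_t n, int64_t align) { return n & -align; }
    constexpr int64_t align_up(int64_t n, int64_t align) { return (n + align - 1) & -align; }

    static_assert(align_up(0x1001, 0x1000) == 0x2000, "rounds up to the next page boundary");
    static_assert(align_down(0x1fff, 0x1000) == 0x1000, "rounds down to the page start");
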
start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE); if(low_page_start_offset < low_page_stop_offset) { XBT_DEBUG("\t\tglobal shared allocation, mmap block start"); void* pos = (void*)((unsigned long)mem + low_page_start_offset); @@ -263,7 +263,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int } if(low_page_stop_offset <= stop_block_offset) { XBT_DEBUG("\t\tglobal shared allocation, mmap block stop"); - size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE); + size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE); if(high_page_stop_offset > stop_block_offset) { void* pos = (void*)((unsigned long)mem + stop_block_offset); const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE, diff --git a/src/smpi/internals/smpi_utils.cpp b/src/smpi/internals/smpi_utils.cpp index 23a1736abd..c5bf7ff9b9 100644 --- a/src/smpi/internals/smpi_utils.cpp +++ b/src/smpi/internals/smpi_utils.cpp @@ -5,13 +5,18 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "smpi_utils.hpp" + +#include "src/surf/xml/platf_private.hpp" #include "xbt/log.h" +#include "xbt/parse_units.hpp" #include "xbt/sysdep.h" #include -#include "src/surf/xml/platf_private.hpp" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_utils, smpi, "Logging specific to SMPI (utils)"); +extern std::string surf_parsed_filename; +extern int surf_parse_lineno; + std::vector parse_factor(const std::string& smpi_coef_string) { std::vector smpi_factor; @@ -49,7 +54,8 @@ std::vector parse_factor(const std::string& smpi_coef_string) } } else { try { - fact.values.push_back(surf_parse_get_time((*factor_iter).c_str(), "smpi factor", "")); + fact.values.push_back( + xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, (*factor_iter).c_str(), "smpi factor", "")); } catch (const std::invalid_argument&) { throw std::invalid_argument(std::string("Invalid factor value ") + std::to_string(iteration) + " in chunk " + std::to_string(smpi_factor.size() + 1) + ": " + *factor_iter); diff --git a/src/smpi/mpi/smpi_comm.cpp b/src/smpi/mpi/smpi_comm.cpp index 64fd5921df..d3e4978d1e 100644 --- a/src/smpi/mpi/smpi_comm.cpp +++ b/src/smpi/mpi/smpi_comm.cpp @@ -125,26 +125,26 @@ MPI_Group Comm::group() return group_; } -int Comm::size() +int Comm::size() const { if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->size(); return group_->size(); } -int Comm::rank() +int Comm::rank() const { if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->rank(); return group_->rank(s4u::Actor::self()); } -int Comm::id() +int Comm::id() const { return id_; } -void Comm::get_name (char* name, int* len) +void Comm::get_name(char* name, int* len) const { if (this == MPI_COMM_UNINITIALIZED){ smpi_process()->comm_world()->get_name(name, len); @@ -179,45 +179,49 @@ void Comm::set_leaders_comm(MPI_Comm leaders){ leaders_comm_=leaders; } -int* Comm::get_non_uniform_map(){ +int* Comm::get_non_uniform_map() const +{ if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->get_non_uniform_map(); return non_uniform_map_; } -int* Comm::get_leaders_map(){ +int* Comm::get_leaders_map() const +{ if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->get_leaders_map(); return leaders_map_; } -MPI_Comm Comm::get_leaders_comm(){ +MPI_Comm Comm::get_leaders_comm() const +{ if (this == MPI_COMM_UNINITIALIZED) return 
smpi_process()->comm_world()->get_leaders_comm(); return leaders_comm_; } -MPI_Comm Comm::get_intra_comm(){ +MPI_Comm Comm::get_intra_comm() const +{ if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD) return smpi_process()->comm_intra(); else return intra_comm_; } -bool Comm::is_uniform() +bool Comm::is_uniform() const { if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->is_uniform(); return is_uniform_ != 0; } -bool Comm::is_blocked() +bool Comm::is_blocked() const { if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->is_blocked(); return is_blocked_ != 0; } -bool Comm::is_smp_comm() +bool Comm::is_smp_comm() const { if (this == MPI_COMM_UNINITIALIZED) return smpi_process()->comm_world()->is_smp_comm(); @@ -360,7 +364,7 @@ MPI_Comm Comm::find_intra_comm(int * leader){ } } *leader=min_index; - return new Comm(group_intra, nullptr, 1); + return new Comm(group_intra, nullptr, true); } void Comm::init_smp(){ @@ -427,7 +431,7 @@ void Comm::init_smp(){ //create leader_communicator for (i=0; i< leader_group_size;i++) leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i); - leader_comm = new Comm(leaders_group, nullptr,1); + leader_comm = new Comm(leaders_group, nullptr, true); this->set_leaders_comm(leader_comm); this->set_intra_comm(comm_intra); @@ -437,7 +441,7 @@ void Comm::init_smp(){ leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i); if(this->get_leaders_comm()==MPI_COMM_NULL){ - leader_comm = new Comm(leaders_group, nullptr,1); + leader_comm = new Comm(leaders_group, nullptr, true); this->set_leaders_comm(leader_comm); }else{ leader_comm=this->get_leaders_comm(); @@ -528,7 +532,8 @@ void Comm::remove_rma_win(MPI_Win win){ rma_wins_.remove(win); } -void Comm::finish_rma_calls(){ +void Comm::finish_rma_calls() const +{ for (auto const& it : rma_wins_) { if(it->rank()==this->rank()){//is it ours (for MPI_COMM_WORLD)? 
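
Most of the accessor changes in these headers and in smpi_comm.cpp only add a const qualifier. A minimal illustration of what that buys, using a hypothetical class rather than SimGrid code:

    class Counter {
      int size_ = 1;
    public:
      int size() const { return size_; } // callable on const objects; cannot modify members
      void grow() { ++size_; }           // mutating, therefore non-const
    };

    int read_size(const Counter& c)
    {
      // c.grow();    // would not compile: 'c' is a reference to const
      return c.size(); // compiles only because size() is const-qualified
    }
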
int finished = it->finish_comms(); diff --git a/src/smpi/mpi/smpi_datatype.cpp b/src/smpi/mpi/smpi_datatype.cpp index 5422b5dfe9..c28c526b3f 100644 --- a/src/smpi/mpi/smpi_datatype.cpp +++ b/src/smpi/mpi/smpi_datatype.cpp @@ -11,6 +11,8 @@ #include "src/instr/instr_private.hpp" #include "src/smpi/include/smpi_actor.hpp" +#include +#include #include XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi, "Logging specific to SMPI (datatype)"); @@ -222,16 +224,17 @@ void Datatype::commit() flags_ |= DT_FLAG_COMMITED; } -bool Datatype::is_valid(){ +bool Datatype::is_valid() const +{ return (flags_ & DT_FLAG_COMMITED); } -bool Datatype::is_basic() +bool Datatype::is_basic() const { return (flags_ & DT_FLAG_BASIC); } -bool Datatype::is_replayable() +bool Datatype::is_replayable() const { return (simgrid::instr::trace_format == simgrid::instr::TraceFormat::Ti) && ((this == MPI_BYTE) || (this == MPI_DOUBLE) || (this == MPI_INT) || (this == MPI_CHAR) || @@ -247,13 +250,15 @@ void Datatype::addflag(int flag){ flags_ &= flag; } -int Datatype::extent(MPI_Aint * lb, MPI_Aint * extent){ +int Datatype::extent(MPI_Aint* lb, MPI_Aint* extent) const +{ *lb = lb_; *extent = ub_ - lb_; return MPI_SUCCESS; } -void Datatype::get_name(char* name, int* length){ +void Datatype::get_name(char* name, int* length) const +{ if(name_!=nullptr){ *length = strlen(name_); strncpy(name, name_, *length+1); @@ -286,33 +291,25 @@ int Datatype::unpack(const void* inbuf, int insize, int* position, void* outbuf, return MPI_SUCCESS; } -int Datatype::get_contents (int max_integers, int max_addresses, - int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses, - MPI_Datatype *array_of_datatypes) +int Datatype::get_contents(int max_integers, int max_addresses, int max_datatypes, int* array_of_integers, + MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes) const { if(contents_==nullptr) return MPI_ERR_ARG; - if(max_integersnumber_of_integers_) + if (static_cast(max_integers) < contents_->integers_.size()) return MPI_ERR_COUNT; - for(int i=0; inumber_of_integers_; i++){ - array_of_integers[i]=contents_->integers_[i]; - } - if(max_addressesnumber_of_addresses_) + std::copy(begin(contents_->integers_), end(contents_->integers_), array_of_integers); + if (static_cast(max_addresses) < contents_->addresses_.size()) return MPI_ERR_COUNT; - for(int i=0; inumber_of_addresses_; i++){ - array_of_addresses[i]=contents_->addresses_[i]; - } - if(max_datatypesnumber_of_datatypes_) + std::copy(begin(contents_->addresses_), end(contents_->addresses_), array_of_addresses); + if (static_cast(max_datatypes) < contents_->datatypes_.size()) return MPI_ERR_COUNT; - for(int i=0; inumber_of_datatypes_; i++){ - array_of_datatypes[i]=contents_->datatypes_[i]; - contents_->datatypes_[i]->ref(); - } + std::copy(begin(contents_->datatypes_), end(contents_->datatypes_), array_of_datatypes); + std::for_each(begin(contents_->datatypes_), end(contents_->datatypes_), std::mem_fn(&Datatype::ref)); return MPI_SUCCESS; } -int Datatype::get_envelope (int* num_integers, int* num_addresses, - int* num_datatypes, int* combiner) +int Datatype::get_envelope(int* num_integers, int* num_addresses, int* num_datatypes, int* combiner) const { if(contents_==nullptr){ *num_integers = 0; @@ -320,9 +317,9 @@ int Datatype::get_envelope (int* num_integers, int* num_addresses, *num_datatypes = 0; *combiner = MPI_COMBINER_NAMED; }else{ - *num_integers = contents_->number_of_integers_; - *num_addresses = contents_->number_of_addresses_; - *num_datatypes = 
contents_->number_of_datatypes_; + *num_integers = contents_->integers_.size(); + *num_addresses = contents_->addresses_.size(); + *num_datatypes = contents_->datatypes_.size(); *combiner = contents_->combiner_; } return MPI_SUCCESS; diff --git a/src/smpi/mpi/smpi_datatype_derived.cpp b/src/smpi/mpi/smpi_datatype_derived.cpp index fb3addadc2..21d04c9340 100644 --- a/src/smpi/mpi/smpi_datatype_derived.cpp +++ b/src/smpi/mpi/smpi_datatype_derived.cpp @@ -13,32 +13,13 @@ namespace simgrid{ namespace smpi{ - -Datatype_contents::Datatype_contents(int combiner, - int number_of_integers, const int* integers, - int number_of_addresses, const MPI_Aint* addresses, - int number_of_datatypes, const MPI_Datatype* datatypes) -: combiner_(combiner), number_of_integers_(number_of_integers), - number_of_addresses_(number_of_addresses), - number_of_datatypes_(number_of_datatypes) +Datatype_contents::Datatype_contents(int combiner, int number_of_integers, const int* integers, int number_of_addresses, + const MPI_Aint* addresses, int number_of_datatypes, const MPI_Datatype* datatypes) + : combiner_(combiner) + , integers_(integers, integers + number_of_integers) + , addresses_(addresses, addresses + number_of_addresses) + , datatypes_(datatypes, datatypes + number_of_datatypes) { - integers_=new int[number_of_integers_]; - for(int i=0; i(F2C::f2c_lookup()->at(get_key(key, id))); } else { - return static_cast(MPI_ERRHANDLER_NULL); + return MPI_ERRHANDLER_NULL; } } -void Errhandler::call(MPI_Comm comm, int errorcode){ +void Errhandler::call(MPI_Comm comm, int errorcode) const +{ comm_func_(&comm, &errorcode); } -void Errhandler::call(MPI_Win win, int errorcode){ +void Errhandler::call(MPI_Win win, int errorcode) const +{ win_func_(&win, &errorcode); } -void Errhandler::call(MPI_File file, int errorcode){ +void Errhandler::call(MPI_File file, int errorcode) const +{ file_func_(&file, &errorcode); } diff --git a/src/smpi/mpi/smpi_file.cpp b/src/smpi/mpi/smpi_file.cpp index d48222118c..872d9a7e3c 100644 --- a/src/smpi/mpi/smpi_file.cpp +++ b/src/smpi/mpi/smpi_file.cpp @@ -103,12 +103,14 @@ namespace smpi{ return MPI_SUCCESS; } - int File::get_position(MPI_Offset* offset){ + int File::get_position(MPI_Offset* offset) const + { *offset=file_->tell(); return MPI_SUCCESS; } - int File::get_position_shared(MPI_Offset* offset){ + int File::get_position_shared(MPI_Offset* offset) const + { shared_mutex_->lock(); *offset=*shared_file_pointer_; shared_mutex_->unlock(); @@ -143,7 +145,7 @@ namespace smpi{ return MPI_SUCCESS; } - int File::read(MPI_File fh, void* /*buf*/, int count, MPI_Datatype datatype, MPI_Status* status) + int File::read(MPI_File fh, void* /*buf*/, int count, const Datatype* datatype, MPI_Status* status) { //get position first as we may be doing non contiguous reads and it will probably be updated badly MPI_Offset position = fh->file_->tell(); @@ -171,7 +173,8 @@ namespace smpi{ /* address="Berlin, Heidelberg",*/ /* pages="84--93"*/ /* }*/ - int File::read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){ + int File::read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status) + { fh->shared_mutex_->lock(); fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET); read(fh, buf, count, datatype, status); @@ -180,7 +183,8 @@ namespace smpi{ return MPI_SUCCESS; } - int File::read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){ + int File::read_ordered(MPI_File fh, void* buf, int count, const Datatype* 
datatype, MPI_Status* status) + { //0 needs to get the shared pointer value MPI_Offset val; if(fh->comm_->rank()==0){ @@ -203,7 +207,7 @@ namespace smpi{ return ret; } - int File::write(MPI_File fh, void* /*buf*/, int count, MPI_Datatype datatype, MPI_Status* status) + int File::write(MPI_File fh, void* /*buf*/, int count, const Datatype* datatype, MPI_Status* status) { //get position first as we may be doing non contiguous reads and it will probably be updated badly MPI_Offset position = fh->file_->tell(); @@ -221,7 +225,8 @@ namespace smpi{ return MPI_SUCCESS; } - int File::write_shared(MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){ + int File::write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status) + { fh->shared_mutex_->lock(); XBT_DEBUG("Write shared on %s - Shared ptr before : %lld",fh->file_->get_path(), *(fh->shared_file_pointer_)); fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET); @@ -232,7 +237,8 @@ namespace smpi{ return MPI_SUCCESS; } - int File::write_ordered(MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){ + int File::write_ordered(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status) + { //0 needs to get the shared pointer value MPI_Offset val; if(fh->comm_->rank()==0){ @@ -263,7 +269,7 @@ namespace smpi{ return MPI_SUCCESS; } - int File::get_view(MPI_Offset* /*disp*/, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) + int File::get_view(MPI_Offset* /*disp*/, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const { *etype=etype_; *filetype=filetype_; @@ -271,11 +277,13 @@ namespace smpi{ return MPI_SUCCESS; } - int File::size(){ + int File::size() const + { return file_->size(); } - int File::flags(){ + int File::flags() const + { return flags_; } @@ -301,7 +309,8 @@ namespace smpi{ info_->ref(); } - MPI_Comm File::comm(){ + MPI_Comm File::comm() const + { return comm_; } diff --git a/src/smpi/mpi/smpi_group.cpp b/src/smpi/mpi/smpi_group.cpp index 162b8deb08..37b2a22d6e 100644 --- a/src/smpi/mpi/smpi_group.cpp +++ b/src/smpi/mpi/smpi_group.cpp @@ -14,7 +14,7 @@ MPI_Group MPI_GROUP_EMPTY=&mpi_MPI_GROUP_EMPTY; namespace simgrid{ namespace smpi{ -Group::Group(Group* origin) +Group::Group(const Group* origin) { if (origin != MPI_GROUP_NULL && origin != MPI_GROUP_EMPTY) { size_ = origin->size(); @@ -322,7 +322,7 @@ MPI_Group Group::f2c(int id) { char key[KEY_SIZE]; return static_cast(F2C::f2c_lookup()->at(get_key(key, id))); } else { - return static_cast(MPI_GROUP_NULL); + return MPI_GROUP_NULL; } } diff --git a/src/smpi/mpi/smpi_info.cpp b/src/smpi/mpi/smpi_info.cpp index 564b22f4b5..446e6b921d 100644 --- a/src/smpi/mpi/smpi_info.cpp +++ b/src/smpi/mpi/smpi_info.cpp @@ -21,7 +21,8 @@ void Info::unref(Info* info){ } } -int Info::get(const char *key, int valuelen, char *value, int *flag){ +int Info::get(const char* key, int valuelen, char* value, int* flag) const +{ *flag=false; auto val = map_.find(key); if (val != map_.end()) { @@ -42,12 +43,14 @@ int Info::remove(const char *key){ return MPI_SUCCESS; } -int Info::get_nkeys(int *nkeys){ +int Info::get_nkeys(int* nkeys) const +{ *nkeys = map_.size(); return MPI_SUCCESS; } -int Info::get_nthkey(int n, char *key){ +int Info::get_nthkey(int n, char* key) const +{ int num=0; for (auto const& elm : map_) { if (num == n) { @@ -59,7 +62,8 @@ int Info::get_nthkey(int n, char *key){ return MPI_ERR_ARG; } -int Info::get_valuelen(const char *key, int *valuelen, int 
*flag){ +int Info::get_valuelen(const char* key, int* valuelen, int* flag) const +{ *flag=false; auto val = map_.find(key); if (val != map_.end()) { diff --git a/src/smpi/mpi/smpi_op.cpp b/src/smpi/mpi/smpi_op.cpp index 84a3d8299f..3ae327d4cb 100644 --- a/src/smpi/mpi/smpi_op.cpp +++ b/src/smpi/mpi/smpi_op.cpp @@ -238,7 +238,7 @@ CREATE_MPI_OP(MPI_NO_OP, no_func) namespace simgrid{ namespace smpi{ -void Op::apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype) +void Op::apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype) const { if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) { // we need to switch as the called function may silently touch global variables diff --git a/src/smpi/mpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp index 7812f96a1e..2bc696f74f 100644 --- a/src/smpi/mpi/smpi_request.cpp +++ b/src/smpi/mpi/smpi_request.cpp @@ -33,10 +33,18 @@ extern void (*smpi_comm_copy_data_callback)(simgrid::kernel::activity::CommImpl* namespace simgrid{ namespace smpi{ -Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op) - : buf_(const_cast(buf)), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op) +Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, + unsigned flags, MPI_Op op) + : buf_(const_cast(buf)) + , old_type_(datatype) + , size_(datatype->size() * count) + , src_(src) + , dst_(dst) + , tag_(tag) + , comm_(comm) + , flags_(flags) + , op_(op) { - size_ = datatype->size() * count; datatype->ref(); comm_->ref(); if(op != MPI_REPLACE && op != MPI_OP_NULL) @@ -130,7 +138,7 @@ void Request::init_buffer(int count){ // This part handles the problem of non-contiguous memory (for the unserialization at the reception) if ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (old_type_->flags() & DT_FLAG_DERIVED)) { // This part handles the problem of non-contiguous memory - old_buf = const_cast(buf_); + old_buf = buf_; if (count==0){ buf_ = nullptr; }else { @@ -157,7 +165,7 @@ bool Request::match_send(void* a, void* b, simgrid::kernel::activity::CommImpl*) return match_common(req, ref, req); } -void Request::print_request(const char *message) +void Request::print_request(const char* message) const { XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]", message, this, buf_, size_, src_, dst_, tag_, flags_); @@ -421,7 +429,7 @@ void Request::start() if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0) mut->unlock(); } else { /* the RECV flag was not set, so this is a send */ - simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_)); + const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_)); xbt_assert(process, "Actor pid=%d is gone??", dst_); int rank = src_; if (TRACE_smpi_view_internals()) { @@ -1018,7 +1026,7 @@ int Request::waitany(int count, MPI_Request requests[], MPI_Status * status) return index; } -static int sort_accumulates(MPI_Request a, MPI_Request b) +static int sort_accumulates(const Request* a, const Request* b) { return (a->tag() > b->tag()); } @@ -1047,8 +1055,8 @@ int Request::waitall(int count, MPI_Request requests[], MPI_Status status[]) wait(&requests[c],pstat); index = c; } else { - index = waitany(count, (MPI_Request*)requests, pstat); - + index = waitany(count, requests, pstat); + 
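
Earlier in this hunk, the Request constructor now sets size_ in its member initializer list instead of assigning it in the body. A small illustration of that idiom with a hypothetical type (members are initialized in declaration order, so an initializer should only depend on constructor parameters or on members declared before it):

    #include <cstddef>

    class Message {
      std::size_t size_;
      int tag_;
    public:
      Message(std::size_t element_size, int count, int tag)
          : size_(element_size * count) // initialized directly, no default-then-assign
          , tag_(tag)
      {
      }
      std::size_t size() const { return size_; }
      int tag() const { return tag_; }
    };
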
if (index == MPI_UNDEFINED) break; @@ -1082,7 +1090,7 @@ int Request::waitsome(int incount, MPI_Request requests[], int *indices, MPI_Sta int index = 0; MPI_Status stat; MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat; - index = waitany(incount, (MPI_Request*)requests, pstat); + index = waitany(incount, requests, pstat); if(index==MPI_UNDEFINED) return MPI_UNDEFINED; if(status != MPI_STATUSES_IGNORE) { status[count] = *pstat; @@ -1111,7 +1119,7 @@ MPI_Request Request::f2c(int id) { char key[KEY_SIZE]; if(id==MPI_FORTRAN_REQUEST_NULL) - return static_cast(MPI_REQUEST_NULL); + return MPI_REQUEST_NULL; return static_cast(F2C::f2c_lookup()->at(get_key(key,id))); } @@ -1189,13 +1197,14 @@ void Request::set_nbc_requests(MPI_Request* reqs, int size){ } } -int Request::get_nbc_requests_size(){ +int Request::get_nbc_requests_size() const +{ return nbc_requests_size_; } -MPI_Request* Request::get_nbc_requests(){ +MPI_Request* Request::get_nbc_requests() const +{ return nbc_requests_; } - } } diff --git a/src/smpi/mpi/smpi_status.cpp b/src/smpi/mpi/smpi_status.cpp index b048fe74bc..c11c0979f7 100644 --- a/src/smpi/mpi/smpi_status.cpp +++ b/src/smpi/mpi/smpi_status.cpp @@ -37,7 +37,7 @@ void Status::set_elements(MPI_Status* status, const Datatype*, int count) status->count=count; } -int Status::get_count(const MPI_Status * status, MPI_Datatype datatype) +int Status::get_count(const MPI_Status* status, const Datatype* datatype) { return status->count / datatype->size(); } diff --git a/src/smpi/mpi/smpi_topo.cpp b/src/smpi/mpi/smpi_topo.cpp index 592850187e..e25a214819 100644 --- a/src/smpi/mpi/smpi_topo.cpp +++ b/src/smpi/mpi/smpi_topo.cpp @@ -238,7 +238,8 @@ int Topo_Cart::shift(int direction, int disp, int* rank_source, int* rank_dest) return MPI_SUCCESS; } -int Topo_Cart::dim_get(int *ndims) { +int Topo_Cart::dim_get(int* ndims) const +{ *ndims =ndims_; return MPI_SUCCESS; } diff --git a/src/smpi/mpi/smpi_win.cpp b/src/smpi/mpi/smpi_win.cpp index 58a644861e..f62ab4e2ef 100644 --- a/src/smpi/mpi/smpi_win.cpp +++ b/src/smpi/mpi/smpi_win.cpp @@ -110,7 +110,8 @@ int Win::detach(const void* /*base*/) return MPI_SUCCESS; } -void Win::get_name(char* name, int* length){ +void Win::get_name(char* name, int* length) const +{ if(name_==nullptr){ *length=0; name=nullptr; @@ -136,23 +137,28 @@ MPI_Info Win::info() return info_; } -int Win::rank(){ +int Win::rank() const +{ return rank_; } -MPI_Aint Win::size(){ +MPI_Aint Win::size() const +{ return size_; } -void* Win::base(){ +void* Win::base() const +{ return base_; } -int Win::disp_unit(){ +int Win::disp_unit() const +{ return disp_unit_; } -int Win::dynamic(){ +int Win::dynamic() const +{ return dynamic_; } @@ -730,7 +736,7 @@ int Win::finish_comms(int rank){ return size; } -int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) +int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const { const Win* target_win = rank != MPI_PROC_NULL ? 
connected_wins_[rank] : nullptr; for (int i = 0; not target_win && i < comm_->size(); i++) { diff --git a/src/smpi/plugins/load_balancer/LoadBalancer.cpp b/src/smpi/plugins/load_balancer/LoadBalancer.cpp index 1af847c7fa..1e9866c34a 100644 --- a/src/smpi/plugins/load_balancer/LoadBalancer.cpp +++ b/src/smpi/plugins/load_balancer/LoadBalancer.cpp @@ -41,7 +41,7 @@ bool compare_hosts::operator()(s4u::Host* const a, s4u::Host* const b) const void LoadBalancer::run() { - s4u::Engine* engine = s4u::Engine::get_instance(); + const s4u::Engine* engine = s4u::Engine::get_instance(); std::vector available_hosts = engine->get_filtered_hosts([](const s4u::Host* host) { return host->is_on(); }); xbt_assert(available_hosts.size() > 0, "No hosts available; are they all switched off?"); @@ -63,7 +63,7 @@ void LoadBalancer::run() // after a host got another actor assigned (or moved from). // We can't use std::priorityQueue here because we modify *two* elements: The top element, which // we can access and which has the lowest load, gets a new actor assigned. - // However, the host loosing that actor must be updated as well. + // However, the host losing that actor must be updated as well. // std::priorityQueue is immutable and hence doesn't work for us. // // This heap contains the least loaded host at the top diff --git a/src/smpi/plugins/load_balancer/load_balancer.hpp b/src/smpi/plugins/load_balancer/load_balancer.hpp index 4264966b62..50e03b4319 100644 --- a/src/smpi/plugins/load_balancer/load_balancer.hpp +++ b/src/smpi/plugins/load_balancer/load_balancer.hpp @@ -36,9 +36,9 @@ public: host_to_actors.insert({host, actor}); } - s4u::Host* get_host(s4u::ActorPtr actor) { return actor_to_host[actor]; } + s4u::Host* get_host(s4u::ActorPtr actor) const { return actor_to_host.at(actor); } - unsigned int count_actors(s4u::Host* host) + unsigned int count_actors(s4u::Host* host) const { return host_to_actors.count(host); // TODO This is linear in the size of the map. Maybe replace by constant lookup through another map? 
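
The get_host() change just above replaces operator[] with at() so the accessor can be const. A short illustration of the difference (hypothetical example, not SimGrid code):

    #include <map>
    #include <string>

    int port_of(const std::map<std::string, int>& ports, const std::string& host)
    {
      // return ports[host];  // does not compile: operator[] is non-const (it may insert)
      return ports.at(host);  // const overload exists; throws std::out_of_range if missing
    }
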
} diff --git a/src/smpi/smpicc.in b/src/smpi/smpicc.in index e0487db70b..7661c53e15 100755 --- a/src/smpi/smpicc.in +++ b/src/smpi/smpicc.in @@ -17,7 +17,7 @@ CMAKE_LINKARGS="-L@libdir@" @SMPITOOLS_SH@ # Don't use -Wl,-z-defs with Clang and address sanitizer -if [ "@CMAKE_C_COMPILER_ID@" = "Clang" -a "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then +if [ "@CMAKE_C_COMPILER_ID@" = "Clang" ] && [ "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then LINKER_UNDEFINED_ERROR="" else LINKER_UNDEFINED_ERROR="1" @@ -63,7 +63,7 @@ while [ $# -gt 0 ]; do list_add CMDARGS "-c" ;; *.c) - SRCFILE="$(readlink -f ${ARG} 2>/dev/null)" + SRCFILE="$(readlink -f "${ARG}" 2>/dev/null)" if [ -z "$SRCFILE" ] ; then SRCFILE="$ARG" fi @@ -96,12 +96,12 @@ done list_set CMDLINE "${CC}" list_add_not_empty CMDLINE "${CFLAGS}" list_add_not_empty CMDLINE ${INCLUDEARGS} -list_add_not_empty CMDLINE ${CMAKE_LINKARGS} +list_add_not_empty CMDLINE "${CMAKE_LINKARGS}" list_add_not_empty CMDLINE "${CMDARGS}" list_add_not_empty CMDLINE "${LINKARGS}" -eval $(list_get CMDLINE) -if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then +eval "$(list_get CMDLINE)" +if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then echo "$@" [ "x$show" = x1 ] && exit 0 fi diff --git a/src/smpi/smpicxx.in b/src/smpi/smpicxx.in index 16e8ec06a7..c4af9cff96 100755 --- a/src/smpi/smpicxx.in +++ b/src/smpi/smpicxx.in @@ -19,7 +19,7 @@ CMAKE_LINKARGS="-L@libdir@" list_set CXXFLAGS "-std=gnu++11" @SMPI_CXX_FLAGS@ list_set LINKARGS -if [ "@CMAKE_C_COMPILER_ID@" = "Clang" -a "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then +if [ "@CMAKE_C_COMPILER_ID@" = "Clang" ] && [ "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then LINKER_UNDEFINED_ERROR="" else LINKER_UNDEFINED_ERROR="1" @@ -63,7 +63,7 @@ while [ $# -gt 0 ]; do list_add CMDARGS "-c" ;; *.c) - SRCFILE="$(readlink -f ${ARG} 2>/dev/null)" + SRCFILE="$(readlink -f "${ARG}" 2>/dev/null)" if [ -z "$SRCFILE" ] ; then SRCFILE="$ARG" fi @@ -93,12 +93,12 @@ done list_set CMDLINE "${CXX}" list_add_not_empty CMDLINE "${CXXFLAGS}" list_add_not_empty CMDLINE ${INCLUDEARGS} -list_add_not_empty CMDLINE ${CMAKE_LINKARGS} +list_add_not_empty CMDLINE "${CMAKE_LINKARGS}" list_add_not_empty CMDLINE "${CMDARGS}" list_add_not_empty CMDLINE "${LINKARGS}" eval $(list_get CMDLINE) -if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then +if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then echo "$@" [ "x$show" = x1 ] && exit 0 fi diff --git a/src/smpi/smpif90.in b/src/smpi/smpif90.in index d89ff5e7f9..e661b8307a 100644 --- a/src/smpi/smpif90.in +++ b/src/smpi/smpif90.in @@ -30,7 +30,6 @@ else echo "Warning: smpif90 pretends to be a regular compiler (SMPI_PRETEND_CC is set). Produced binaries will not be usable in SimGrid." 
fi list_set TMPFILES -main_name=main cleanup () { eval $(list_get TMPFILES) @@ -64,13 +63,13 @@ while [ $# -gt 0 ]; do list_add CMDLINE "-c" ;; *.f90) - FILENAME=`basename ${ARG}` + FILENAME=$(basename "${ARG}") TMPFILE=$(mymktemp "${ARG}" ".f90") ORIGFILE="${FILENAME%.f90}" filter_and_compile ;; *.F90) - FILENAME=`basename ${ARG}` + FILENAME=$(basename "${ARG}") TMPFILE=$(mymktemp "${ARG}" ".F90") ORIGFILE="${FILENAME%.F90}" filter_and_compile @@ -106,11 +105,11 @@ if [ $NEEDS_OUTPUT -ne 0 ]; then fi list_add_not_empty CMDLINE ${INCLUDEARGS} -list_add_not_empty CMDLINE ${CMAKE_LINKARGS} +list_add_not_empty CMDLINE "${CMAKE_LINKARGS}" list_add_not_empty CMDLINE "${LINKARGS}" eval $(list_get CMDLINE) -if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then +if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then echo "$@" [ "x$show" = x1 ] && exit 0 fi diff --git a/src/smpi/smpiff.in b/src/smpi/smpiff.in index a642099499..f413b77397 100644 --- a/src/smpi/smpiff.in +++ b/src/smpi/smpiff.in @@ -30,7 +30,6 @@ else echo "Warning: smpiff pretends to be a regular compiler (SMPI_PRETEND_CC is set). Produced binaries will not be usable in SimGrid." fi list_set TMPFILES -main_name=main cleanup () { eval $(list_get TMPFILES) @@ -42,9 +41,9 @@ filter_and_compile_f77() { list_add TMPFILES "${TMPFILE}" if [ "x${SMPI_PRETEND_CC}" = "x" ]; then #replace "program main_name by subroutine user_main (and the end clause as well)" - if [ $TRACE_CALL_LOCATION -gt 0 ]; then - echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > ${TMPFILE} - echo "#line 1 \"${ARG}\"" >> ${TMPFILE} + if [ "$TRACE_CALL_LOCATION" -gt 0 ]; then + echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > "${TMPFILE}" + echo "#line 1 \"${ARG}\"" >> "${TMPFILE}" fi sed 's/^[[:space:]]\{6\}[[:space:]]*\([eE][nN][dD] \)\{0,1\}[pP][rR][oO][gG][rR][aA][mM][^a-zA-Z0-9]*\([a-zA-Z0-9_]*\)/ \1subroutine user_main /g' "${ARG}" >> "${TMPFILE}" SRCFILE="${TMPFILE}" @@ -56,9 +55,9 @@ filter_and_compile_f77() { filter_and_compile_f90() { list_add TMPFILES "${TMPFILE}" #replace "program main_name by subroutine user_main (and the end clause as well)" - if [ $TRACE_CALL_LOCATION -gt 0 ]; then - echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > ${TMPFILE} - echo "#line 1 \"${ARG}\"" >> ${TMPFILE} + if [ "$TRACE_CALL_LOCATION" -gt 0 ]; then + echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > "${TMPFILE}" + echo "#line 1 \"${ARG}\"" >> "${TMPFILE}" fi sed 's/^\([[:space:]]*\)\([eE][nN][dD] \)\{0,1\}[pP][rR][oO][gG][rR][aA][mM][^a-zA-Z0-9]*\([a-zA-Z0-9_]*\)/\1\2subroutine user_main /g' "${ARG}" >> "${TMPFILE}" SRCFILE="${TMPFILE}" @@ -79,25 +78,25 @@ while [ $# -gt 0 ]; do list_add CMDLINE "-c" ;; *.f) - FILENAME=`basename ${ARG}` + FILENAME=$(basename "${ARG}") TMPFILE=$(mymktemp "${ARG}" ".f") ORIGFILE="${FILENAME%.f}" filter_and_compile_f77 ;; *.F)$ - FILENAME=`basename ${ARG}` + FILENAME=$(basename "${ARG}") TMPFILE=$(mymktemp "${ARG}" ".F") ORIGFILE="${FILENAME%.F}" filter_and_compile_f77 ;; *.f90) - FILENAME=`basename ${ARG}` + FILENAME=$(basename "${ARG}") TMPFILE=$(mymktemp "${ARG}" ".f90") ORIGFILE="${FILENAME%.f90}" filter_and_compile_f90 ;; *.F90)$ - FILENAME=`basename ${ARG}` + FILENAME=$(basename "${ARG}") TMPFILE=$(mymktemp "${ARG}" ".F90") ORIGFILE="${FILENAME%.F90}" filter_and_compile_f90 @@ -139,11 +138,11 @@ if [ $NEEDS_OUTPUT -ne 0 ]; then fi list_add_not_empty CMDLINE ${INCLUDEARGS} -list_add_not_empty CMDLINE ${CMAKE_LINKARGS} +list_add_not_empty CMDLINE "${CMAKE_LINKARGS}" 
list_add_not_empty CMDLINE "${LINKARGS}" eval $(list_get CMDLINE) -if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then +if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then echo "$@" [ "x$show" = x1 ] && exit 0 fi diff --git a/src/smpi/smpirun.in b/src/smpi/smpirun.in index da48f7c766..74523e1dcf 100755 --- a/src/smpi/smpirun.in +++ b/src/smpi/smpirun.in @@ -14,7 +14,6 @@ DEFAULT_LOOPBACK_BANDWIDTH="498000000Bps" DEFAULT_LOOPBACK_LATENCY="0.000004s" DEFAULT_NETWORK_BANDWIDTH="$((26 * 1024 * 1024))Bps" DEFAULT_NETWORK_LATENCY="0.000005s" -DEFAULT_NUMPROCS="4" DEFAULT_SPEED="100flops" LOOPBACK_BANDWIDTH="${DEFAULT_LOOPBACK_BANDWIDTH}" @@ -79,27 +78,27 @@ unset pid trapped_signals="HUP INT QUIT ILL ABRT SEGV FPE ALRM TERM USR1 USR2 BUS" die () { - printf '[%s] ** error: %s. Aborting.\n' "$(basename $0)" "$*" >&2 + printf '[%s] ** error: %s. Aborting.\n' "$(basename "$0")" "$*" >&2 exit 1 } smpirun_cleanup() { if [ -z "${KEEP}" ] ; then - if [ -z "${PLATFORM}" -a -n "$PLATFORMTMP" ]; then - rm -f ${PLATFORMTMP} + if [ -z "${PLATFORM}" ] && [ -n "$PLATFORMTMP" ]; then + rm -f "${PLATFORMTMP}" PLATFORMTMP="" fi - if [ ${HOSTFILETMP} = 1 -a -n "$HOSTFILE" ] ; then - rm -f ${HOSTFILE} + if [ ${HOSTFILETMP} = 1 ] && [ -n "$HOSTFILE" ] ; then + rm -f "${HOSTFILE}" HOSTFILE="" fi - if [ ${UNROLLEDHOSTFILETMP} = 1 -a -n "$UNROLLEDHOSTFILE" ] ; then - rm -f ${UNROLLEDHOSTFILE} + if [ "${UNROLLEDHOSTFILETMP}" = 1 ] && [ -n "$UNROLLEDHOSTFILE" ] ; then + rm -f "${UNROLLEDHOSTFILE}" UNROLLEDHOSTFILE="" fi - if [ -n ${APPLICATIONTMP} ]; then - rm -f ${APPLICATIONTMP} + if [ -n "${APPLICATIONTMP}" ]; then + rm -f "${APPLICATIONTMP}" APPLICATIONTMP="" fi fi @@ -111,14 +110,14 @@ smpirun_trap() { # Cleanup and kill the child process: smpirun_cleanup - if ! [ -z "$pid" ]; then - kill -TERM $pid + if [ -n "$pid" ]; then + kill -TERM "$pid" fi unset pid # Raise the same signal again (remove the traps first): trap - $trapped_signals - kill -$sig $$ + kill -"$sig" $$ # This should never happen: kill -ABRT $$ @@ -126,7 +125,7 @@ smpirun_trap() { } for s in $trapped_signals; do - trap "smpirun_trap $s" $s + trap "smpirun_trap $s" "$s" done while true; do @@ -325,12 +324,12 @@ for elem in tree.findall(".//cluster"): print(prefix + str(i) + suffix) else: print(prefix + r + suffix) - ' < ${PLATFORM} > ${HOSTFILE} + ' < "${PLATFORM}" > "${HOSTFILE}" fi UNROLLEDHOSTFILETMP=0 # parse if our lines are terminated by :num_process -if grep -q ':' $HOSTFILE ; then +if grep -q ':' "$HOSTFILE" ; then UNROLLEDHOSTFILETMP=1 UNROLLEDHOSTFILE="$(mktemp smpitmp-hostfXXXXXX)" @PYTHON_EXECUTABLE@ -c ' @@ -344,17 +343,17 @@ for line in sys.stdin: print(m.group(1)) else: print(line.strip()) -' < ${HOSTFILE} > ${UNROLLEDHOSTFILE} +' < "${HOSTFILE}" > "${UNROLLEDHOSTFILE}" if [ ${HOSTFILETMP} = 1 ] ; then - rm ${HOSTFILE} + rm "${HOSTFILE}" HOSTFILETMP=0 fi HOSTFILE=$UNROLLEDHOSTFILE fi # Don't use wc -l to compute it to avoid issues with trailing \n at EOF -hostfile_procs=$(grep -c "[a-zA-Z0-9]" $HOSTFILE) -if [ ${hostfile_procs} = 0 ] ; then +hostfile_procs=$(grep -c "[a-zA-Z0-9]" "$HOSTFILE") +if [ "${hostfile_procs}" = 0 ] ; then die "the hostfile '${HOSTFILE}' is empty" fi @@ -363,7 +362,7 @@ if [ -z "${NUMPROCS}" ] ; then NUMPROCS=$hostfile_procs fi -if [ ${NUMPROCS} -gt ${hostfile_procs} ] ; then +if [ "${NUMPROCS}" -gt "${hostfile_procs}" ] ; then echo "You requested to use ${NUMPROCS} ranks, but there is only ${hostfile_procs} processes in your hostfile..." 
>&2 fi @@ -371,7 +370,7 @@ fi if [ -z "${PLATFORM}" ]; then PLATFORMTMP="$(mktemp smpitmp-platfXXXXXX)" - cat > ${PLATFORMTMP} < "${PLATFORMTMP}" < @@ -379,28 +378,30 @@ if [ -z "${PLATFORM}" ]; then PLATFORMHEAD i=${NUMPROCS} - while [ $i -gt 0 ]; do - echo " " >> ${PLATFORMTMP} - echo " " >> ${PLATFORMTMP} - echo " " >> ${PLATFORMTMP} + while [ "$i" -gt 0 ]; do + { + echo " " + echo " " + echo " " + } >> "${PLATFORMTMP}" i=$((i - 1)) done i=${NUMPROCS} - while [ $i -gt 0 ]; do + while [ "$i" -gt 0 ]; do j=${NUMPROCS} - while [ $j -gt 0 ]; do - if [ $i -eq $j ]; then - echo " " >> ${PLATFORMTMP} + while [ "$j" -gt 0 ]; do + if [ "$i" -eq "$j" ]; then + echo " " >> "${PLATFORMTMP}" else - echo " " >> ${PLATFORMTMP} + echo " " >> "${PLATFORMTMP}" fi j=$((j - 1)) done i=$((i - 1)) done - cat >> ${PLATFORMTMP} <> "${PLATFORMTMP}" < PLATFORMFOOT @@ -413,21 +414,21 @@ fi APPLICATIONTMP="$(mktemp smpitmp-appXXXXXX)" #APPLICATIONTMP="app.xml" -cat > ${APPLICATIONTMP} < "${APPLICATIONTMP}" < APPLICATIONHEAD ##---- cache hostnames of hostfile--------------- -if [ -n "${HOSTFILE}" ] && [ -f ${HOSTFILE} ]; then - hostnames=$(cat ${HOSTFILE} | tr '\n\r' ' ') +if [ -n "${HOSTFILE}" ] && [ -f "${HOSTFILE}" ]; then + hostnames=$(< "${HOSTFILE}" tr '\n\r' ' ') fi if [ -n "${APP_TRACES}" ]; then if [ -f "${APP_TRACES}" ]; then - hosttraces=$(cat ${APP_TRACES} | tr '\n\r' ' ' ) - NUMTRACES=$(cat ${APP_TRACES} | wc -l) + hosttraces=$(< "${APP_TRACES}" tr '\n\r' ' ' ) + NUMTRACES=$(< "${APP_TRACES}" wc -l) REPLAY=1 else printf "File not found: %s\n" "${APP_TRACES:-\${APP_TRACES\}}" >&2 @@ -447,7 +448,7 @@ if [ -n "${HAVE_SEQ}" ]; then SEQ=$(${HAVE_SEQ} 0 $(( NUMPROCS - 1))) else cnt=0 - while [ $cnt -lt ${NUMPROCS} ] ; do + while [ $cnt -lt "${NUMPROCS}" ] ; do SEQ="$SEQ $cnt" cnt=$((cnt + 1)); done @@ -471,24 +472,25 @@ do if [ ${MAPOPT} = 1 ]; then echo "[rank $i] -> $host" fi - + { echo " - " >> ${APPLICATIONTMP} + " if [ ${REPLAY} = 1 ]; then - echo " " >> ${APPLICATIONTMP} - if [ ${NUMTRACES} -gt 1 ]; then - echo " " >> ${APPLICATIONTMP} + echo " " + if [ "${NUMTRACES}" -gt 1 ]; then + echo " " else - echo " " >> ${APPLICATIONTMP} + echo " " fi else - echo ${XML_ARGS} >> ${APPLICATIONTMP} + echo "${XML_ARGS}" fi - echo " " >> ${APPLICATIONTMP} + echo "
" + } >> "${APPLICATIONTMP}" done -cat >> ${APPLICATIONTMP} <> "${APPLICATIONTMP}" < APPLICATIONFOOT ##-------------------------------- end DEFAULT APPLICATION -------------------------------------- @@ -527,8 +529,8 @@ fi # Do not remove, this variable may be used by user code (e.g. StarPU) export SMPI_GLOBAL_SIZE=${NUMPROCS} -if [ -n "${KEEP}" -a -z "${QUIET}" ] ; then - echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} +if [ -n "${KEEP}" ] && [ -z "${QUIET}" ] ; then + echo "${EXEC}" ${PRIVATIZE} "${TRACEOPTIONS}" "${SIMOPTS}" "${PLATFORMTMP}" "${APPLICATIONTMP}" if [ ${HOSTFILETMP} = 1 ] ; then echo "Generated hostfile ${HOSTFILE} kept." fi @@ -548,7 +550,7 @@ fi # * The FD 3 is used to temporarily store FD 1. This is because the shell connects FD 1 to /dev/null when the command # is launched in the background: this can be overridden in bash but not in standard bourne shell. exec 3<&0 -${WRAPPER} "@SMPIMAIN@" ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} <&3 3>&- & +${WRAPPER} "@SMPIMAIN@" "${EXEC}" ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} "${PLATFORMTMP}" "${APPLICATIONTMP}" <&3 3>&- & pid=$! exec 3>&- wait $pid @@ -564,8 +566,8 @@ pid="" # Keep temporary files on failures to help debugging # if [ ${status} -ne 0 ] ; then - if [ -z "${KEEP}" -a -z "${QUIET}" ]; then - echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} + if [ -z "${KEEP}" ] && [ -z "${QUIET}" ]; then + echo "${EXEC}" ${PRIVATIZE} "${TRACEOPTIONS}" "${SIMOPTS}" "${PLATFORMTMP}" "${APPLICATIONTMP}" if [ ${HOSTFILETMP} = 1 ] ; then echo "Generated hostfile ${HOSTFILE} kept." fi diff --git a/src/surf/HostImpl.cpp b/src/surf/HostImpl.cpp index 2366c33b37..ff17a48e31 100644 --- a/src/surf/HostImpl.cpp +++ b/src/surf/HostImpl.cpp @@ -55,7 +55,7 @@ HostImpl::~HostImpl() * * Weird things will happen if you turn on a host that is already on. S4U is fool-proof, not this. 
*/ -void HostImpl::turn_on() +void HostImpl::turn_on() const { for (auto const& arg : actors_at_boot_) { XBT_DEBUG("Booting Actor %s(%s) right now", arg->name.c_str(), arg->host->get_cname()); @@ -73,7 +73,7 @@ void HostImpl::turn_on() } /** Kill all actors hosted here */ -void HostImpl::turn_off(kernel::actor::ActorImpl* issuer) +void HostImpl::turn_off(const kernel::actor::ActorImpl* issuer) { for (auto& actor : actor_list_) { XBT_DEBUG("Killing Actor %s@%s on behalf of %s which turned off that host.", actor.get_cname(), @@ -98,12 +98,12 @@ std::vector HostImpl::get_all_actors() res.push_back(actor.ciface()); return res; } -size_t HostImpl::get_actor_count() +size_t HostImpl::get_actor_count() const { return actor_list_.size(); } -std::vector HostImpl::get_disks() +std::vector HostImpl::get_disks() const { std::vector disks; for (auto const& d : disks_) diff --git a/src/surf/HostImpl.hpp b/src/surf/HostImpl.hpp index c5667405e7..6e3404f197 100644 --- a/src/surf/HostImpl.hpp +++ b/src/surf/HostImpl.hpp @@ -53,7 +53,7 @@ public: explicit HostImpl(s4u::Host* host); virtual ~HostImpl(); - std::vector get_disks(); + std::vector get_disks() const; void set_disks(const std::vector& disks, s4u::Host* host); void add_disk(const s4u::Disk* disk); void remove_disk(const std::string& disk_name); @@ -65,10 +65,10 @@ public: s4u::Host* get_iface() { return piface_; } - void turn_on(); - void turn_off(kernel::actor::ActorImpl* issuer); + void turn_on() const; + void turn_off(const kernel::actor::ActorImpl* issuer); std::vector get_all_actors(); - size_t get_actor_count(); + size_t get_actor_count() const; void add_actor(kernel::actor::ActorImpl* actor) { actor_list_.push_back(*actor); } void remove_actor(kernel::actor::ActorImpl* actor) { xbt::intrusive_erase(actor_list_, *actor); } void add_actor_at_boot(kernel::actor::ProcessArg* arg) { actors_at_boot_.emplace_back(arg); } diff --git a/src/surf/StorageImpl.hpp b/src/surf/StorageImpl.hpp index b1c60472f4..ba17f01a59 100644 --- a/src/surf/StorageImpl.hpp +++ b/src/surf/StorageImpl.hpp @@ -48,7 +48,8 @@ public: StorageModel& operator=(const StorageModel&) = delete; ~StorageModel(); - virtual StorageImpl* createStorage(const std::string& id, const std::string& type_id, const std::string& content_name, + virtual StorageImpl* createStorage(std::string& filename, int lineno, const std::string& id, + const std::string& type_id, const std::string& content_name, const std::string& attach) = 0; }; @@ -82,7 +83,7 @@ public: ~StorageImpl() override; s4u::Storage* get_iface() { return &piface_; } - const char* get_type() { return typeId_.c_str(); } + const char* get_type() const { return typeId_.c_str(); } lmm::Constraint* get_read_constraint() const { return constraint_read_; } lmm::Constraint* get_write_constraint() const { return constraint_write_; } /** @brief Check if the Storage is used (if an action currently uses its resources) */ diff --git a/src/surf/cpu_cas01.cpp b/src/surf/cpu_cas01.cpp index 557e549d78..9aae097fd2 100644 --- a/src/surf/cpu_cas01.cpp +++ b/src/surf/cpu_cas01.cpp @@ -108,7 +108,7 @@ void CpuCas01::on_speed_change() get_model()->get_maxmin_system()->update_constraint_bound(get_constraint(), get_core_count() * speed_.scale * speed_.peak); while ((var = get_constraint()->get_variable(&elem))) { - auto* action = static_cast(var->get_id()); + const CpuCas01Action* action = static_cast(var->get_id()); get_model()->get_maxmin_system()->update_variable_bound(action->get_variable(), action->requested_core() * speed_.scale * speed_.peak); @@ 
-145,7 +145,7 @@ void CpuCas01::apply_event(profile::Event* event, double value) get_host()->turn_off(); while ((var = cnst->get_variable(&elem))) { - auto* action = static_cast(var->get_id()); + Action* action = var->get_id(); if (action->get_state() == Action::State::INITED || action->get_state() == Action::State::STARTED || action->get_state() == Action::State::IGNORED) { @@ -218,7 +218,7 @@ CpuCas01Action::CpuCas01Action(Model* model, double cost, bool failed, double sp { } -int CpuCas01Action::requested_core() +int CpuCas01Action::requested_core() const { return requested_core_; } diff --git a/src/surf/cpu_cas01.hpp b/src/surf/cpu_cas01.hpp index 2950172ea6..1621b1b3fc 100644 --- a/src/surf/cpu_cas01.hpp +++ b/src/surf/cpu_cas01.hpp @@ -69,7 +69,7 @@ public: CpuCas01Action(const CpuCas01Action&) = delete; CpuCas01Action& operator=(const CpuCas01Action&) = delete; ~CpuCas01Action() override; - int requested_core(); + int requested_core() const; private: int requested_core_ = 1; @@ -79,4 +79,4 @@ private: } // namespace kernel } // namespace simgrid -#endif \ No newline at end of file +#endif diff --git a/src/surf/cpu_interface.cpp b/src/surf/cpu_interface.cpp index f85cce4b9f..6cbbe10fcc 100644 --- a/src/surf/cpu_interface.cpp +++ b/src/surf/cpu_interface.cpp @@ -72,6 +72,14 @@ Cpu::Cpu(Model* model, s4u::Host* host, lmm::Constraint* constraint, const std:: xbt_assert(speed_.scale > 0, "Speed of host %s must be >0", host->get_cname()); } +void Cpu::reset_vcpu(Cpu* that) +{ + this->pstate_ = that->pstate_; + this->speed_ = that->speed_; + this->speed_per_pstate_.clear(); + this->speed_per_pstate_.assign(that->speed_per_pstate_.begin(), that->speed_per_pstate_.end()); +} + int Cpu::get_pstate_count() const { return speed_per_pstate_.size(); diff --git a/src/surf/cpu_interface.hpp b/src/surf/cpu_interface.hpp index 1e77a39467..0215897243 100644 --- a/src/surf/cpu_interface.hpp +++ b/src/surf/cpu_interface.hpp @@ -56,8 +56,10 @@ class CpuAction; class XBT_PUBLIC Cpu : public Resource { int core_count_ = 1; s4u::Host* host_; - int pstate_ = 0; /*< Current pstate (index in the speed_per_pstate_)*/ - const std::vector speed_per_pstate_; /*< List of supported CPU capacities (pstate related) */ + int pstate_ = 0; /*< Current pstate (index in the speed_per_pstate_)*/ + std::vector speed_per_pstate_; /*< List of supported CPU capacities (pstate related). Not 'const' because VCPU + get modified on migration */ + friend simgrid::vm::VirtualMachineImpl; // Resets the VCPU public: /** @@ -128,6 +130,13 @@ protected: /** @brief Take speed changes (either load or max) into account */ virtual void on_speed_change(); + /** Reset most characteristics of this CPU to the one of that CPU. + * + * Used to reset a VCPU when its VM migrates to another host, so it only resets the fields that should be in this + *case. + **/ + virtual void reset_vcpu(Cpu* that); + public: /** @brief Get the available speed ratio, between 0 and 1. * diff --git a/src/surf/cpu_ti.cpp b/src/surf/cpu_ti.cpp index 0e1e208ccf..788dff2cf3 100644 --- a/src/surf/cpu_ti.cpp +++ b/src/surf/cpu_ti.cpp @@ -50,7 +50,7 @@ CpuTiProfile::CpuTiProfile(const profile::Profile* profile) * @param b End of interval * @return the integrate value. -1 if an error occurs. */ -double CpuTiTmgr::integrate(double a, double b) +double CpuTiTmgr::integrate(double a, double b) const { if ((a < 0.0) || (a > b)) { xbt_die("Error, invalid integration interval [%.2f,%.2f]. 
" @@ -90,7 +90,7 @@ double CpuTiTmgr::integrate(double a, double b) * @param a Initial point * @param b Final point */ -double CpuTiProfile::integrate_simple(double a, double b) +double CpuTiProfile::integrate_simple(double a, double b) const { return integrate_simple_point(b) - integrate_simple_point(a); } @@ -99,7 +99,7 @@ double CpuTiProfile::integrate_simple(double a, double b) * @brief Auxiliary function to compute the integral at point a. * @param a point */ -double CpuTiProfile::integrate_simple_point(double a) +double CpuTiProfile::integrate_simple_point(double a) const { double integral = 0; double a_aux = a; @@ -126,7 +126,7 @@ double CpuTiProfile::integrate_simple_point(double a) * @param amount Amount to be executed * @return End time */ -double CpuTiTmgr::solve(double a, double amount) +double CpuTiTmgr::solve(double a, double amount) const { /* Fix very small negative numbers */ if ((a < 0.0) && (a > -EPSILON)) { @@ -182,7 +182,7 @@ double CpuTiTmgr::solve(double a, double amount) * @param amount Amount of flops * @return The date when amount is available. */ -double CpuTiProfile::solve_simple(double a, double amount) +double CpuTiProfile::solve_simple(double a, double amount) const { double integral_a = integrate_simple_point(a); int ind = binary_search(integral_, integral_a + amount); @@ -200,7 +200,7 @@ double CpuTiProfile::solve_simple(double a, double amount) * @param a Time * @return CPU speed scale */ -double CpuTiTmgr::get_power_scale(double a) +double CpuTiTmgr::get_power_scale(double a) const { double reduced_a = a - floor(a / last_time_) * last_time_; int point = CpuTiProfile::binary_search(profile_->time_points_, reduced_a); diff --git a/src/surf/cpu_ti.hpp b/src/surf/cpu_ti.hpp index 4ce90e2881..92a052715f 100644 --- a/src/surf/cpu_ti.hpp +++ b/src/surf/cpu_ti.hpp @@ -29,9 +29,9 @@ class CpuTiProfile { public: explicit CpuTiProfile(const profile::Profile* profile); - double integrate_simple(double a, double b); - double integrate_simple_point(double a); - double solve_simple(double a, double amount); + double integrate_simple(double a, double b) const; + double integrate_simple_point(double a) const; + double solve_simple(double a, double amount) const; std::vector time_points_; std::vector integral_; @@ -50,9 +50,9 @@ public: CpuTiTmgr(const CpuTiTmgr&) = delete; CpuTiTmgr& operator=(const CpuTiTmgr&) = delete; - double integrate(double a, double b); - double solve(double a, double amount); - double get_power_scale(double a); + double integrate(double a, double b) const; + double solve(double a, double amount) const; + double get_power_scale(double a) const; private: Type type_ = Type::FIXED; diff --git a/src/surf/disk_s19.cpp b/src/surf/disk_s19.cpp index 66525d7eee..0f3f4b7423 100644 --- a/src/surf/disk_s19.cpp +++ b/src/surf/disk_s19.cpp @@ -48,7 +48,7 @@ void DiskS19Model::update_actions_state(double /*now*/, double delta) for (auto it = std::begin(*get_started_action_set()); it != std::end(*get_started_action_set());) { auto& action = *it; ++it; // increment iterator here since the following calls to action.finish() may invalidate it - action.update_remains(lrint(action.get_variable()->get_value() * delta)); + action.update_remains(rint(action.get_variable()->get_value() * delta)); action.update_max_duration(delta); if (((action.get_remains_no_update() <= 0) && (action.get_variable()->get_penalty() > 0)) || @@ -70,17 +70,17 @@ DiskS19::DiskS19(DiskModel* model, const std::string& name, lmm::System* maxminS DiskAction* DiskS19::io_start(sg_size_t size, 
s4u::Io::OpType type) { - return new DiskS19Action(get_model(), size, not is_on(), this, type); + return new DiskS19Action(get_model(), static_cast(size), not is_on(), this, type); } DiskAction* DiskS19::read(sg_size_t size) { - return new DiskS19Action(get_model(), size, not is_on(), this, s4u::Io::OpType::READ); + return new DiskS19Action(get_model(), static_cast(size), not is_on(), this, s4u::Io::OpType::READ); } DiskAction* DiskS19::write(sg_size_t size) { - return new DiskS19Action(get_model(), size, not is_on(), this, s4u::Io::OpType::WRITE); + return new DiskS19Action(get_model(), static_cast(size), not is_on(), this, s4u::Io::OpType::WRITE); } /********** diff --git a/src/surf/network_cm02.cpp b/src/surf/network_cm02.cpp index a6b00b46f9..1c1241dbca 100644 --- a/src/surf/network_cm02.cpp +++ b/src/surf/network_cm02.cpp @@ -238,7 +238,7 @@ Action* NetworkCm02Model::communicate(s4u::Host* src, s4u::Host* dst, double siz if (action->latency_ > 0) { action->set_variable(get_maxmin_system()->variable_new(action, 0.0, -1.0, constraints_per_variable)); if (is_update_lazy()) { - // add to the heap the event when the latency is payed + // add to the heap the event when the latency is paid double date = action->latency_ + action->get_last_update(); ActionHeap::Type type = route.empty() ? ActionHeap::Type::normal : ActionHeap::Type::latency; @@ -270,7 +270,7 @@ Action* NetworkCm02Model::communicate(s4u::Host* src, s4u::Host* dst, double siz // WIFI links are handled manually just above, so skip them now if (link->get_sharing_policy() == s4u::Link::SharingPolicy::WIFI) { xbt_assert(link == src_wifi_link || link == dst_wifi_link, - "Wifi links can only occure at the beginning of the route (meaning that it's attached to the src) or " + "Wifi links can only occur at the beginning of the route (meaning that it's attached to the src) or " "at its end (meaning that it's attached to the dst"); } else { get_maxmin_system()->expand(link->get_constraint(), action->get_variable(), 1.0); diff --git a/src/surf/network_ib.cpp b/src/surf/network_ib.cpp index dfcaf91401..0a8c889a61 100644 --- a/src/surf/network_ib.cpp +++ b/src/surf/network_ib.cpp @@ -110,7 +110,7 @@ NetworkIBModel::NetworkIBModel() : NetworkSmpiModel() } } -void NetworkIBModel::computeIBfactors(IBNode* root) +void NetworkIBModel::computeIBfactors(IBNode* root) const { double num_comm_out = root->ActiveCommsUp.size(); double max_penalty_out = 0.0; @@ -157,7 +157,7 @@ void NetworkIBModel::computeIBfactors(IBNode* root) XBT_DEBUG("Finished computing IB penalties"); } -void NetworkIBModel::updateIBfactors_rec(IBNode* root, std::vector& updatedlist) +void NetworkIBModel::updateIBfactors_rec(IBNode* root, std::vector& updatedlist) const { if (not updatedlist[root->id]) { XBT_DEBUG("IB - Updating rec %d", root->id); @@ -174,7 +174,7 @@ void NetworkIBModel::updateIBfactors_rec(IBNode* root, std::vector& update } } -void NetworkIBModel::updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove) +void NetworkIBModel::updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove) const { if (from == to) // disregard local comms (should use loopback) return; diff --git a/src/surf/network_ib.hpp b/src/surf/network_ib.hpp index 4b14ff2b60..72eecd3af8 100644 --- a/src/surf/network_ib.hpp +++ b/src/surf/network_ib.hpp @@ -42,15 +42,15 @@ class XBT_PRIVATE NetworkIBModel : public NetworkSmpiModel { double Bs; double Be; double ys; - void updateIBfactors_rec(IBNode* root, std::vector& updatedlist); - void 
computeIBfactors(IBNode* root); + void updateIBfactors_rec(IBNode* root, std::vector& updatedlist) const; + void computeIBfactors(IBNode* root) const; public: NetworkIBModel(); explicit NetworkIBModel(const char* name); NetworkIBModel(const NetworkIBModel&) = delete; NetworkIBModel& operator=(const NetworkIBModel&) = delete; - void updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove); + void updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove) const; std::unordered_map active_nodes; std::unordered_map> active_comms; diff --git a/src/surf/network_interface.cpp b/src/surf/network_interface.cpp index 5d9a7a522c..764c414c75 100644 --- a/src/surf/network_interface.cpp +++ b/src/surf/network_interface.cpp @@ -142,7 +142,7 @@ void LinkImpl::turn_off() const kernel::lmm::Element* elem = nullptr; double now = surf_get_clock(); while ((var = get_constraint()->get_variable(&elem))) { - Action* action = static_cast(var->get_id()); + Action* action = var->get_id(); if (action->get_state() == Action::State::INITED || action->get_state() == Action::State::STARTED) { action->set_finish_time(now); action->set_state(Action::State::FAILED); @@ -151,7 +151,7 @@ void LinkImpl::turn_off() } } -void LinkImpl::on_bandwidth_change() +void LinkImpl::on_bandwidth_change() const { s4u::Link::on_bandwidth_change(this->piface_); } diff --git a/src/surf/network_interface.hpp b/src/surf/network_interface.hpp index 087dadf75c..9cb6cc7621 100644 --- a/src/surf/network_interface.hpp +++ b/src/surf/network_interface.hpp @@ -144,7 +144,7 @@ public: void turn_on() override; void turn_off() override; - void on_bandwidth_change(); + void on_bandwidth_change() const; virtual void set_bandwidth_profile(kernel::profile::Profile* profile); /*< setup the profile file with bandwidth events diff --git a/src/surf/network_ns3.cpp b/src/surf/network_ns3.cpp index 824a3bf606..65d5e7dc61 100644 --- a/src/surf/network_ns3.cpp +++ b/src/surf/network_ns3.cpp @@ -20,8 +20,8 @@ #include #include -#include "ns3/wifi-module.h" #include "ns3/mobility-module.h" +#include "ns3/wifi-module.h" #include "network_ns3.hpp" #include "ns3/ns3_simulator.hpp" @@ -37,9 +37,6 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(ns3, surf, "Logging specific to the SURF network ns-3 module"); -std::vector IPV4addr; -static std::string transformIpv4Address(ns3::Ipv4Address from); - /***************** * Crude globals * *****************/ @@ -48,71 +45,80 @@ extern std::map flow_from_sock; extern std::map sink_from_sock; static ns3::InternetStackHelper stack; -static ns3::NodeContainer nodes; -static ns3::NodeContainer Cluster_nodes; -static ns3::Ipv4InterfaceContainer interfaces; -static int number_of_nodes = 0; -static int number_of_clusters_nodes = 0; static int number_of_links = 1; static int number_of_networks = 1; /* wifi globals */ static ns3::WifiHelper wifi; -static ns3::YansWifiPhyHelper wifiPhy = ns3::YansWifiPhyHelper::Default (); -static ns3::YansWifiChannelHelper wifiChannel = ns3::YansWifiChannelHelper::Default (); +static ns3::YansWifiPhyHelper wifiPhy = ns3::YansWifiPhyHelper::Default(); +static ns3::YansWifiChannelHelper wifiChannel = ns3::YansWifiChannelHelper::Default(); static ns3::WifiMacHelper wifiMac; static ns3::MobilityHelper mobility; simgrid::xbt::Extension NetPointNs3::EXTENSION_ID; +static std::string transformIpv4Address(ns3::Ipv4Address from) +{ + std::stringstream sstream; + sstream << from; + return sstream.str(); +} + NetPointNs3::NetPointNs3() : ns3_node_(ns3::CreateObject(0)) { 
stack.Install(ns3_node_); - nodes.Add(ns3_node_); - node_num = number_of_nodes++; } WifiZone::WifiZone(std::string name_, simgrid::s4u::Host* host_, ns3::Ptr ap_node_, - ns3::Ptr channel_, int mcs_, int nss_, int network_, int link_) : - name(name_), host(host_), ap_node(ap_node_), channel(channel_), mcs(mcs_), nss(nss_), - network(network_), link(link_) { - n_sta_nodes = 0; - wifi_zones[name_] = this; + ns3::Ptr channel_, int mcs_, int nss_, int network_, int link_) + : name(name_) + , host(host_) + , ap_node(ap_node_) + , channel(channel_) + , mcs(mcs_) + , nss(nss_) + , network(network_) + , link(link_) +{ + n_sta_nodes = 0; + wifi_zones[name_] = this; } -bool WifiZone::is_ap(ns3::Ptr node){ - for (std::pair zone : wifi_zones) - if (zone.second->get_ap_node() == node) - return true; - return false; +bool WifiZone::is_ap(ns3::Ptr node) +{ + for (std::pair zone : wifi_zones) + if (zone.second->get_ap_node() == node) + return true; + return false; } -WifiZone* WifiZone::by_name(std::string name) { - WifiZone* zone; - try { - zone = wifi_zones.at(name); - } - catch (const std::out_of_range& oor) { - return nullptr; - } - return zone; +WifiZone* WifiZone::by_name(std::string name) +{ + WifiZone* zone; + try { + zone = wifi_zones.at(name); + } catch (const std::out_of_range& oor) { + return nullptr; + } + return zone; } std::unordered_map WifiZone::wifi_zones; -static void initialize_ns3_wifi() { - wifi.SetStandard (ns3::WIFI_PHY_STANDARD_80211n_5GHZ); +static void initialize_ns3_wifi() +{ + wifi.SetStandard(ns3::WIFI_PHY_STANDARD_80211n_5GHZ); - for (auto host : simgrid::s4u::Engine::get_instance()->get_all_hosts()) { - const char* wifi_link = host->get_property("wifi_link"); - const char* wifi_mcs = host->get_property("wifi_mcs"); - const char* wifi_nss = host->get_property("wifi_nss"); + for (auto host : simgrid::s4u::Engine::get_instance()->get_all_hosts()) { + const char* wifi_link = host->get_property("wifi_link"); + const char* wifi_mcs = host->get_property("wifi_mcs"); + const char* wifi_nss = host->get_property("wifi_nss"); - if (wifi_link) - new WifiZone(wifi_link, host, host->get_netpoint()->extension()->ns3_node_, - wifiChannel.Create(), wifi_mcs ? atoi(wifi_mcs) : 3, wifi_nss ? atoi(wifi_nss) : 1, 0, 0); - } + if (wifi_link) + new WifiZone(wifi_link, host, host->get_netpoint()->extension()->ns3_node_, wifiChannel.Create(), + wifi_mcs ? atoi(wifi_mcs) : 3, wifi_nss ? atoi(wifi_nss) : 1, 0, 0); + } } /************* @@ -121,24 +127,45 @@ static void initialize_ns3_wifi() { static void clusterCreation_cb(simgrid::kernel::routing::ClusterCreationArgs const& cluster) { - for (int const& i : *cluster.radicals) { - // Routers don't create a router on the other end of the private link by themselves. 
- // We just need this router to be given an ID so we create a temporary NetPointNS3 so that it gets one - auto* host_dst = new NetPointNs3(); + ns3::NodeContainer Nodes; + for (int const& i : *cluster.radicals) { // Create private link std::string host_id = cluster.prefix + std::to_string(i) + cluster.suffix; - auto* host_src = simgrid::s4u::Host::by_name(host_id)->get_netpoint()->extension(); - xbt_assert(host_src, "Cannot find a ns-3 host of name %s", host_id.c_str()); + auto* src = simgrid::s4u::Host::by_name(host_id)->get_netpoint(); + auto* dst = simgrid::s4u::Engine::get_instance()->netpoint_by_name_or_null(cluster.router_id); + xbt_assert(dst != nullptr, "No router named %s", cluster.router_id.c_str()); - // Any ns-3 route is symmetrical - ns3_add_direct_route(host_src, host_dst, cluster.bw, cluster.lat, cluster.id, cluster.sharing_policy); + ns3_add_direct_route(src, dst, cluster.bw, cluster.lat, cluster.id, + cluster.sharing_policy); // Any ns-3 route is symmetrical - delete host_dst; + // Also add the host to the list of hosts that will be connected to the backbone + Nodes.Add(src->extension()->ns3_node_); } - //Create link backbone - ns3_add_cluster(cluster.id.c_str(), cluster.bb_bw, cluster.bb_lat); + // Create link backbone + + xbt_assert(Nodes.GetN() <= 65000, "Cluster with ns-3 is limited to 65000 nodes"); + ns3::CsmaHelper csma; + csma.SetChannelAttribute("DataRate", + ns3::DataRateValue(ns3::DataRate(cluster.bb_bw * 8))); // ns-3 takes bps, but we provide Bps + csma.SetChannelAttribute("Delay", ns3::TimeValue(ns3::Seconds(cluster.bb_lat))); + ns3::NetDeviceContainer devices = csma.Install(Nodes); + XBT_DEBUG("Create CSMA"); + + std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links); + XBT_DEBUG("Assign IP Addresses %s to CSMA.", addr.c_str()); + ns3::Ipv4AddressHelper ipv4; + ipv4.SetBase(addr.c_str(), "255.255.0.0"); + ipv4.Assign(devices); + + if (number_of_links == 255) { + xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255"); + number_of_links = 1; + number_of_networks++; + } else { + number_of_links++; + } } static void routeCreation_cb(bool symmetrical, simgrid::kernel::routing::NetPoint* src, @@ -156,17 +183,8 @@ static void routeCreation_cb(bool symmetrical, simgrid::kernel::routing::NetPoin // XBT_DEBUG("src (%s), dst (%s), src_id = %d, dst_id = %d",src,dst, src_id, dst_id); XBT_DEBUG("\tLink (%s) bw:%fbps lat:%fs", link->get_cname(), link->get_bandwidth(), link->get_latency()); - // create link ns3 - auto* host_src = src->extension(); - auto* host_dst = dst->extension(); - - host_src->set_name(src->get_name()); - host_dst->set_name(dst->get_name()); - - xbt_assert(host_src != nullptr, "Network element %s does not seem to be ns-3-ready", src->get_cname()); - xbt_assert(host_dst != nullptr, "Network element %s does not seem to be ns-3-ready", dst->get_cname()); - - ns3_add_direct_route(host_src, host_dst, link->get_bandwidth(), link->get_latency(), link->get_name(), link->get_sharing_policy()); + ns3_add_direct_route(src, dst, link->get_bandwidth(), link->get_latency(), link->get_name(), + link->get_sharing_policy()); } else { static bool warned_about_long_routes = false; @@ -181,14 +199,6 @@ static void routeCreation_cb(bool symmetrical, simgrid::kernel::routing::NetPoin } } -/* Create the ns3 topology based on routing strategy */ -static void postparse_cb() -{ - IPV4addr.shrink_to_fit(); - ns3::GlobalRouteManager::BuildGlobalRoutingDatabase(); - ns3::GlobalRouteManager::InitializeRoutes(); 
-} - /********* * Model * *********/ @@ -215,22 +225,36 @@ NetworkNS3Model::NetworkNS3Model() : NetworkModel(Model::UpdateAlgo::FULL) NetPointNs3::EXTENSION_ID = routing::NetPoint::extension_create(); - ns3_initialize(ns3_tcp_model.get()); + ns3::Config::SetDefault("ns3::TcpSocket::SegmentSize", ns3::UintegerValue(1000)); + ns3::Config::SetDefault("ns3::TcpSocket::DelAckCount", ns3::UintegerValue(1)); + ns3::Config::SetDefault("ns3::TcpSocketBase::Timestamp", ns3::BooleanValue(false)); + + auto TcpProtocol = ns3_tcp_model.get(); + if (TcpProtocol == "default") { + /* nothing to do */ + + } else if (TcpProtocol == "Reno" || TcpProtocol == "NewReno" || TcpProtocol == "Tahoe") { + XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str()); + ns3::Config::SetDefault("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::Tcp" + TcpProtocol)); + + } else { + xbt_die("The ns3/TcpModel must be: NewReno or Reno or Tahoe"); + } routing::NetPoint::on_creation.connect([](routing::NetPoint& pt) { pt.extension_set(new NetPointNs3()); - XBT_VERB("SimGrid's %s is known as node %d within ns-3", pt.get_cname(), pt.extension()->node_num); + XBT_VERB("Declare SimGrid's %s within ns-3", pt.get_cname()); }); - routing::on_cluster_creation.connect(&clusterCreation_cb); - s4u::Engine::on_platform_created.connect(&postparse_cb); + s4u::Engine::on_platform_created.connect([]() { + /* Create the ns3 topology based on routing strategy */ + ns3::GlobalRouteManager::BuildGlobalRoutingDatabase(); + ns3::GlobalRouteManager::InitializeRoutes(); + }); + routing::on_cluster_creation.connect(&clusterCreation_cb); s4u::NetZone::on_route_creation.connect(&routeCreation_cb); } -NetworkNS3Model::~NetworkNS3Model() { - IPV4addr.clear(); -} - LinkImpl* NetworkNS3Model::create_link(const std::string& name, const std::vector& bandwidths, double latency, s4u::Link::SharingPolicy policy) { @@ -337,63 +361,60 @@ void NetworkNS3Model::update_actions_state(double now, double delta) LinkNS3::LinkNS3(NetworkNS3Model* model, const std::string& name, double bandwidth, double latency, s4u::Link::SharingPolicy policy) - : LinkImpl(model, name, nullptr) + : LinkImpl(model, name, nullptr), sharing_policy_(policy) { bandwidth_.peak = bandwidth; latency_.peak = latency; - sharing_policy_ = policy; + + /* If wifi, create the wifizone now. 
If not, don't do anything: the links will be created in routeCreate_cb */ if (policy == simgrid::s4u::Link::SharingPolicy::WIFI) { - static bool wifi_init = false; - if (!wifi_init) { - initialize_ns3_wifi(); - wifi_init = true; - } - - ns3::NetDeviceContainer netA; - WifiZone* zone = WifiZone::by_name(name); - xbt_assert(zone != 0, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str()); - - wifi.SetRemoteStationManager ("ns3::ConstantRateWifiManager", - "ControlMode", ns3::StringValue ("HtMcs0"), - "DataMode", ns3::StringValue ("HtMcs" + std::to_string(zone->get_mcs()))); - - wifiPhy.SetChannel (zone->get_channel()); - wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss())); - wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss())); - wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss())); - wifiMac.SetType("ns3::ApWifiMac", - "Ssid", ns3::SsidValue(name)); - - netA.Add(wifi.Install (wifiPhy, wifiMac, zone->get_ap_node())); - - ns3::Ptr positionAllocS = ns3::CreateObject (); - positionAllocS->Add(ns3::Vector(0, 0, 0)); - mobility.SetPositionAllocator(positionAllocS); - mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel"); - mobility.Install(zone->get_ap_node()); - - ns3::Ipv4AddressHelper address; - std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links); - address.SetBase(addr.c_str(), "255.255.0.0"); - XBT_DEBUG("\tInterface stack '%s'", addr.c_str()); - interfaces.Add(address.Assign (netA)); - zone->set_network(number_of_networks); - zone->set_link(number_of_links); - - NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension(); - int nodeNum = netpoint_ns3->node_num; - if (IPV4addr.size() <= (unsigned)nodeNum) - IPV4addr.resize(nodeNum + 1); - IPV4addr[nodeNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 1)); - - if (number_of_links == 255){ - xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255"); - number_of_links = 1; - number_of_networks++; - } else { - number_of_links++; - } + static bool wifi_init = false; + if (!wifi_init) { + initialize_ns3_wifi(); + wifi_init = true; + } + + ns3::NetDeviceContainer netA; + WifiZone* zone = WifiZone::by_name(name); + xbt_assert(zone != 0, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str()); + NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension(); + + wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"), "DataMode", + ns3::StringValue("HtMcs" + std::to_string(zone->get_mcs()))); + + wifiPhy.SetChannel(zone->get_channel()); + wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss())); + wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss())); + wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss())); + + wifiMac.SetType("ns3::ApWifiMac"); + + netA.Add(wifi.Install(wifiPhy, wifiMac, zone->get_ap_node())); + + ns3::Ptr positionAllocS = ns3::CreateObject(); + positionAllocS->Add(ns3::Vector(0, 0, 0)); + mobility.SetPositionAllocator(positionAllocS); + mobility.SetMobilityModel("ns3::ConstantPositionMobilityModel"); + mobility.Install(zone->get_ap_node()); + + ns3::Ipv4AddressHelper address; + std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links); + address.SetBase(addr.c_str(), "255.255.0.0"); + XBT_DEBUG("\tInterface stack '%s'", addr.c_str()); + auto 
addresses = address.Assign(netA); + zone->set_network(number_of_networks); + zone->set_link(number_of_links); + + netpoint_ns3->ipv4_address_ = transformIpv4Address(addresses.GetAddress(addresses.GetN() - 1)); + + if (number_of_links == 255) { + xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255"); + number_of_links = 1; + number_of_networks++; + } else { + number_of_links++; + } } s4u::Link::on_creation(*this->get_iface()); } @@ -432,19 +453,15 @@ NetworkNS3Action::NetworkNS3Action(Model* model, double totalBytes, s4u::Host* s static int port_number = 1025; // Port number is limited from 1025 to 65 000 - unsigned int node1 = src->get_netpoint()->extension()->node_num; - unsigned int node2 = dst->get_netpoint()->extension()->node_num; - ns3::Ptr src_node = src->get_netpoint()->extension()->ns3_node_; ns3::Ptr dst_node = dst->get_netpoint()->extension()->ns3_node_; - xbt_assert(node2 < IPV4addr.size(), "Element %s is unknown to ns-3. Is it connected to any one-hop link?", - dst->get_netpoint()->get_cname()); - std::string& addr = IPV4addr[node2]; + std::string& addr = dst->get_netpoint()->extension()->ipv4_address_; xbt_assert(not addr.empty(), "Element %s is unknown to ns-3. Is it connected to any one-hop link?", dst->get_netpoint()->get_cname()); - XBT_DEBUG("ns3: Create flow of %.0f Bytes from %u to %u with Interface %s", totalBytes, node1, node2, addr.c_str()); + XBT_DEBUG("ns3: Create flow of %.0f Bytes from %s to %s with Interface %s", totalBytes, src->get_cname(), + dst->get_cname(), addr.c_str()); ns3::PacketSinkHelper sink("ns3::TcpSocketFactory", ns3::InetSocketAddress(ns3::Ipv4Address::GetAny(), port_number)); ns3::ApplicationContainer apps = sink.Install(dst_node); @@ -462,7 +479,6 @@ NetworkNS3Action::NetworkNS3Action(Model* model, double totalBytes, s4u::Host* s port_number = 1025; XBT_WARN("Too many connections! Port number is saturated. Trying to use the oldest ports."); } - xbt_assert(port_number <= 65000, "Too many connections! 
Port number is saturated."); s4u::Link::on_communicate(*this); } @@ -502,162 +518,82 @@ void ns3_simulator(double maxSeconds) id.Cancel(); } -// initialize the ns-3 interface and environment -void ns3_initialize(std::string TcpProtocol) +void ns3_add_direct_route(simgrid::kernel::routing::NetPoint* src, simgrid::kernel::routing::NetPoint* dst, double bw, + double lat, const std::string& link_name, simgrid::s4u::Link::SharingPolicy policy) { - // tcpModel are: - // "ns3::TcpNewReno" - // "ns3::TcpReno" - // "ns3::TcpTahoe" - - ns3::Config::SetDefault ("ns3::TcpSocket::SegmentSize", ns3::UintegerValue (1000)); - ns3::Config::SetDefault ("ns3::TcpSocket::DelAckCount", ns3::UintegerValue (1)); - ns3::Config::SetDefault ("ns3::TcpSocketBase::Timestamp", ns3::BooleanValue (false)); - - if (TcpProtocol == "default") { - /* nothing to do */ - - } else if (TcpProtocol == "Reno") { - XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str()); - ns3::Config::SetDefault ("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::TcpReno")); - - } else if (TcpProtocol == "NewReno") { - XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str()); - ns3::Config::SetDefault ("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::TcpNewReno")); + ns3::Ipv4AddressHelper address; + ns3::NetDeviceContainer netA; - } else if (TcpProtocol == "Tahoe") { - XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str()); - ns3::Config::SetDefault ("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::TcpTahoe")); + // create link ns3 + auto* host_src = src->extension(); + auto* host_dst = dst->extension(); - } else { - xbt_die("The ns3/TcpModel must be: NewReno or Reno or Tahoe"); - } -} + xbt_assert(host_src != nullptr, "Network element %s does not seem to be ns-3-ready", src->get_cname()); + xbt_assert(host_dst != nullptr, "Network element %s does not seem to be ns-3-ready", dst->get_cname()); -void ns3_add_cluster(const char* /*id*/, double bw, double lat) -{ - ns3::NodeContainer Nodes; + if (policy == simgrid::s4u::Link::SharingPolicy::WIFI) { + auto a = host_src->ns3_node_; + auto b = host_dst->ns3_node_; + xbt_assert(WifiZone::is_ap(a) != WifiZone::is_ap(b), + "A wifi route can only exist between an access point node and a station node."); - for (unsigned int i = number_of_clusters_nodes; i < Cluster_nodes.GetN(); i++) { - Nodes.Add(Cluster_nodes.Get(i)); - XBT_DEBUG("Add node %u to cluster", i); - } - number_of_clusters_nodes = Cluster_nodes.GetN(); + ns3::Ptr apNode = WifiZone::is_ap(a) ? a : b; + ns3::Ptr staNode = apNode == a ? 
b : a; - XBT_DEBUG("Add router %u to cluster", nodes.GetN() - Nodes.GetN() - 1); - Nodes.Add(nodes.Get(nodes.GetN()-Nodes.GetN()-1)); + WifiZone* zone = WifiZone::by_name(link_name); - xbt_assert(Nodes.GetN() <= 65000, "Cluster with ns-3 is limited to 65000 nodes"); - ns3::CsmaHelper csma; - csma.SetChannelAttribute("DataRate", ns3::DataRateValue(ns3::DataRate(bw * 8))); // ns-3 takes bps, but we provide Bps - csma.SetChannelAttribute("Delay", ns3::TimeValue(ns3::Seconds(lat))); - ns3::NetDeviceContainer devices = csma.Install(Nodes); - XBT_DEBUG("Create CSMA"); + wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"), "DataMode", + ns3::StringValue("HtMcs" + std::to_string(zone->get_mcs()))); - std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links); - XBT_DEBUG("Assign IP Addresses %s to CSMA.", addr.c_str()); - ns3::Ipv4AddressHelper ipv4; - ipv4.SetBase(addr.c_str(), "255.255.0.0"); - interfaces.Add(ipv4.Assign (devices)); - - if(number_of_links == 255){ - xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255"); - number_of_links = 1; - number_of_networks++; - }else{ - number_of_links++; - } - XBT_DEBUG("Number of nodes in Cluster_nodes: %u", Cluster_nodes.GetN()); -} + wifiPhy.SetChannel(zone->get_channel()); + wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss())); + wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss())); + wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss())); -static std::string transformIpv4Address(ns3::Ipv4Address from) -{ - std::stringstream sstream; - sstream << from ; - return sstream.str(); -} + wifiMac.SetType("ns3::StaWifiMac"); -void ns3_add_direct_route(NetPointNs3* src, NetPointNs3* dst, double bw, double lat, std::string link_name, - simgrid::s4u::Link::SharingPolicy policy) -{ - ns3::Ipv4AddressHelper address; - ns3::NetDeviceContainer netA; + netA.Add(wifi.Install(wifiPhy, wifiMac, staNode)); - int srcNum = src->node_num; - int dstNum = dst->node_num; + ns3::Config::Set("/NodeList/*/DeviceList/*/$ns3::WifiNetDevice/Phy/ChannelWidth", ns3::UintegerValue(40)); - ns3::Ptr a = src->ns3_node_; - ns3::Ptr b = dst->ns3_node_; + NetPointNs3* sta_netpointNs3 = WifiZone::is_ap(host_src->ns3_node_) ? host_src : host_dst; + const char* wifi_distance = simgrid::s4u::Host::by_name(sta_netpointNs3->name_)->get_property("wifi_distance"); + ns3::Ptr positionAllocS = ns3::CreateObject(); + positionAllocS->Add(ns3::Vector(wifi_distance ? atof(wifi_distance) : 10.0, 0, 0)); + mobility.SetPositionAllocator(positionAllocS); + mobility.SetMobilityModel("ns3::ConstantPositionMobilityModel"); + mobility.Install(staNode); - if (policy == simgrid::s4u::Link::SharingPolicy::WIFI) { - xbt_assert(WifiZone::is_ap(a) != WifiZone::is_ap(b), "A wifi route can only exist between an access point node and a station node."); - - ns3::Ptr apNode = WifiZone::is_ap(a) ? a : b; - ns3::Ptr staNode = apNode == a ? 
b : a; - - WifiZone* zone = WifiZone::by_name(link_name); - - wifi.SetRemoteStationManager ("ns3::ConstantRateWifiManager", - "ControlMode", ns3::StringValue ("HtMcs0"), - "DataMode", ns3::StringValue ("HtMcs" + std::to_string(zone->get_mcs()))); - - wifiPhy.SetChannel (zone->get_channel()); - wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss())); - wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss())); - wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss())); - - wifiMac.SetType ("ns3::StaWifiMac", - "Ssid", ns3::SsidValue(link_name), - "ActiveProbing", ns3::BooleanValue(false)); - - netA.Add(wifi.Install (wifiPhy, wifiMac, staNode)); - - ns3::Config::Set ("/NodeList/*/DeviceList/*/$ns3::WifiNetDevice/Phy/ChannelWidth", ns3::UintegerValue (40)); - - NetPointNs3* sta_netpointNs3 = WifiZone::is_ap(src->ns3_node_) ? dst : src; - const char* wifi_distance = simgrid::s4u::Host::by_name(sta_netpointNs3->name_)->get_property("wifi_distance"); - ns3::Ptr positionAllocS = ns3::CreateObject (); - positionAllocS->Add(ns3::Vector( wifi_distance ? atof(wifi_distance) : 10.0 , 0, 0)); - mobility.SetPositionAllocator(positionAllocS); - mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel"); - mobility.Install(staNode); - - std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", zone->get_network(), zone->get_link()); - address.SetBase(addr.c_str(), "255.255.0.0", ("0.0.0." + std::to_string(zone->get_n_sta_nodes() + 2)).c_str()); - zone->add_sta_node(); - XBT_DEBUG("\tInterface stack '%s'", addr.c_str()); - interfaces.Add(address.Assign (netA)); - if (IPV4addr.size() <= (unsigned)dstNum) - IPV4addr.resize(dstNum + 1); - IPV4addr[dstNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 1)); + std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", zone->get_network(), zone->get_link()); + address.SetBase(addr.c_str(), "255.255.0.0", ("0.0.0." 
+ std::to_string(zone->get_n_sta_nodes() + 2)).c_str()); + zone->add_sta_node(); + XBT_DEBUG("\tInterface stack '%s'", addr.c_str()); + auto addresses = address.Assign(netA); + host_dst->ipv4_address_ = transformIpv4Address(addresses.GetAddress(addresses.GetN() - 1)); } else { ns3::PointToPointHelper pointToPoint; - XBT_DEBUG("\tAdd PTP from %d to %d bw:'%f Bps' lat:'%fs'", srcNum, dstNum, bw, lat); + + XBT_DEBUG("\tAdd PTP from %s to %s bw:'%f Bps' lat:'%fs'", src->get_cname(), dst->get_cname(), bw, lat); pointToPoint.SetDeviceAttribute("DataRate", ns3::DataRateValue(ns3::DataRate(bw * 8))); // ns-3 takes bps, but we provide Bps pointToPoint.SetChannelAttribute("Delay", ns3::TimeValue(ns3::Seconds(lat))); - netA.Add(pointToPoint.Install(a, b)); + netA.Add(pointToPoint.Install(host_src->ns3_node_, host_dst->ns3_node_)); std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links); address.SetBase(addr.c_str(), "255.255.0.0"); XBT_DEBUG("\tInterface stack '%s'", addr.c_str()); - interfaces.Add(address.Assign (netA)); - - if (IPV4addr.size() <= (unsigned)srcNum) - IPV4addr.resize(srcNum + 1); - IPV4addr[srcNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 2)); + auto addresses = address.Assign(netA); - if (IPV4addr.size() <= (unsigned)dstNum) - IPV4addr.resize(dstNum + 1); - IPV4addr[dstNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 1)); + host_src->ipv4_address_ = transformIpv4Address(addresses.GetAddress(0)); + host_dst->ipv4_address_ = transformIpv4Address(addresses.GetAddress(1)); - if (number_of_links == 255){ - xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255"); - number_of_links = 1; - number_of_networks++; + if (number_of_links == 255) { + xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255"); + number_of_links = 1; + number_of_networks++; } else { - number_of_links++; + number_of_links++; } } } diff --git a/src/surf/network_ns3.hpp b/src/surf/network_ns3.hpp index 4e4defdb84..abd978b2f9 100644 --- a/src/surf/network_ns3.hpp +++ b/src/surf/network_ns3.hpp @@ -17,7 +17,7 @@ namespace resource { class NetworkNS3Model : public NetworkModel { public: NetworkNS3Model(); - ~NetworkNS3Model(); + ~NetworkNS3Model() = default; LinkImpl* create_link(const std::string& name, const std::vector& bandwidth, double latency, s4u::Link::SharingPolicy policy) override; Action* communicate(s4u::Host* src, s4u::Host* dst, double size, double rate) override; diff --git a/src/surf/network_wifi.cpp b/src/surf/network_wifi.cpp index fb98ffa49d..9c60524148 100644 --- a/src/surf/network_wifi.cpp +++ b/src/surf/network_wifi.cpp @@ -46,8 +46,12 @@ double NetworkWifiLink::get_host_rate(const s4u::Host* host) return -1; int rate_id = host_rates_it->second; - xbt_assert(rate_id >= 0 && rate_id < (int)bandwidths_.size(), "Host '%s' has an invalid rate '%d' on wifi link '%s'", - host->get_name().c_str(), rate_id, this->get_cname()); + xbt_assert(rate_id >= 0, + "Negative host wifi rate levels are invalid but host '%s' uses %d as a rate level on link '%s'", + host->get_cname(), rate_id, this->get_cname()); + xbt_assert(rate_id < (int)bandwidths_.size(), + "Link '%s' only has %zu wifi rate levels, so the provided level %d is invalid for host '%s'.", + this->get_cname(), bandwidths_.size(), rate_id, host->get_cname()); Metric rate = use_decay_model_ ? 
decay_bandwidths_[rate_id] : bandwidths_[rate_id]; return rate.peak * rate.scale; @@ -60,11 +64,11 @@ s4u::Link::SharingPolicy NetworkWifiLink::get_sharing_policy() void NetworkWifiLink::refresh_decay_bandwidths(){ // Compute number of STAtion on the Access Point - int nSTA=host_rates_.size(); - + int nSTA = static_cast(host_rates_.size()); + std::vector new_bandwidths; for (auto bandwidth : bandwidths_){ - // Instanciate decay model relatively to the actual bandwidth + // Instantiate decay model relatively to the actual bandwidth double max_bw=bandwidth.peak; double min_bw=bandwidth.peak-(wifi_max_rate_-wifi_min_rate_); double model_rate=bandwidth.peak-(wifi_max_rate_-model_rate_); diff --git a/src/surf/network_wifi.hpp b/src/surf/network_wifi.hpp index f0d2cce6c0..08699dcee2 100644 --- a/src/surf/network_wifi.hpp +++ b/src/surf/network_wifi.hpp @@ -25,7 +25,7 @@ class NetworkWifiLink : public LinkImpl { /** @brief Hold every rates association between host and links (host name, rates id) */ std::map host_rates_; - /** @brief A link can have several bandwith attach to it (mostly use by wifi model) */ + /** @brief A link can have several bandwidths attached to it (mostly use by wifi model) */ std::vector bandwidths_; /** @brief Should we use the decay model ? */ diff --git a/src/surf/ns3/ns3_simulator.hpp b/src/surf/ns3/ns3_simulator.hpp index 04944b13d1..a09388c1e7 100644 --- a/src/surf/ns3/ns3_simulator.hpp +++ b/src/surf/ns3/ns3_simulator.hpp @@ -9,30 +9,29 @@ #include "simgrid/s4u/Host.hpp" #include "src/surf/network_ns3.hpp" +#include "ns3/wifi-module.h" #include #include -#include "ns3/wifi-module.h" #include -class NetPointNs3 { +class XBT_PRIVATE NetPointNs3 { public: static simgrid::xbt::Extension EXTENSION_ID; - void set_name(std::string name) {name_ = name;} + void set_name(std::string name) { name_ = name; } explicit NetPointNs3(); std::string name_; - int node_num; ns3::Ptr ns3_node_; - + std::string ipv4_address_; }; -XBT_PUBLIC void ns3_initialize(std::string TcpProtocol); -XBT_PUBLIC void ns3_simulator(double max_seconds); -XBT_PUBLIC void ns3_add_direct_route(NetPointNs3* src, NetPointNs3* dst, double bw, double lat, std::string link_name, - simgrid::s4u::Link::SharingPolicy policy); -XBT_PUBLIC void ns3_add_cluster(const char* id, double bw, double lat); +XBT_PRIVATE void ns3_initialize(std::string TcpProtocol); +XBT_PRIVATE void ns3_simulator(double max_seconds); +XBT_PRIVATE void ns3_add_direct_route(simgrid::kernel::routing::NetPoint* src, simgrid::kernel::routing::NetPoint* dst, + double bw, double lat, const std::string& link_name, + simgrid::s4u::Link::SharingPolicy policy); class XBT_PRIVATE SgFlow { public: @@ -47,7 +46,7 @@ public: simgrid::kernel::resource::NetworkNS3Action* action_; }; -void start_flow(ns3::Ptr sock, const char* to, uint16_t port_number); +XBT_PRIVATE void start_flow(ns3::Ptr sock, const char* to, uint16_t port_number); static inline std::string transform_socket_ptr(ns3::Ptr local_socket) { @@ -61,19 +60,19 @@ public: WifiZone(std::string name_, simgrid::s4u::Host* host_, ns3::Ptr ap_node_, ns3::Ptr channel_, int mcs_, int nss_, int network_, int link_); - const char* get_cname() {return name.c_str();} - simgrid::s4u::Host* get_host(){return host;} - ns3::Ptr get_ap_node() {return ap_node;} - ns3::Ptr get_channel() {return channel;} - int get_mcs() {return mcs;} - int get_nss() {return nss;} - int get_network() {return network;} - int get_link() {return link;} - int get_n_sta_nodes() {return n_sta_nodes;} - - void set_network(int network_) 
{network = network_;} - void set_link(int link_) {link = link_;} - void add_sta_node() {n_sta_nodes++;} + const char* get_cname() { return name.c_str(); } + simgrid::s4u::Host* get_host() { return host; } + ns3::Ptr get_ap_node() { return ap_node; } + ns3::Ptr get_channel() { return channel; } + int get_mcs() { return mcs; } + int get_nss() { return nss; } + int get_network() { return network; } + int get_link() { return link; } + int get_n_sta_nodes() { return n_sta_nodes; } + + void set_network(int network_) { network = network_; } + void set_link(int link_) { link = link_; } + void add_sta_node() { n_sta_nodes++; } static bool is_ap(ns3::Ptr node); static WifiZone* by_name(std::string name); diff --git a/src/surf/ptask_L07.cpp b/src/surf/ptask_L07.cpp index ea60e12d58..d1f2593df1 100644 --- a/src/surf/ptask_L07.cpp +++ b/src/surf/ptask_L07.cpp @@ -144,7 +144,7 @@ kernel::resource::CpuAction* HostL07Model::execute_parallel(const std::vector& host_list, const double* flops_amount, const double* bytes_amount, double rate) - : CpuAction(model, 1, 0), computationAmount_(flops_amount), communicationAmount_(bytes_amount), rate_(rate) + : CpuAction(model, 1.0, false), computationAmount_(flops_amount), communicationAmount_(bytes_amount), rate_(rate) { size_t link_nb = 0; size_t used_host_nb = 0; /* Only the hosts with something to compute (>0 flops) are counted) */ @@ -295,7 +295,7 @@ void CpuL07::on_speed_change() get_model()->get_maxmin_system()->update_constraint_bound(get_constraint(), speed_.peak * speed_.scale); while ((var = get_constraint()->get_variable(&elem))) { - const kernel::resource::Action* action = static_cast(var->get_id()); + const kernel::resource::Action* action = var->get_id(); get_model()->get_maxmin_system()->update_variable_bound(action->get_variable(), speed_.scale * speed_.peak); } diff --git a/src/surf/sg_platf.cpp b/src/surf/sg_platf.cpp index 01cbb713f7..e493ca1b28 100644 --- a/src/surf/sg_platf.cpp +++ b/src/surf/sg_platf.cpp @@ -238,7 +238,7 @@ void sg_platf_new_cluster(simgrid::kernel::routing::ClusterCreationArgs* cluster linkUp = simgrid::s4u::Link::by_name_or_null(tmp_link); linkDown = simgrid::s4u::Link::by_name_or_null(tmp_link); - auto* as_cluster = static_cast(current_as); + ClusterZone* as_cluster = current_as; as_cluster->private_links_.insert({as_cluster->node_pos(rankId), {linkUp->get_impl(), linkDown->get_impl()}}); } @@ -274,12 +274,9 @@ void sg_platf_new_cluster(simgrid::kernel::routing::ClusterCreationArgs* cluster // Add a router. 
XBT_DEBUG(" "); XBT_DEBUG("", cluster->router_id.c_str()); - if (cluster->router_id.empty()) { - std::string newid = std::string(cluster->prefix) + cluster->id + "_router" + cluster->suffix; - current_as->router_ = sg_platf_new_router(newid, NULL); - } else { - current_as->router_ = sg_platf_new_router(cluster->router_id, NULL); - } + if (cluster->router_id.empty()) + cluster->router_id = std::string(cluster->prefix) + cluster->id + "_router" + cluster->suffix; + current_as->router_ = sg_platf_new_router(cluster->router_id, NULL); //Make the backbone if ((cluster->bb_bw > 0) || (cluster->bb_lat > 0)) { @@ -383,7 +380,8 @@ void sg_platf_new_storage(simgrid::kernel::routing::StorageCreationArgs* storage storage->id.c_str(), stype->model.c_str(), stype->id.c_str(), storage->content.c_str(), storage->properties); - auto s = surf_storage_model->createStorage(storage->id, stype->id, storage->content, storage->attach); + auto s = surf_storage_model->createStorage(storage->filename, storage->lineno, storage->id, stype->id, + storage->content, storage->attach); if (storage->properties) { s->set_properties(*storage->properties); @@ -666,13 +664,13 @@ void sg_platf_new_Zone_seal() xbt_assert(current_routing, "Cannot seal the current AS: none under construction"); current_routing->seal(); simgrid::s4u::NetZone::on_seal(*current_routing->get_iface()); - current_routing = static_cast(current_routing->get_father()); + current_routing = current_routing->get_father(); } /** @brief Add a link connecting a host to the rest of its AS (which must be cluster or vivaldi) */ void sg_platf_new_hostlink(const simgrid::kernel::routing::HostLinkCreationArgs* hostlink) { - simgrid::kernel::routing::NetPoint* netpoint = simgrid::s4u::Host::by_name(hostlink->id)->get_netpoint(); + const simgrid::kernel::routing::NetPoint* netpoint = simgrid::s4u::Host::by_name(hostlink->id)->get_netpoint(); xbt_assert(netpoint, "Host '%s' not found!", hostlink->id.c_str()); xbt_assert(dynamic_cast(current_routing), "Only hosts from Cluster and Vivaldi ASes can get a host_link."); diff --git a/src/surf/storage_n11.cpp b/src/surf/storage_n11.cpp index 50439d8426..dba05a810f 100644 --- a/src/surf/storage_n11.cpp +++ b/src/surf/storage_n11.cpp @@ -10,6 +10,7 @@ #include "src/kernel/lmm/maxmin.hpp" #include "src/surf/xml/platf.hpp" #include "surf/surf.hpp" +#include "xbt/parse_units.hpp" XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(surf_storage); @@ -49,15 +50,16 @@ StorageN11Model::StorageN11Model() all_existing_models.push_back(this); } -StorageImpl* StorageN11Model::createStorage(const std::string& id, const std::string& type_id, - const std::string& content_name, const std::string& attach) +StorageImpl* StorageN11Model::createStorage(std::string& filename, int lineno, const std::string& id, + const std::string& type_id, const std::string& content_name, + const std::string& attach) { const StorageType* storage_type = storage_types.at(type_id); - double Bread = - surf_parse_get_bandwidth(storage_type->model_properties->at("Bread").c_str(), "property Bread, storage", type_id); - double Bwrite = surf_parse_get_bandwidth(storage_type->model_properties->at("Bwrite").c_str(), - "property Bwrite, storage", type_id); + double Bread = xbt_parse_get_bandwidth(filename, lineno, storage_type->model_properties->at("Bread").c_str(), + "property Bread, storage", type_id); + double Bwrite = xbt_parse_get_bandwidth(filename, lineno, storage_type->model_properties->at("Bwrite").c_str(), + "property Bwrite, storage", type_id); XBT_DEBUG("SURF storage create 
resource\n\t\tid '%s'\n\t\ttype '%s'\n\t\tBread '%f'\n", id.c_str(), type_id.c_str(), Bread); @@ -76,7 +78,7 @@ void StorageN11Model::update_actions_state(double /*now*/, double delta) for (auto it = std::begin(*get_started_action_set()); it != std::end(*get_started_action_set());) { auto& action = *it; ++it; // increment iterator here since the following calls to action.finish() may invalidate it - action.update_remains(lrint(action.get_variable()->get_value() * delta)); + action.update_remains(rint(action.get_variable()->get_value() * delta)); action.update_max_duration(delta); if (((action.get_remains_no_update() <= 0) && (action.get_variable()->get_penalty() > 0)) || @@ -101,17 +103,17 @@ StorageN11::StorageN11(StorageModel* model, const std::string& name, lmm::System StorageAction* StorageN11::io_start(sg_size_t size, s4u::Io::OpType type) { - return new StorageN11Action(get_model(), size, not is_on(), this, type); + return new StorageN11Action(get_model(), static_cast(size), not is_on(), this, type); } StorageAction* StorageN11::read(sg_size_t size) { - return new StorageN11Action(get_model(), size, not is_on(), this, s4u::Io::OpType::READ); + return new StorageN11Action(get_model(), static_cast(size), not is_on(), this, s4u::Io::OpType::READ); } StorageAction* StorageN11::write(sg_size_t size) { - return new StorageN11Action(get_model(), size, not is_on(), this, s4u::Io::OpType::WRITE); + return new StorageN11Action(get_model(), static_cast(size), not is_on(), this, s4u::Io::OpType::WRITE); } /********** diff --git a/src/surf/storage_n11.hpp b/src/surf/storage_n11.hpp index cad37742db..3fcd75c37a 100644 --- a/src/surf/storage_n11.hpp +++ b/src/surf/storage_n11.hpp @@ -29,8 +29,8 @@ class XBT_PRIVATE StorageN11Action; class StorageN11Model : public StorageModel { public: StorageN11Model(); - StorageImpl* createStorage(const std::string& id, const std::string& type_id, const std::string& content_name, - const std::string& attach) override; + StorageImpl* createStorage(std::string& filename, int lineno, const std::string& id, const std::string& type_id, + const std::string& content_name, const std::string& attach) override; double next_occurring_event(double now) override; void update_actions_state(double now, double delta) override; }; diff --git a/src/surf/surf_c_bindings.cpp b/src/surf/surf_c_bindings.cpp index 44e77881dd..10d2874153 100644 --- a/src/surf/surf_c_bindings.cpp +++ b/src/surf/surf_c_bindings.cpp @@ -82,7 +82,7 @@ double surf_solve(double max_date) XBT_DEBUG("Looking for next trace event"); - while (1) { // Handle next occurring events until none remains + while (true) { // Handle next occurring events until none remains double next_event_date = simgrid::kernel::profile::future_evt_set.next_date(); XBT_DEBUG("Next TRACE event: %f", next_event_date); diff --git a/src/surf/surf_interface.cpp b/src/surf/surf_interface.cpp index 624ee5441c..9c8f2301c1 100644 --- a/src/surf/surf_interface.cpp +++ b/src/surf/surf_interface.cpp @@ -197,7 +197,7 @@ int find_model_description(const std::vector& table, c auto pos = std::find_if(table.begin(), table.end(), [&name](const surf_model_description_t& item) { return item.name == name; }); if (pos != table.end()) - return std::distance(table.begin(), pos); + return static_cast(std::distance(table.begin(), pos)); if (table.empty()) xbt_die("No model is valid! 
This is a bug."); diff --git a/src/surf/xml/platf.hpp b/src/surf/xml/platf.hpp index 070894d3aa..1ea9f325d9 100644 --- a/src/surf/xml/platf.hpp +++ b/src/surf/xml/platf.hpp @@ -23,12 +23,6 @@ XBT_PUBLIC void surf_parse_assert_netpoint(const std::string& hostname, const st XBT_PUBLIC double surf_parse_get_double(const std::string& s); XBT_PUBLIC int surf_parse_get_int(const std::string& s); -XBT_PUBLIC double surf_parse_get_time(const char* string, const char* entity_kind, const std::string& name); -XBT_PUBLIC double surf_parse_get_size(const char* string, const char* entity_kind, const std::string& name); -XBT_PUBLIC double surf_parse_get_bandwidth(const char* string, const char* entity_kind, const std::string& name); -XBT_PUBLIC std::vector surf_parse_get_bandwidths(const char* string, const char* entity_kind, - const std::string& name); -XBT_PUBLIC double surf_parse_get_speed(const char* string, const char* entity_kind, const std::string& name); XBT_PUBLIC void surf_parse(); /* Entry-point to the parser */ diff --git a/src/surf/xml/platf_private.hpp b/src/surf/xml/platf_private.hpp index 71a639858f..5b7a510a27 100644 --- a/src/surf/xml/platf_private.hpp +++ b/src/surf/xml/platf_private.hpp @@ -123,6 +123,8 @@ public: class StorageCreationArgs { public: + std::string filename; + int lineno; std::string id; std::string type_id; std::string content; diff --git a/src/surf/xml/surfxml_parseplatf.cpp b/src/surf/xml/surfxml_parseplatf.cpp index 1a0e0add21..638d2f5e5b 100644 --- a/src/surf/xml/surfxml_parseplatf.cpp +++ b/src/surf/xml/surfxml_parseplatf.cpp @@ -61,7 +61,7 @@ void sg_platf_trace_connect(simgrid::kernel::routing::TraceConnectCreationArgs* void parse_platform_file(const std::string& file) { const char* cfile = file.c_str(); - int len = strlen(cfile); + size_t len = strlen(cfile); bool is_lua = len > 3 && file[len - 3] == 'l' && file[len - 2] == 'u' && file[len - 1] == 'a'; sg_platf_init(); diff --git a/src/surf/xml/surfxml_sax_cb.cpp b/src/surf/xml/surfxml_sax_cb.cpp index 06db5168d0..f8fcade127 100644 --- a/src/surf/xml/surfxml_sax_cb.cpp +++ b/src/surf/xml/surfxml_sax_cb.cpp @@ -14,8 +14,8 @@ #include "src/surf/xml/platf_private.hpp" #include "surf/surf.hpp" #include "xbt/file.hpp" +#include "xbt/parse_units.hpp" -#include #include #include #include @@ -27,7 +27,7 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_parse, surf, "Logging specific to the SURF parsing module"); -static std::string surf_parsed_filename; // Currently parsed file (for the error messages) +std::string surf_parsed_filename; // Currently parsed file (for the error messages) std::vector parsed_link_list; /* temporary store of current link list of a route */ std::vector parsed_disk_list; /* temporary store of current disk list of a host */ @@ -94,10 +94,8 @@ int surf_parse_get_int(const std::string& s) } } -namespace { - /* Turn something like "1-4,6,9-11" into the vector {1,2,3,4,6,9,10,11} */ -std::vector* explodesRadical(const std::string& radicals) +static std::vector* explodesRadical(const std::string& radicals) { std::vector* exploded = new std::vector(); @@ -127,143 +125,6 @@ std::vector* explodesRadical(const std::string& radicals) return exploded; } -class unit_scale : public std::unordered_map { -public: - using std::unordered_map::unordered_map; - // tuples are : - explicit unit_scale(std::initializer_list> generators); -}; - -unit_scale::unit_scale(std::initializer_list> generators) -{ - for (const auto& gen : generators) { - const std::string& unit = std::get<0>(gen); - double value = std::get<1>(gen); - 
const int base = std::get<2>(gen); - const bool abbrev = std::get<3>(gen); - double mult; - std::vector prefixes; - switch (base) { - case 2: - mult = 1024.0; - prefixes = abbrev ? std::vector{"Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"} - : std::vector{"kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"}; - break; - case 10: - mult = 1000.0; - prefixes = abbrev ? std::vector{"k", "M", "G", "T", "P", "E", "Z", "Y"} - : std::vector{"kilo", "mega", "giga", "tera", "peta", "exa", "zeta", "yotta"}; - break; - default: - THROW_IMPOSSIBLE; - } - emplace(unit, value); - for (const auto& prefix : prefixes) { - value *= mult; - emplace(prefix + unit, value); - } - } -} - -/* Note: no warning is issued for unit-less values when `name' is empty. */ -double surf_parse_get_value_with_unit(const char* string, const unit_scale& units, const char* entity_kind, - const std::string& name, const char* error_msg, const char* default_unit) -{ - char* endptr; - errno = 0; - double res = strtod(string, &endptr); - const char* ptr = endptr; // for const-correctness - if (errno == ERANGE) - surf_parse_error(std::string("value out of range: ") + string); - if (ptr == string) - surf_parse_error(std::string("cannot parse number:") + string); - if (ptr[0] == '\0') { - // Ok, 0 can be unit-less - if (res != 0 && not name.empty()) - XBT_WARN("Deprecated unit-less value '%s' for %s %s. %s", string, entity_kind, name.c_str(), error_msg); - ptr = default_unit; - } - auto u = units.find(ptr); - if (u == units.end()) - surf_parse_error(std::string("unknown unit: ") + ptr); - return res * u->second; -} -} - -double surf_parse_get_time(const char* string, const char* entity_kind, const std::string& name) -{ - static const unit_scale units{std::make_pair("w", 7 * 24 * 60 * 60), - std::make_pair("d", 24 * 60 * 60), - std::make_pair("h", 60 * 60), - std::make_pair("m", 60), - std::make_pair("s", 1.0), - std::make_pair("ms", 1e-3), - std::make_pair("us", 1e-6), - std::make_pair("ns", 1e-9), - std::make_pair("ps", 1e-12)}; - return surf_parse_get_value_with_unit(string, units, entity_kind, name, - "Append 's' to your time to get seconds", "s"); -} - -double surf_parse_get_size(const char* string, const char* entity_kind, const std::string& name) -{ - static const unit_scale units{std::make_tuple("b", 0.125, 2, true), std::make_tuple("b", 0.125, 10, true), - std::make_tuple("B", 1.0, 2, true), std::make_tuple("B", 1.0, 10, true)}; - return surf_parse_get_value_with_unit(string, units, entity_kind, name, - "Append 'B' to get bytes (or 'b' for bits but 1B = 8b).", "B"); -} - -double surf_parse_get_bandwidth(const char* string, const char* entity_kind, const std::string& name) -{ - static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true), - std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)}; - return surf_parse_get_value_with_unit(string, units, entity_kind, name, - "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", "Bps"); -} - -std::vector surf_parse_get_bandwidths(const char* string, const char* entity_kind, const std::string& name) -{ - static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true), - std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)}; - - std::vector bandwidths; - std::vector tokens; - boost::split(tokens, string, boost::is_any_of(";")); - for (auto token : tokens) { - bandwidths.push_back(surf_parse_get_value_with_unit( - 
token.c_str(), units, entity_kind, name, - "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", "Bps")); - } - - return bandwidths; -} - -double surf_parse_get_speed(const char* string, const char* entity_kind, const std::string& name) -{ - static const unit_scale units{std::make_tuple("f", 1.0, 10, true), std::make_tuple("flops", 1.0, 10, false)}; - return surf_parse_get_value_with_unit(string, units, entity_kind, name, - "Append 'f' or 'flops' to your speed to get flop per second", "f"); -} - -static std::vector surf_parse_get_all_speeds(char* speeds, const char* entity_kind, const std::string& id) -{ - std::vector speed_per_pstate; - - if (strchr(speeds, ',') == nullptr){ - double speed = surf_parse_get_speed(speeds, entity_kind, id); - speed_per_pstate.push_back(speed); - } else { - std::vector pstate_list; - boost::split(pstate_list, speeds, boost::is_any_of(",")); - for (auto speed_str : pstate_list) { - boost::trim(speed_str); - double speed = surf_parse_get_speed(speed_str.c_str(), entity_kind, id); - speed_per_pstate.push_back(speed); - XBT_DEBUG("Speed value: %f", speed); - } - } - return speed_per_pstate; -} /* * All the callback lists that can be overridden anywhere. @@ -293,6 +154,8 @@ void ETag_surfxml_storage() storage.properties = property_sets.back(); property_sets.pop_back(); + storage.filename = surf_parsed_filename; + storage.lineno = surf_parse_lineno; storage.id = A_surfxml_storage_id; storage.type_id = A_surfxml_storage_typeId; storage.content = A_surfxml_storage_content; @@ -320,7 +183,8 @@ void ETag_surfxml_storage___type() storage_type.id = A_surfxml_storage___type_id; storage_type.model = A_surfxml_storage___type_model; storage_type.size = - surf_parse_get_size(A_surfxml_storage___type_size, "size of storage type", storage_type.id.c_str()); + static_cast(surf_parse_get_size(surf_parsed_filename, surf_parse_lineno, A_surfxml_storage___type_size, + "size of storage type", storage_type.id.c_str())); sg_platf_new_storage_type(&storage_type); } @@ -418,7 +282,8 @@ void ETag_surfxml_host() { host.id = A_surfxml_host_id; - host.speed_per_pstate = surf_parse_get_all_speeds(A_surfxml_host_speed, "speed of host", host.id); + host.speed_per_pstate = + xbt_parse_get_all_speeds(surf_parsed_filename, surf_parse_lineno, A_surfxml_host_speed, "speed of host", host.id); XBT_DEBUG("pstate: %s", A_surfxml_host_pstate); host.core_amount = surf_parse_get_int(A_surfxml_host_core); @@ -450,8 +315,10 @@ void ETag_surfxml_disk() { property_sets.pop_back(); disk.id = A_surfxml_disk_id; - disk.read_bw = surf_parse_get_bandwidth(A_surfxml_disk_read___bw, "read_bw of disk ", disk.id); - disk.write_bw = surf_parse_get_bandwidth(A_surfxml_disk_write___bw, "write_bw of disk ", disk.id); + disk.read_bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_disk_read___bw, + "read_bw of disk ", disk.id); + disk.write_bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_disk_write___bw, + "write_bw of disk ", disk.id); parsed_disk_list.push_back(sg_platf_new_disk(&disk)); } @@ -479,20 +346,29 @@ void ETag_surfxml_cluster(){ cluster.prefix = A_surfxml_cluster_prefix; cluster.suffix = A_surfxml_cluster_suffix; cluster.radicals = explodesRadical(A_surfxml_cluster_radical); - cluster.speeds = surf_parse_get_all_speeds(A_surfxml_cluster_speed, "speed of cluster", cluster.id); + cluster.speeds = xbt_parse_get_all_speeds(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_speed, + "speed of cluster", cluster.id); 
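// ---------------------------------------------------------------------------
// Aside (illustration only, not part of this patch): a minimal usage sketch of
// the unit parsers that this commit renames from surf_parse_get_* to
// xbt_parse_get_* and extends with the platform file name and line number for
// error reporting. Signatures follow the xbt_parse_units.cpp additions further
// down; the declarations are assumed to live in xbt/parse_units.hpp, as the
// include added to surfxml_sax_cb.cpp suggests. The file name "platform.xml",
// line number 42 and entity ids are invented for illustration.
#include "xbt/parse_units.hpp"
#include <vector>

static void parse_units_sketch()
{
  // "125MBps" -> 125 * 1e6 bytes per second ('M' decimal prefix on the 'Bps' unit).
  double bw = xbt_parse_get_bandwidth("platform.xml", 42, "125MBps", "bw of cluster", "c1");

  // "10ms" -> 0.01 seconds.
  double lat = xbt_parse_get_time("platform.xml", 42, "10ms", "lat of cluster", "c1");

  // A comma-separated pstate list yields one speed (in flop/s) per pstate.
  char speeds[] = "1Gf,500Mf";
  std::vector<double> per_pstate =
      xbt_parse_get_all_speeds("platform.xml", 42, speeds, "speed of host", "host1");

  (void)bw; (void)lat; (void)per_pstate; // silence unused-variable warnings in this sketch
}
// ---------------------------------------------------------------------------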
cluster.core_amount = surf_parse_get_int(A_surfxml_cluster_core); - cluster.bw = surf_parse_get_bandwidth(A_surfxml_cluster_bw, "bw of cluster", cluster.id); - cluster.lat = surf_parse_get_time(A_surfxml_cluster_lat, "lat of cluster", cluster.id); + cluster.bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_bw, "bw of cluster", + cluster.id); + cluster.lat = + xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_lat, "lat of cluster", cluster.id); if(strcmp(A_surfxml_cluster_bb___bw,"")) - cluster.bb_bw = surf_parse_get_bandwidth(A_surfxml_cluster_bb___bw, "bb_bw of cluster", cluster.id); + cluster.bb_bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_bb___bw, + "bb_bw of cluster", cluster.id); if(strcmp(A_surfxml_cluster_bb___lat,"")) - cluster.bb_lat = surf_parse_get_time(A_surfxml_cluster_bb___lat, "bb_lat of cluster", cluster.id); + cluster.bb_lat = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_bb___lat, + "bb_lat of cluster", cluster.id); if(strcmp(A_surfxml_cluster_limiter___link,"")) - cluster.limiter_link = surf_parse_get_bandwidth(A_surfxml_cluster_limiter___link, "limiter_link of cluster", cluster.id); + cluster.limiter_link = + xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_limiter___link, + "limiter_link of cluster", cluster.id); if(strcmp(A_surfxml_cluster_loopback___bw,"")) - cluster.loopback_bw = surf_parse_get_bandwidth(A_surfxml_cluster_loopback___bw, "loopback_bw of cluster", cluster.id); + cluster.loopback_bw = xbt_parse_get_bandwidth( + surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_loopback___bw, "loopback_bw of cluster", cluster.id); if(strcmp(A_surfxml_cluster_loopback___lat,"")) - cluster.loopback_lat = surf_parse_get_time(A_surfxml_cluster_loopback___lat, "loopback_lat of cluster", cluster.id); + cluster.loopback_lat = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_loopback___lat, + "loopback_lat of cluster", cluster.id); switch(AX_surfxml_cluster_topology){ case A_surfxml_cluster_topology_FLAT: @@ -553,9 +429,12 @@ void STag_surfxml_cabinet(){ cabinet.id = A_surfxml_cabinet_id; cabinet.prefix = A_surfxml_cabinet_prefix; cabinet.suffix = A_surfxml_cabinet_suffix; - cabinet.speed = surf_parse_get_speed(A_surfxml_cabinet_speed, "speed of cabinet", cabinet.id.c_str()); - cabinet.bw = surf_parse_get_bandwidth(A_surfxml_cabinet_bw, "bw of cabinet", cabinet.id.c_str()); - cabinet.lat = surf_parse_get_time(A_surfxml_cabinet_lat, "lat of cabinet", cabinet.id.c_str()); + cabinet.speed = xbt_parse_get_speed(surf_parsed_filename, surf_parse_lineno, A_surfxml_cabinet_speed, + "speed of cabinet", cabinet.id.c_str()); + cabinet.bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cabinet_bw, "bw of cabinet", + cabinet.id.c_str()); + cabinet.lat = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cabinet_lat, "lat of cabinet", + cabinet.id.c_str()); cabinet.radicals = explodesRadical(A_surfxml_cabinet_radical); sg_platf_new_cabinet(&cabinet); @@ -565,9 +444,12 @@ void STag_surfxml_peer(){ simgrid::kernel::routing::PeerCreationArgs peer; peer.id = std::string(A_surfxml_peer_id); - peer.speed = surf_parse_get_speed(A_surfxml_peer_speed, "speed of peer", peer.id.c_str()); - peer.bw_in = surf_parse_get_bandwidth(A_surfxml_peer_bw___in, "bw_in of peer", peer.id.c_str()); - peer.bw_out = 
surf_parse_get_bandwidth(A_surfxml_peer_bw___out, "bw_out of peer", peer.id.c_str()); + peer.speed = xbt_parse_get_speed(surf_parsed_filename, surf_parse_lineno, A_surfxml_peer_speed, "speed of peer", + peer.id.c_str()); + peer.bw_in = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_peer_bw___in, "bw_in of peer", + peer.id.c_str()); + peer.bw_out = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_peer_bw___out, + "bw_out of peer", peer.id.c_str()); peer.coord = A_surfxml_peer_coordinates; peer.speed_trace = nullptr; if (A_surfxml_peer_availability___file[0] != '\0') { @@ -598,11 +480,13 @@ void ETag_surfxml_link(){ property_sets.pop_back(); link.id = std::string(A_surfxml_link_id); - link.bandwidths = surf_parse_get_bandwidths(A_surfxml_link_bandwidth, "bandwidth of link", link.id.c_str()); + link.bandwidths = xbt_parse_get_bandwidths(surf_parsed_filename, surf_parse_lineno, A_surfxml_link_bandwidth, + "bandwidth of link", link.id.c_str()); link.bandwidth_trace = A_surfxml_link_bandwidth___file[0] ? simgrid::kernel::profile::Profile::from_file(A_surfxml_link_bandwidth___file) : nullptr; - link.latency = surf_parse_get_time(A_surfxml_link_latency, "latency of link", link.id.c_str()); + link.latency = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_link_latency, "latency of link", + link.id.c_str()); link.latency_trace = A_surfxml_link_latency___file[0] ? simgrid::kernel::profile::Profile::from_file(A_surfxml_link_latency___file) : nullptr; @@ -672,9 +556,10 @@ void ETag_surfxml_backbone(){ link.properties = nullptr; link.id = std::string(A_surfxml_backbone_id); - link.bandwidths.push_back( - surf_parse_get_bandwidth(A_surfxml_backbone_bandwidth, "bandwidth of backbone", link.id.c_str())); - link.latency = surf_parse_get_time(A_surfxml_backbone_latency, "latency of backbone", link.id.c_str()); + link.bandwidths.push_back(xbt_parse_get_bandwidth( + surf_parsed_filename, surf_parse_lineno, A_surfxml_backbone_bandwidth, "bandwidth of backbone", link.id.c_str())); + link.latency = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_backbone_latency, + "latency of backbone", link.id.c_str()); link.policy = simgrid::s4u::Link::SharingPolicy::SHARED; sg_platf_new_link(&link); diff --git a/src/xbt/automaton/automaton.c b/src/xbt/automaton/automaton.c index 1d6be9f2a4..5fc60df54e 100644 --- a/src/xbt/automaton/automaton.c +++ b/src/xbt/automaton/automaton.c @@ -329,9 +329,9 @@ int xbt_automaton_exp_label_compare(const_xbt_automaton_exp_label_t l1, const_xb int xbt_automaton_propositional_symbols_compare_value(const_xbt_dynar_t s1, const_xbt_dynar_t s2) { - unsigned int nb_elem = xbt_dynar_length(s1); + unsigned long nb_elem = xbt_dynar_length(s1); - for (unsigned int cursor = 0; cursor < nb_elem; cursor++) { + for (unsigned long cursor = 0; cursor < nb_elem; cursor++) { const int* iptr1 = xbt_dynar_get_ptr(s1, cursor); const int* iptr2 = xbt_dynar_get_ptr(s2, cursor); if(*iptr1 != *iptr2) diff --git a/src/xbt/config.cpp b/src/xbt/config.cpp index 8d05c901dd..f317be522f 100644 --- a/src/xbt/config.cpp +++ b/src/xbt/config.cpp @@ -97,9 +97,14 @@ template class ConfigType; template <> class ConfigType { public: static constexpr const char* type_name = "int"; - static inline double parse(const char* value) + static inline int parse(const char* value) { - return parse_long(value); + long val = parse_long(value); + if (val < INT_MIN) + throw std::range_error("underflow"); + if (val > INT_MAX) + throw 
std::range_error("overflow"); + return static_cast(val); } }; template <> class ConfigType { @@ -269,9 +274,9 @@ public: } // Debug: - void dump(const char *name, const char *indent); - void show_aliases(); - void help(); + void dump(const char* name, const char* indent) const; + void show_aliases() const; + void help() const; protected: ConfigurationElement* get_dict_element(const std::string& name); @@ -321,7 +326,7 @@ void Config::alias(const std::string& realname, const std::string& aliasname) * @param name The name to give to this config set * @param indent what to write at the beginning of each line (right number of spaces) */ -void Config::dump(const char *name, const char *indent) +void Config::dump(const char* name, const char* indent) const { if (name) XBT_CVERB(xbt_help, "%s>> Dumping of the config set '%s':", indent, name); @@ -335,14 +340,14 @@ void Config::dump(const char *name, const char *indent) } /** @brief Displays the declared aliases and their replacement */ -void Config::show_aliases() +void Config::show_aliases() const { for (auto const& elm : aliases) XBT_HELP(" %-40s %s", elm.first.c_str(), elm.second->get_key().c_str()); } /** @brief Displays the declared options and their description */ -void Config::help() +void Config::help() const { for (auto const& elm : options) { simgrid::config::ConfigurationElement* variable = elm.second.get(); diff --git a/src/xbt/dynar.cpp b/src/xbt/dynar.cpp index 31bbef2572..6778ca1f92 100644 --- a/src/xbt/dynar.cpp +++ b/src/xbt/dynar.cpp @@ -160,7 +160,7 @@ void xbt_dynar_free(xbt_dynar_t* dynar) */ unsigned long xbt_dynar_length(const_xbt_dynar_t dynar) { - return (dynar ? (unsigned long) dynar->used : (unsigned long) 0); + return (dynar ? dynar->used : 0UL); } /**@brief check if a dynar is empty @@ -399,7 +399,7 @@ void xbt_dynar_map(const_xbt_dynar_t dynar, void_f_pvoid_t op) _sanity_check_dynar(dynar); for (i = 0; i < used; i++) { - char* elm = (char*) data + i * elmsize; + char* elm = data + i * elmsize; op(elm); } } diff --git a/src/xbt/log.cpp b/src/xbt/log.cpp index ef3fd942cd..7f4da38243 100644 --- a/src/xbt/log.cpp +++ b/src/xbt/log.cpp @@ -161,7 +161,7 @@ void _xbt_log_event_log(xbt_log_event_t ev, const char *fmt, ...) xbt_assert(static_cast(ev->priority) < sizeof(xbt_log_priority_names)/sizeof(xbt_log_priority_names[0]), "Priority %d is greater than the biggest allowed value", ev->priority); - while (1) { + while (true) { const s_xbt_log_appender_t* appender = cat->appender; if (appender != nullptr) { @@ -175,14 +175,14 @@ void _xbt_log_event_log(xbt_log_event_t ev, const char *fmt, ...) va_start(ev->ap, fmt); done = cat->layout->do_layout(cat->layout, ev, fmt); va_end(ev->ap); - ev->buffer = nullptr; // Calm down, static analyzers, this pointer to local array wont leak out of the scope. + ev->buffer = nullptr; // Calm down, static analyzers, this pointer to local array won't leak out of the scope. 
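// ---------------------------------------------------------------------------
// Aside (illustration only, not part of this patch): the src/xbt/config.cpp hunk
// above makes ConfigType<int>::parse reject values outside the int range before
// narrowing. A self-contained sketch of the same idiom, using only the standard
// library (parse_long is SimGrid-internal, so std::strtol stands in for it here):
#include <cerrno>
#include <climits>
#include <cstdlib>
#include <stdexcept>
#include <string>

static int parse_int_checked(const char* value)
{
  char* end = nullptr;
  errno     = 0;
  long val  = std::strtol(value, &end, 10);
  if (end == value || *end != '\0' || errno == ERANGE)
    throw std::invalid_argument(std::string("not an int: ") + value);
  if (val < INT_MIN)
    throw std::range_error("underflow");
  if (val > INT_MAX)
    throw std::range_error("overflow");
  return static_cast<int>(val); // safe: val is known to fit in an int
}
// ---------------------------------------------------------------------------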
if (done) { appender->do_append(appender, buff); } else { /* The static buffer was too small, use a dynamically expanded one */ ev->buffer_size = XBT_LOG_DYNAMIC_BUFFER_SIZE; ev->buffer = static_cast(xbt_malloc(ev->buffer_size)); - while (1) { + while (true) { va_start(ev->ap, fmt); done = cat->layout->do_layout(cat->layout, ev, fmt); va_end(ev->ap); diff --git a/src/xbt/mmalloc/mfree.c b/src/xbt/mmalloc/mfree.c index 86dae306ed..ce7b5e2d50 100644 --- a/src/xbt/mmalloc/mfree.c +++ b/src/xbt/mmalloc/mfree.c @@ -82,8 +82,10 @@ void mfree(struct mdesc *mdp, void *ptr) /* Mark all my ex-blocks as free */ for (it=0; itheapinfo[block].busy_block.size; it++) { if (mdp->heapinfo[block+it].type < 0) { - fprintf(stderr,"Internal Error: Asked to free a block already marked as free (block=%lu it=%d type=%lu). Please report this bug.\n", - (unsigned long)block,it,(unsigned long)mdp->heapinfo[block].type); + fprintf(stderr, + "Internal Error: Asked to free a block already marked as free (block=%zu it=%d type=%d). " + "Please report this bug.\n", + block, it, mdp->heapinfo[block].type); abort(); } mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE; @@ -101,8 +103,10 @@ void mfree(struct mdesc *mdp, void *ptr) /* Mark all my ex-blocks as free */ for (it=0; itheapinfo[block].free_block.size; it++) { if (mdp->heapinfo[block+it].type <0) { - fprintf(stderr,"Internal error: Asked to free a block already marked as free (block=%lu it=%d/%lu type=%lu). Please report this bug.\n", - (unsigned long)block,it,(unsigned long)mdp->heapinfo[block].free_block.size,(unsigned long)mdp->heapinfo[block].type); + fprintf(stderr, + "Internal error: Asked to free a block already marked as free (block=%zu it=%d/%zu type=%d). " + "Please report this bug.\n", + block, it, mdp->heapinfo[block].free_block.size, mdp->heapinfo[block].type); abort(); } mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE; @@ -149,7 +153,7 @@ void mfree(struct mdesc *mdp, void *ptr) default: if (type < 0) { - fprintf(stderr, "Unkown mmalloc block type.\n"); + fprintf(stderr, "Unknown mmalloc block type.\n"); abort(); } @@ -189,7 +193,7 @@ void mfree(struct mdesc *mdp, void *ptr) mdp -> heapstats.chunks_free -= BLOCKSIZE >> type; mdp -> heapstats.bytes_free -= BLOCKSIZE; - mfree((void *) mdp, (void *) ADDRESS(block)); + mfree(mdp, ADDRESS(block)); } else if (mdp->heapinfo[block].busy_frag.nfree != 0) { /* If some fragments of this block are free, you know what? I'm already happy. 
*/ ++mdp->heapinfo[block].busy_frag.nfree; diff --git a/src/xbt/mmalloc/mmalloc.c b/src/xbt/mmalloc/mmalloc.c index e3ba23c211..d3c26bbd26 100644 --- a/src/xbt/mmalloc/mmalloc.c +++ b/src/xbt/mmalloc/mmalloc.c @@ -54,7 +54,7 @@ static void initialize_heapinfo_heapinfo(const s_xbt_mheap_t* mdp) { // Update heapinfo about the heapinfo pages (!): xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0); - int block = BLOCK(mdp->heapinfo); + size_t block = BLOCK(mdp->heapinfo); size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE; // Mark them as free: for (size_t j=0; j!=nblocks; ++j) { @@ -250,7 +250,7 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size) result = mmalloc(mdp, BLOCKSIZE); // does not return NULL block = BLOCK(result); - mdp->heapinfo[block].type = log; + mdp->heapinfo[block].type = (int)log; /* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags */ size_t i; for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) { @@ -282,7 +282,9 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size) block = MALLOC_SEARCH_START; while (mdp->heapinfo[block].free_block.size < blocks) { if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool! - fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type); + fprintf(stderr, + "Internal error: found a free block not marked as such (block=%zu type=%d). Please report this bug.\n", + block, mdp->heapinfo[block].type); abort(); } diff --git a/src/xbt/mmalloc/mmorecore.c b/src/xbt/mmalloc/mmorecore.c index 99bc655ac7..d59a2a3c75 100644 --- a/src/xbt/mmalloc/mmorecore.c +++ b/src/xbt/mmalloc/mmorecore.c @@ -79,7 +79,7 @@ void *mmorecore(struct mdesc *mdp, ssize_t size) /* We are deallocating memory. If the amount requested would cause us to try to deallocate back past the base of * the mmap'd region then die verbosely. Otherwise, deallocate the memory and return the old break value. */ if (((char*)mdp->breakval) + size >= (char*)mdp->base) { - result = (void*)mdp->breakval; + result = mdp->breakval; mdp->breakval = (char*)mdp->breakval + size; moveto = PAGE_ALIGN(mdp->breakval); munmap(moveto, (size_t)(((char*)mdp->top) - ((char*)moveto)) - 1); @@ -134,11 +134,11 @@ void *mmorecore(struct mdesc *mdp, ssize_t size) mdp->base = mdp->breakval = mapto; mdp->top = PAGE_ALIGN((char*)mdp->breakval + size); - result = (void *) mdp->breakval; + result = mdp->breakval; mdp->breakval = (char*)mdp->breakval + size; } else { /* Memory is already mapped, we only need to increase the breakval: */ - result = (void *) mdp->breakval; + result = mdp->breakval; mdp->breakval = (char*)mdp->breakval + size; } } diff --git a/src/xbt/mmalloc/mrealloc.c b/src/xbt/mmalloc/mrealloc.c index 7639ede797..4ce18deaf3 100644 --- a/src/xbt/mmalloc/mrealloc.c +++ b/src/xbt/mmalloc/mrealloc.c @@ -79,7 +79,7 @@ void *mrealloc(xbt_mheap_t mdp, void *ptr, size_t size) /* Full blocks -> Full blocks; see if we can hold it in place. */ blocks = BLOCKIFY(size); if (blocks < mdp->heapinfo[block].busy_block.size) { - int it; + size_t it; /* The new size is smaller; return excess memory to the free list. 
*/ for (it= block+blocks; it< mdp->heapinfo[block].busy_block.size ; it++){ mdp->heapinfo[it].type = MMALLOC_TYPE_UNFRAGMENTED; // FIXME that should be useless, type should already be 0 here @@ -126,7 +126,7 @@ void *mrealloc(xbt_mheap_t mdp, void *ptr, size_t size) default: /* Fragment -> ??; type=logarithm to base two of the fragment size. */ if (type < 0) { - fprintf(stderr, "Unkown mmalloc block type.\n"); + fprintf(stderr, "Unknown mmalloc block type.\n"); abort(); } @@ -134,7 +134,7 @@ void *mrealloc(xbt_mheap_t mdp, void *ptr, size_t size) /* The new size is the same kind of fragment. */ result = ptr; - int frag_nb = RESIDUAL(result, BLOCKSIZE) >> type; + uintptr_t frag_nb = RESIDUAL(result, BLOCKSIZE) >> type; mdp->heapinfo[block].busy_frag.frag_size[frag_nb] = requested_size; mdp->heapinfo[block].busy_frag.ignore[frag_nb] = 0; diff --git a/src/xbt/random.cpp b/src/xbt/random.cpp index 8439deaff3..7deb41e704 100644 --- a/src/xbt/random.cpp +++ b/src/xbt/random.cpp @@ -29,7 +29,7 @@ bool Random::read_state(const std::string& filename) return not file.fail(); } -bool Random::write_state(const std::string& filename) +bool Random::write_state(const std::string& filename) const { std::ofstream file(filename); file << mt19937_gen; @@ -73,7 +73,7 @@ int XbtRandom::uniform_int(int min, int max) do { value = mt19937_gen(); } while (value >= decltype(mt19937_gen)::max() - decltype(mt19937_gen)::max() % range); - return value % range + min; + return static_cast(value % range + min); } double XbtRandom::uniform_real(double min, double max) @@ -84,7 +84,7 @@ double XbtRandom::uniform_real(double min, double max) do { numerator = mt19937_gen() - decltype(mt19937_gen)::min(); } while (numerator == divisor); - return min + (max - min) * numerator / divisor; + return min + (max - min) * static_cast(numerator) / divisor; } double XbtRandom::exponential(double lambda) diff --git a/src/xbt/xbt_log_layout_format.cpp b/src/xbt/xbt_log_layout_format.cpp index e07eca46dd..00e58fd526 100644 --- a/src/xbt/xbt_log_layout_format.cpp +++ b/src/xbt/xbt_log_layout_format.cpp @@ -30,18 +30,19 @@ static constexpr const char* ERRMSG = } else \ return 0 -#define set_sz_from_precision() \ - if (1) { \ - sz = rem_size; \ - if (precision != -1) { \ - if (precision < sz) \ - sz = precision + 1; /* +1 for the final '\0' */ \ - precision = -1; \ - } \ - } else (void)0 +#define set_sz_from_precision() \ + if (true) { \ + sz = rem_size; \ + if (precision != -1) { \ + if (precision < sz) \ + sz = precision + 1; /* +1 for the final '\0' */ \ + precision = -1; \ + } \ + } else \ + (void)0 #define show_it(data, letter) \ - if (1) { \ + if (true) { \ int len; \ int wd; \ if (length == -1) { \ @@ -58,13 +59,14 @@ static constexpr const char* ERRMSG = } \ check_overflow(len); \ } else \ - (void)0 + (void)0 -#define show_string(data) \ - if (1) { \ - const char *show_string_data = (data); \ - show_it(show_string_data ? show_string_data : "(null)", "s"); \ - } else (void)0 +#define show_string(data) \ + if (true) { \ + const char* show_string_data = (data); \ + show_it(show_string_data ? 
show_string_data : "(null)", "s"); \ + } else \ + (void)0 #define show_int(data) show_it((data), "d") #define show_double(data) show_it((data), "f") @@ -97,7 +99,7 @@ static int xbt_log_layout_format_doit(const s_xbt_log_layout_t* l, xbt_log_event check_overflow(1); break; case '.': /* precision specifier */ - precision = strtol(q + 1, &q, 10); + precision = static_cast(strtol(q + 1, &q, 10)); continue; /* conversion specifier still not found, continue reading */ case '0': case '1': @@ -109,7 +111,7 @@ static int xbt_log_layout_format_doit(const s_xbt_log_layout_t* l, xbt_log_event case '7': case '8': case '9': /* length modifier */ - length = strtol(q, &q, 10); + length = static_cast(strtol(q, &q, 10)); continue; /* conversion specifier still not found, continue reading */ case 'c': /* category name; LOG4J compliant should accept a precision postfix to show the hierarchy */ @@ -165,7 +167,7 @@ static int xbt_log_layout_format_doit(const s_xbt_log_layout_t* l, xbt_log_event xbt_abort(); } break; /* done, continue normally */ - } while (1); + } while (true); } else { *p = *q; check_overflow(1); diff --git a/src/xbt/xbt_main.cpp b/src/xbt/xbt_main.cpp index 324c2bf6c5..317741d265 100644 --- a/src/xbt/xbt_main.cpp +++ b/src/xbt/xbt_main.cpp @@ -87,12 +87,12 @@ static void xbt_preinit() GetSystemInfo(&si); xbt_pagesize = si.dwPageSize; #elif HAVE_SYSCONF - xbt_pagesize = sysconf(_SC_PAGESIZE); + xbt_pagesize = static_cast(sysconf(_SC_PAGESIZE)); #else # error Cannot get page size. #endif - xbt_pagebits = log2(xbt_pagesize); + xbt_pagebits = static_cast(log2(xbt_pagesize)); #ifdef _TWO_DIGIT_EXPONENT /* Even printf behaves differently on Windows... */ diff --git a/src/xbt/xbt_os_file.cpp b/src/xbt/xbt_os_file.cpp index e28fe03b8f..713009edb6 100644 --- a/src/xbt/xbt_os_file.cpp +++ b/src/xbt/xbt_os_file.cpp @@ -33,14 +33,14 @@ simgrid::xbt::Path::Path() #endif } -std::string simgrid::xbt::Path::get_dir_name() +std::string simgrid::xbt::Path::get_dir_name() const { std::string p(path_); const char* res = dirname(&p[0]); return std::string(res, strlen(res)); } -std::string simgrid::xbt::Path::get_base_name() +std::string simgrid::xbt::Path::get_base_name() const { std::string p(path_); const char* res = basename(&p[0]); diff --git a/src/xbt/xbt_os_time.c b/src/xbt/xbt_os_time.c index 4d67b49857..1a1c371435 100644 --- a/src/xbt/xbt_os_time.c +++ b/src/xbt/xbt_os_time.c @@ -80,7 +80,7 @@ double xbt_os_time(void) return (double) (time(NULL)); #endif /* HAVE_GETTIMEOFDAY? */ - return (double) (tv.tv_sec + tv.tv_usec / 1000000.0); + return (double)tv.tv_sec + (double)tv.tv_usec / 1e6; } void xbt_os_sleep(double sec) @@ -91,14 +91,14 @@ void xbt_os_sleep(double sec) #elif HAVE_NANOSLEEP struct timespec ts; - ts.tv_sec = sec; - ts.tv_nsec = (sec - floor(sec)) * 1e9; + ts.tv_sec = (time_t)sec; + ts.tv_nsec = (long)((sec - floor(sec)) * 1e9); nanosleep (&ts, NULL); #else /* don't have nanosleep. 
Use select to sleep less than one second */ struct timeval timeout; - timeout.tv_sec = (unsigned long) (sec); - timeout.tv_usec = (sec - floor(sec)) * 1000000; + timeout.tv_sec = (long)sec; + timeout.tv_usec = (long)(sec - floor(sec)) * 1e6); select(0, NULL, NULL, NULL, &timeout); #endif diff --git a/src/xbt/xbt_parse_units.cpp b/src/xbt/xbt_parse_units.cpp new file mode 100644 index 0000000000..7321a37464 --- /dev/null +++ b/src/xbt/xbt_parse_units.cpp @@ -0,0 +1,159 @@ +#include "simgrid/Exception.hpp" +#include "xbt/ex.h" +#include "xbt/log.h" + +#include "xbt/parse_units.hpp" + +#include +#include +#include +#include +#include +#include + +XBT_LOG_NEW_DEFAULT_SUBCATEGORY(parse, xbt, "Parsing functions"); + +class unit_scale : public std::unordered_map { +public: + using std::unordered_map::unordered_map; + // tuples are : + explicit unit_scale(std::initializer_list> generators); +}; + +unit_scale::unit_scale(std::initializer_list> generators) +{ + for (const auto& gen : generators) { + const std::string& unit = std::get<0>(gen); + double value = std::get<1>(gen); + const int base = std::get<2>(gen); + const bool abbrev = std::get<3>(gen); + double mult; + std::vector prefixes; + switch (base) { + case 2: + mult = 1024.0; + prefixes = abbrev ? std::vector{"Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"} + : std::vector{"kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"}; + break; + case 10: + mult = 1000.0; + prefixes = abbrev ? std::vector{"k", "M", "G", "T", "P", "E", "Z", "Y"} + : std::vector{"kilo", "mega", "giga", "tera", "peta", "exa", "zeta", "yotta"}; + break; + default: + THROW_IMPOSSIBLE; + } + emplace(unit, value); + for (const auto& prefix : prefixes) { + value *= mult; + emplace(prefix + unit, value); + } + } +} + +/* Note: no warning is issued for unit-less values when `name' is empty. */ +static double surf_parse_get_value_with_unit(const std::string& filename, int lineno, const char* string, + const unit_scale& units, const char* entity_kind, const std::string& name, + const char* error_msg, const char* default_unit) +{ + char* endptr; + errno = 0; + double res = strtod(string, &endptr); + const char* ptr = endptr; // for const-correctness + if (errno == ERANGE) + throw simgrid::ParseError(filename, lineno, std::string("value out of range: ") + string); + if (ptr == string) + throw simgrid::ParseError(filename, lineno, std::string("cannot parse number:") + string); + if (ptr[0] == '\0') { + // Ok, 0 can be unit-less + if (res != 0 && not name.empty()) + XBT_WARN("Deprecated unit-less value '%s' for %s %s. 
%s", string, entity_kind, name.c_str(), error_msg); + ptr = default_unit; + } + auto u = units.find(ptr); + if (u == units.end()) + throw simgrid::ParseError(filename, lineno, std::string("unknown unit: ") + ptr); + return res * u->second; +} + +double xbt_parse_get_time(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name) +{ + static const unit_scale units{std::make_pair("w", 7 * 24 * 60 * 60), + std::make_pair("d", 24 * 60 * 60), + std::make_pair("h", 60 * 60), + std::make_pair("m", 60), + std::make_pair("s", 1.0), + std::make_pair("ms", 1e-3), + std::make_pair("us", 1e-6), + std::make_pair("ns", 1e-9), + std::make_pair("ps", 1e-12)}; + return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name, + "Append 's' to your time to get seconds", "s"); +} + +double surf_parse_get_size(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name) +{ + static const unit_scale units{std::make_tuple("b", 0.125, 2, true), std::make_tuple("b", 0.125, 10, true), + std::make_tuple("B", 1.0, 2, true), std::make_tuple("B", 1.0, 10, true)}; + return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name, + "Append 'B' to get bytes (or 'b' for bits but 1B = 8b).", "B"); +} + +double xbt_parse_get_bandwidth(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name) +{ + static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true), + std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)}; + return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name, + "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", + "Bps"); +} + +std::vector xbt_parse_get_bandwidths(const std::string& filename, int lineno, const char* string, + const char* entity_kind, const std::string& name) +{ + static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true), + std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)}; + + std::vector bandwidths; + std::vector tokens; + boost::split(tokens, string, boost::is_any_of(";,")); + for (auto token : tokens) { + bandwidths.push_back(surf_parse_get_value_with_unit( + filename, lineno, token.c_str(), units, entity_kind, name, + "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", "Bps")); + } + + return bandwidths; +} + +double xbt_parse_get_speed(const std::string& filename, int lineno, const char* string, const char* entity_kind, + const std::string& name) +{ + static const unit_scale units{std::make_tuple("f", 1.0, 10, true), std::make_tuple("flops", 1.0, 10, false)}; + return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name, + "Append 'f' or 'flops' to your speed to get flop per second", "f"); +} + +std::vector xbt_parse_get_all_speeds(const std::string& filename, int lineno, char* speeds, + const char* entity_kind, const std::string& id) +{ + std::vector speed_per_pstate; + + if (strchr(speeds, ',') == nullptr) { + double speed = xbt_parse_get_speed(filename, lineno, speeds, entity_kind, id); + speed_per_pstate.push_back(speed); + } else { + std::vector pstate_list; + boost::split(pstate_list, speeds, boost::is_any_of(",")); + for (auto speed_str : pstate_list) { + boost::trim(speed_str); + double speed = 
xbt_parse_get_speed(filename, lineno, speed_str.c_str(), entity_kind, id); + speed_per_pstate.push_back(speed); + XBT_DEBUG("Speed value: %f", speed); + } + } + return speed_per_pstate; +} diff --git a/src/xbt/xbt_str_test.cpp b/src/xbt/xbt_str_test.cpp index 6a3bc606fd..81ed1aa0da 100644 --- a/src/xbt/xbt_str_test.cpp +++ b/src/xbt/xbt_str_test.cpp @@ -50,8 +50,8 @@ TEST_CASE("xbt::str: String Handling", "xbt_str") test_split_quoted("Useless backslashes", "\\t\\o\\t\\o \\t\\u\\t\\u", {"toto", "tutu"}); test_split_quoted("Protected space", "toto\\ tutu", {"toto tutu"}); test_split_quoted("Several spaces", "toto tutu", {"toto", "tutu"}); - test_split_quoted("LTriming", " toto tatu", {"toto", "tatu"}); - test_split_quoted("Triming", " toto tutu ", {"toto", "tutu"}); + test_split_quoted("LTrimming", " toto tatu", {"toto", "tatu"}); + test_split_quoted("Trimming", " toto tutu ", {"toto", "tutu"}); test_split_quoted("Single quotes", "'toto tutu' tata", {"toto tutu", "tata"}); test_split_quoted("Double quotes", "\"toto tutu\" tata", {"toto tutu", "tata"}); test_split_quoted("Mixed quotes", "\"toto' 'tutu\" tata", {"toto' 'tutu", "tata"}); diff --git a/src/xbt/xbt_virtu.cpp b/src/xbt/xbt_virtu.cpp index 5902f3adb3..d2ef77c3fe 100644 --- a/src/xbt/xbt_virtu.cpp +++ b/src/xbt/xbt_virtu.cpp @@ -12,7 +12,7 @@ int xbt_getpid() { const simgrid::kernel::actor::ActorImpl* self = SIMIX_process_self(); - return self == nullptr ? 0 : self->get_pid(); + return self == nullptr ? 0 : static_cast(self->get_pid()); } const char* xbt_procname(void) diff --git a/teshsuite/lua/CMakeLists.txt b/teshsuite/lua/CMakeLists.txt index a213e80910..7ee215c5fb 100644 --- a/teshsuite/lua/CMakeLists.txt +++ b/teshsuite/lua/CMakeLists.txt @@ -1,6 +1,5 @@ IF(SIMGRID_HAVE_LUA) - # Tests testing simulation from C but using lua for platform files. Executed like this - # ~$ ./masterslave platform.lua deploy.lua - ADD_TESH(lua-platform-masterslave --setenv srcdir=${CMAKE_HOME_DIRECTORY} --setenv bindir=${CMAKE_BINARY_DIR} --cd ${CMAKE_BINARY_DIR} ${CMAKE_HOME_DIRECTORY}/teshsuite/lua/lua_platforms.tesh) - SET_TESTS_PROPERTIES(lua-platform-masterslave PROPERTIES ENVIRONMENT "LUA_CPATH=${CMAKE_BINARY_DIR}/lib/lib?.${LIB_EXE}") + # Tests testing simulation from C++ but using lua for platform files. 
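// ---------------------------------------------------------------------------
// Aside (illustration only, not part of this patch): the parsers defined in
// src/xbt/xbt_parse_units.cpp above now throw simgrid::ParseError built from the
// file name and line number, instead of calling surf_parse_error(). A hypothetical
// caller could handle that as sketched below; the arguments passed are invented.
#include "simgrid/Exception.hpp" // included by xbt_parse_units.cpp for ParseError
#include "xbt/parse_units.hpp"
#include <cstdio>

static double parse_bandwidth_or(const char* text, double fallback)
{
  try {
    return xbt_parse_get_bandwidth("platform.xml", 7, text, "bw of link", "l1");
  } catch (const simgrid::ParseError& e) {
    std::fprintf(stderr, "%s -- falling back to %g\n", e.what(), fallback);
    return fallback;
  }
}
// ---------------------------------------------------------------------------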
+ ADD_TESH(lua-platform-masterworker --setenv srcdir=${CMAKE_HOME_DIRECTORY} --setenv bindir=${CMAKE_BINARY_DIR} --cd ${CMAKE_BINARY_DIR} ${CMAKE_HOME_DIRECTORY}/teshsuite/lua/lua_platforms.tesh) + SET_TESTS_PROPERTIES(lua-platform-masterworker PROPERTIES ENVIRONMENT "LUA_CPATH=${CMAKE_BINARY_DIR}/lib/lib?.${LIB_EXE}") ENDIF() diff --git a/teshsuite/mc/dwarf-expression/dwarf-expression.cpp b/teshsuite/mc/dwarf-expression/dwarf-expression.cpp index 32f8ecff38..2810a039ee 100644 --- a/teshsuite/mc/dwarf-expression/dwarf-expression.cpp +++ b/teshsuite/mc/dwarf-expression/dwarf-expression.cpp @@ -23,7 +23,8 @@ static std::default_random_engine rnd_engine; static simgrid::mc::RemoteSimulation* process; -static uintptr_t eval_binary_operation(simgrid::dwarf::ExpressionContext const& state, int op, uintptr_t a, uintptr_t b) +static uintptr_t eval_binary_operation(simgrid::dwarf::ExpressionContext const& state, uint8_t op, uintptr_t a, + uintptr_t b) { Dwarf_Op ops[15]; ops[0].atom = DW_OP_const8u; diff --git a/teshsuite/s4u/actor-suspend/actor-suspend.cpp b/teshsuite/s4u/actor-suspend/actor-suspend.cpp index cab47290ba..14bf657f9b 100644 --- a/teshsuite/s4u/actor-suspend/actor-suspend.cpp +++ b/teshsuite/s4u/actor-suspend/actor-suspend.cpp @@ -4,7 +4,7 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ // This is the MWE of https://framagit.org/simgrid/simgrid/-/issues/50 -// The problem was occuring when suspending an actor that will be executed later in the same scheduling round +// The problem was occurring when suspending an actor that will be executed later in the same scheduling round #include #include @@ -18,7 +18,7 @@ simgrid::s4u::ActorPtr receiver; class Receiver { public: - void operator()() + void operator()() const { XBT_INFO("Starting."); auto mailbox = simgrid::s4u::Mailbox::by_name("receiver"); @@ -29,7 +29,7 @@ public: class Suspender { public: - void operator()() + void operator()() const { XBT_INFO("Suspend the receiver..."); receiver->suspend(); @@ -50,7 +50,7 @@ public: int main(int argc, char** argv) { - simgrid::s4u::Engine* engine = new simgrid::s4u::Engine(&argc, argv); + const simgrid::s4u::Engine* engine = new simgrid::s4u::Engine(&argc, argv); engine->load_platform(argv[1]); simgrid::s4u::Host* host = simgrid::s4u::Host::by_name("Tremblay"); diff --git a/teshsuite/s4u/cloud-sharing/cloud-sharing.cpp b/teshsuite/s4u/cloud-sharing/cloud-sharing.cpp index 7604ded9ef..fb0be79f7b 100644 --- a/teshsuite/s4u/cloud-sharing/cloud-sharing.cpp +++ b/teshsuite/s4u/cloud-sharing/cloud-sharing.cpp @@ -310,9 +310,9 @@ static void run_test(const std::string& chooser) XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM"); vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2); run_test_process("( [ooo]2 X )2", pm2, flop_amount * 2 / 3); - run_test_process("( [Xoo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3 - run_test_process("( [oXo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3 - run_test_process("( [ooX]2 o )2", vm0, flop_amount * (2. 
/ 3 * 2) / 3); // VM_share/3 + run_test_process("( [Xoo]2 o )2", vm0, (flop_amount * 4 / 3) / 3); // VM_share/3 + run_test_process("( [oXo]2 o )2", vm0, (flop_amount * 4 / 3) / 3); // VM_share/3 + run_test_process("( [ooX]2 o )2", vm0, (flop_amount * 4 / 3) / 3); // VM_share/3 simgrid::s4u::this_actor::sleep_for(2); test_energy_consumption(chooser,2); vm0->destroy(); @@ -480,9 +480,9 @@ static void run_test(const std::string& chooser) } else if (chooser == "( [ooo]2 ooo )4") { XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM"); vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2); - run_test_process("( [Xoo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3); // The VM has 8/5 of the PM - run_test_process("( [oXo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3); - run_test_process("( [ooX]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3); + run_test_process("( [Xoo]2 ooo )4", vm0, (flop_amount * 8 / 5) / 3); // The VM has 8/5 of the PM + run_test_process("( [oXo]2 ooo )4", vm0, (flop_amount * 8 / 5) / 3); + run_test_process("( [ooX]2 ooo )4", vm0, (flop_amount * 8 / 5) / 3); run_test_process("( [ooo]2 Xoo )4", pm4, flop_amount * 4 / 5); run_test_process("( [ooo]2 oXo )4", pm4, flop_amount * 4 / 5); diff --git a/teshsuite/s4u/concurrent_rw/concurrent_rw.cpp b/teshsuite/s4u/concurrent_rw/concurrent_rw.cpp index 5edb401efd..7a6975643d 100644 --- a/teshsuite/s4u/concurrent_rw/concurrent_rw.cpp +++ b/teshsuite/s4u/concurrent_rw/concurrent_rw.cpp @@ -10,7 +10,7 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u test"); static void host() { simgrid::s4u::Disk* disk = simgrid::s4u::this_actor::get_host()->get_disks().front(); // Disk1 - int id = simgrid::s4u::this_actor::get_pid(); + int id = static_cast(simgrid::s4u::this_actor::get_pid()); XBT_INFO("process %d is writing!", id); disk->write(4000000); XBT_INFO("process %d goes to sleep for %d seconds", id, id); diff --git a/teshsuite/s4u/evaluate-get-route-time/evaluate-get-route-time.cpp b/teshsuite/s4u/evaluate-get-route-time/evaluate-get-route-time.cpp index a607ed6953..0367a741c4 100644 --- a/teshsuite/s4u/evaluate-get-route-time/evaluate-get-route-time.cpp +++ b/teshsuite/s4u/evaluate-get-route-time/evaluate-get-route-time.cpp @@ -24,7 +24,7 @@ int main(int argc, char** argv) e.load_platform(argv[1]); std::vector hosts = e.get_all_hosts(); - int host_count = e.get_host_count(); + int host_count = static_cast(e.get_host_count()); /* Random number initialization */ simgrid::xbt::random::set_mersenne_seed(static_cast(xbt_os_time())); diff --git a/teshsuite/s4u/host-on-off-actors/host-on-off-actors.cpp b/teshsuite/s4u/host-on-off-actors/host-on-off-actors.cpp index 0f8701db85..c681ddf59b 100644 --- a/teshsuite/s4u/host-on-off-actors/host-on-off-actors.cpp +++ b/teshsuite/s4u/host-on-off-actors/host-on-off-actors.cpp @@ -10,7 +10,7 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u example") int tasks_done = 0; -static void actor_daemon() +XBT_ATTRIB_NORETURN static void actor_daemon() { const simgrid::s4u::Host* host = simgrid::s4u::Host::current(); XBT_INFO(" Start daemon on %s (%f)", host->get_cname(), host->get_speed()); diff --git a/teshsuite/s4u/host-on-off/host-on-off.cpp b/teshsuite/s4u/host-on-off/host-on-off.cpp index 906c5361c5..ff634b9da0 100644 --- a/teshsuite/s4u/host-on-off/host-on-off.cpp +++ b/teshsuite/s4u/host-on-off/host-on-off.cpp @@ -13,7 +13,7 @@ static void worker() const std::string* payload; simgrid::s4u::Mailbox* mailbox = 
simgrid::s4u::Mailbox::by_name("jupi"); - while (1) { + while (true) { try { payload = static_cast(mailbox->get()); } catch (const simgrid::HostFailureException&) { diff --git a/teshsuite/s4u/pid/pid.cpp b/teshsuite/s4u/pid/pid.cpp index 8620544a9e..9cf784895b 100644 --- a/teshsuite/s4u/pid/pid.cpp +++ b/teshsuite/s4u/pid/pid.cpp @@ -9,9 +9,9 @@ XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this msg example") static void sendpid() { - simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name("mailbox"); - int pid = simgrid::s4u::this_actor::get_pid(); - double comm_size = 100000; + simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name("mailbox"); + int pid = static_cast(simgrid::s4u::this_actor::get_pid()); + long comm_size = 100000; simgrid::s4u::this_actor::on_exit([pid](bool /*failed*/) { XBT_INFO("Process \"%d\" killed.", pid); }); XBT_INFO("Sending pid of \"%d\".", pid); diff --git a/teshsuite/s4u/storage_client_server/storage_client_server.cpp b/teshsuite/s4u/storage_client_server/storage_client_server.cpp index 2cc23e4336..f633892ff3 100644 --- a/teshsuite/s4u/storage_client_server/storage_client_server.cpp +++ b/teshsuite/s4u/storage_client_server/storage_client_server.cpp @@ -126,7 +126,7 @@ static void server() simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_host()->get_cname()); XBT_INFO("Server waiting for transfers ..."); - while (1) { + while (true) { const std::string* msg = static_cast(mailbox->get()); if (*msg == "finalize") { // Shutdown ... delete msg; diff --git a/teshsuite/simdag/availability/availability.c b/teshsuite/simdag/availability/availability.c index 6c307656cf..16fe88fe46 100644 --- a/teshsuite/simdag/availability/availability.c +++ b/teshsuite/simdag/availability/availability.c @@ -21,7 +21,7 @@ static void scheduleDAX(const_xbt_dynar_t dax) SD_task_t task; sg_host_t *hosts = sg_host_list(); - int totalHosts = sg_host_count(); + size_t totalHosts = sg_host_count(); qsort((void *) hosts, totalHosts, sizeof(sg_host_t), name_compare_hosts); xbt_dynar_foreach(dax, cursor, task) { diff --git a/teshsuite/simdag/basic-parsing-test/basic-parsing-test.c b/teshsuite/simdag/basic-parsing-test/basic-parsing-test.c index 4a075a86af..5464341c26 100644 --- a/teshsuite/simdag/basic-parsing-test/basic-parsing-test.c +++ b/teshsuite/simdag/basic-parsing-test/basic-parsing-test.c @@ -37,11 +37,11 @@ int main(int argc, char **argv) xbt_dynar_free_container(&route); } if (!strcmp(argv[2], "FULL_LINK")) { - int list_size = sg_host_count(); - for (int i = 0; i < list_size; i++) { + size_t list_size = sg_host_count(); + for (size_t i = 0; i < list_size; i++) { const_sg_host_t h1 = hosts[i]; const char *name1 = sg_host_get_name(h1); - for (int j = 0; j < list_size; j++) { + for (size_t j = 0; j < list_size; j++) { const_sg_host_t h2 = hosts[j]; const char *name2 = sg_host_get_name(h2); fprintf(stderr, "Route between %s and %s\n", name1, name2); diff --git a/teshsuite/simdag/flatifier/flatifier.cpp b/teshsuite/simdag/flatifier/flatifier.cpp index 6e18fe0e9e..e0a01e4670 100644 --- a/teshsuite/simdag/flatifier/flatifier.cpp +++ b/teshsuite/simdag/flatifier/flatifier.cpp @@ -42,7 +42,7 @@ static void create_environment(xbt_os_timer_t parse_time, const char *platformFi static void dump_hosts() { - unsigned int totalHosts = sg_host_count(); + unsigned int totalHosts = static_cast(sg_host_count()); sg_host_t* hosts = sg_host_list(); std::sort(hosts, hosts + totalHosts, [](const_sg_host_t a, const_sg_host_t b) { 
return strcmp(sg_host_get_name(a), sg_host_get_name(b)) < 0; }); @@ -110,7 +110,7 @@ static void dump_routers() static void dump_routes() { - unsigned int totalHosts = sg_host_count(); + unsigned int totalHosts = static_cast(sg_host_count()); sg_host_t* hosts = sg_host_list(); std::sort(hosts, hosts + totalHosts, [](const_sg_host_t a, const_sg_host_t b) { return strcmp(sg_host_get_name(a), sg_host_get_name(b)) < 0; }); diff --git a/teshsuite/smpi/bug-17132/bug-17132.c b/teshsuite/smpi/bug-17132/bug-17132.c index 16c35f2cc3..bb9cd25b00 100644 --- a/teshsuite/smpi/bug-17132/bug-17132.c +++ b/teshsuite/smpi/bug-17132/bug-17132.c @@ -11,8 +11,8 @@ int main(int argc, char ** argv) { - size_t M = 8*1024; - size_t N = 32*1024; + const int M = 8 * 1024; + const int N = 32 * 1024; MPI_Init(&argc, &argv); diff --git a/teshsuite/smpi/fort_args/fort_args.f90 b/teshsuite/smpi/fort_args/fort_args.f90 index 0e634ed320..dfd53083b5 100644 --- a/teshsuite/smpi/fort_args/fort_args.f90 +++ b/teshsuite/smpi/fort_args/fort_args.f90 @@ -1,4 +1,4 @@ -! Check that getarg does somethig sensible. +! Check that getarg does something sensible. program getarg_1 use mpi CHARACTER*10 ARGS, ARGS2 diff --git a/teshsuite/smpi/isp/umpire/CMakeLists.txt b/teshsuite/smpi/isp/umpire/CMakeLists.txt index 2619992daa..e99fdf882e 100644 --- a/teshsuite/smpi/isp/umpire/CMakeLists.txt +++ b/teshsuite/smpi/isp/umpire/CMakeLists.txt @@ -217,7 +217,7 @@ if(enable_smpi AND enable_model-checking AND enable_smpi_ISP_testsuite) foreach (test ${umpire_tests_passing}) write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! timeout 30") write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! output display" APPEND) - write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll-selector:ompi \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND) + write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1Gf --cfg=smpi/coll-selector:ompi \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND) endforeach() foreach (test ${umpire_tests_deadlock} ${umpire_tests_problematic} ) diff --git a/teshsuite/smpi/isp/umpire/change-send-buffer-type-exhaustive.c b/teshsuite/smpi/isp/umpire/change-send-buffer-type-exhaustive.c index c770a4cb0a..9cbee5697c 100644 --- a/teshsuite/smpi/isp/umpire/change-send-buffer-type-exhaustive.c +++ b/teshsuite/smpi/isp/umpire/change-send-buffer-type-exhaustive.c @@ -105,7 +105,7 @@ main (int argc, char **argv) MPI_Type_extent (newtype[0], &basic_extent); if (basic_extent != sizeof (test_basic_struct_t)) { - fprintf (stderr, "(%d): Unexpect extent for struct\n"); + fprintf (stderr, "(%d): Unexpected extent for struct\n"); MPI_Abort (MPI_COMM_WORLD, 666); } diff --git a/teshsuite/smpi/macro-partial-shared-communication/macro-partial-shared-communication.c b/teshsuite/smpi/macro-partial-shared-communication/macro-partial-shared-communication.c index a4088a2750..03ff5ecad4 100644 --- a/teshsuite/smpi/macro-partial-shared-communication/macro-partial-shared-communication.c +++ 
b/teshsuite/smpi/macro-partial-shared-communication/macro-partial-shared-communication.c @@ -12,12 +12,12 @@ // Set the elements between buf[start] and buf[stop-1] to (i+value)%256 static void set(uint8_t *buf, size_t start, size_t stop, uint8_t value) { for(size_t i = start; i < stop; i++) { - buf[i] = (i+value)%256; + buf[i] = (uint8_t)((i + value) % 256); } } // Return the number of times that an element is equal to (i+value)%256 between buf[start] and buf[stop-1]. -static int count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value) +static size_t count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value) { size_t occ = 0; for(size_t i = start ; i < stop ; i++) { @@ -64,15 +64,15 @@ int main(int argc, char *argv[]) for(int i = 0; i < nb_blocks-1; i++) { size_t start = shared_blocks[2*i+1]; size_t stop = shared_blocks[2*i+2]; - set(buf, start, stop, rank); + set(buf, start, stop, (uint8_t)rank); } } // Then, even processes send their buffer to their successor if(rank%2 == 0) { - MPI_Send(buf, mem_size, MPI_UINT8_T, rank+1, 0, MPI_COMM_WORLD); + MPI_Send(buf, (int)mem_size, MPI_UINT8_T, rank + 1, 0, MPI_COMM_WORLD); } else { - MPI_Recv(buf, mem_size, MPI_UINT8_T, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + MPI_Recv(buf, (int)mem_size, MPI_UINT8_T, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } @@ -81,7 +81,7 @@ int main(int argc, char *argv[]) for(int i = 0; i < nb_blocks-1; i++) { size_t start = shared_blocks[2*i+1]; size_t stop = shared_blocks[2*i+2]; - int comm = check_all(buf, start, stop, rank-1); + int comm = check_all(buf, start, stop, (uint8_t)(rank - 1)); printf("[%d] The result of the (normal) communication check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, comm); } memset(buf, rank, mem_size); @@ -92,10 +92,10 @@ int main(int argc, char *argv[]) // Then, even processes send a sub-part of their buffer their successor // Note that the last block should not be copied entirely if(rank%2 == 0) { - MPI_Send(buf+0x10000, mem_size-0xa00000, MPI_UINT8_T, rank+1, 0, MPI_COMM_WORLD); + MPI_Send(buf + 0x10000, (int)(mem_size - 0xa00000), MPI_UINT8_T, rank + 1, 0, MPI_COMM_WORLD); } else { - MPI_Recv(buf+0x10000, mem_size-0xa00000, MPI_UINT8_T, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + MPI_Recv(buf + 0x10000, (int)(mem_size - 0xa00000), MPI_UINT8_T, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } @@ -104,7 +104,7 @@ int main(int argc, char *argv[]) for(int i = 0; i < nb_blocks-1; i++) { size_t start = shared_blocks[2*i+1]; size_t stop = shared_blocks[2*i+2]; - int comm = check_all(buf, start, stop, rank-1); + int comm = check_all(buf, start, stop, (uint8_t)(rank - 1)); printf("[%d] The result of the (shifted) communication check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, comm); } } diff --git a/teshsuite/smpi/macro-partial-shared/macro-partial-shared.c b/teshsuite/smpi/macro-partial-shared/macro-partial-shared.c index 1a255df98e..29168cc9d0 100644 --- a/teshsuite/smpi/macro-partial-shared/macro-partial-shared.c +++ b/teshsuite/smpi/macro-partial-shared/macro-partial-shared.c @@ -11,12 +11,12 @@ // Set the elements between buf[start] and buf[stop-1] to (i+value)%256 static void set(uint8_t *buf, size_t start, size_t stop, uint8_t value) { for(size_t i = start; i < stop; i++) { - buf[i] = (i+value)%256; + buf[i] = (uint8_t)((i + value) % 256); } } // Return the number of times that an element is equal to (i+value)%256 between buf[start] and buf[stop-1]. 
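// ---------------------------------------------------------------------------
// Aside (illustration only, not part of this patch): the casts added in these SMPI
// shared-memory tests make the narrowing to uint8_t explicit instead of relying on
// an implicit conversion. A self-contained sketch of the same fill/verify pattern;
// the function names below are illustrative, not the ones used by the tests.
#include <cstddef>
#include <cstdint>

static void fill_pattern(uint8_t* buf, size_t start, size_t stop, uint8_t value)
{
  for (size_t i = start; i < stop; i++)
    buf[i] = (uint8_t)((i + value) % 256); // explicit narrowing, as in the patch
}

static size_t count_pattern(const uint8_t* buf, size_t start, size_t stop, uint8_t value)
{
  size_t occ = 0;
  for (size_t i = start; i < stop; i++)
    if (buf[i] == (uint8_t)((i + value) % 256))
      occ++;
  return occ;
}
// ---------------------------------------------------------------------------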
-static int count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value) +static size_t count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value) { size_t occ = 0; for(size_t i = start ; i < stop ; i++) { diff --git a/teshsuite/smpi/mpich3-test/attr/attrorder.c b/teshsuite/smpi/mpich3-test/attr/attrorder.c index 0fd53bdc8d..28d1608c1f 100644 --- a/teshsuite/smpi/mpich3-test/attr/attrorder.c +++ b/teshsuite/smpi/mpich3-test/attr/attrorder.c @@ -96,7 +96,7 @@ int checkAttrs(MPI_Comm comm, int n, int key[], int attrval[]) } else if (val_p != &attrval[i]) { errs++; - fprintf(stderr, "Atribute value for key %d not correct\n", i); + fprintf(stderr, "Attribute value for key %d not correct\n", i); } } diff --git a/teshsuite/smpi/mpich3-test/attr/attrordercomm.c b/teshsuite/smpi/mpich3-test/attr/attrordercomm.c index f2ea4e83c1..95ffa2d86d 100644 --- a/teshsuite/smpi/mpich3-test/attr/attrordercomm.c +++ b/teshsuite/smpi/mpich3-test/attr/attrordercomm.c @@ -96,7 +96,7 @@ int checkAttrs(MPI_Comm comm, int n, int key[], int attrval[]) } else if (val_p != &attrval[i]) { errs++; - fprintf(stderr, "Atribute value for key %d not correct\n", i); + fprintf(stderr, "Attribute value for key %d not correct\n", i); } } diff --git a/teshsuite/smpi/mpich3-test/attr/attrordertype.c b/teshsuite/smpi/mpich3-test/attr/attrordertype.c index d3de828c33..837a02fd46 100644 --- a/teshsuite/smpi/mpich3-test/attr/attrordertype.c +++ b/teshsuite/smpi/mpich3-test/attr/attrordertype.c @@ -96,7 +96,7 @@ int checkAttrs(MPI_Datatype type, int n, int key[], int attrval[]) } else if (val_p != &attrval[i]) { errs++; - fprintf(stderr, "Atribute value for key %d not correct\n", i); + fprintf(stderr, "Attribute value for key %d not correct\n", i); } } diff --git a/teshsuite/smpi/mpich3-test/coll/allred4.c b/teshsuite/smpi/mpich3-test/coll/allred4.c index 0efc44006c..93bc9f660d 100644 --- a/teshsuite/smpi/mpich3-test/coll/allred4.c +++ b/teshsuite/smpi/mpich3-test/coll/allred4.c @@ -171,7 +171,7 @@ int main(int argc, char *argv[]) MPI_Op_create(matmult, 0, &op); - /* A single rotation matrix (3x3, stored as 9 consequetive elements) */ + /* A single rotation matrix (3x3, stored as 9 consecutive elements) */ MPI_Type_contiguous(9, MPI_INT, &mattype); MPI_Type_commit(&mattype); diff --git a/teshsuite/smpi/mpich3-test/coll/alltoallw1.c b/teshsuite/smpi/mpich3-test/coll/alltoallw1.c index cac4c5d0aa..529a2d0ba3 100644 --- a/teshsuite/smpi/mpich3-test/coll/alltoallw1.c +++ b/teshsuite/smpi/mpich3-test/coll/alltoallw1.c @@ -199,7 +199,7 @@ int main(int argc, char *argv[]) /* Create the local matrices. * Initialize the input matrix so that the entries are - * consequtive integers, by row, starting at 0. + * consecutive integers, by row, starting at 0. */ if (rank == size - 1) { localA = (float *) malloc(gN * lmlast * sizeof(float)); @@ -226,7 +226,7 @@ int main(int argc, char *argv[]) Transpose(localA, localB, gM, gN, comm); /* check the transposed matrix - * In the global matrix, the transpose has consequtive integers, + * In the global matrix, the transpose has consecutive integers, * organized by columns. */ if (rank == size - 1) { diff --git a/teshsuite/smpi/mpich3-test/coll/coll10.c b/teshsuite/smpi/mpich3-test/coll/coll10.c index e2a424af7e..d2a3981a80 100644 --- a/teshsuite/smpi/mpich3-test/coll/coll10.c +++ b/teshsuite/smpi/mpich3-test/coll/coll10.c @@ -16,7 +16,7 @@ int assoc(int *, int *, int *, MPI_Datatype *); (see 4.9.4). The order is important. 
Note that the computation is in process rank (in the communicator) - order, independant of the root. + order, independent of the root. */ int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype) { diff --git a/teshsuite/smpi/mpich3-test/coll/coll11.c b/teshsuite/smpi/mpich3-test/coll/coll11.c index b79482e3a0..3c4e16abc2 100644 --- a/teshsuite/smpi/mpich3-test/coll/coll11.c +++ b/teshsuite/smpi/mpich3-test/coll/coll11.c @@ -24,7 +24,7 @@ void addem(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype) (see 4.9.4). The order is important. Note that the computation is in process rank (in the communicator) - order, independant of the root. + order, independent of the root. */ void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype) { diff --git a/teshsuite/smpi/mpich3-test/coll/gather_big.c b/teshsuite/smpi/mpich3-test/coll/gather_big.c index ce37c954e9..0b6ee4db16 100644 --- a/teshsuite/smpi/mpich3-test/coll/gather_big.c +++ b/teshsuite/smpi/mpich3-test/coll/gather_big.c @@ -61,7 +61,7 @@ int main(int argc, char *argv[]) for (i = 0; i < size; i++) { for (j = 0; j < COUNT; j++) { if (recvbuf[i * COUNT + j] != i * VERIFY_CONST + j) { - printf("PE 0: mis-match error"); + printf("PE 0: mismatch error"); printf(" recbuf[%d * %d + %d] = ", i, COUNT, j); printf(" %ld,", recvbuf[i * COUNT + j]); printf(" should be %ld\n", i * VERIFY_CONST + j); diff --git a/teshsuite/smpi/mpich3-test/coll/nonblocking3.c b/teshsuite/smpi/mpich3-test/coll/nonblocking3.c index 76828b6ac4..2557c92a29 100644 --- a/teshsuite/smpi/mpich3-test/coll/nonblocking3.c +++ b/teshsuite/smpi/mpich3-test/coll/nonblocking3.c @@ -122,7 +122,7 @@ static void cleanup_laundry(struct laundry *l) } /* Starts a "random" operation on "comm" corresponding to "rndnum" and returns - * in (*req) a request handle corresonding to that operation. This call should + * in (*req) a request handle corresponding to that operation. This call should * be considered collective over comm (with a consistent value for "rndnum"), * even though the operation may only be a point-to-point request. */ static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request * req, diff --git a/teshsuite/smpi/mpich3-test/coll/opprod.c b/teshsuite/smpi/mpich3-test/coll/opprod.c index 9c8001e6f4..83434d2f5b 100644 --- a/teshsuite/smpi/mpich3-test/coll/opprod.c +++ b/teshsuite/smpi/mpich3-test/coll/opprod.c @@ -48,7 +48,7 @@ int main(int argc, char *argv[]) else maxsize = size; - /* General forumula: If we multiple the values from 1 to n, the + /* General formula: If we multiple the values from 1 to n, the * product is n!. This grows very fast, so we'll only use the first * five (1! = 1, 2! = 2, 3! = 6, 4! = 24, 5! = 120), with n! * stored in the array result[n] */ diff --git a/teshsuite/smpi/mpich3-test/coll/red_scat_block2.c b/teshsuite/smpi/mpich3-test/coll/red_scat_block2.c index ae886690b0..1ae353ae87 100644 --- a/teshsuite/smpi/mpich3-test/coll/red_scat_block2.c +++ b/teshsuite/smpi/mpich3-test/coll/red_scat_block2.c @@ -50,7 +50,7 @@ void right(void *a, void *b, int *count, MPI_Datatype * type) } /* Just performs a simple sum but can be marked as non-commutative to - potentially tigger different logic in the implementation. */ + potentially trigger different logic in the implementation. 
*/ void nc_sum(void *a, void *b, int *count, MPI_Datatype * type); void nc_sum(void *a, void *b, int *count, MPI_Datatype * type) { diff --git a/teshsuite/smpi/mpich3-test/coll/redscat2.c b/teshsuite/smpi/mpich3-test/coll/redscat2.c index 1f59bcdc68..44af33a3eb 100644 --- a/teshsuite/smpi/mpich3-test/coll/redscat2.c +++ b/teshsuite/smpi/mpich3-test/coll/redscat2.c @@ -50,7 +50,7 @@ void right(void *a, void *b, int *count, MPI_Datatype * type) } /* Just performs a simple sum but can be marked as non-commutative to - potentially tigger different logic in the implementation. */ + potentially trigger different logic in the implementation. */ void nc_sum(void *a, void *b, int *count, MPI_Datatype * type); void nc_sum(void *a, void *b, int *count, MPI_Datatype * type) { diff --git a/teshsuite/smpi/mpich3-test/coll/scantst.c b/teshsuite/smpi/mpich3-test/coll/scantst.c index 12ddcad89b..153726d4f2 100644 --- a/teshsuite/smpi/mpich3-test/coll/scantst.c +++ b/teshsuite/smpi/mpich3-test/coll/scantst.c @@ -24,7 +24,7 @@ void addem(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype) (see 4.9.4). The order is important. Note that the computation is in process rank (in the communicator) - order, independant of the root. + order, independent of the root. */ void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype) { diff --git a/teshsuite/smpi/mpich3-test/comm/comm_idup_iallreduce.c b/teshsuite/smpi/mpich3-test/comm/comm_idup_iallreduce.c index b507a9b666..c3b92062d4 100644 --- a/teshsuite/smpi/mpich3-test/comm/comm_idup_iallreduce.c +++ b/teshsuite/smpi/mpich3-test/comm/comm_idup_iallreduce.c @@ -13,18 +13,18 @@ #define ITERS 10 /* This test uses several scenarios to overlap iallreduce and comm_idup - * 1.) Use comm_idup dublicate the COMM_WORLD and do iallreduce + * 1.) Use comm_idup duplicate the COMM_WORLD and do iallreduce * on the COMM_WORLD * 2.) Do the above test in a loop - * 3.) Dublicate COMM_WORLD, overalp iallreduce on one + * 3.) Duplicate COMM_WORLD, overalp iallreduce on one * communicator with comm_idup on the nother communicator * 4.) Split MPI_COMM_WORLD, communicate on the split communicator while dublicating COMM_WORLD * 5.) Duplicate the split communicators with comm_idup * while communicating onCOMM_WORLD - * 6.) Ceate an inter-communicator and duplicate it with comm_idup while + * 6.) Create an inter-communicator and duplicate it with comm_idup while * communicating on the inter-communicator - * 7.) Dublicate the inter-communicator whil communicate on COMM_WORLD + * 7.) Duplicate the inter-communicator whil communicate on COMM_WORLD * 8.) Merge the inter-communicator to an intra-communicator and idup it, * overlapping with communication on MPI_COMM_WORLD * 9.) 
Communicate on the merge communicator, while duplicating COMM_WORLD diff --git a/teshsuite/smpi/mpich3-test/comm/commname.c b/teshsuite/smpi/mpich3-test/comm/commname.c index ed52049f48..d8ae639253 100644 --- a/teshsuite/smpi/mpich3-test/comm/commname.c +++ b/teshsuite/smpi/mpich3-test/comm/commname.c @@ -22,7 +22,7 @@ int main(int argc, char *argv[]) char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME]; MTest_Init(&argc, &argv); - /* Check world and self firt */ + /* Check world and self first */ nameout[0] = 0; MPI_Comm_get_name(MPI_COMM_WORLD, nameout, &rlen); if (strcmp(nameout, "MPI_COMM_WORLD")) { diff --git a/teshsuite/smpi/mpich3-test/comm/dupic.c b/teshsuite/smpi/mpich3-test/comm/dupic.c index e3712769fa..974d0f06d5 100644 --- a/teshsuite/smpi/mpich3-test/comm/dupic.c +++ b/teshsuite/smpi/mpich3-test/comm/dupic.c @@ -14,7 +14,7 @@ int main(int argc, char *argv[]) MPI_Comm comm, dupcomm, dupcomm2; MPI_Request rreq[2]; int count; - int indicies[2]; + int indices[2]; int r1buf, r2buf, s1buf, s2buf; int rank, isLeft; @@ -40,12 +40,12 @@ int main(int argc, char *argv[]) MPI_Irecv(&r1buf, 1, MPI_INT, 0, 0, dupcomm, &rreq[0]); MPI_Irecv(&r2buf, 1, MPI_INT, 0, 0, comm, &rreq[1]); MPI_Send(&s2buf, 1, MPI_INT, 0, 0, comm); - MPI_Waitsome(2, rreq, &count, indicies, MPI_STATUSES_IGNORE); - if (count != 1 || indicies[0] != 1) { + MPI_Waitsome(2, rreq, &count, indices, MPI_STATUSES_IGNORE); + if (count != 1 || indices[0] != 1) { /* The only valid return is that exactly one message * has been received */ errs++; - if (count == 1 && indicies[0] != 1) { + if (count == 1 && indices[0] != 1) { printf("Error in context values for intercomm\n"); } else if (count == 2) { @@ -55,7 +55,7 @@ int main(int argc, char *argv[]) int i; printf("Error: count = %d", count); for (i = 0; i < count; i++) { - printf(" indicies[%d] = %d", i, indicies[i]); + printf(" indices[%d] = %d", i, indices[i]); } printf("\n"); } diff --git a/teshsuite/smpi/mpich3-test/datatype/blockindexed-misc.c b/teshsuite/smpi/mpich3-test/datatype/blockindexed-misc.c index d26720587b..338c6d1ad8 100644 --- a/teshsuite/smpi/mpich3-test/datatype/blockindexed-misc.c +++ b/teshsuite/smpi/mpich3-test/datatype/blockindexed-misc.c @@ -149,7 +149,7 @@ int blockindexed_contig_test(void) /* blockindexed_vector_test() * * Tests behavior with a blockindexed of some vector types; - * this shouldn't be easily convertable into anything else. + * this shouldn't be easily convertible into anything else. * * Returns the number of errors encountered. */ diff --git a/teshsuite/smpi/mpich3-test/datatype/hindexed_block.c b/teshsuite/smpi/mpich3-test/datatype/hindexed_block.c index 5f00009e26..5c975fc95f 100644 --- a/teshsuite/smpi/mpich3-test/datatype/hindexed_block.c +++ b/teshsuite/smpi/mpich3-test/datatype/hindexed_block.c @@ -161,7 +161,7 @@ int hindexed_block_contig_test(void) /* hindexed_block_vector_test() * * Tests behavior with a hindexed_block of some vector types; - * this shouldn't be easily convertable into anything else. + * this shouldn't be easily convertible into anything else. * * Returns the number of errors encountered. 
*/ diff --git a/teshsuite/smpi/mpich3-test/datatype/large_type.c b/teshsuite/smpi/mpich3-test/datatype/large_type.c index 48a256d532..eb6b2cd334 100644 --- a/teshsuite/smpi/mpich3-test/datatype/large_type.c +++ b/teshsuite/smpi/mpich3-test/datatype/large_type.c @@ -17,7 +17,7 @@ static MPI_Datatype make_largexfer_type_struct(MPI_Offset nbytes) int remainder = 0; MPI_Datatype memtype, chunktype; - /* need to cook up a new datatype to accomodate large datatypes */ + /* need to cook up a new datatype to accommodate large datatypes */ /* first pass: chunks of 1 MiB plus an additional remainder. Does require * 8 byte MPI_Aint, which should have been checked for earlier */ @@ -56,7 +56,7 @@ static MPI_Datatype make_largexfer_type_hindexed(MPI_Offset nbytes) MPI_Aint *disp; MPI_Datatype memtype; - /* need to cook up a new datatype to accomodate large datatypes */ + /* need to cook up a new datatype to accommodate large datatypes */ /* Does require 8 byte MPI_Aint, which should have been checked for earlier */ diff --git a/teshsuite/smpi/mpich3-test/datatype/lbub.c b/teshsuite/smpi/mpich3-test/datatype/lbub.c index b1c48248eb..158b66b35f 100644 --- a/teshsuite/smpi/mpich3-test/datatype/lbub.c +++ b/teshsuite/smpi/mpich3-test/datatype/lbub.c @@ -1135,7 +1135,7 @@ int int_with_negextent_test(void) fprintf(stderr, " MPI_Type_struct of %s failed.\n", typemapstring); if (verbose) MTestPrintError(err); - /* No point in contiuing */ + /* No point in continuing */ return errs; } diff --git a/teshsuite/smpi/mpich3-test/datatype/typename.c b/teshsuite/smpi/mpich3-test/datatype/typename.c index 8ff1fc0f3d..ca8d1dce04 100644 --- a/teshsuite/smpi/mpich3-test/datatype/typename.c +++ b/teshsuite/smpi/mpich3-test/datatype/typename.c @@ -124,7 +124,7 @@ mpi_names_t mpi_names[] = { /* added in MPI 3 */ { MPI_COUNT, "MPI_COUNT" }, #endif - { 0, (char *)0 }, /* Sentinal used to indicate the last element */ + { 0, (char *)0 }, /* Sentinel used to indicate the last element */ }; char name[MPI_MAX_OBJECT_NAME]; diff --git a/teshsuite/smpi/mpich3-test/errhan/errfatal.c b/teshsuite/smpi/mpich3-test/errhan/errfatal.c index 0e9f55a8e2..e95a7cab56 100644 --- a/teshsuite/smpi/mpich3-test/errhan/errfatal.c +++ b/teshsuite/smpi/mpich3-test/errhan/errfatal.c @@ -36,7 +36,7 @@ int main(int argc, char **argv) /* We should not get here, because the default error handler * is ERRORS_ARE_FATAL. This makes sure that the correct error - * handler is called and that no failure occured (such as + * handler is called and that no failure occurred (such as * a SEGV) in Comm_call_errhandler on the default * error handler. 
*/ printf("After the Error Handler Has Been Called\n"); diff --git a/teshsuite/smpi/mpich3-test/f77/ext/c2fmult.c b/teshsuite/smpi/mpich3-test/f77/ext/c2fmult.c index 4f1d343838..8f81d40d53 100644 --- a/teshsuite/smpi/mpich3-test/f77/ext/c2fmult.c +++ b/teshsuite/smpi/mpich3-test/f77/ext/c2fmult.c @@ -50,7 +50,7 @@ int main( int argc, char *argv[] ) printf( "Unable to cancel MPI_Irecv request\n" ); } /* Using MPI_Request_free should be ok, but some MPI implementations - object to it imediately after the cancel and that isn't essential to + object to it immediately after the cancel and that isn't essential to this test */ MTest_Finalize( errs ); diff --git a/teshsuite/smpi/mpich3-test/f77/ext/ctypesfromc.c b/teshsuite/smpi/mpich3-test/f77/ext/ctypesfromc.c index e0e5f60d92..8775b83d81 100644 --- a/teshsuite/smpi/mpich3-test/f77/ext/ctypesfromc.c +++ b/teshsuite/smpi/mpich3-test/f77/ext/ctypesfromc.c @@ -64,7 +64,7 @@ static mpi_names_t mpi_names[] = { { MPI_LONG_LONG, "MPI_LONG_LONG" }, { MPI_UNSIGNED_LONG_LONG, "MPI_UNSIGNED_LONG_LONG" }, { MPI_LONG_DOUBLE_INT, "MPI_LONG_DOUBLE_INT" }, - { 0, (char *)0 }, /* Sentinal used to indicate the last element */ + { 0, (char *)0 }, /* Sentinel used to indicate the last element */ }; /* diff --git a/teshsuite/smpi/mpich3-test/f77/topo/cartcrf.f b/teshsuite/smpi/mpich3-test/f77/topo/cartcrf.f index a23c178812..e6d046b53c 100644 --- a/teshsuite/smpi/mpich3-test/f77/topo/cartcrf.f +++ b/teshsuite/smpi/mpich3-test/f77/topo/cartcrf.f @@ -22,7 +22,7 @@ C call mtest_init( ierr ) C -C For upto 6 dimensions, test with periodicity in 0 through all +C For up to 6 dimensions, test with periodicity in 0 through all C dimensions. The test is computed by both: C get info about the created communicator C apply cart shift diff --git a/teshsuite/smpi/mpich3-test/f77/topo/dgraph_unwgtf.f b/teshsuite/smpi/mpich3-test/f77/topo/dgraph_unwgtf.f index f040a86f0e..23f54f26cc 100644 --- a/teshsuite/smpi/mpich3-test/f77/topo/dgraph_unwgtf.f +++ b/teshsuite/smpi/mpich3-test/f77/topo/dgraph_unwgtf.f @@ -154,7 +154,7 @@ C the nearest neighbors that within a ring. C now create one with MPI_WEIGHTS_EMPTY C NOTE that MPI_WEIGHTS_EMPTY was added in MPI-3 and does not -C appear before then. Incluing this test means that this test cannot +C appear before then. Including this test means that this test cannot C be compiled if the MPI version is less than 3 (see the testlist file) degs(1) = 0; diff --git a/teshsuite/smpi/mpich3-test/f90/datatype/createf90.f90 b/teshsuite/smpi/mpich3-test/f90/datatype/createf90.f90 index b2edf87b6f..7672858202 100644 --- a/teshsuite/smpi/mpich3-test/f90/datatype/createf90.f90 +++ b/teshsuite/smpi/mpich3-test/f90/datatype/createf90.f90 @@ -16,7 +16,7 @@ errs = 0 call mtest_init( ierr ) -! integers with upto 9 are 4 bytes integers; r of 4 are 2 byte, +! integers with up to 9 are 4 bytes integers; r of 4 are 2 byte, ! and r of 2 is 1 byte call mpi_type_create_f90_integer( 9, ntype1, ierr ) ! diff --git a/teshsuite/smpi/mpich3-test/f90/datatype/kinds.f90 b/teshsuite/smpi/mpich3-test/f90/datatype/kinds.f90 index 3d42946571..fd22c58aff 100644 --- a/teshsuite/smpi/mpich3-test/f90/datatype/kinds.f90 +++ b/teshsuite/smpi/mpich3-test/f90/datatype/kinds.f90 @@ -90,19 +90,19 @@ endif call MPI_RECV( aint, 1, MPI_AINT, 0, 0, MPI_COMM_WORLD, s, ierr ) if (taint .ne. 
aint) then - print *, "Address-sized int not correctly transfered" + print *, "Address-sized int not correctly transferred" print *, "Value should be ", taint, " but is ", aint errs = errs + 1 endif call MPI_RECV( oint, 1, MPI_OFFSET, 0, 1, MPI_COMM_WORLD, s, ierr ) if (toint .ne. oint) then - print *, "Offset-sized int not correctly transfered" + print *, "Offset-sized int not correctly transferred" print *, "Value should be ", toint, " but is ", oint errs = errs + 1 endif call MPI_RECV( iint, 1, MPI_INTEGER, 0, 2, MPI_COMM_WORLD, s, ierr ) if (tiint .ne. iint) then - print *, "Integer (by kind) not correctly transfered" + print *, "Integer (by kind) not correctly transferred" print *, "Value should be ", tiint, " but is ", iint errs = errs + 1 endif diff --git a/teshsuite/smpi/mpich3-test/f90/datatype/structf.f90 b/teshsuite/smpi/mpich3-test/f90/datatype/structf.f90 index dbdc86101c..b2118b69b5 100644 --- a/teshsuite/smpi/mpich3-test/f90/datatype/structf.f90 +++ b/teshsuite/smpi/mpich3-test/f90/datatype/structf.f90 @@ -73,7 +73,7 @@ call mpi_type_free(newtype,ierr) ! write(*,*) "Sent ",name(1:5),x else -! Everyone calls barrier incase size > 2 +! Everyone calls barrier in case size > 2 call mpi_barrier( MPI_COMM_WORLD, ierr ) if (me.eq.dest) then position=0 diff --git a/teshsuite/smpi/mpich3-test/group/grouptest.c b/teshsuite/smpi/mpich3-test/group/grouptest.c index c2904deb60..64f3deeacc 100644 --- a/teshsuite/smpi/mpich3-test/group/grouptest.c +++ b/teshsuite/smpi/mpich3-test/group/grouptest.c @@ -21,7 +21,7 @@ int main(int argc, char *argv[]) MPI_Comm_rank(MPI_COMM_WORLD, &myrank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size < 8) { - fprintf(stderr, "Test requires 8 processes (16 prefered) only %d provided\n", size); + fprintf(stderr, "Test requires 8 processes (16 preferred) only %d provided\n", size); errs++; } diff --git a/teshsuite/smpi/mpich3-test/group/grouptest2.c b/teshsuite/smpi/mpich3-test/group/grouptest2.c index 16bbaab507..505319312b 100644 --- a/teshsuite/smpi/mpich3-test/group/grouptest2.c +++ b/teshsuite/smpi/mpich3-test/group/grouptest2.c @@ -7,7 +7,7 @@ /* Test the group routines - (some tested elsewere) + (some tested elsewhere) MPI_Group_compare MPI_Group_excl diff --git a/teshsuite/smpi/mpich3-test/info/infodel.c b/teshsuite/smpi/mpich3-test/info/infodel.c index b281847769..aa6eb48188 100644 --- a/teshsuite/smpi/mpich3-test/info/infodel.c +++ b/teshsuite/smpi/mpich3-test/info/infodel.c @@ -28,7 +28,7 @@ int main(int argc, char *argv[]) MTest_Init(&argc, &argv); MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ for (i = 0; i < NKEYS; i++) { MPI_Info_set(info, keys[i], values[i]); diff --git a/teshsuite/smpi/mpich3-test/info/infodup.c b/teshsuite/smpi/mpich3-test/info/infodup.c index 4c79749d22..cbce96b9d1 100644 --- a/teshsuite/smpi/mpich3-test/info/infodup.c +++ b/teshsuite/smpi/mpich3-test/info/infodup.c @@ -24,7 +24,7 @@ int main(int argc, char *argv[]) MTest_Init(&argc, &argv); MPI_Info_create(&info1); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ MPI_Info_set(info1, (char *) "host", (char *) "myhost.myorg.org"); MPI_Info_set(info1, (char *) "file", (char *) "runfile.txt"); diff --git a/teshsuite/smpi/mpich3-test/info/infoorder.c b/teshsuite/smpi/mpich3-test/info/infoorder.c 
index e1189c46a2..d859a91cc4 100644 --- a/teshsuite/smpi/mpich3-test/info/infoorder.c +++ b/teshsuite/smpi/mpich3-test/info/infoorder.c @@ -30,7 +30,7 @@ int main(int argc, char *argv[]) /* 1,2,3 */ MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ for (i = 0; i < NKEYS; i++) { MPI_Info_set(info, keys1[i], values1[i]); @@ -52,7 +52,7 @@ int main(int argc, char *argv[]) /* 3,2,1 */ MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ for (i = NKEYS - 1; i >= 0; i--) { MPI_Info_set(info, keys1[i], values1[i]); @@ -74,7 +74,7 @@ int main(int argc, char *argv[]) /* 1,3,2 */ MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ MPI_Info_set(info, keys1[0], values1[0]); MPI_Info_set(info, keys1[2], values1[2]); @@ -96,7 +96,7 @@ int main(int argc, char *argv[]) /* 2,1,3 */ MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ MPI_Info_set(info, keys1[1], values1[1]); MPI_Info_set(info, keys1[0], values1[0]); @@ -118,7 +118,7 @@ int main(int argc, char *argv[]) /* 2,3,1 */ MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ MPI_Info_set(info, keys1[1], values1[1]); MPI_Info_set(info, keys1[2], values1[2]); @@ -140,7 +140,7 @@ int main(int argc, char *argv[]) /* 3,1,2 */ MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ MPI_Info_set(info, keys1[2], values1[2]); MPI_Info_set(info, keys1[0], values1[0]); diff --git a/teshsuite/smpi/mpich3-test/info/infovallen.c b/teshsuite/smpi/mpich3-test/info/infovallen.c index 22fbfe5d1a..39cf7070e4 100644 --- a/teshsuite/smpi/mpich3-test/info/infovallen.c +++ b/teshsuite/smpi/mpich3-test/info/infovallen.c @@ -24,7 +24,7 @@ int main(int argc, char *argv[]) MTest_Init(&argc, &argv); MPI_Info_create(&info); - /* Use only named keys incase the info implementation only supports + /* Use only named keys in case the info implementation only supports * the predefined keys (e.g., IBM) */ for (i = 0; i < NKEYS; i++) { MPI_Info_set(info, keys[i], values[i]); diff --git a/teshsuite/smpi/mpich3-test/init/attrself.c b/teshsuite/smpi/mpich3-test/init/attrself.c index 027a413c32..bbbcd9c714 100644 --- a/teshsuite/smpi/mpich3-test/init/attrself.c +++ b/teshsuite/smpi/mpich3-test/init/attrself.c @@ -97,7 +97,7 @@ int checkAttrs(MPI_Comm comm, int n, int lkey[], int attrval[]) } else if (val_p != &attrval[i]) { lerrs++; - fprintf(stderr, "Atribute value for key %d not correct\n", i); + fprintf(stderr, "Attribute value for key %d not correct\n", i); } } diff --git a/teshsuite/smpi/mpich3-test/io/i_noncontig_coll2.c b/teshsuite/smpi/mpich3-test/io/i_noncontig_coll2.c index 35466b4cee..57a4868a3a 100644 --- a/teshsuite/smpi/mpich3-test/io/i_noncontig_coll2.c +++ 
b/teshsuite/smpi/mpich3-test/io/i_noncontig_coll2.c @@ -15,7 +15,7 @@ * * . generalized file writing/reading to handle arbitrary number of processors * . provides the "cb_config_list" hint with several permutations of the - * avaliable processors. + * available processors. * [ makes use of code copied from ROMIO's ADIO code to collect the names of * the processors ] */ diff --git a/teshsuite/smpi/mpich3-test/io/simple_collective.c b/teshsuite/smpi/mpich3-test/io/simple_collective.c index 2acc2bcef8..2c5699b1d8 100644 --- a/teshsuite/smpi/mpich3-test/io/simple_collective.c +++ b/teshsuite/smpi/mpich3-test/io/simple_collective.c @@ -6,7 +6,7 @@ /* this deceptively simple test uncovered a bug in the way certain file systems - * dealt with tuning parmeters. See + * dealt with tuning parameters. See * https://github.com/open-mpi/ompi/issues/158 and * http://trac.mpich.org/projects/mpich/ticket/2261 * @@ -110,7 +110,7 @@ int main(int argc, char **argv) sprintf(file, "%s", opt_file); MPI_Info_create(&info); nr_errors += test_write(file, nprocs, rank, info); - /* acutal value does not matter. test only writes a small amount of data */ + /* actual value does not matter. test only writes a small amount of data */ MPI_Info_set(info, "striping_factor", "50"); nr_errors += test_write(file, nprocs, rank, info); MPI_Info_free(&info); diff --git a/teshsuite/smpi/mpich3-test/pt2pt/anyall.c b/teshsuite/smpi/mpich3-test/pt2pt/anyall.c index 405c794804..8e7557cc94 100644 --- a/teshsuite/smpi/mpich3-test/pt2pt/anyall.c +++ b/teshsuite/smpi/mpich3-test/pt2pt/anyall.c @@ -11,7 +11,7 @@ #define MAX_MSGS 30 /* -static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI recieve uses both ANY_SOURCE and ANY_TAG"; +static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI receive uses both ANY_SOURCE and ANY_TAG"; */ int main(int argc, char *argv[]) diff --git a/teshsuite/smpi/mpich3-test/pt2pt/bsendalign.c b/teshsuite/smpi/mpich3-test/pt2pt/bsendalign.c index ec358e66d3..c01343e77b 100644 --- a/teshsuite/smpi/mpich3-test/pt2pt/bsendalign.c +++ b/teshsuite/smpi/mpich3-test/pt2pt/bsendalign.c @@ -8,7 +8,7 @@ #include "mpi.h" #include "mpitest.h" -/* Test bsend with a buffer with arbitray alignment */ +/* Test bsend with a buffer with arbitrary alignment */ #define BUFSIZE 2000*4 int main(int argc, char *argv[]) { @@ -62,7 +62,7 @@ int main(int argc, char *argv[]) if (bptr != buf + align) { errs++; printf - ("Did not recieve the same buffer on detach that was provided on init (%p vs %p)\n", + ("Did not receive the same buffer on detach that was provided on init (%p vs %p)\n", bptr, buf); } } diff --git a/teshsuite/smpi/mpich3-test/rma/accfence1.c b/teshsuite/smpi/mpich3-test/rma/accfence1.c index f4b8930d6b..ef71a3daec 100644 --- a/teshsuite/smpi/mpich3-test/rma/accfence1.c +++ b/teshsuite/smpi/mpich3-test/rma/accfence1.c @@ -81,7 +81,7 @@ int main(int argc, char *argv[]) else if (rank == dest) { MPI_Win_fence(0, win); /* This should have the same effect, in terms of - * transfering data, as a send/recv pair */ + * transferring data, as a send/recv pair */ err = MTestCheckRecv(0, &recvtype); if (err) { errs += err; diff --git a/teshsuite/smpi/mpich3-test/rma/accpscw1.c b/teshsuite/smpi/mpich3-test/rma/accpscw1.c index 67ab3b48bf..dd9162ceb6 100644 --- a/teshsuite/smpi/mpich3-test/rma/accpscw1.c +++ b/teshsuite/smpi/mpich3-test/rma/accpscw1.c @@ -85,7 +85,7 @@ int main(int argc, char *argv[]) MPI_Group_free(&neighbors); MPI_Win_wait(win); /* This should have the 
same effect, in terms of - * transfering data, as a send/recv pair */ + * transferring data, as a send/recv pair */ err = MTestCheckRecv(0, &recvtype); if (err) { errs += errs; diff --git a/teshsuite/smpi/mpich3-test/rma/atomic_rmw_gacc.c b/teshsuite/smpi/mpich3-test/rma/atomic_rmw_gacc.c index 0e73e2d4a7..9a7eafeb3b 100644 --- a/teshsuite/smpi/mpich3-test/rma/atomic_rmw_gacc.c +++ b/teshsuite/smpi/mpich3-test/rma/atomic_rmw_gacc.c @@ -8,11 +8,11 @@ /* This test is going to test the atomicity for "read-modify-write" in GACC * operations */ -/* This test is similiar with atomic_rmw_fop.c. +/* This test is similar with atomic_rmw_fop.c. * There are three processes involved in this test: P0 (origin_shm), P1 (origin_am), * and P2 (dest). P0 and P1 issues multiple GACC with MPI_SUM and OP_COUNT integers * (value 1) to P2 via SHM and AM respectively. The correct results should be that the - * results on P0 and P1 never be the same for intergers on the corresponding index + * results on P0 and P1 never be the same for integers on the corresponding index * in [0...OP_COUNT-1]. */ diff --git a/teshsuite/smpi/mpich3-test/rma/attrorderwin.c b/teshsuite/smpi/mpich3-test/rma/attrorderwin.c index 9d6ef8721d..14723e88eb 100644 --- a/teshsuite/smpi/mpich3-test/rma/attrorderwin.c +++ b/teshsuite/smpi/mpich3-test/rma/attrorderwin.c @@ -101,7 +101,7 @@ int checkAttrs(MPI_Win win, int n, int key[], int attrval[]) } else if (val_p != &attrval[i]) { errs++; - fprintf(stderr, "Atribute value for key %d not correct\n", i); + fprintf(stderr, "Attribute value for key %d not correct\n", i); } } diff --git a/teshsuite/smpi/mpich3-test/rma/contention_put.c b/teshsuite/smpi/mpich3-test/rma/contention_put.c index 28b99524a7..9dff333762 100644 --- a/teshsuite/smpi/mpich3-test/rma/contention_put.c +++ b/teshsuite/smpi/mpich3-test/rma/contention_put.c @@ -7,7 +7,7 @@ /** Contended RMA put test -- James Dinan * * Each process issues COUNT put operations to non-overlapping locations on - * every other processs. + * every other process. */ #include diff --git a/teshsuite/smpi/mpich3-test/rma/contention_putget.c b/teshsuite/smpi/mpich3-test/rma/contention_putget.c index abfdb2bbe5..4f25827cff 100644 --- a/teshsuite/smpi/mpich3-test/rma/contention_putget.c +++ b/teshsuite/smpi/mpich3-test/rma/contention_putget.c @@ -7,7 +7,7 @@ /** Contended RMA put/get test -- James Dinan * * Each process issues COUNT put and get operations to non-overlapping - * locations on every other processs. + * locations on every other process. */ #include diff --git a/teshsuite/smpi/mpich3-test/rma/getfence1.c b/teshsuite/smpi/mpich3-test/rma/getfence1.c index 6b068930f7..80bf6f1863 100644 --- a/teshsuite/smpi/mpich3-test/rma/getfence1.c +++ b/teshsuite/smpi/mpich3-test/rma/getfence1.c @@ -52,7 +52,7 @@ static inline int test(MPI_Comm comm, int rank, int source, int dest, MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN); /* This should have the same effect, in terms of - * transfering data, as a send/recv pair */ + * transferring data, as a send/recv pair */ err = MPI_Get(recvtype->buf, recvtype->count, recvtype->datatype, source, 0, sendtype->count, sendtype->datatype, win); if (err) { diff --git a/teshsuite/smpi/mpich3-test/rma/mixedsync.c b/teshsuite/smpi/mpich3-test/rma/mixedsync.c index 0bf63c3c5b..72e9042963 100644 --- a/teshsuite/smpi/mpich3-test/rma/mixedsync.c +++ b/teshsuite/smpi/mpich3-test/rma/mixedsync.c @@ -127,7 +127,7 @@ int main(int argc, char *argv[]) /* Perform several communication operations, mixing synchronization * types. 
Use multiple communication to avoid the single-operation * optimization that may be present. */ - MTestPrintfMsg(3, "Begining loop %d of mixed sync put/acc operations\n", loop); + MTestPrintfMsg(3, "Beginning loop %d of mixed sync put/acc operations\n", loop); memset(winbuf, 0, count * sizeof(int)); MPI_Barrier(comm); if (crank == source) { @@ -173,7 +173,7 @@ int main(int argc, char *argv[]) /* Perform several communication operations, mixing synchronization * types. Use multiple communication to avoid the single-operation * optimization that may be present. */ - MTestPrintfMsg(3, "Begining loop %d of mixed sync put/get/acc operations\n", loop); + MTestPrintfMsg(3, "Beginning loop %d of mixed sync put/get/acc operations\n", loop); MPI_Barrier(comm); if (crank == source) { MPI_Win_lock(MPI_LOCK_EXCLUSIVE, dest, 0, win); diff --git a/teshsuite/smpi/mpich3-test/rma/putfence1.c b/teshsuite/smpi/mpich3-test/rma/putfence1.c index 4f49bea9ff..68fb5a027a 100644 --- a/teshsuite/smpi/mpich3-test/rma/putfence1.c +++ b/teshsuite/smpi/mpich3-test/rma/putfence1.c @@ -61,7 +61,7 @@ static inline int test(MPI_Comm comm, int rank, int source, int dest, else if (rank == dest) { MPI_Win_fence(0, win); /* This should have the same effect, in terms of - * transfering data, as a send/recv pair */ + * transferring data, as a send/recv pair */ err = MTestCheckRecv(0, recvtype); if (err) { if (errs < 10) { diff --git a/teshsuite/smpi/mpich3-test/rma/putpscw1.c b/teshsuite/smpi/mpich3-test/rma/putpscw1.c index d46c8f98ff..e8cbfd08a7 100644 --- a/teshsuite/smpi/mpich3-test/rma/putpscw1.c +++ b/teshsuite/smpi/mpich3-test/rma/putpscw1.c @@ -85,7 +85,7 @@ int main(int argc, char *argv[]) MPI_Group_free(&neighbors); MPI_Win_wait(win); /* This should have the same effect, in terms of - * transfering data, as a send/recv pair */ + * transferring data, as a send/recv pair */ err = MTestCheckRecv(0, &recvtype); if (err) { errs += errs; diff --git a/teshsuite/smpi/mpich3-test/runtests b/teshsuite/smpi/mpich3-test/runtests index 987787e3cd..4d458482f7 100755 --- a/teshsuite/smpi/mpich3-test/runtests +++ b/teshsuite/smpi/mpich3-test/runtests @@ -724,7 +724,7 @@ sub AddMPIProgram { if ($ResultTest ne "") { # This test really needs to be run manually, with this test - # Eventually, we can update this to include handleing in checktests. + # Eventually, we can update this to include handling in checktests. 
print STDERR "Run $curdir/$programname with $np processes and use $ResultTest to check the results\n"; return; } diff --git a/teshsuite/smpi/mpich3-test/util/dtypes.c b/teshsuite/smpi/mpich3-test/util/dtypes.c index b93fc9b713..239d4011e4 100644 --- a/teshsuite/smpi/mpich3-test/util/dtypes.c +++ b/teshsuite/smpi/mpich3-test/util/dtypes.c @@ -139,7 +139,7 @@ static int basic_only = 0; free(myname); \ counts[cnt] = 1; bytesize[cnt] = sizeof(_ctype) * (_count); cnt++; } -/* This defines a structure of two basic members; by chosing things like +/* This defines a structure of two basic members; by choosing things like (char, double), various packing and alignment tests can be made */ #define SETUPSTRUCT2TYPE(_mpitype1,_ctype1,_mpitype2,_ctype2,_count,_tname) { \ int i; char *myname; \ diff --git a/teshsuite/smpi/mpich3-test/util/mtest.c b/teshsuite/smpi/mpich3-test/util/mtest.c index cb24b3239d..b68178ee91 100644 --- a/teshsuite/smpi/mpich3-test/util/mtest.c +++ b/teshsuite/smpi/mpich3-test/util/mtest.c @@ -575,7 +575,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 0; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } @@ -607,7 +607,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 0; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } @@ -640,7 +640,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 0; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } @@ -673,7 +673,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 0; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } @@ -716,7 +716,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 0; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } @@ -769,7 +769,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 1; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } @@ -813,7 +813,7 @@ int MTestGetIntercomm(MPI_Comm * comm, int *isLeftGroup, int min_size) rleader = 0; } else { - /* Remote leader is signficant only for the processes + /* Remote leader is significant only for the processes * designated local leaders */ rleader = -1; } diff --git a/teshsuite/smpi/timers/timers.tesh b/teshsuite/smpi/timers/timers.tesh index 8e0283b87f..26429aaada 100644 --- a/teshsuite/smpi/timers/timers.tesh +++ b/teshsuite/smpi/timers/timers.tesh @@ -1,4 +1,4 @@ p Test timers -$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_config.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100000 --log=xbt_cfg.thres:warning --cfg=smpi/wtime:0 +$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform 
../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_config.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100Gf --log=xbt_cfg.thres:warning --cfg=smpi/wtime:0 > [rank 0] -> Tremblay diff --git a/teshsuite/smpi/topo-cart-sub/topo-cart-sub.c b/teshsuite/smpi/topo-cart-sub/topo-cart-sub.c index e0c4146b96..dc6dfb72c6 100644 --- a/teshsuite/smpi/topo-cart-sub/topo-cart-sub.c +++ b/teshsuite/smpi/topo-cart-sub/topo-cart-sub.c @@ -68,7 +68,7 @@ int main(int argc, char** argv) MPI_Barrier(MPI_COMM_WORLD); - /* Set dims[] values to descibe a grid of nbNodes and DIM dimensions*/ + /* Set dims[] values to describe a grid of nbNodes and DIM dimensions*/ MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder, &gridComm); if (gridComm == MPI_COMM_NULL) diff --git a/teshsuite/surf/CMakeLists.txt b/teshsuite/surf/CMakeLists.txt index fa59bb3536..0952484a9d 100644 --- a/teshsuite/surf/CMakeLists.txt +++ b/teshsuite/surf/CMakeLists.txt @@ -24,6 +24,10 @@ endforeach() set(tesh_files ${tesh_files} PARENT_SCOPE) set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/maxmin_bench/maxmin_bench.cpp PARENT_SCOPE) -foreach(x small medium large) - ADD_TESH(tesh-surf-maxmin-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_${x}.tesh) -endforeach() +ADD_TESH(tesh-surf-maxmin-large --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_large.tesh) + +if(enable_debug) + foreach(x small medium) + ADD_TESH(tesh-surf-maxmin-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_${x}.tesh) + endforeach() +endif() \ No newline at end of file diff --git a/teshsuite/surf/lmm_usage/lmm_usage.cpp b/teshsuite/surf/lmm_usage/lmm_usage.cpp index a1257d3cf5..9ecc8e57ab 100644 --- a/teshsuite/surf/lmm_usage/lmm_usage.cpp +++ b/teshsuite/surf/lmm_usage/lmm_usage.cpp @@ -147,7 +147,7 @@ static void test3() /* Link constraints and variables */ for (int i = 0; i < 15; i++) for (int j = 0; j < 16; j++) - if (A[i][j]) + if (A[i][j] != 0.0) Sys->expand(tmp_cnst[i], tmp_var[j], 1.0); Sys->solve(); diff --git a/teshsuite/surf/maxmin_bench/maxmin_bench.cpp b/teshsuite/surf/maxmin_bench/maxmin_bench.cpp index e2d1d9f2ee..6242ceb607 100644 --- a/teshsuite/surf/maxmin_bench/maxmin_bench.cpp +++ b/teshsuite/surf/maxmin_bench/maxmin_bench.cpp @@ -19,7 +19,7 @@ double date; static void test(int nb_cnst, int nb_var, int nb_elem, unsigned int pw_base_limit, unsigned int pw_max_limit, - float rate_no_limit, int max_share, int mode) + double rate_no_limit, int max_share, int mode) { simgrid::kernel::lmm::Constraint** cnst = new simgrid::kernel::lmm::Constraint*[nb_cnst]; simgrid::kernel::lmm::Variable** var = new simgrid::kernel::lmm::Variable*[nb_var]; @@ -44,7 +44,7 @@ static void test(int nb_cnst, int nb_var, int nb_elem, unsigned int pw_base_limi for (int i = 0; i < nb_var; i++) { var[i] = Sys->variable_new(NULL, 1.0, -1.0, nb_elem); //Have a few variables with a concurrency share of two (e.g. 
cross-traffic in some cases) - int concurrency_share = 1 + simgrid::xbt::random::uniform_int(0, max_share - 1); + short concurrency_share = static_cast<short>(1 + simgrid::xbt::random::uniform_int(0, max_share - 1)); var[i]->set_concurrency_share(concurrency_share); for (int j = 0; j < nb_cnst; j++) @@ -105,9 +105,9 @@ int main(int argc, char **argv) { simgrid::s4u::Engine e(&argc, argv); - float rate_no_limit=0.2; - float acc_date=0; - float acc_date2=0; + double rate_no_limit = 0.2; + double acc_date = 0.0; + double acc_date2 = 0.0; int testclass; if(argc<3) { @@ -167,8 +167,8 @@ int main(int argc, char **argv) acc_date2+=date*date; } - float mean_date= acc_date/(float)testcount; - float stdev_date= sqrt(acc_date2/(float)testcount-mean_date*mean_date); + double mean_date = acc_date / static_cast<double>(testcount); + double stdev_date = sqrt(acc_date2 / static_cast<double>(testcount) - mean_date * mean_date); fprintf(stderr, "%ix One shot execution time for a total of %u constraints, " "%u variables with %u active constraint each, concurrency in [%i,%i] and max concurrency share %u\n", diff --git a/teshsuite/surf/wifi_usage/wifi_usage.cpp b/teshsuite/surf/wifi_usage/wifi_usage.cpp index b510cd293c..19e17416b8 100644 --- a/teshsuite/surf/wifi_usage/wifi_usage.cpp +++ b/teshsuite/surf/wifi_usage/wifi_usage.cpp @@ -7,7 +7,7 @@ #include "xbt/config.hpp" #include "xbt/log.h" -#include "src/surf/network_wifi.hpp" +#include "src/surf/network_interface.hpp" XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[usage] wifi_usage "); @@ -72,9 +72,9 @@ void run_ping_test(const char* src, const char* dest, int data_size) simgrid::s4u::this_actor::get_host()->get_cname(), dest, end_time - start_time); }); simgrid::s4u::Actor::create("receiver", simgrid::s4u::Host::by_name(dest), [mailbox]() { mailbox->get(); }); - auto* l = (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl(); - l->set_host_rate(simgrid::s4u::Host::by_name(src), 0); - l->set_host_rate(simgrid::s4u::Host::by_name(dest), 0); + const auto* ap1 = simgrid::s4u::Link::by_name("AP1"); + ap1->set_host_wifi_rate(simgrid::s4u::Host::by_name(src), 0); + ap1->set_host_wifi_rate(simgrid::s4u::Host::by_name(dest), 0); simgrid::s4u::this_actor::sleep_for(10); XBT_INFO("\n"); } diff --git a/teshsuite/xbt/parmap_bench/parmap_bench.cpp b/teshsuite/xbt/parmap_bench/parmap_bench.cpp index 1270071fde..789ee427ee 100644 --- a/teshsuite/xbt/parmap_bench/parmap_bench.cpp +++ b/teshsuite/xbt/parmap_bench/parmap_bench.cpp @@ -19,8 +19,6 @@ constexpr unsigned MODES_DEFAULT = 0x7; constexpr unsigned ARRAY_SIZE = 10007; constexpr unsigned FIBO_MAX = 25; -void (*fun_to_apply)(unsigned*); - static std::string parmap_mode_name(e_xbt_parmap_mode_t mode) { std::string name; @@ -62,7 +60,8 @@ static void fun_big_comp(unsigned* arg) *arg = fibonacci(*arg % FIBO_MAX); } -static void bench_parmap(int nthreads, double timeout, e_xbt_parmap_mode_t mode, bool full_bench) +template <class F> +void bench_parmap(int nthreads, double timeout, e_xbt_parmap_mode_t mode, bool full_bench, F func_to_apply) { std::string mode_name = parmap_mode_name(mode); XBT_INFO("** mode = %s", mode_name.c_str()); @@ -86,7 +85,7 @@ static void bench_parmap(int nthreads, double timeout, e_xbt_parmap_mode_t mode, delete parmap; parmap = new simgrid::xbt::Parmap(nthreads, mode); } - parmap->apply(fun_to_apply, data); + parmap->apply(func_to_apply, data); elapsed_time = xbt_os_time() - start_time; i++; } while (elapsed_time < timeout); @@ -95,14 +94,14 @@ static void bench_parmap(int nthreads, double
timeout, e_xbt_parmap_mode_t mode, XBT_INFO(" ran %d times in %g seconds (%g/s)", i, elapsed_time, i / elapsed_time); } -static void bench_all_modes(int nthreads, double timeout, unsigned modes, bool full_bench) +template <class F> void bench_all_modes(int nthreads, double timeout, unsigned modes, bool full_bench, F func_to_apply) { std::vector<e_xbt_parmap_mode_t> all_modes = {XBT_PARMAP_POSIX, XBT_PARMAP_FUTEX, XBT_PARMAP_BUSY_WAIT, XBT_PARMAP_DEFAULT}; for (unsigned i = 0; i < all_modes.size(); i++) { if (1U << i & modes) - bench_parmap(nthreads, timeout, all_modes[i], full_bench); + bench_parmap(nthreads, timeout, all_modes[i], full_bench, func_to_apply); } } @@ -129,30 +128,27 @@ int main(int argc, char* argv[]) { } timeout = atof(argv[2]); if (argc == 4) - modes = strtol(argv[2], NULL, 0); + modes = static_cast<unsigned>(strtoul(argv[2], NULL, 0)); XBT_INFO("Parmap benchmark with %d workers (modes = %#x)...", nthreads, modes); XBT_INFO("%s", ""); SIMIX_context_set_nthreads(nthreads); - fun_to_apply = &fun_small_comp; XBT_INFO("Benchmark for parmap create+apply+destroy (small comp):"); - bench_all_modes(nthreads, timeout, modes, true); + bench_all_modes(nthreads, timeout, modes, true, &fun_small_comp); XBT_INFO("%s", ""); XBT_INFO("Benchmark for parmap apply only (small comp):"); - bench_all_modes(nthreads, timeout, modes, false); + bench_all_modes(nthreads, timeout, modes, false, &fun_small_comp); XBT_INFO("%s", ""); - fun_to_apply = &fun_big_comp; - XBT_INFO("Benchmark for parmap create+apply+destroy (big comp):"); - bench_all_modes(nthreads, timeout, modes, true); + bench_all_modes(nthreads, timeout, modes, true, &fun_big_comp); XBT_INFO("%s", ""); XBT_INFO("Benchmark for parmap apply only (big comp):"); - bench_all_modes(nthreads, timeout, modes, false); + bench_all_modes(nthreads, timeout, modes, false, &fun_big_comp); XBT_INFO("%s", ""); return EXIT_SUCCESS; diff --git a/teshsuite/xbt/parmap_test/parmap_test.cpp b/teshsuite/xbt/parmap_test/parmap_test.cpp index 30da8b5dd7..3da1c4a467 100644 --- a/teshsuite/xbt/parmap_test/parmap_test.cpp +++ b/teshsuite/xbt/parmap_test/parmap_test.cpp @@ -75,7 +75,7 @@ static int test_parmap_extended(e_xbt_parmap_mode_t mode) parmap.apply(fun_get_id, data); std::sort(begin(a), end(a)); - unsigned count = std::distance(begin(a), std::unique(begin(a), end(a))); + unsigned count = static_cast<unsigned>(std::distance(begin(a), std::unique(begin(a), end(a)))); if (count != num_workers) { XBT_CRITICAL("only %u/%u threads did some work", count, num_workers); ret = 1; diff --git a/tools/cmake/DefinePackages.cmake b/tools/cmake/DefinePackages.cmake index ecfb07e9e2..739760d67f 100644 --- a/tools/cmake/DefinePackages.cmake +++ b/tools/cmake/DefinePackages.cmake @@ -295,6 +295,7 @@ set(XBT_SRC src/xbt/xbt_main.cpp src/xbt/xbt_os_file.cpp src/xbt/xbt_os_time.c + src/xbt/xbt_parse_units.cpp src/xbt/xbt_replay.cpp src/xbt/xbt_str.cpp src/xbt/xbt_virtu.cpp @@ -778,6 +779,7 @@ set(headers_to_install include/xbt/module.h include/xbt/PropertyHolder.hpp include/xbt/parmap.h + include/xbt/parse_units.hpp include/xbt/range.hpp include/xbt/random.hpp include/xbt/replay.hpp @@ -797,7 +799,7 @@ set(source_of_generated_headers src/internal_config.h.in include/smpi/mpif.h.in) -### depend of some variables setted upper +### depend of some variables set upper if(${HAVE_UCONTEXT_CONTEXTS}) #ucontext set(SIMIX_SRC ${SIMIX_SRC} src/kernel/context/ContextUnix.hpp src/kernel/context/ContextUnix.cpp) @@ -894,6 +896,20 @@ set(DOC_SOURCES docs/source/_ext/showfile.css docs/source/_ext/showfile.js docs/source/_ext/showfile.py +
docs/source/_ext/javasphinx/LICENSE + docs/source/_ext/javasphinx/MANIFEST.in + docs/source/_ext/javasphinx/README.md + docs/source/_ext/javasphinx/doc/conf.py + docs/source/_ext/javasphinx/doc/index.rst + docs/source/_ext/javasphinx/javasphinx/__init__.py + docs/source/_ext/javasphinx/javasphinx/apidoc.py + docs/source/_ext/javasphinx/javasphinx/compiler.py + docs/source/_ext/javasphinx/javasphinx/domain.py + docs/source/_ext/javasphinx/javasphinx/extdoc.py + docs/source/_ext/javasphinx/javasphinx/formatter.py + docs/source/_ext/javasphinx/javasphinx/htmlrst.py + docs/source/_ext/javasphinx/javasphinx/util.py + docs/source/_ext/javasphinx/setup.py docs/source/_static/css/custom.css docs/source/_templates/breadcrumbs.html @@ -1010,7 +1026,7 @@ set(CMAKEFILES_TXT examples/s4u/CMakeLists.txt examples/smpi/CMakeLists.txt examples/smpi/NAS/CMakeLists.txt - examples/smpi/smpi_s4u_masterslave/CMakeLists.txt + examples/smpi/smpi_s4u_masterworker/CMakeLists.txt examples/smpi/replay_multiple/CMakeLists.txt examples/smpi/replay_multiple_manual_deploy/CMakeLists.txt examples/python/CMakeLists.txt @@ -1193,7 +1209,6 @@ set(PLATFORMS_EXAMPLES examples/platforms/two_hosts_platform_with_availability_included.xml examples/platforms/two_peers.xml examples/platforms/vivaldi.xml - examples/platforms/wifi_decay_2STA.xml examples/platforms/wifi.xml ) diff --git a/tools/cmake/Flags.cmake b/tools/cmake/Flags.cmake index 7b116d8d36..4cf24372c2 100644 --- a/tools/cmake/Flags.cmake +++ b/tools/cmake/Flags.cmake @@ -198,7 +198,7 @@ endif() set(CMAKE_C_FLAGS "${warnCFLAGS} ${CMAKE_C_FLAGS} ${optCFLAGS}") set(CMAKE_CXX_FLAGS "${warnCXXFLAGS} ${CMAKE_CXX_FLAGS} ${optCFLAGS}") -# Try to make Mac a bit more complient to open source standards +# Try to make Mac a bit more compliant to open source standards if(CMAKE_SYSTEM_NAME MATCHES "Darwin") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_XOPEN_SOURCE=700 -D_DARWIN_C_SOURCE") endif() diff --git a/tools/docker/Dockerfile.build-deps b/tools/docker/Dockerfile.build-deps index 8bade76d7b..afbd2a6d72 100644 --- a/tools/docker/Dockerfile.build-deps +++ b/tools/docker/Dockerfile.build-deps @@ -17,6 +17,6 @@ RUN apt-get --allow-releaseinfo-change update && \ doxygen fig2dev \ chrpath \ libdw-dev libevent-dev libunwind8-dev \ - linkchecker \ python3-sphinx python3-breathe python3-sphinx-rtd-theme - \ No newline at end of file + +# linkchecker \ diff --git a/tools/git-hooks/clang-format.pre-commit b/tools/git-hooks/clang-format.pre-commit index c3e404b63b..ccaa385004 100755 --- a/tools/git-hooks/clang-format.pre-commit +++ b/tools/git-hooks/clang-format.pre-commit @@ -198,7 +198,7 @@ printf "\nThe following differences were found between the code to commit " printf "and the clang-format rules:\n\n" cat "${patch}" -printf "\nYou can apply these changes and readd the files with:\n" +printf "\nYou can apply these changes and read the files with:\n" printf " git apply ${patch} && git apply --cached ${patch}\n" printf "(call this command from the root directory of your repository)\n" printf "\n\n\n" diff --git a/tools/internal/check_dist_archive.exclude b/tools/internal/check_dist_archive.exclude index d3df9da7ae..74872a26a5 100644 --- a/tools/internal/check_dist_archive.exclude +++ b/tools/internal/check_dist_archive.exclude @@ -26,6 +26,8 @@ + contrib/.* ++ \.github/.* + + tools/appveyor-irc-notify\.py + tools/docker/.* + tools/git-hooks/.* diff --git a/tools/internal/eclipse-formating.xml b/tools/internal/eclipse-formating.xml index 330a41ccef..7ce4add88f 100644 --- 
a/tools/internal/eclipse-formating.xml +++ b/tools/internal/eclipse-formating.xml @@ -1,6 +1,6 @@ - diff --git a/tools/jenkins/Coverage.sh b/tools/jenkins/Coverage.sh index 87639b8c22..efbc457535 100755 --- a/tools/jenkins/Coverage.sh +++ b/tools/jenkins/Coverage.sh @@ -14,7 +14,7 @@ die() { pkg_check() { for pkg do - if command -v $pkg + if command -v "$pkg" then echo "$pkg is installed. Good." else @@ -27,7 +27,7 @@ pkg_check xsltproc gcovr ant cover2cover.py ### Cleanup previous runs -! [ -z "$WORKSPACE" ] || die "No WORKSPACE" +[ -n "$WORKSPACE" ] || die "No WORKSPACE" [ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist" do_cleanup() { @@ -46,7 +46,7 @@ do_cleanup "$BUILDFOLDER" NUMPROC="$(nproc)" || NUMPROC=1 -cd $BUILDFOLDER +cd "$BUILDFOLDER" rm -rf java_cov* rm -rf jacoco_cov* rm -rf python_cov* @@ -60,7 +60,7 @@ cmake -Denable_documentation=OFF -Denable_lua=ON \ -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=ON -Denable_model-checking=ON \ -Denable_smpi_papi=ON \ -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=ON \ - -Denable_coverage=ON -DLTO_EXTRA_FLAG="auto" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON $WORKSPACE + -Denable_coverage=ON -DLTO_EXTRA_FLAG="auto" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "$WORKSPACE" #build with sonarqube scanner wrapper /home/ci/build-wrapper-linux-x86/build-wrapper-linux-x86-64 --out-dir bw-outputs make -j$NUMPROC tests @@ -79,34 +79,34 @@ if [ -f Testing/TAG ] ; then i=0 for file in $files do - sourcepath=$( dirname $file ) + sourcepath=$( dirname "$file" ) #convert jacoco reports in xml ones - ant -f $WORKSPACE/tools/jenkins/jacoco.xml -Dexamplesrcdir=$WORKSPACE -Dbuilddir=$BUILDFOLDER/${sourcepath} -Djarfile=$BUILDFOLDER/simgrid.jar -Djacocodir=${JACOCO_PATH}/lib + ant -f "$WORKSPACE"/tools/jenkins/jacoco.xml -Dexamplesrcdir="$WORKSPACE" -Dbuilddir="$BUILDFOLDER"/"${sourcepath}" -Djarfile="$BUILDFOLDER"/simgrid.jar -Djacocodir=${JACOCO_PATH}/lib #convert jacoco xml reports in cobertura xml reports - cover2cover.py $BUILDFOLDER/${sourcepath}/report.xml .. ../src/bindings/java src/bindings/java > $BUILDFOLDER/java_coverage_${i}.xml + cover2cover.py "$BUILDFOLDER"/"${sourcepath}"/report.xml .. ../src/bindings/java src/bindings/java > "$BUILDFOLDER"/java_coverage_${i}.xml #save jacoco xml report as sonar only allows it - mv $BUILDFOLDER/${sourcepath}/report.xml $BUILDFOLDER/jacoco_cov_${i}.xml + mv "$BUILDFOLDER"/"${sourcepath}"/report.xml "$BUILDFOLDER"/jacoco_cov_${i}.xml i=$((i + 1)) done #convert python coverage reports in xml ones - cd $BUILDFOLDER + cd "$BUILDFOLDER" find .. -size +1c -name ".coverage*" -exec mv {} . \; /usr/bin/python3-coverage combine /usr/bin/python3-coverage xml -i -o ./python_coverage.xml - cd $WORKSPACE + cd "$WORKSPACE" #convert all gcov reports to xml cobertura reports - gcovr -r . --xml-pretty -e teshsuite -u -o $BUILDFOLDER/xml_coverage.xml - xsltproc $WORKSPACE/tools/jenkins/ctest2junit.xsl build/Testing/$( head -n 1 < build/Testing/TAG )/Test.xml > CTestResults_memcheck.xml + gcovr -r . 
--xml-pretty -e teshsuite -u -o "$BUILDFOLDER"/xml_coverage.xml + xsltproc "$WORKSPACE"/tools/jenkins/ctest2junit.xsl build/Testing/"$( head -n 1 < build/Testing/TAG )"/Test.xml > CTestResults_memcheck.xml #generate sloccount report - sloccount --duplicates --wide --details $WORKSPACE | grep -v -e '.git' -e 'mpich3-test' -e 'sloccount.sc' -e 'isp/umpire' -e 'build/' -e 'xml_coverage.xml' -e 'CTestResults_memcheck.xml' -e 'DynamicAnalysis.xml' > $WORKSPACE/sloccount.sc + sloccount --duplicates --wide --details "$WORKSPACE" | grep -v -e '.git' -e 'mpich3-test' -e 'sloccount.sc' -e 'isp/umpire' -e 'build/' -e 'xml_coverage.xml' -e 'CTestResults_memcheck.xml' -e 'DynamicAnalysis.xml' > "$WORKSPACE"/sloccount.sc #generate PVS-studio report EXCLUDEDPATH="-e $WORKSPACE/src/include/catch.hpp -e $WORKSPACE/teshsuite/smpi/mpich3-test/ -e $WORKSPACE/teshsuite/smpi/isp/ -e *_dtd.c -e *_dtd.h -e *yy.c -e $WORKSPACE/src/xbt/automaton/ -e $WORKSPACE/src/smpi/colls/ -e $WORKSPACE/examples/smpi/NAS/ -e $WORKSPACE/examples/smpi/gemm/gemm.c -e $WORKSPACE/src/msg/ -e $WORKSPACE/include/msg/ -e $WORKSPACE/examples/deprecated/ -e $WORKSPACE/teshsuite/msg/" - pvs-studio-analyzer analyze -f $BUILDFOLDER/compile_commands.json -o $WORKSPACE/pvs.log $EXCLUDEDPATH -j$NUMPROC + pvs-studio-analyzer analyze -f "$BUILDFOLDER"/compile_commands.json -o "$WORKSPACE"/pvs.log $EXCLUDEDPATH -j$NUMPROC #disable V1042 (copyleft), V521 (commas in catch.hpp) - plog-converter -t xml -o $WORKSPACE/pvs.plog -d V1042,V521 $WORKSPACE/pvs.log + plog-converter -t xml -o "$WORKSPACE"/pvs.plog -d V1042,V521 "$WORKSPACE"/pvs.log fi || exit 42 diff --git a/tools/jenkins/DynamicAnalysis.sh b/tools/jenkins/DynamicAnalysis.sh index e0a87b8d61..f3e2930c0a 100755 --- a/tools/jenkins/DynamicAnalysis.sh +++ b/tools/jenkins/DynamicAnalysis.sh @@ -12,7 +12,7 @@ die() { pkg_check() { for pkg do - if command -v $pkg + if command -v "$pkg" then echo "$pkg is installed. Good." else @@ -25,7 +25,7 @@ pkg_check valgrind pcregrep ### Cleanup previous runs -! 
[ -z "$WORKSPACE" ] || die "No WORKSPACE" +[ -n "$WORKSPACE" ] || die "No WORKSPACE" [ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist" do_cleanup() { @@ -37,14 +37,14 @@ do_cleanup() { fi mkdir "$d" || die "Could not create $d" done - find $WORKSPACE -name "memcheck_test_*.memcheck" -exec rm {} \; + find "$WORKSPACE" -name "memcheck_test_*.memcheck" -exec rm {} \; } do_cleanup "$WORKSPACE/build" "$WORKSPACE/memcheck" NUMPROC="$(nproc)" || NUMPROC=1 -cd $WORKSPACE/build +cd "$WORKSPACE"/build ### Proceed with the tests ctest -D ExperimentalStart || true @@ -53,17 +53,16 @@ cmake -Denable_documentation=OFF -Denable_lua=OFF -Denable_python=OFF \ -Denable_compile_optimizations=OFF -Denable_compile_warnings=ON \ -Denable_jedule=OFF -Denable_mallocators=OFF \ -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_model-checking=OFF \ - -Denable_memcheck_xml=ON -DLTO_EXTRA_FLAG="auto" $WORKSPACE + -Denable_memcheck_xml=ON -DLTO_EXTRA_FLAG="auto" "$WORKSPACE" make -j$NUMPROC tests ctest --no-compress-output -D ExperimentalTest -j$NUMPROC || true -cd $WORKSPACE/build +cd "$WORKSPACE"/build if [ -f Testing/TAG ] ; then - find $WORKSPACE -iname "*.memcheck" -exec mv {} $WORKSPACE/memcheck \; + find "$WORKSPACE" -iname "*.memcheck" -exec mv {} "$WORKSPACE"/memcheck \; #remove all "empty" files - grep -r -L "error>" $WORKSPACE/memcheck | xargs rm -f - mv Testing/$(head -n 1 < Testing/TAG)/Test.xml $WORKSPACE/DynamicAnalysis.xml + grep -r -L "error>" "$WORKSPACE"/memcheck | xargs rm -f + mv Testing/"$(head -n 1 < Testing/TAG)"/Test.xml "$WORKSPACE"/DynamicAnalysis.xml fi - diff --git a/tools/jenkins/Flags.sh b/tools/jenkins/Flags.sh index a76c411d2c..cbc23d4c34 100755 --- a/tools/jenkins/Flags.sh +++ b/tools/jenkins/Flags.sh @@ -22,7 +22,7 @@ onoff() { ### Cleanup previous runs -! [ -z "$WORKSPACE" ] || die "No WORKSPACE" +[ -n "$WORKSPACE" ] || die "No WORKSPACE" [ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist" do_cleanup() { @@ -40,39 +40,39 @@ do_cleanup "$WORKSPACE/build" NUMPROC="$(nproc)" || NUMPROC=1 -cd $WORKSPACE/build +cd "$WORKSPACE"/build #we can't just receive ON or OFF as values as display is bad in the resulting jenkins matrix -if [ $1 = "JAVA" ] +if [ "$1" = "JAVA" ] then buildjava="ON" else buildjava="OFF" fi -if [ $2 = "MC" ] +if [ "$2" = "MC" ] then buildmc="ON" else buildmc="OFF" fi -if [ $3 = "SMPI" ] +if [ "$3" = "SMPI" ] then buildsmpi="ON" else buildsmpi="OFF" fi -if [ $4 = "DEBUG" ] +if [ "$4" = "DEBUG" ] then builddebug="ON" else builddebug="OFF" fi -if [ $5 = "MSG" ] +if [ "$5" = "MSG" ] then buildmsg="ON" else @@ -91,9 +91,7 @@ cmake -Denable_documentation=OFF -Denable_lua=ON -Denable_java=${buildjava} -Den -Denable_jedule=ON -Denable_mallocators=ON -Denable_debug=${builddebug} \ -Denable_smpi=${buildsmpi} -Denable_smpi_MPICH3_testsuite=${buildsmpi} -Denable_model-checking=${buildmc} \ -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=OFF \ - -Denable_ns3=$(onoff test "$buildmc" != "ON") -Denable_coverage=OFF -DLTO_EXTRA_FLAG="auto" $WORKSPACE + -Denable_ns3=$(onoff test "$buildmc" != "ON") -Denable_coverage=OFF -DLTO_EXTRA_FLAG="auto" "$WORKSPACE" make -j$NUMPROC tests make clean - - diff --git a/tools/jenkins/Sanitizers.sh b/tools/jenkins/Sanitizers.sh index 834a295e49..5425f603ae 100755 --- a/tools/jenkins/Sanitizers.sh +++ b/tools/jenkins/Sanitizers.sh @@ -39,7 +39,7 @@ fi pkg_check() { for pkg do - if command -v $pkg + if command -v "$pkg" then echo "$pkg is installed. Good." 
else @@ -52,7 +52,7 @@ pkg_check xsltproc ### Cleanup previous runs -! [ -z "$WORKSPACE" ] || die "No WORKSPACE" +[ -n "$WORKSPACE" ] || die "No WORKSPACE" [ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist" do_cleanup() { @@ -70,7 +70,7 @@ do_cleanup "$WORKSPACE/build" NUMPROC="$(nproc)" || NUMPROC=1 -cd $WORKSPACE/build +cd "$WORKSPACE"/build ctest -D ExperimentalStart || true @@ -79,14 +79,14 @@ cmake -Denable_documentation=OFF -Denable_lua=ON -Denable_java=OFF \ -Denable_jedule=ON -Denable_mallocators=OFF \ -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=ON -Denable_model-checking=OFF \ -Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=ON -Denable_coverage=OFF\ - -Denable_fortran=OFF -Denable_python=OFF -DLTO_EXTRA_FLAG="auto" ${SANITIZER_OPTIONS} $WORKSPACE + -Denable_fortran=OFF -Denable_python=OFF -DLTO_EXTRA_FLAG="auto" ${SANITIZER_OPTIONS} "$WORKSPACE" make -j$NUMPROC tests ctest --no-compress-output -D ExperimentalTest || true if [ -f Testing/TAG ] ; then - xsltproc $WORKSPACE/tools/jenkins/ctest2junit.xsl Testing/$(head -n 1 < Testing/TAG)/Test.xml > CTestResults_${SANITIZER}.xml - mv CTestResults_${SANITIZER}.xml $WORKSPACE + xsltproc "$WORKSPACE"/tools/jenkins/ctest2junit.xsl Testing/"$(head -n 1 < Testing/TAG)"/Test.xml > CTestResults_"${SANITIZER}".xml + mv CTestResults_"${SANITIZER}".xml "$WORKSPACE" fi make clean diff --git a/tools/jenkins/build.sh b/tools/jenkins/build.sh index 12d967ec93..2045041db0 100755 --- a/tools/jenkins/build.sh +++ b/tools/jenkins/build.sh @@ -31,7 +31,7 @@ die () { shift [ $# -gt 0 ] || set -- "Error - Halting" echo "$@" >&2 - exit $status + exit "$status" } # Get an ON/OFF string from a command: @@ -134,13 +134,13 @@ ulimit -c 0 || true echo "XX" echo "XX Get out of the tree" echo "XX" -if [ -d $WORKSPACE/build ] +if [ -d "$WORKSPACE"/build ] then # Windows cannot remove the directory if it's still used by the previous build - rm -rf $WORKSPACE/build || sleep 10 && rm -rf $WORKSPACE/build || sleep 10 && rm -rf $WORKSPACE/build + rm -rf "$WORKSPACE"/build || sleep 10 && rm -rf "$WORKSPACE"/build || sleep 10 && rm -rf "$WORKSPACE"/build fi -mkdir $WORKSPACE/build -cd $WORKSPACE/build +mkdir "$WORKSPACE"/build +cd "$WORKSPACE"/build have_NS3="no" if dpkg -l libns3-dev 2>&1|grep -q "ii libns3-dev" ; then @@ -153,25 +153,25 @@ PATH="$WORKSPACE/build/lib:$PATH" echo "XX" echo "XX Build the archive out of the tree" -echo "XX pwd: "$(pwd) +echo "XX pwd: $(pwd)" echo "XX" -cmake -G"$GENERATOR" -Denable_documentation=OFF $WORKSPACE +cmake -G"$GENERATOR" -Denable_documentation=OFF "$WORKSPACE" make dist -j $NUMBER_OF_PROCESSORS SIMGRID_VERSION=$(cat VERSION) echo "XX" echo "XX Open the resulting archive" echo "XX" -gunzip ${SIMGRID_VERSION}.tar.gz -tar xf ${SIMGRID_VERSION}.tar -mkdir ${WORKSPACE}/build/${SIMGRID_VERSION}/build -cd ${WORKSPACE}/build/${SIMGRID_VERSION}/build +gunzip "${SIMGRID_VERSION}".tar.gz +tar xf "${SIMGRID_VERSION}".tar +mkdir "${WORKSPACE}"/build/"${SIMGRID_VERSION}"/build +cd "${WORKSPACE}"/build/"${SIMGRID_VERSION}"/build SRCFOLDER="${WORKSPACE}/build/${SIMGRID_VERSION}" echo "XX" echo "XX Configure and build SimGrid" -echo "XX pwd: "$(pwd) +echo "XX pwd: $(pwd)" echo "XX" set -x @@ -197,7 +197,7 @@ cmake -G"$GENERATOR" ${INSTALL:+-DCMAKE_INSTALL_PREFIX=$INSTALL} \ -Denable_java=$(onoff test "$build_mode" = "ModelChecker") \ -Denable_msg=$(onoff test "$build_mode" = "ModelChecker") \ -DLTO_EXTRA_FLAG="auto" \ - $SRCFOLDER + "$SRCFOLDER" # -Denable_lua=$(onoff test "$build_mode" != 
"DynamicAnalysis") \ set +x @@ -210,7 +210,7 @@ echo "XX" ctest -T test --output-on-failure --no-compress-output || true -if test -n "$INSTALL" && [ ${branch_name} = "origin/master" ] ; then +if test -n "$INSTALL" && [ "${branch_name}" = "origin/master" ] ; then echo "XX" echo "XX Test done. Install everything since it's a regular build, not on a Windows." echo "XX" diff --git a/tools/jenkins/project_description.sh b/tools/jenkins/project_description.sh index 49ddd11e17..3bfd65d727 100755 --- a/tools/jenkins/project_description.sh +++ b/tools/jenkins/project_description.sh @@ -6,7 +6,7 @@ get_boost(){ then BOOST=$(grep -m 1 "Found Boost:" ./consoleText | sed "s/.*-- Found Boost:.*found suitable version \"\([a-zA-Z0-9\.]*\)\",.*/\1/g") fi - echo $BOOST + echo "$BOOST" } get_compiler(){ @@ -22,17 +22,18 @@ get_cmake(){ } get_ns3(){ - found=$(grep -c "ns-3 found" ./consoleText) - if [ $found != 0 ]; then - echo "✔" - else - echo "" - fi + grep -m 1 "ns-3 found (v3.[0-9]*; incl:" ./consoleText | sed "s/.*-- ns-3 found .v\(3.[0-9]*\); incl:.*/\1/g" +# found=$(grep -c "ns-3 found" ./consoleText) +# if [ "$found" != 0 ]; then +# echo "✔" +# else +# echo "" +# fi } get_python(){ found=$(grep -c "Compile Python bindings .....: ON" ./consoleText) - if [ $found != 0 ]; then + if [ "$found" != 0 ]; then grep -m 1 "Found PythonInterp" ./consoleText| sed "s/.*-- Found PythonInterp.*found suitable version \"\([a-zA-Z0-9\.]*\)\",.*/\1/g" else echo "" @@ -44,7 +45,7 @@ if [ -f consoleText ]; then fi -if [ -z $BUILD_URL ]; then +if [ -z "$BUILD_URL" ]; then BUILD_URL="https://ci.inria.fr/simgrid/job/SimGrid/lastBuild" fi @@ -66,7 +67,7 @@ function compareVersion(v1, v2) { v1[i] = parseInt(v1[i], 10); v2[i] = parseInt(v2[i], 10); if (v1[i] > v2[i]) return 1; - if (v1[i] < v2[i]) return -1; + if (v1[i] < v2[i]) return -1; } return v1.length == v2.length ? 0: (v1.length < v2.length ? 
-1 : 1); } @@ -148,44 +149,44 @@ do color1="" color2="" #in case of success, replace blue by green in status balls - wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DDebug%2Cnode%3D${node} -O status >/dev/null 2>&1 + wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DDebug%2Cnode%3D"${node}" -O status >/dev/null 2>&1 status=$(cat status) - if [ $status == "Success" ]; then + if [ "$status" == "Success" ]; then color1="&color=green" fi rm status statusmc="" - wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DModelChecker%2Cnode%3D${node} -O status >/dev/null 2>&1 + wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DModelChecker%2Cnode%3D"${node}" -O status >/dev/null 2>&1 status=$(cat status) - if [ $status ]; then - if [ $status == "Success" ]; then + if [ "$status" ]; then + if [ "$status" == "Success" ]; then color2="&color=green" fi statusmc="" fi rm status - echo " $node$os$compiler$boost$java$cmake$ns3$py${statusmc}" + echo " $node$os$compiler$boost$java$cmake$ns3$py${statusmc}" rm consoleText done #Travis - get ID of the last jobs with the API BUILD_NUM=$(curl -s 'https://api.travis-ci.org/repos/simgrid/simgrid/builds?limit=1' | grep -o '^\[{"id":[0-9]*,' | grep -o '[0-9]' | tr -d '\n') -BUILDS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/${BUILD_NUM} | grep -o '{"id":[0-9]*,' | grep -o '[0-9]*'| tail -n 3)) -OS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/${BUILD_NUM} | grep -o '"os":"[a-z]*",' | sed 's/"os":"\([a-z]*\)",/\1/g'| tail -n 3)) +BUILDS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/"${BUILD_NUM}" | grep -o '{"id":[0-9]*,' | grep -o '[0-9]*'| tail -n 3)) +OS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/"${BUILD_NUM}" | grep -o '"os":"[a-z]*",' | sed 's/"os":"\([a-z]*\)",/\1/g'| tail -n 3)) for id in "${!BUILDS[@]}" do - wget --quiet https://api.travis-ci.org/v3/job/${BUILDS[$id]}/log.txt -O ./consoleText >/dev/null 2>&1 + wget --quiet https://api.travis-ci.org/v3/job/"${BUILDS[$id]}"/log.txt -O ./consoleText >/dev/null 2>&1 sed -i -e "s/\r//g" ./consoleText - if [ ${OS[$id]} == "linux" ]; then + if [ "${OS[$id]}" == "linux" ]; then node="travis-linux (log)" os="Ubuntu 18.04 bionic" - elif [ ${OS[$id]} == "osx" ]; then + elif [ "${OS[$id]}" == "osx" ]; then node="travis-mac (log)" os="Mac OS X Catalina (10.15) " - elif [ ${OS[$id]} == "windows" ]; then + elif [ "${OS[$id]}" == "windows" ]; then node="travis-windows (log)" os="Windows Server 1809" fi @@ -206,9 +207,9 @@ done #Appveyor - get ID of the last job with the API BUILD_ID=$(curl -s "https://ci.appveyor.com/api/projects/mquinson/simgrid" | grep -o '\[{"jobId":"[a-zA-Z0-9]*",' | sed "s/\[{\"jobId\":\"//" | sed "s/\",//") -wget --quiet https://ci.appveyor.com/api/buildjobs/$BUILD_ID/log -O ./consoleText >/dev/null 2>&1 +wget --quiet https://ci.appveyor.com/api/buildjobs/"$BUILD_ID"/log -O ./consoleText >/dev/null 2>&1 sed -i -e "s/\r//g" ./consoleText -node="appveyor" +node="appveyor" os="Windows Server 2012 - VS2015 + mingw64 5.3.0" boost=$(get_boost) compiler=$(get_compiler) diff --git a/tools/simgrid.supp b/tools/simgrid.supp index 421834b7c2..bb1e4b181b 100644 --- a/tools/simgrid.supp +++ b/tools/simgrid.supp @@ -159,6 +159,14 @@ obj:/usr/lib/x86_64-linux-gnu/libunwind.so.* ... } +{ + ignore unwind cruft + Memcheck:Param + write(buf) + ... 
+ fun:_ULx86_64_step + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.* +} { ignore unwind invalid reads diff --git a/tools/simgrid_convert_TI_traces.py b/tools/simgrid_convert_TI_traces.py index 995d3288b6..d3425f8f90 100755 --- a/tools/simgrid_convert_TI_traces.py +++ b/tools/simgrid_convert_TI_traces.py @@ -10,7 +10,7 @@ IRecv call arbitrarily. This new that includes tags field that links MPI_wait calls to the MPI_ISend or MPI_IRecv associated to this wait. -This script reproduce the old behavior of simgrid because informations are +This script reproduce the old behavior of simgrid because information are missing to add the tags properly. It also lower case all the mpi calls. It takes in input (as argument or in stdin) the trace list file that is only a diff --git a/tools/tesh/IO-bigsize.tesh b/tools/tesh/IO-bigsize.tesh index 926f53c028..a6711cb442 100644 --- a/tools/tesh/IO-bigsize.tesh +++ b/tools/tesh/IO-bigsize.tesh @@ -1,9 +1,9 @@ #!/usr/bin/env tesh # This suite contains two tests: -# The first one uses a very big input (150k) to check whether trucated input do work. +# The first one uses a very big input (150k) to check whether truncated input do work. # The second one uses both a big input and a big output (150k each). # -# This checks whether the non-blocking I/O mess is functionnal. +# This checks whether the non-blocking I/O mess is functional. # p First, a write test diff --git a/tools/tesh/set-output-sort.tesh b/tools/tesh/set-output-sort.tesh index cfff84a4d4..e560f56b0f 100644 --- a/tools/tesh/set-output-sort.tesh +++ b/tools/tesh/set-output-sort.tesh @@ -71,7 +71,7 @@ p This tests whether TESH correctly sorts command output < > c < > d $ ${bindir:=.}/tesh --ignore-jenkins -> Ignore all cruft seen on SimGrid's continous integration servers +> Ignore all cruft seen on SimGrid's continuous integration servers > Test suite from stdin > [(stdin):1] Test sorting and filtering of output > [(stdin):3] true diff --git a/tools/tesh/tesh.py b/tools/tesh/tesh.py index 3abc35ab66..4996630b23 100755 --- a/tools/tesh/tesh.py +++ b/tools/tesh/tesh.py @@ -531,7 +531,7 @@ if __name__ == '__main__': group1.add_argument( '--ignore-jenkins', action='store_true', - help='ignore all cruft generated on SimGrid continous integration servers') + help='ignore all cruft generated on SimGrid continuous integration servers') group1.add_argument('--wrapper', metavar='arg', help='Run each command in the provided wrapper (eg valgrind)') group1.add_argument( '--keep', @@ -545,7 +545,7 @@ if __name__ == '__main__': os.chdir(options.cd) if options.ignore_jenkins: - print("Ignore all cruft seen on SimGrid's continous integration servers") + print("Ignore all cruft seen on SimGrid's continuous integration servers") # Note: regexps should match at the beginning of lines TeshState().ignore_regexps_common = [ re.compile(r"profiling:"),
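Editorial note on the pattern above, not part of the commit: almost every hunk in the tools/jenkins/ scripts applies the same fix, wrapping parameter expansions in double quotes so that values containing spaces stay a single word and empty values do not make test expressions collapse (the kind of unquoted expansion that shellcheck reports as SC2086). A minimal sketch of why it matters, using a hypothetical WORKSPACE path and a stand-in die() helper:

    #!/bin/sh
    # Hypothetical illustration only; the path below is made up.
    die() { echo "$@" >&2; exit 1; }

    WORKSPACE="/var/lib/jenkins/my workspace"    # note the embedded space

    [ -n "$WORKSPACE" ] || die "No WORKSPACE"    # same meaning as the old '! [ -z "$WORKSPACE" ]'

    mkdir -p "$WORKSPACE"/build
    # cd $WORKSPACE/build    # unquoted: word-splits into two arguments, so cd misbehaves
    cd "$WORKSPACE"/build    # quoted: the whole path reaches cd as one argument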