--- /dev/null
+name: SimGrid complete jar file generation
+
+on: [workflow_dispatch]
+
+jobs:
+ build:
+ runs-on: ${{ matrix.config.os }}-latest
+ strategy:
+ matrix:
+ config:
+ - { name: "Windows MingW", os: windows, cc: "gcc", cxx: "g++", generator: "MinGW Makefiles", cmake_extra_options: "-Denable_lto=OFF" }
+ - { name: "Ubuntu gcc", os: ubuntu, cc: "gcc", cxx: "g++", generator: "Unix Makefiles", cmake_extra_options: "-DLTO_EXTRA_FLAG=auto" }
+ - { name: "MacOS clang", os: macos, cc: "clang", cxx: "clang++", generator: "Unix Makefiles", cmake_extra_options: "-DLTO_EXTRA_FLAG=auto" }
+ steps:
+ - uses: actions/checkout@v2
+ # install dependencies
+ - name: Init options
+ run: |
+ echo "::set-env name=CC::${{ matrix.config.cc }}"
+ echo "::set-env name=CXX::${{ matrix.config.cxx }}"
+ - name: Install boost on ubuntu
+ if: matrix.config.os == 'ubuntu'
+ run: sudo apt-get update && sudo apt-get install -yq libboost-dev
+ - name: Install boost on macos
+ if: matrix.config.os == 'macos'
+ run: brew install boost
+ - name: Install boost and gcc on windows
+ if: matrix.config.os == 'windows'
+ run: |
+ Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
+ scoop install gcc --global
+ echo "::set-env name=BOOST_ROOT::$env:BOOST_ROOT_1_72_0"
+ echo "::set-env name=BOOST_INCLUDEDIR::$env:BOOST_ROOT\boost\include"
+ echo "::set-env name=BOOST_LIBRARYDIR::$env:BOOST_ROOT\lib"
+ - name: Build jar with Cmake
+ run: |
+ mkdir build
+ cd build
+ cmake -Denable_documentation=OFF -Denable_java=ON -Denable_msg=ON -Denable_lib_in_jar=ON -Dminimal-bindings=ON -Denable_compile_optimizations=ON -Denable_smpi=OFF ${{ matrix.config.cmake_extra_options }} -G "${{ matrix.config.generator }}" ..
+ make -j2 simgrid-java_jar
+ - name: Upload jar
+ uses: actions/upload-artifact@v2
+ with:
+ name: jar-${{ matrix.config.os }}
+ path: build/simgrid.jar
+
+ create_jar:
+ needs: build
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Download all jars from ubuntu
+ uses: actions/download-artifact@v2
+ - name: Build final jar
+ run: |
+ patch=$(grep -r set\(SIMGRID_VERSION_PATCH ./CMakeLists.txt | sed 's/.*"\([[:digit:]]\+\)".*/\1/g')
+ major=$(grep -r set\(SIMGRID_VERSION_MAJOR ./CMakeLists.txt | sed 's/.*"\([[:digit:]]\+\)".*/\1/g')
+ minor=$(grep -r set\(SIMGRID_VERSION_MINOR ./CMakeLists.txt | sed 's/.*"\([[:digit:]]\+\)".*/\1/g')
+ if [ $patch -ne 0 ]; then
+ version="$major.$minor.$patch"
+ else
+ version="$major.$minor"
+ fi
+ mkdir content
+ cd content
+ for j in ubuntu macos windows ; do unzip -n ../jar-$j/simgrid.jar ; done
+ strip NATIVE/*/*/*.so
+ x86_64-linux-gnu-strip NATIVE/*/*/lib*dll
+ zip -r ../simgrid-${version}.jar *
+ - name: Upload jar
+ uses: actions/upload-artifact@v2
+ with:
+ name: jar-final
+ path: simgrid-*.jar
+ - name: cleanup artifacts
+ uses: geekyeggo/delete-artifact@v1
+ with:
+ name: |
+ jar-ubuntu
+ jar-windows
+ jar-macos
examples/smpi/replay_multiple_manual_deploy/workload_nojob
examples/smpi/replay/one_trace
examples/smpi/replay/smpi_replay
-examples/smpi/smpi_msg_masterslave/masterslave_mailbox_smpi
+examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi
examples/smpi/trace_call_location/smpi_trace_call_location
examples/smpi/trace_simple/smpi_trace_simple
examples/smpi/trace/smpi_trace
stage: deploy
script:
- pip3 install --requirement docs/requirements.txt
- - cd docs
+ - cd docs/source/_ext/javasphinx; python3 setup.py build; python3 setup.py install
+ - cd ../../..
- LC_ALL=C.UTF-8 ./Build.sh
- mv build/html ../public
# - The CSS contains a reference to a font or something, not something we gonna fix on our side
# - The javasphinx output does not exist in the git, so the "edit on FramaGit" link is broken.
# I'd like to report this as a bug, but javasphinx seems abandonned upstream.
- - linkchecker --ignore-url='.*\.css$' --ignore-url=public/java/org ../public
+#not installed - linkchecker --ignore-url='.*\.css$' --ignore-url=public/java/org ../public
# From time to time, we should check external links with the
# following, but it has a lot of false positive
# - linkchecker --ignore-url='.*\.css$' --ignore-url=public/java/org --check-extern ../public
unset(CMAKE_INCLUDE_WIN)
endif()
-# library dependency cannot start with a space (CMP0004), so initialize it with something that is never desactivated.
+# library dependency cannot start with a space (CMP0004), so initialize it with something that is never deactivated.
set(SIMGRID_DEP "-lm")
### Determine the assembly flavor that we need today
### SMPI script used when simgrid is installed
set(exec_prefix ${CMAKE_INSTALL_PREFIX})
-set(includeflag "-I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR} -I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/smpi")
set(includedir "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}")
set(libdir "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
-set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${CMAKE_INSTALL_LIBDIR}")
+set(includeflag "-I${includedir} -I${includedir}/smpi")
+set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${libdir}")
if(NS3_LIBRARY_PATH)
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}")
endif()
### SMPI scripts used when compiling simgrid
set(exec_prefix "${CMAKE_BINARY_DIR}/smpi_script/")
-set(includeflag "-I${CMAKE_HOME_DIRECTORY}/include -I${CMAKE_HOME_DIRECTORY}/include/smpi")
-set(includeflag "${includeflag} -I${CMAKE_BINARY_DIR}/include -I${CMAKE_BINARY_DIR}/include/smpi")
set(includedir "${CMAKE_HOME_DIRECTORY}/include")
set(libdir "${CMAKE_BINARY_DIR}/lib")
-set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${CMAKE_BINARY_DIR}/lib")
+set(includeflag "-I${includedir} -I${includedir}/smpi")
+set(includeflag "${includeflag} -I${CMAKE_BINARY_DIR}/include -I${CMAKE_BINARY_DIR}/include/smpi")
+set(CMAKE_SMPI_COMMAND "export LD_LIBRARY_PATH=\"${libdir}")
if(NS3_LIBRARY_PATH)
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}")
endif()
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}\"")
-set(SMPIMAIN ${CMAKE_BINARY_DIR}/lib/simgrid/smpimain)
-set(SMPIREPLAYMAIN ${CMAKE_BINARY_DIR}/lib/simgrid/smpireplaymain)
+set(SMPIMAIN ${libdir}/simgrid/smpimain)
+set(SMPIREPLAYMAIN ${libdir}/simgrid/smpireplaymain)
foreach(script cc cxx ff f90 run)
configure_file(${CMAKE_HOME_DIRECTORY}/src/smpi/smpi${script}.in ${CMAKE_BINARY_DIR}/smpi_script/bin/smpi${script} @ONLY)
- GH#128: Parallelization of simulation with --cfg=contexts/nthreads
- GH#139: Allow pthread creation in SMPI
- GH#336: Packet-level simulation using SMPI?
+ - GH#346: [SMPI] error while loading shared libraries: libsimgrid.so
- GH!337: Fix link_energy plugin for wifi platforms
- GH!339: Add Mailbox set_receiver method to python binding
include examples/c/app-pingpong/app-pingpong_d.xml
include examples/c/app-token-ring/app-token-ring.c
include examples/c/app-token-ring/app-token-ring.tesh
-include examples/c/async-wait/async-wait.c
-include examples/c/async-wait/async-wait.tesh
-include examples/c/async-wait/async-wait2_d.xml
-include examples/c/async-wait/async-wait3_d.xml
-include examples/c/async-wait/async-wait4_d.xml
-include examples/c/async-wait/async-wait_d.xml
-include examples/c/async-waitall/async-waitall.c
-include examples/c/async-waitall/async-waitall.tesh
-include examples/c/async-waitall/async-waitall_d.xml
-include examples/c/async-waitany/async-waitany.c
-include examples/c/async-waitany/async-waitany.tesh
-include examples/c/async-waitany/async-waitany_d.xml
include examples/c/cloud-capping/cloud-capping.c
include examples/c/cloud-capping/cloud-capping.tesh
include examples/c/cloud-masterworker/cloud-masterworker.c
include examples/c/cloud-migration/cloud-migration.tesh
include examples/c/cloud-simple/cloud-simple.c
include examples/c/cloud-simple/cloud-simple.tesh
+include examples/c/comm-wait/comm-wait.c
+include examples/c/comm-wait/comm-wait.tesh
+include examples/c/comm-wait/comm-wait2_d.xml
+include examples/c/comm-wait/comm-wait3_d.xml
+include examples/c/comm-wait/comm-wait4_d.xml
+include examples/c/comm-wait/comm-wait_d.xml
+include examples/c/comm-waitall/comm-waitall.c
+include examples/c/comm-waitall/comm-waitall.tesh
+include examples/c/comm-waitall/comm-waitall_d.xml
+include examples/c/comm-waitany/comm-waitany.c
+include examples/c/comm-waitany/comm-waitany.tesh
+include examples/c/comm-waitany/comm-waitany_d.xml
include examples/c/dht-kademlia/answer.c
include examples/c/dht-kademlia/answer.h
include examples/c/dht-kademlia/common.h
include examples/python/actor-suspend/actor-suspend.tesh
include examples/python/actor-yield/actor-yield.py
include examples/python/actor-yield/actor-yield.tesh
-include examples/python/async-wait/async-wait.py
-include examples/python/async-wait/async-wait.tesh
-include examples/python/async-waitall/async-waitall.py
-include examples/python/async-waitall/async-waitall.tesh
-include examples/python/async-waitany/async-waitany.py
-include examples/python/async-waitany/async-waitany.tesh
+include examples/python/comm-wait/comm-wait.py
+include examples/python/comm-wait/comm-wait.tesh
+include examples/python/comm-waitall/comm-waitall.py
+include examples/python/comm-waitall/comm-waitall.tesh
+include examples/python/comm-waitany/comm-waitany.py
+include examples/python/comm-waitany/comm-waitany.tesh
include examples/python/exec-async/exec-async.py
include examples/python/exec-async/exec-async.tesh
include examples/python/exec-basic/exec-basic.py
include examples/s4u/app-pingpong/simix-breakpoint.tesh
include examples/s4u/app-token-ring/s4u-app-token-ring.cpp
include examples/s4u/app-token-ring/s4u-app-token-ring.tesh
-include examples/s4u/async-ready/s4u-async-ready.cpp
-include examples/s4u/async-ready/s4u-async-ready.tesh
-include examples/s4u/async-ready/s4u-async-ready_d.xml
-include examples/s4u/async-wait/s4u-async-wait.cpp
-include examples/s4u/async-wait/s4u-async-wait.tesh
-include examples/s4u/async-wait/s4u-async-wait_d.xml
-include examples/s4u/async-waitall/s4u-async-waitall.cpp
-include examples/s4u/async-waitall/s4u-async-waitall.tesh
-include examples/s4u/async-waitall/s4u-async-waitall_d.xml
-include examples/s4u/async-waitany/s4u-async-waitany.cpp
-include examples/s4u/async-waitany/s4u-async-waitany.tesh
-include examples/s4u/async-waitany/s4u-async-waitany_d.xml
-include examples/s4u/async-waituntil/s4u-async-waituntil.cpp
-include examples/s4u/async-waituntil/s4u-async-waituntil.tesh
-include examples/s4u/async-waituntil/s4u-async-waituntil_d.xml
include examples/s4u/cloud-capping/s4u-cloud-capping.cpp
include examples/s4u/cloud-capping/s4u-cloud-capping.tesh
include examples/s4u/cloud-migration/s4u-cloud-migration.cpp
include examples/s4u/cloud-simple/s4u-cloud-simple.tesh
include examples/s4u/comm-dependent/s4u-comm-dependent.cpp
include examples/s4u/comm-dependent/s4u-comm-dependent.tesh
+include examples/s4u/comm-ready/s4u-comm-ready.cpp
+include examples/s4u/comm-ready/s4u-comm-ready.tesh
+include examples/s4u/comm-ready/s4u-comm-ready_d.xml
+include examples/s4u/comm-suspend/s4u-comm-suspend.cpp
+include examples/s4u/comm-suspend/s4u-comm-suspend.tesh
+include examples/s4u/comm-suspend/s4u-comm-suspend_d.xml
+include examples/s4u/comm-wait/s4u-comm-wait.cpp
+include examples/s4u/comm-wait/s4u-comm-wait.tesh
+include examples/s4u/comm-wait/s4u-comm-wait_d.xml
+include examples/s4u/comm-waitall/s4u-comm-waitall.cpp
+include examples/s4u/comm-waitall/s4u-comm-waitall.tesh
+include examples/s4u/comm-waitall/s4u-comm-waitall_d.xml
+include examples/s4u/comm-waitany/s4u-comm-waitany.cpp
+include examples/s4u/comm-waitany/s4u-comm-waitany.tesh
+include examples/s4u/comm-waitany/s4u-comm-waitany_d.xml
+include examples/s4u/comm-waituntil/s4u-comm-waituntil.cpp
+include examples/s4u/comm-waituntil/s4u-comm-waituntil.tesh
+include examples/s4u/comm-waituntil/s4u-comm-waituntil_d.xml
include examples/s4u/dht-chord/s4u-dht-chord-node.cpp
include examples/s4u/dht-chord/s4u-dht-chord.cpp
include examples/s4u/dht-chord/s4u-dht-chord.hpp
include examples/s4u/network-ns3/onelink_d.xml
include examples/s4u/network-ns3/s4u-network-ns3.cpp
include examples/s4u/network-ns3/s4u-network-ns3.tesh
+include examples/s4u/network-wifi/s4u-network-wifi.cpp
+include examples/s4u/network-wifi/s4u-network-wifi.tesh
include examples/s4u/platform-failures/s4u-platform-failures.cpp
include examples/s4u/platform-failures/s4u-platform-failures.tesh
include examples/s4u/platform-failures/s4u-platform-failures_d.xml
include examples/smpi/replay_multiple_manual_deploy/workload_compute_simple
include examples/smpi/replay_multiple_manual_deploy/workload_mixed2_same_time
include examples/smpi/replay_multiple_manual_deploy/workload_mixed2_same_time_and_resources
-include examples/smpi/smpi_s4u_masterslave/deployment_masterslave_mailbox_smpi.xml
-include examples/smpi/smpi_s4u_masterslave/masterslave_mailbox_smpi.cpp
-include examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh
+include examples/smpi/smpi_s4u_masterworker/deployment_masterworker_mailbox_smpi.xml
+include examples/smpi/smpi_s4u_masterworker/masterworker_mailbox_smpi.cpp
+include examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh
include examples/smpi/trace/trace.c
include examples/smpi/trace/trace.tesh
include examples/smpi/trace_call_location/adjust
include docs/source/Tutorial_MPI_Applications.rst
include docs/source/XML_Reference.rst
include docs/source/_ext/autodoxy.py
+include docs/source/_ext/javasphinx/LICENSE
+include docs/source/_ext/javasphinx/MANIFEST.in
+include docs/source/_ext/javasphinx/README.md
+include docs/source/_ext/javasphinx/doc/conf.py
+include docs/source/_ext/javasphinx/doc/index.rst
+include docs/source/_ext/javasphinx/javasphinx/__init__.py
+include docs/source/_ext/javasphinx/javasphinx/apidoc.py
+include docs/source/_ext/javasphinx/javasphinx/compiler.py
+include docs/source/_ext/javasphinx/javasphinx/domain.py
+include docs/source/_ext/javasphinx/javasphinx/extdoc.py
+include docs/source/_ext/javasphinx/javasphinx/formatter.py
+include docs/source/_ext/javasphinx/javasphinx/htmlrst.py
+include docs/source/_ext/javasphinx/javasphinx/util.py
+include docs/source/_ext/javasphinx/setup.py
include docs/source/_ext/showfile.css
include docs/source/_ext/showfile.js
include docs/source/_ext/showfile.py
include examples/platforms/two_peers.xml
include examples/platforms/vivaldi.xml
include examples/platforms/wifi.xml
-include examples/platforms/wifi_decay_2STA.xml
include examples/python/CMakeLists.txt
include examples/python/actor-create/actor-create_d.xml
include examples/python/actor-lifetime/actor-lifetime_d.xml
-include examples/python/async-wait/async-wait_d.xml
-include examples/python/async-waitall/async-waitall_d.xml
-include examples/python/async-waitany/async-waitany_d.xml
+include examples/python/comm-wait/comm-wait_d.xml
+include examples/python/comm-waitall/comm-waitall_d.xml
+include examples/python/comm-waitany/comm-waitany_d.xml
include examples/s4u/CMakeLists.txt
include examples/smpi/CMakeLists.txt
include examples/smpi/NAS/CMakeLists.txt
include examples/smpi/mc/only_send_deterministic.tesh
include examples/smpi/replay_multiple/CMakeLists.txt
include examples/smpi/replay_multiple_manual_deploy/CMakeLists.txt
-include examples/smpi/smpi_s4u_masterslave/CMakeLists.txt
+include examples/smpi/smpi_s4u_masterworker/CMakeLists.txt
include include/simgrid/Exception.hpp
include include/simgrid/actor.h
include include/simgrid/barrier.h
include include/xbt/misc.h
include include/xbt/module.h
include include/xbt/parmap.h
+include include/xbt/parse_units.hpp
include include/xbt/random.hpp
include include/xbt/range.hpp
include include/xbt/replay.hpp
include src/xbt/xbt_main.cpp
include src/xbt/xbt_os_file.cpp
include src/xbt/xbt_os_time.c
+include src/xbt/xbt_parse_units.cpp
include src/xbt/xbt_replay.cpp
include src/xbt/xbt_str.cpp
include src/xbt/xbt_str_test.cpp
\_/ \___|_| |___/_|\___/|_| |_| |____(_)_____|\___/
(unreleased)
+ * SMPI: improved support of the proxy apps (including those using petsc)
_ _____ ____ ____
__ _____ _ __ ___(_) ___ _ __ |___ / |___ \| ___|
\ \ / / _ \ '__/ __| |/ _ \| '_ \ |_ \ __) |___ \
##'
##' Used to add raw data or summary statistics to the plot of a distribution.
##' The height of Y is arbitrary, and can be set to optimize visualization.
-##' If SE estimates are available, tehse wil be plotted
+##' If SE estimates are available, these will be plotted
##' @name plot.data
##' @title Add data to plot
##' @param trait.data data to be plotted
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
+# manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
You are at the right place... To understand what you can do or
cannot do with SimGrid, you should read the
<a href="https://simgrid.org/tutorials.html">tutorial
-slides</a> from the SimGrid's website. You may find more uptodate
+slides</a> from the SimGrid's website. You may find more up-to-date
material on the
<a href="http://people.irisa.fr/Martin.Quinson/blog/SimGrid/">blog of
Martin Quinson</a>.
msg_synchro (in Java, only semaphores are available). But actually,
many synchronization patterns can be encoded with communication on
mailboxes. Typically, if you need one process to notify another one,
-you could use a condition variable or a semphore, but sending a
+you could use a condition variable or a semaphore, but sending a
message to a specific mailbox does the trick in most cases.
@subsubsection faq_MIA_communication_time How can I get the *real* communication time?
m_task_t task = MSG_task_create("Task", task_comp_size, task_comm_size,
calloc(1,sizeof(double)));
*((double*) task->data) = MSG_get_clock();
- MSG_task_put(task, slaves[i % slaves_count], PORT_22);
+ MSG_task_put(task, workers[i % workers_count], PORT_22);
XBT_INFO("Send completed");
return 0;
}
This page describes the software infrastructure behind the SimGrid
project. This is not the components' organisation (described in @ref
-uhood_arch) but informations on how to extend the framework, how the
-automatic tests are run, and so on. These informations are split on
+uhood_arch) but information on how to extend the framework, how the
+automatic tests are run, and so on. This information is split on
several pages, as follows:
- @ref uhood_tech_inside
@endverbatim
This will add an extra verification before integrating any commit that
-you could prepare. If your code does not respects our formating code,
+you could prepare. If your code does not respect our formatting rules,
git will say so, and provide a ready to use patch that you can apply
to improve your commit. Just carefully read the error message you get
-to find the exact command with git-apply to fix your formating.
+to find the exact command with git-apply to fix your formatting.
If you find that for a specific commit, the formatter does a very bad
job, then add --no-verify to your git commit command line.
For instance, if you want to add a new cup model called `Plop`, create two files
cpu_plop.hpp and cpu_plop_cpp which contains classes CpuPlopModel, CpuPlop and
-CpuPlopAction implementating respectively the interfaces CpuModel, Cpu and
+CpuPlopAction implementing respectively the interfaces CpuModel, Cpu and
CpuAction. You also need to define a initializing function like this:
~~~~
}
~~~~
-Then you need to add an entry in surf_interface.cpp refering to your
+Then you need to add an entry in surf_interface.cpp referring to your
initialization function.
~~~~
- If the simcall is not marked as "blocking" in its definition,
call `ActorImpl::simcall_answer()` that adds back the issuer
process to the list of processes to run in the next scheduling round.
- It is thus the responsability of the blocking simcalls to call
+ It is thus the responsibility of the blocking simcalls to call
`ActorImpl::simcall_answer()` themselves in their handler.
Note that empty HANDLERs can be omitted. These functions usually do
some parameter checking, or retrieve some information about the
simcall issuer, but when there no need for such things, the handler
-can be omited. In that case, we directly call the function
+can be omitted. In that case, we directly call the function
`simcall_<name>(<args>)`.
To simplify the simcall creation, a python script generates most of
For simcalls which might block, `kernel_sync()` can be used. It takes a
C++ callback and executes it immediately in maestro. This C++ callback is
expected to return a `simgrid::kernel::Future<T>` reprensenting the operation
-in the kernal. When the operations completes, the user process is waken up
+in the kernel. When the operation completes, the user process is woken up
with the result:
~~~
- 3.X.Y where Y is odd: git current status between two releases
- No expectations on such versions
- Example
- - 3.22.4: unannounced/losely documented stable release
+ - 3.22.4: unannounced/loosely documented stable release
- 3.22.5: git status somewhere between the release of 3.22.4 and the next one
- 3.23: Documented and announced stable release
and you should strive to make them as fast as possible, to not bother
the other developers. Do not hesitate to stress test your code, but
make sure that it runs reasonably fast, or nobody will run "ctest"
-before commiting code.
+before committing code.
@section inside_tests_add_integration Adding integration tests
details.@n
Tesh is sometimes annoying as you have to ensure that the expected
output will always be exactly the same. In particular, your should
- not output machine dependent informations such as absolute data
- path, nor memory adresses as they would change on each run. Several
+ not output machine dependent information such as absolute data
+ path, nor memory addresses as they would change on each run. Several
steps can be used here, such as the obfucation of the memory
- adresses unless the verbose logs are displayed (using the
+ addresses unless the verbose logs are displayed (using the
#XBT_LOG_ISENABLED() macro), or the modification of the log formats
to hide the timings when they depend on the host machine.@n
The script located in <project/directory>/tools/tesh/generate_tesh can
As usual, you must run "make distcheck" after modifying the cmake files,
to ensure that you did not forget any files in the distributed archive.
-@section inside_tests_ci Continous Integration
+@section inside_tests_ci Continuous Integration
We use several systems to automatically test SimGrid with a large set
of parameters, across as many platforms as possible.
in a global and compact way instead. This eases the modeling of very
large systems, and allows highly optimized datastructures and
algorithms in the simulator. The proposed description mechanism is
-thus much more convinient and efficient. In addition, it is more
+thus much more convenient and efficient. In addition, it is more
expressive than the classical solution based on forwarding tables on
each host and router.
@defgroup SURF_API SURF
@brief Internal kernel of all the simulators used in SimGrid, and associated models.
-SURF provides the core functionnalities to simulate a virtual
+SURF provides the core functionalities to simulate a virtual
platform. It is very low-level and is not intended to be used by end
users, but rather to serve as a basis for higher-level simulators. Its
interfaces are not frozen (and probably never will be), and the
@defgroup SD_API SimDag: Legacy handling of DAG algorithms
@brief Programming environment for DAG applications
-SimDag provides functionnalities to simulate parallel task scheduling
+SimDag provides functionalities to simulate parallel task scheduling
arranged in DAGs (Direct Acyclic Graphs). Only centralized algorithms
can be expressed with SimDag; consider using @ref MSG_API "MSG" for
distributed algorithms).
-SimDag is the oldest interface in SimGrid, even if it was temporarly
+SimDag is the oldest interface in SimGrid, even if it was temporarily
removed when the new superfast kernel was added in SimGrid v3.0. It
will certainly be deprecated by future releases of the S4U API, when
inter-activity dependencies are added.
/** @defgroup XBT_ex Exception support (C++) */
/** @defgroup XBT_ex_c Exception support (C) */
/** @defgroup XBT_log Logging support */
- /** @defgroup XBT_error Assert macro familly */
+ /** @defgroup XBT_error Assert macro family */
/** @defgroup XBT_config Configuration support */
/** @defgroup XBT_mallocator Mallocators */
/** @} */
displayed by setting a threshold to each category through the
<tt>threshold</tt> keyword.
-For example, @verbatim --log=root.threshold:debug@endverbatim will make
+For example, @verbatim --log=root.thresh:debug@endverbatim will make
SimGrid <b>extremely</b> verbose while @verbatim
--log=root.thres:critical@endverbatim should shut it almost
completely off.
all the following notations have the same result.
@verbatim
--log=root.threshold:debug
---log=root.threshol:debug
+--log=root.threshold:debug
--log=root.thresho:debug
--log=root.thresh:debug
--log=root.thres:debug
- trace: enter and return of some functions
- debug: crufty output
- verbose: verbose output for the user wanting more
- - info: output about the regular functionning
+ - info: output about the regular functioning
- warning: minor issue encountered
- error: issue encountered
- critical: major issue encountered
is set to ``Cluster``.
The ``<cabinet />`` tag is, like the @ref pf_tag_cluster "<cluster>" tag,
-a meta-tag. This means that it is simply a shortcut for creating a set of (homogenous) hosts and links quickly;
+a meta-tag. This means that it is simply a shortcut for creating a set of (homogeneous) hosts and links quickly;
unsurprisingly, this tag was introduced to setup cabinets in data centers quickly. Unlike
<cluster>, however, the <cabinet> assumes that you create the backbone
and routers yourself; see our examples below.
etc.
-@subsection pf_ne Network equipments
+@subsection pf_ne Network equipment
There are two tags at all times available to represent network entities and
several other tags that are available only in certain contexts.
The purpose of this entity is to define a route between two
NetZones. Recall that all zones form a tree, so to connect two
-sibiling zones, you must give such a zoneRoute specifying the source
+sibling zones, you must give such a zoneRoute specifying the source
and destination zones, along with the gateway in each zone (ie, the
point to reach within that zone to reach the netzone), and the list of
links in the ancestor zone to go from one zone to another.
| Attribute name | Mandatory | Values | Description |
| --------------- | --------- | ---------------------- | ----------- |
| id | yes | String | Identifier of this trace; this is the name you pass on to @c trace_connect. |
-| file | no | String | Filename of the file that contains the information - the path must follow the style of your OS. You can omit this, but then you must specifiy the values inside of <trace> and </trace> - see the example below. |
+| file | no | String | Filename of the file that contains the information - the path must follow the style of your OS. You can omit this, but then you must specify the values inside of <trace> and </trace> - see the example below. |
| trace_periodicity | yes | String | This is the same as for @ref pf_tag_host "hosts" (see there for details) |
Here is an example of trace when no file name is provided:
- the Maestro object and the corresponding S4U object have the same lifetime
(and share the same reference count).
-The ability to manipulate thge objects thought pointers and have the ability
+The ability to manipulate the objects through pointers and have the ability
to use explicit reference count management is useful for creating C wrappers
to the S4U and should play nicely with other language bindings (such as
SWIG-based ones).
Some objects currently live for the whole duration of the simulation and do
-not have refertence counts. We still provide dummy `intrusive_ptr_add_ref(p)`,
+not have reference counts. We still provide dummy `intrusive_ptr_add_ref(p)`,
`intrusive_ptr_release(p)` and `FooPtr` for consistency.
In many cases, we try to have a API which is consistent with the API or
- the SimGrid model-checker (`simgrid-mc`) itself lives in the parent process;
- - it spaws a child process for the SimGrid simulator/maestro and the simulated
+ - it spawns a child process for the SimGrid simulator/maestro and the simulated
processes.
They communicate using a `AF_UNIX` `SOCK_SEQPACKET` socket and exchange messages
- the model-cheker `ptrace()`s the model-checked process and is thus able to
know the state of the model-checked process if it crashes;
-- DWARF debug informations are used to unwind the stack and identify local
+- DWARF debug information is used to unwind the stack and identify local
variables;
- a custom heap is enabled in the model-checked process which allows the model
[ELF](http://refspecs.linuxbase.org/elf/elf.pdf) is a standard executable file
and dynamic libraries file format.
-[DWARF](http://dwarfstd.org/) is a standard for debug informations.
+[DWARF](http://dwarfstd.org/) is a standard for debug information.
Both are used on GNU/Linux systems and exploited by the model-checker to
understand the model-checked process:
- - `ObjectInformation` represents the informations about a given ELF module
+ - `ObjectInformation` represents the information about a given ELF module
(executable or shared-object);
- `Frame` represents a subprogram scope (either a subprogram or a scope within
This page presents the current code organization, as you will see it
if you dig into the src/ directory. <b>But things will change during
the current Gran Refactoring leading to SimGrid 4</b>. So take the
-information on this page with a grain of salt, and don't be affraid if
+information on this page with a grain of salt, and don't be afraid if
things are not exactly as documented here.
At some point, we at least extend this page to present the overall
@section ug_overview Overview of the toolkit components
-@subsection ug_overview_envs Programing environments layer
+@subsection ug_overview_envs Programming environments layer
SimGrid provides several programming environments built on top of a unique
simulation kernel. Each environment targets a specific audience and
# Python needs to find simgrid on my machine, but not ctest -- sorry for the hack
if [ -e /opt/simgrid ] ; then chmod +x /opt/simgrid; fi
-set -e
+set -ex
set -o pipefail
if [ "x$1" != 'xdoxy' ] && [ -e build/xml ] ; then
echo "javasphinx not rerun: 'java' was not provided as an argument"
else
rm -rf source/java
- javasphinx-apidoc --force -o source/java/ ../src/bindings/java/org/simgrid/msg
+
+ # Use that script without installing javasphinx: javasphinx-apidoc --force -o source/java/ ../src/bindings/java/org/simgrid/msg
+ PYTHONPATH=${PYTHONPATH}:source/_ext/javasphinx python3 - --force -o source/java/ ../src/bindings/java/org/simgrid/msg <<EOF
+import re
+import sys
+from javasphinx.apidoc import main
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
+EOF
+
rm -f source/java/packages.rst # api_generated/source_java_packages.rst
rm -f source/java/org/simgrid/msg/package-index.rst # api_generated/source_java_org_simgrid_msg_package-index.rst
for f in source/java/org/simgrid/msg/* ; do
echo "javasphinx relaunched"
fi
-PYTHONPATH=../lib sphinx-build -M html source build ${SPHINXOPTS} 2>&1 \
- | grep -v 'WARNING: cpp:identifier reference target not found: simgrid$' \
- | grep -v 'WARNING: cpp:identifier reference target not found: simgrid::s4u$' \
- | grep -v 'WARNING: cpp:identifier reference target not found: boost'
+PYTHONPATH=../lib:source/_ext/javasphinx sphinx-build -M html source build ${SPHINXOPTS} 2>&1
set +x
This file lists the symbols ignored in the documentation.
-It follows the RST syntact but is completely ignored by sphinx.
+It follows the RST syntax but is completely ignored by sphinx.
It is only used by find-missing, that will not report any definition linked here as missing.
# These ones trigger a bug in autodoxy, that get confused with the const in the function parameter
! output sort 19
$ ./some_simgrid_simulator --log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n
-This approach may seem surprizing at the first glance but it does its job:
+This approach may seem surprising at first glance, but it does its job:
=over 4
=head2 Ignoring some output
-Some outputed lines can be ignored by setting the ignore command followed
+Some outputted lines can be ignored by setting the ignore command followed
by a regular expression:
! ignore .*0x[0-9A-F]+\.
=head2 Colored and formatted text
-Tesh removes ANSI/VT100 control sequences from outputed text to make easier the writing of tests.
+Tesh removes ANSI/VT100 control sequences from outputted text to make easier the writing of tests.
$ printf "I \033[0;31mlove\033[0m tesh\n"
> I love tesh
breathe
-sphinx>=1.8.0
+sphinx>=3.2.1
sphinx_rtd_theme
-sphinx_tabs
-javasphinx
+# sphinx_tabs v1.2.1 is required for Sphinx 2
+sphinx_tabs>=1.2.1
int main(int argc, char *argv[]) {
simgrid::s4u::Engine e(&argc, argv);
- e.set_config("Item:Value");
+ simgrid::s4u::Engine::set_config("Item:Value");
// Rest of your code
}
It is possible to specify that messages below a certain size (in bytes) will be
sent as soon as the call to MPI_Send is issued, without waiting for
-the correspondant receive. This threshold can be configured through
+the corresponding receive. This threshold can be configured through
the ``smpi/async-small-thresh`` item. The default value is 0. This
behavior can also be manually set for mailboxes, by setting the
receiving mode of the mailbox with a call to
.. _cfg=storage/max_file_descriptors:
-File Descriptor Cound per Host
+File Descriptor Count per Host
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Option** ``storage/max_file_descriptors`` **Default:** 1024
If you want to specify liveness properties, you have to pass them on
the command line, specifying the name of the file containing the
property, as formatted by the `ltl2ba <https://github.com/utwente-fmt/ltl2ba>`_ program.
-Note that ltl2ba is not part of SimGrid and must be installed separatly.
+Note that ltl2ba is not part of SimGrid and must be installed separately.
.. code-block:: shell
Currently, if the path is of the form ``X;Y;Z``, each number denotes
the actor's pid that is selected at each indecision point. If it's of
the form ``X/a;Y/b``, the X and Y are the selected pids while the a
-and b are the return values of their simcalls. In the previouse
+and b are the return values of their simcalls. In the previous
example, ``1/3;1/4``, you can see from the full output that the actor
1 is doing MC_RANDOM simcalls, so the 3 and 4 simply denote the values
that these simcall return.
this code, and create an execution task within the simulator to take
this into account. For that, the actual duration is measured on the
host machine and then scaled to the power of the corresponding
-simulated machine. The variable ``smpi/host-speed`` allows one to specify
-the computational speed of the host machine (in flop/s) to use when
-scaling the execution times. It defaults to 20000, but you really want
-to adjust it to get accurate simulation results.
+simulated machine. The variable ``smpi/host-speed`` allows one to
+specify the computational speed of the host machine (in flop/s by
+default) to use when scaling the execution times.
+
+The default value is ``smpi/host-speed=20kf`` (= 20,000 flop/s). This
+is probably underestimated for most machines, leading SimGrid to
+overestimate the amount of flops in the execution blocks that are
+automatically injected in the simulator. As a result, the execution
+time of the whole application will probably be overestimated until you
+use a realistic value.
When the code consists of numerous consecutive MPI calls, the
previous mechanism feeds the simulation kernel with numerous tiny
With the ``global`` algorithm, each call to SMPI_SHARED_MALLOC()
returns a new address, but it only points to a shadow block: its memory
area is mapped on a 1 MiB file on disk. If the returned block is of size
-N MiB, then the same file is mapped N times to cover the whole bloc.
+N MiB, then the same file is mapped N times to cover the whole block.
At the end, no matter how many times you call SMPI_SHARED_MALLOC, this will
only consume 1 MiB in memory.
should be started on which host. You can do so directly in your program (as
shown in :ref:`these examples <s4u_ex_actors>`), or using an XML deployment
file. Unless you have a good reason, you should keep your application apart
-from the deployment as it will :ref:`ease your experimental campain afterward
+from the deployment as it will :ref:`ease your experimental campaign afterward
<howto_science>`.
Deploying actors from XML is easy: it only involves 3 tags: :ref:`pf_tag_actor`,
one year: Code compiling without warning on 3.24 will still compile
with 3.28, but maybe with some deprecation warnings. You should update
your SimGrid installation at least once a year and fix those
-deprecation warnings: the compatiblity wrappers are usually removed
+deprecation warnings: the compatibility wrappers are usually removed
after 4 versions. Another approach is to never update your SimGrid
installation, but we don't provide any support to old versions.
errors. A possible cause is that the system selected an old version of
the SimGrid library somewhere on your disk.
-Dicover which version is used with ``ldd name-of-yoursimulator``.
+Discover which version is used with ``ldd name-of-yoursimulator``.
Once you've found the obsolete copy of SimGrid, just erase it, and
recompile and relaunch your program.
:append: $$$
:dedent: 2
+Each example included in the SimGrid distribution comes with a `tesh`
+file that presents how to start the example once compiled, along with
+the expected output. These files are used for the automatic testing of
+the framework, but can be used to see the examples' output without
+compiling them. See e.g. the file
+`examples/s4u/app-masterworkers/s4u-app-masterworkers.tesh <https://framagit.org/simgrid/simgrid/-/blob/master/examples/s4u/app-masterworkers/s4u-app-masterworkers.tesh>`_.
+Lines starting with `$` are the commands to execute;
+lines starting with `>` are the expected output of each command while
+lines starting with `!` are configuration items for the test runner.
+
Improve it Yourself
-------------------
# Uncomment to view the generated rst for the class.
# print('\n'.join(self.directive.result))
+autodoxy_requalified_identifiers = []
+def fix_namespaces(str):
+ for unqualified,fullyqualif in autodoxy_requalified_identifiers:
+ p = re.compile("(^| ){:s}".format(unqualified))
+ str = p.sub(' {:s}'.format(fullyqualif), str)
+ return str
+
class DoxygenMethodDocumenter(DoxygenDocumenter):
objtype = 'doxymethod'
directivetype = 'function'
rtype = rtype_el.text
# print("rtype: {}".format(rtype))
- signame = (rtype and (rtype + ' ') or '') + self.klassname + "::"+ self.objname
+ signame = fix_namespaces((rtype and (rtype + ' ') or '') + self.klassname + "::"+ self.objname )
+# print("signame: '{}'".format(signame))
return self.format_template_name() + signame
def format_template_name(self):
return ret
def format_signature(self):
- args = self.object.find('argsstring').text
+ args = fix_namespaces(self.object.find('argsstring').text)
+# print ("signature: {}".format(args))
return args
def document_members(self, all_members=False):
# print("rtype: {}".format(rtype))
signame = (rtype and (rtype + ' ') or '') + self.klassname + "::" + self.objname
- return self.format_template_name() + signame
+ return fix_namespaces(self.format_template_name() + signame)
def get_doc(self, encoding=None): # This method is called with 1 parameter in Sphinx 2.x and 2 parameters in Sphinx 1.x
detaileddescription = self.object.find('detaileddescription')
for node in root:
setup.DOXYGEN_ROOT.append(node)
+ if app.config.autodoxy_requalified_identifiers is not None:
+ global autodoxy_requalified_identifiers
+ autodoxy_requalified_identifiers = app.config.autodoxy_requalified_identifiers
def get_doxygen_root():
"""Get the root element of the doxygen XML document.
app.add_autodocumenter(DoxygenMethodDocumenter)
app.add_autodocumenter(DoxygenVariableDocumenter)
app.add_config_value("doxygen_xml", "", True)
+ app.add_config_value("autodoxy_requalified_identifiers", [], True)
# app.add_directive('autodoxysummary', DoxygenAutosummary)
# app.add_directive('autodoxyenum', DoxygenAutoEnum)
--- /dev/null
+*.pyc
+dist/
+*.egg-info/
+.vscode
+.DS_Store
\ No newline at end of file
--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+include README.rst
--- /dev/null
+
+# javasphinx
+
+[![Documentation Status](https://readthedocs.org/projects/bronto-javasphinx/badge/?version=latest)](http://bronto-javasphinx.readthedocs.io/en/latest/?badge=latest)
+
+**This project is no longer maintained and should be used for historical purposes only.**
+
+javasphinx is an extension to the Sphinx documentation system which adds support for documenting Java projects. It includes a Java domain for writing documentation manually and a javasphinx-apidoc utility which will automatically generate API documentation from existing Javadoc markup.
+
+javasphinx is available in the Python Package Index (PyPI) under the name _javasphinx_ and can be installed using tools such as `pip` or `easy_install`.
+
+Documentation for javasphinx is available at http://bronto-javasphinx.readthedocs.io
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+project = 'javasphinx'
+version = '0.9.15'
+release = version
+
+extensions = ['javasphinx']
+
+master_doc = 'index'
+copyright = u'2012-2017, Bronto Software Inc. and contributors'
+primary_domain = 'rst'
--- /dev/null
+
+#######################
+javasphinx User's Guide
+#######################
+
+Welcome to the javasphinx user's guide.
+
+Introduction
+============
+
+javasphinx is a Sphinx_ extension that provides a Sphinx domain_ for documenting
+Java projects and a ``javasphinx-apidoc`` command line tool for automatically
+generating API documentation from existing Java source code and Javadoc
+documentation.
+
+.. _Sphinx: http://sphinx-doc.org
+.. _domain: http://sphinx-doc.org/domains.html
+
+Installing
+==========
+
+javasphinx is available in the Python Package Index (PyPI) and can be installed
+using tools such as ``pip`` or ``easy_install``,
+
+.. code-block:: sh
+
+ $ pip install javasphinx
+
+or,
+
+.. code-block:: sh
+
+ $ easy_install -U javasphinx
+
+Configuration
+=============
+
+To enable javasphinx for your existing Sphinx configuration add ``'javasphinx'``
+to the list of extensions in your conf.py file. javasphinx can be configured to
+cross link to external sources of documentation using the ``javadoc_url_map``
+option,
+
+.. code-block:: python
+
+ javadoc_url_map = {
+ 'com.netflix.curator' : ('http://netflix.github.com/curator/doc', 'javadoc'),
+ 'org.springframework' : ('http://static.springsource.org/spring/docs/3.1.x/javadoc-api/', 'javadoc'),
+ 'org.springframework.data.redis' : ('http://static.springsource.org/spring-data/data-redis/docs/current/api/', 'javadoc')
+ }
+
+Each key in the map should be a Java package. Each value is a tuple of the form
+``(base_url, doc_type)`` where ``base_url`` is the base URL of the documentation
+source, and ``doc_type`` is one of,
+
+``javadoc``
+ For documentation generated by the Javadoc tool *before* version 8.
+
+``javadoc8``
+ For documentation generated by the Javadoc tool after version 8. This is
+ required due to changes in how method anchors are generated (see JDK-8144118_).
+
+``sphinx``
+ For external documentation generated by javasphinx.
+
+When comparing referenced types to the list of available packages the longest
+match will be used. Entries for ``java``, ``javax``, ``org.xml``, and
+``org.w3c`` packages pointing to http://docs.oracle.com/javase/8/docs/api are
+included automatically and do not need to be defined explicitly.
+
+.. _JDK-8144118: https://bugs.openjdk.java.net/browse/JDK-8144118
+
+Java domain
+===========
+
+Directives
+----------
+
+The Java domain uses the name **java** and provides the following directives,
+
+.. rst:directive:: .. java:type:: type-signature
+
+ Describe a Java type. The signature can represent either a class, interface,
+ enum or annotation declaration.
+
+ Use the ``param`` field to document type parameters.
+
+ Example,
+
+ .. code-block:: rst
+
+ .. java:type:: public interface List<E> extends Collection<E>, Iterable<E>
+
+ An ordered collection (also known as a *sequence*)
+
+ :param E: type of item stored by the list
+
+ produces,
+
+ .. java:type:: public interface List<E> extends Collection<E>, Iterable<E>
+
+ An ordered collection (also known as a *sequence*)
+
+ :param E: type of item stored by the list
+
+.. rst:directive:: .. java:field:: field-signature
+
+ Describe a Java field.
+
+.. rst:directive:: .. java:method:: method-signature
+
+ Describe a Java method.
+
+ Use the ``param`` field to document parameters.
+
+ Use the ``throws`` field to document exceptions thrown by the method.
+
+ Use the ``return`` field to document the return type
+
+.. rst:directive:: .. java:constructor:: constructor-signature
+
+ Describe a Java constructor.
+
+ Use the ``param`` field to document parameters.
+
+ Use the ``throws`` field to document exceptions thrown by the constructor.
+
+.. rst:directive:: .. java:package:: package
+
+ Provide package-level documentation and also sets the active package for the
+ type, method, field, constructors, and references that follow.
+
+ Use the ``:noindex:`` option if the directive is only being used to specify
+ the active package. Only one directive for a given package should exclude
+ ``:noindex:``.
+
+.. rst:directive:: .. java:import:: package type
+
+ Declare the given type as being provided by the given package. This
+ information helps javasphinx create cross references for types in type,
+ method, and field declarations. It also allows explicit cross references
+ (using the ``java:ref`` role) to exclude the package qualification.
+
+The method, construct, field, and type directives all accept the following
+standard options,
+
+.. describe:: package
+
+ Specify the package the declaration is within. Can be used instead of, or to
+ override, a ``java:package`` directive.
+
+.. describe:: outertype
+
+ Specify the class/interface the documented object is contained within. This
+ option should be provided for any constructor, method, or field directive
+ that isn't nested within a corresponding type directive.
+
+Roles
+-----
+
+The following roles are provided,
+
+.. rst:role:: java:ref
+
+ This role can be used to create a cross reference to any object type within
+ the Java domain. Aliases for this role include ``java:meth``, ``java:type``,
+ ``java:field``, ``java:package``, and ``java:construct``.
+
+ An explicit title can be provided by using the standard ``title <reference>``
+ syntax.
+
+.. rst:role:: java:extdoc
+
+ This role can be used to explicitly link to an externally documented
+ type. The reference must be fully qualified and supports an explicit title
+ using the ``title <reference>`` syntax.
+
+ The ``java:ref`` role will also create external references as a fall-back if
+ it can't find a matching local declaration so using this role is not strictly
+ necessary.
+
+javasphinx-apidoc
+=================
+
+The ``javasphinx-apidoc`` tool is the counterpart to the ``sphinx-apidoc`` tool
+within the Java domain. It can be used to generate reST source from existing
+Java source code which has been marked up with Javadoc-style comments. The
+generated reST is then processed alongside hand-written documentation by Sphinx.
+
+At minimum a source and destination directory must be provided. The input
+directory will be scanned for .java files and documentation will be generated
+for all non-private types and members. A separate output file will be generated
+for each type (including inner classes). Each file is put within a directory
+corresponding to its package (with periods replaced by directory separators) and
+with the basename of the file deriving from the type name. Inner types are
+placed in files with a basename using a hyphen to separate inner and outer
+types, e.g. ``OuterType-InnerType.rst``.
+
+By default ``javasphinx-apidoc`` will not override existing files. Two options
+can change this behavior,
+
+.. option:: -f, --force
+
+ All existing output files will be rewritten. If a cache directory is
+ specified it will be rebuilt.
+
+.. option:: -u, --update
+
+ Updated source files will have their corresponding output files
+ updated. Unchanged files will be left alone. Most projects will want to use
+ this option.
+
+For larger projects it is recommended to use a cache directory. This can speed
+up subsequent runs by an order of magnitude or more. Specify a directory to
+store cached output using the :option:`-c` option,
+
+.. option:: -c, --cache-dir
+
+ Specify a directory to cache intermediate documentation representations. This
+ directory will be created if it does not already exist.
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .domain import JavaDomain
+from .extdoc import javadoc_role
+
+def setup(app):
+ app.add_domain(JavaDomain)
+
+ app.add_config_value('javadoc_url_map', dict(), '')
+ app.add_role('java:extdoc', javadoc_role)
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import print_function, unicode_literals
+
+try:
+ import cPickle as pickle
+except:
+ import pickle
+
+import hashlib
+import logging
+import sys
+import os
+import os.path
+
+from optparse import OptionParser
+
+import javalang
+
+import javasphinx.compiler as compiler
+import javasphinx.util as util
+
+def encode_output(s):
+ if isinstance(s, str):
+ return s
+ else:
+ return s.encode('utf-8')
+
+def find_source_files(input_path, excludes):
+ """ Get a list of filenames for all Java source files within the given
+ directory.
+
+ """
+
+ java_files = []
+
+ input_path = os.path.normpath(os.path.abspath(input_path))
+
+ for dirpath, dirnames, filenames in os.walk(input_path):
+ if is_excluded(dirpath, excludes):
+ del dirnames[:]
+ continue
+
+ for filename in filenames:
+ if filename.endswith(".java"):
+ java_files.append(os.path.join(dirpath, filename))
+
+ return java_files
+
+def write_toc(packages, opts):
+ doc = util.Document()
+ doc.add_heading(opts.toc_title, '=')
+
+ toc = util.Directive('toctree')
+ toc.add_option('maxdepth', '2')
+ doc.add_object(toc)
+
+ for package in sorted(packages.keys()):
+ toc.add_content("%s/package-index\n" % package.replace('.', '/'))
+
+ filename = 'packages.' + opts.suffix
+ fullpath = os.path.join(opts.destdir, filename)
+
+ if os.path.exists(fullpath) and not (opts.force or opts.update):
+ sys.stderr.write(fullpath + ' already exists. Use -f to overwrite.\n')
+ sys.exit(1)
+
+ f = open(fullpath, 'w')
+ f.write(encode_output(doc.build()))
+ f.close()
+
+def write_documents(packages, documents, sources, opts):
+ package_contents = dict()
+
+ # Write individual documents
+ for fullname, (package, name, document) in documents.items():
+ if is_package_info_doc(name):
+ continue
+
+ package_path = package.replace('.', os.sep)
+ filebasename = name.replace('.', '-')
+ filename = filebasename + '.' + opts.suffix
+ dirpath = os.path.join(opts.destdir, package_path)
+ fullpath = os.path.join(dirpath, filename)
+
+ if not os.path.exists(dirpath):
+ os.makedirs(dirpath)
+ elif os.path.exists(fullpath) and not (opts.force or opts.update):
+ sys.stderr.write(fullpath + ' already exists. Use -f to overwrite.\n')
+ sys.exit(1)
+
+ # Add to package indexes
+ package_contents.setdefault(package, list()).append(filebasename)
+
+ if opts.update and os.path.exists(fullpath):
+ # If the destination file is newer than the source file then skip
+ # writing it out
+ source_mod_time = os.stat(sources[fullname]).st_mtime
+ dest_mod_time = os.stat(fullpath).st_mtime
+
+ if source_mod_time < dest_mod_time:
+ continue
+
+ f = open(fullpath, 'w')
+ f.write(encode_output(document))
+ f.close()
+
+ # Write package-index for each package
+ for package, classes in package_contents.items():
+ doc = util.Document()
+ doc.add_heading(package, '=')
+
+ # Add the package documentation (if any)
+ if packages[package] != '':
+ documentation = packages[package]
+ doc.add_line("\n%s" % documentation)
+
+ doc.add_object(util.Directive('java:package', package))
+
+ toc = util.Directive('toctree')
+ toc.add_option('maxdepth', '1')
+
+ classes.sort()
+ for filebasename in classes:
+ toc.add_content(filebasename + '\n')
+ doc.add_object(toc)
+
+ package_path = package.replace('.', os.sep)
+ filename = 'package-index.' + opts.suffix
+ dirpath = os.path.join(opts.destdir, package_path)
+ fullpath = os.path.join(dirpath, filename)
+
+ if not os.path.exists(dirpath):
+ os.makedirs(dirpath)
+ elif os.path.exists(fullpath) and not (opts.force or opts.update):
+ sys.stderr.write(fullpath + ' already exists. Use -f to overwrite.\n')
+ sys.exit(1)
+
+ f = open(fullpath, 'w')
+ f.write(encode_output(doc.build()))
+ f.close()
+
def get_newer(a, b):
    """Return whichever of the two paths was modified more recently.

    A path that does not exist is treated as older than one that does; when
    both timestamps tie (at whole-second resolution) the first argument wins.
    """
    if not os.path.exists(a):
        return b

    if not os.path.exists(b):
        return a

    mtime_a = int(os.stat(a).st_mtime)
    mtime_b = int(os.stat(b).st_mtime)

    return b if mtime_a < mtime_b else a
+
def format_syntax_error(e):
    """Render a javalang JavaSyntaxError as a one-line message, appending the
    offending token and its (line, column) position when available."""
    location = ""
    position = e.at.position
    if position:
        location = ' at %s line %d, character %d' % (e.at.value, position[0], position[1])
    return e.description + location
+
def generate_from_source_file(doc_compiler, source_file, cache_dir):
    """Parse one Java source file and compile its reST documents.

    :param doc_compiler: a JavadocRestCompiler instance
    :param source_file: path to the .java file
    :param cache_dir: optional directory for pickled parse results; when the
        cache entry is newer than the source it is returned directly
    :return: dict of fullname -> (package, name, document)

    Syntax errors and unexpected failures are reported through util.error /
    util.unexpected (which do not return normally).
    """
    cache_file = None
    if cache_dir:
        # Cache key is derived from the source *path*, not its contents;
        # staleness is handled by the mtime comparison below.
        fingerprint = hashlib.md5(source_file.encode()).hexdigest()
        cache_file = os.path.join(cache_dir, 'parsed-' + fingerprint + '.p')

        if get_newer(source_file, cache_file) == cache_file:
            # NOTE: pickle is only safe because the cache is local state this
            # tool wrote itself; never point cache_dir at untrusted data.
            with open(cache_file, 'rb') as cached:
                return pickle.load(cached)

    with open(source_file) as f:
        source = f.read()

    try:
        ast = javalang.parse.parse(source)
    except javalang.parser.JavaSyntaxError as e:
        util.error('Syntax error in %s: %s', source_file, format_syntax_error(e))
    except Exception:
        util.unexpected('Unexpected exception while parsing %s', source_file)

    documents = {}
    try:
        if source_file.endswith("package-info.java"):
            # package-info files contribute only the package docblock.
            if ast.package is not None:
                documentation = doc_compiler.compile_docblock(ast.package)
                documents[ast.package.name] = (ast.package.name, 'package-info', documentation)
        else:
            documents = doc_compiler.compile(ast)
    except Exception:
        util.unexpected('Unexpected exception while compiling %s', source_file)

    if cache_file:
        with open(cache_file, 'wb') as dump_file:
            pickle.dump(documents, dump_file)

    return documents
+
def generate_documents(source_files, cache_dir, verbose, member_headers, parser):
    """Compile every source file and collect the results.

    Returns a (packages, documents, sources) triple: packages maps each
    package name to its documentation (from package-info.java, '' otherwise),
    documents maps fullname -> (package, name, document), and sources maps
    fullname -> originating source path.
    """
    documents = {}
    sources = {}
    doc_compiler = compiler.JavadocRestCompiler(None, member_headers, parser)

    for source_file in source_files:
        if verbose:
            print('Processing', source_file)

        file_documents = generate_from_source_file(doc_compiler, source_file, cache_dir)
        for fullname in file_documents:
            sources[fullname] = source_file
        documents.update(file_documents)

    # Every package seen in any document starts with empty documentation.
    packages = {package: "" for package, _, _ in documents.values()}

    # package-info documents supply the package-level documentation.
    for package, name, content in documents.values():
        if is_package_info_doc(name):
            packages[package] = content

    return packages, documents, sources
+
def normalize_excludes(rootpath, excludes):
    """Normalize exclude entries to absolute, trailing-separator prefixes.

    Relative entries that do not already start with rootpath are joined onto
    it; every result is normpath'd and given a trailing os.path.sep so prefix
    matching in is_excluded() cannot match partial path components.
    """
    normalized = []
    for entry in excludes:
        if not (os.path.isabs(entry) or entry.startswith(rootpath)):
            entry = os.path.join(rootpath, entry)
        normalized.append(os.path.normpath(entry) + os.path.sep)
    return normalized
+
def is_excluded(root, excludes):
    """Return True when root lies under any of the (normalized) excludes.

    The root is given a trailing separator before prefix matching so that
    '/a/bc' is not excluded by an '/a/b/' entry.
    """
    sep = os.path.sep
    candidate = root if root.endswith(sep) else root + sep
    return any(candidate.startswith(prefix) for prefix in excludes)
+
def is_package_info_doc(document_name):
    """Return True when the document name denotes a package-info.java file."""
    return 'package-info' == document_name
+
+
def main(argv=sys.argv):
    """Command-line entry point: scan the given paths for .java files and
    write per-type reST pages, per-package indexes and (optionally) a top
    level table of contents under the output directory."""
    logging.basicConfig(level=logging.WARN)

    # NOTE(review): optparse is deprecated in favor of argparse; kept because
    # swapping would subtly change help output and option semantics.
    parser = OptionParser(
        usage="""\
usage: %prog [options] -o <output_path> <input_path> [exclude_paths, ...]

Look recursively in <input_path> for Java sources files and create reST files
for all non-private classes, organized by package under <output_path>. A package
index (package-index.<ext>) will be created for each package, and a top level
table of contents will be generated named packages.<ext>.

Paths matching any of the given exclude_paths (interpreted as regular
expressions) will be skipped.

Note: By default this script will not overwrite already created files.""")

    parser.add_option('-o', '--output-dir', action='store', dest='destdir',
                      help='Directory to place all output', default='')
    parser.add_option('-f', '--force', action='store_true', dest='force',
                      help='Overwrite all files')
    parser.add_option('-c', '--cache-dir', action='store', dest='cache_dir',
                      help='Directory to stored cachable output')
    parser.add_option('-u', '--update', action='store_true', dest='update',
                      help='Overwrite new and changed files', default=False)
    parser.add_option('-T', '--no-toc', action='store_true', dest='notoc',
                      help='Don\'t create a table of contents file')
    parser.add_option('-t', '--title', dest='toc_title', default='Javadoc',
                      help='Title to use on table of contents')
    parser.add_option('--no-member-headers', action='store_false', default=True, dest='member_headers',
                      help='Don\'t generate headers for class members')
    parser.add_option('-s', '--suffix', action='store', dest='suffix',
                      help='file suffix (default: rst)', default='rst')
    parser.add_option('-I', '--include', action='append', dest='includes',
                      help='Additional input paths to scan', default=[])
    parser.add_option('-p', '--parser', dest='parser_lib', default='lxml',
                      help='Beautiful Soup---html parser library option.')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
                      help='verbose output')

    (opts, args) = parser.parse_args(argv[1:])

    if not args:
        parser.error('A source path is required.')

    # First positional argument is the root; the rest are exclusions.
    rootpath, excludes = args[0], args[1:]

    input_paths = opts.includes
    input_paths.append(rootpath)

    if not opts.destdir:
        parser.error('An output directory is required.')

    # Accept '-s .rst' and '-s rst' alike.
    if opts.suffix.startswith('.'):
        opts.suffix = opts.suffix[1:]

    for input_path in input_paths:
        if not os.path.isdir(input_path):
            sys.stderr.write('%s is not a directory.\n' % (input_path,))
            sys.exit(1)

    if not os.path.isdir(opts.destdir):
        os.makedirs(opts.destdir)

    if opts.cache_dir and not os.path.isdir(opts.cache_dir):
        os.makedirs(opts.cache_dir)

    excludes = normalize_excludes(rootpath, excludes)
    source_files = []

    # find_source_files is defined elsewhere in this module (outside this
    # view); presumably it walks input_path honoring the excludes.
    for input_path in input_paths:
        source_files.extend(find_source_files(input_path, excludes))

    packages, documents, sources = generate_documents(source_files, opts.cache_dir, opts.verbose,
                                                      opts.member_headers, opts.parser_lib)

    write_documents(packages, documents, sources, opts)

    if not opts.notoc:
        write_toc(packages, opts)
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import javalang
+
+import javasphinx.formatter as formatter
+import javasphinx.util as util
+import javasphinx.htmlrst as htmlrst
+
class JavadocRestCompiler(object):
    """ Javadoc to ReST compiler. Builds ReST documentation from a Java syntax
    tree.

    One instance can compile any number of javalang ASTs: per-type documents
    via compile(), standalone docblocks via compile_docblock(). """

    def __init__(self, filter=None, member_headers=True, parser='lxml'):
        # filter: optional predicate(node) -> bool choosing which declarations
        # are documented; defaults to skipping private/@hide/@exclude members.
        if filter:
            self.filter = filter
        else:
            self.filter = self.__default_filter

        # Converts Javadoc HTML fragments to reST; `parser` names the
        # Beautiful Soup backend to use (e.g. 'lxml').
        self.converter = htmlrst.Converter(parser)

        # When true, each documented member also gets its own reST heading.
        self.member_headers = member_headers

    def __default_filter(self, node):
        """Excludes private members and those tagged "@hide" / "@exclude" in their
        docblocks.

        """

        # Only declaration nodes can be documented at all.
        if not isinstance(node, javalang.tree.Declaration):
            return False

        if 'private' in node.modifiers:
            return False

        # Parse the javadoc only when present; @hide/@exclude suppress output.
        if isinstance(node, javalang.tree.Documented) and node.documentation:
            doc = javalang.javadoc.parse(node.documentation)
            if 'hide' in doc.tags or 'exclude' in doc.tags:
                return False

        return True

    def __html_to_rst(self, s):
        """ Convert an HTML fragment from a docblock into reST text. """
        return self.converter.convert(s)

    def __output_doc(self, documented):
        """ Build a util.Document rendering the node's javadoc: description,
        :author:, :param:, :throws:, :return: fields and a see-also line.

        Raises ValueError when the node cannot carry documentation. Returns an
        empty document when it carries none. """
        if not isinstance(documented, javalang.tree.Documented):
            raise ValueError('node not documented')

        output = util.Document()

        if not documented.documentation:
            return output

        doc = javalang.javadoc.parse(documented.documentation)

        if doc.description:
            output.add(self.__html_to_rst(doc.description))
            # clear() presumably flushes/separates the pending block — defined
            # in util (outside this view); TODO confirm exact semantics.
            output.clear()

        if doc.authors:
            output.add_line(':author: %s' % (self.__html_to_rst(', '.join(doc.authors)),))

        for name, value in doc.params:
            output.add_line(':param %s: %s' % (name, self.__html_to_rst(value)))

        for exception in doc.throws:
            description = doc.throws[exception]
            output.add_line(':throws %s: %s' % (exception, self.__html_to_rst(description)))

        if doc.return_doc:
            output.add_line(':return: %s' % (self.__html_to_rst(doc.return_doc),))

        if doc.tags.get('see'):
            output.clear()

            see_also = ', '.join(self.__output_see(see) for see in doc.tags['see'])
            output.add_line('**See also:** %s' % (see_also,))

        return output

    def __output_see(self, see):
        """ Convert the argument to a @see tag to rest """

        if see.startswith('<a href'):
            # HTML link -- <a href="...">...</a>
            return self.__html_to_rst(see)
        elif '"' in see:
            # Plain text
            return see
        else:
            # Type reference (default)
            return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),)

    def compile_type(self, declaration):
        """ Compile a type declaration into a java:type directive. """
        signature = util.StringBuilder()
        formatter.output_declaration(declaration, signature)

        doc = self.__output_doc(declaration)

        directive = util.Directive('java:type', signature.build())
        directive.add_content(doc)

        return directive

    def compile_enum_constant(self, enum, constant):
        """ Compile one enum constant into a java:field directive; `enum` is
        the enum type name used as the constant's type in the signature. """
        signature = util.StringBuilder()

        for annotation in constant.annotations:
            formatter.output_annotation(annotation, signature)

        # All enum constants are public, static, and final
        signature.append('public static final ')
        signature.append(enum)
        signature.append(' ')
        signature.append(constant.name)

        doc = self.__output_doc(constant)

        directive = util.Directive('java:field', signature.build())
        directive.add_content(doc)

        return directive

    def compile_field(self, field):
        """ Compile a field declaration into a java:field directive.

        Only the first declarator is used; 'int a, b;' documents only 'a'. """
        signature = util.StringBuilder()

        for annotation in field.annotations:
            formatter.output_annotation(annotation, signature)

        formatter.output_modifiers(field.modifiers, signature)
        signature.append(' ')

        formatter.output_type(field.type, signature)
        signature.append(' ')
        signature.append(field.declarators[0].name)

        doc = self.__output_doc(field)

        directive = util.Directive('java:field', signature.build())
        directive.add_content(doc)

        return directive

    def compile_constructor(self, constructor):
        """ Compile a constructor declaration into a java:constructor
        directive, including annotations, modifiers, type parameters,
        the parameter list and the throws clause. """
        signature = util.StringBuilder()

        for annotation in constructor.annotations:
            formatter.output_annotation(annotation, signature)

        formatter.output_modifiers(constructor.modifiers, signature)
        signature.append(' ')

        if constructor.type_parameters:
            formatter.output_type_params(constructor.type_parameters, signature)
            signature.append(' ')

        signature.append(constructor.name)

        signature.append('(')
        formatter.output_list(formatter.output_formal_param, constructor.parameters, signature, ', ')
        signature.append(')')

        if constructor.throws:
            signature.append(' throws ')
            formatter.output_list(formatter.output_exception, constructor.throws, signature, ', ')

        doc = self.__output_doc(constructor)

        directive = util.Directive('java:constructor', signature.build())
        directive.add_content(doc)

        return directive

    def compile_method(self, method):
        """ Compile a method declaration into a java:method directive,
        mirroring compile_constructor but with a return type. """
        signature = util.StringBuilder()

        for annotation in method.annotations:
            formatter.output_annotation(annotation, signature)

        formatter.output_modifiers(method.modifiers, signature)
        signature.append(' ')

        if method.type_parameters:
            formatter.output_type_params(method.type_parameters, signature)
            signature.append(' ')

        formatter.output_type(method.return_type, signature)
        signature.append(' ')

        signature.append(method.name)

        signature.append('(')
        formatter.output_list(formatter.output_formal_param, method.parameters, signature, ', ')
        signature.append(')')

        if method.throws:
            signature.append(' throws ')
            formatter.output_list(formatter.output_exception, method.throws, signature, ', ')

        doc = self.__output_doc(method)

        directive = util.Directive('java:method', signature.build())
        directive.add_content(doc)

        return directive

    def compile_type_document(self, imports_block, package, name, declaration):
        """ Compile a complete document, documenting a type and its members """

        # Dotted prefix for nested types; '' for a top-level type.
        outer_type = name.rpartition('.')[0]

        document = util.Document()
        document.add(imports_block)
        document.add_heading(name, '=')

        # NOTE(review): this StringBuilder is added but never written to —
        # looks like a placeholder for a method summary; kept as-is.
        method_summary = util.StringBuilder()
        document.add_object(method_summary)

        package_dir = util.Directive('java:package', package)
        package_dir.add_option('noindex')
        document.add_object(package_dir)

        # Add type-level documentation
        type_dir = self.compile_type(declaration)
        if outer_type:
            type_dir.add_option('outertype', outer_type)
        document.add_object(type_dir)

        if isinstance(declaration, javalang.tree.EnumDeclaration):
            enum_constants = list(declaration.body.constants)
            enum_constants.sort(key=lambda c: c.name)

            document.add_heading('Enum Constants')
            for enum_constant in enum_constants:
                if self.member_headers:
                    document.add_heading(enum_constant.name, '^')
                c = self.compile_enum_constant(name, enum_constant)
                c.add_option('outertype', name)
                document.add_object(c)

        fields = list(filter(self.filter, declaration.fields))
        if fields:
            document.add_heading('Fields', '-')
            fields.sort(key=lambda f: f.declarators[0].name)
            for field in fields:
                if self.member_headers:
                    document.add_heading(field.declarators[0].name, '^')
                f = self.compile_field(field)
                f.add_option('outertype', name)
                document.add_object(f)

        constructors = list(filter(self.filter, declaration.constructors))
        if constructors:
            document.add_heading('Constructors', '-')
            constructors.sort(key=lambda c: c.name)
            for constructor in constructors:
                if self.member_headers:
                    document.add_heading(constructor.name, '^')
                c = self.compile_constructor(constructor)
                c.add_option('outertype', name)
                document.add_object(c)

        methods = list(filter(self.filter, declaration.methods))
        if methods:
            document.add_heading('Methods', '-')
            methods.sort(key=lambda m: m.name)
            for method in methods:
                if self.member_headers:
                    document.add_heading(method.name, '^')
                m = self.compile_method(method)
                m.add_option('outertype', name)
                document.add_object(m)

        return document

    def compile(self, ast):
        """ Compile autodocs for the given Java syntax tree. Documents will be
        returned documenting each separate type. """

        documents = {}

        imports = util.StringBuilder()
        for imp in ast.imports:
            # Static and wildcard imports can't be mapped to a single type.
            if imp.static or imp.wildcard:
                continue

            # Split the import path into package vs class parts using the
            # Java convention that type names start with an uppercase letter.
            package_parts = []
            cls_parts = []

            for part in imp.path.split('.'):
                if cls_parts or part[0].isupper():
                    cls_parts.append(part)
                else:
                    package_parts.append(part)

            # If the import's final part wasn't capitalized,
            # append it to the class parts anyway so sphinx doesn't complain.
            if cls_parts == []:
                cls_parts.append(package_parts.pop())

            package = '.'.join(package_parts)
            cls = '.'.join(cls_parts)

            imports.append(util.Directive('java:import', package + ' ' + cls).build())
        import_block = imports.build()

        if not ast.package:
            raise ValueError('File must have package declaration')

        package = ast.package.name
        type_declarations = []
        # Collect every (package, dotted-name, node) for non-filtered types,
        # including nested ones (path gives the enclosing type chain).
        for path, node in ast.filter(javalang.tree.TypeDeclaration):
            if not self.filter(node):
                continue

            classes = [n.name for n in path if isinstance(n, javalang.tree.TypeDeclaration)]
            classes.append(node.name)

            name = '.'.join(classes)
            type_declarations.append((package, name, node))

        for package, name, declaration in type_declarations:
            full_name = package + '.' + name
            document = self.compile_type_document(import_block, package, name, declaration)
            documents[full_name] = (package, name, document.build())
        return documents

    def compile_docblock(self, documented):
        ''' Compiles a single, standalone docblock. '''
        return self.__output_doc(documented).build()
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+import string
+
+from docutils import nodes
+from docutils.parsers.rst import Directive, directives
+
+from sphinx import addnodes, version_info
+from sphinx.roles import XRefRole
+from sphinx.locale import _
+from sphinx.domains import Domain, ObjType
+from sphinx.directives import ObjectDescription
+from sphinx.util.nodes import make_refnode
+from sphinx.util.docfields import Field, TypedField, GroupedField
+
+import javalang
+
+import javasphinx.extdoc as extdoc
+import javasphinx.formatter as formatter
+import javasphinx.util as util
+
+# Classes in java.lang. These are available without an import.
java_dot_lang = {
    'AbstractMethodError', 'Appendable', 'ArithmeticException',
    'ArrayIndexOutOfBoundsException', 'ArrayStoreException', 'AssertionError',
    'AutoCloseable', 'Boolean', 'BootstrapMethodError', 'Byte', 'Character',
    'CharSequence', 'Class', 'ClassCastException', 'ClassCircularityError',
    'ClassFormatError', 'ClassLoader', 'ClassNotFoundException', 'ClassValue',
    'Cloneable', 'CloneNotSupportedException', 'Comparable', 'Compiler',
    'Deprecated', 'Double', 'Enum', 'EnumConstantNotPresentException', 'Error',
    'Exception', 'ExceptionInInitializerError', 'Float', 'IllegalAccessError',
    'IllegalAccessException', 'IllegalArgumentException',
    'IllegalMonitorStateException', 'IllegalStateException',
    'IllegalThreadStateException', 'IncompatibleClassChangeError',
    'IndexOutOfBoundsException', 'InheritableThreadLocal', 'InstantiationError',
    'InstantiationException', 'Integer', 'InternalError', 'InterruptedException',
    'Iterable', 'LinkageError', 'Long', 'Math', 'NegativeArraySizeException',
    'NoClassDefFoundError', 'NoSuchFieldError', 'NoSuchFieldException',
    'NoSuchMethodError', 'NoSuchMethodException', 'NullPointerException', 'Number',
    'NumberFormatException', 'Object', 'OutOfMemoryError', 'Override', 'Package',
    'Process', 'ProcessBuilder', 'Readable', 'ReflectiveOperationException',
    'Runnable', 'Runtime', 'RuntimeException', 'RuntimePermission', 'SafeVarargs',
    'SecurityException', 'SecurityManager', 'Short', 'StackOverflowError',
    'StackTraceElement', 'StrictMath', 'String', 'StringBuffer', 'StringBuilder',
    'StringIndexOutOfBoundsException', 'SuppressWarnings', 'System', 'Thread',
    'ThreadDeath', 'ThreadGroup', 'ThreadLocal', 'Throwable',
    'TypeNotPresentException', 'UnknownError', 'UnsatisfiedLinkError',
    'UnsupportedClassVersionError', 'UnsupportedOperationException', 'VerifyError',
    'VirtualMachineError', 'Void'}
+
class JavaObject(ObjectDescription):
    """ Base class for the java:* object description directives. Subclasses
    provide handle_<objtype>_signature() and get_index_text(). """

    option_spec = {
        'noindex': directives.flag,
        'package': directives.unchanged,
        'outertype': directives.unchanged
    }

    def _build_ref_node(self, target):
        """ Create a pending cross-reference node for a type name; the domain
        resolves it later using the recorded package/import context. """
        ref = addnodes.pending_xref('', refdomain='java', reftype='type', reftarget=target, modname=None, classname=None)
        ref['java:outertype'] = self.get_type()

        # Imports recorded by java:import directives in this document.
        package = self.env.temp_data.get('java:imports', dict()).get(target, None)

        # java.lang types are available without an explicit import.
        if not package and target in java_dot_lang:
            package = 'java.lang'

        if package:
            ref['java:imported'] = True
            ref['java:package'] = package
        else:
            ref['java:imported'] = False
            ref['java:package'] = self.get_package()

        return ref

    def _build_type_node(self, typ):
        """ Render a javalang type as a list of docutils nodes, linking each
        referenced type and emitting generics, wildcards, nested sub-types
        and array dimensions. """
        if isinstance(typ, javalang.tree.ReferenceType):
            if typ.dimensions:
                dim = '[]' * len(typ.dimensions)
            else:
                dim = ''

            target = typ.name
            parts = []

            # Walk the Outer.Inner… chain, growing the reference target.
            while typ:
                ref_node = self._build_ref_node(target)
                ref_node += nodes.Text(typ.name, typ.name)
                parts.append(ref_node)

                if typ.arguments:
                    parts.append(nodes.Text('<', '<'))

                    first = True
                    for type_arg in typ.arguments:
                        if first:
                            first = False
                        else:
                            parts.append(nodes.Text(', ', ', '))

                        if type_arg.pattern_type == '?':
                            # Unbounded wildcard.
                            parts.append(nodes.Text('?', '?'))
                        else:
                            if type_arg.pattern_type:
                                # Bounded wildcard: "? extends " / "? super ".
                                s = '? %s ' % (type_arg.pattern_type,)
                                parts.append(nodes.Text(s, s))
                            parts.extend(self._build_type_node(type_arg.type))

                    parts.append(nodes.Text('>', '>'))

                typ = typ.sub_type

                if typ:
                    target = target + '.' + typ.name
                    parts.append(nodes.Text('.', '.'))
                elif dim:
                    # Array brackets go after the innermost type.
                    parts.append(nodes.Text(dim, dim))

            return parts
        else:
            # Primitive/non-reference types render as plain text.
            type_repr = formatter.output_type(typ).build()
            return [nodes.Text(type_repr, type_repr)]

    def _build_type_node_list(self, types):
        """ Render a comma-separated sequence of types. """
        parts = self._build_type_node(types[0])
        for typ in types[1:]:
            parts.append(nodes.Text(', ', ', '))
            parts.extend(self._build_type_node(typ))
        return parts

    def handle_signature(self, sig, signode):
        # Dispatch to handle_<objtype>_signature on the subclass.
        handle_name = 'handle_%s_signature' % (self.objtype,)
        handle = getattr(self, handle_name, None)

        if handle:
            return handle(sig, signode)
        else:
            raise NotImplementedError

    def get_index_text(self, package, type, name):
        """ Return the index entry text for this object; subclasses override. """
        raise NotImplementedError

    def get_package(self):
        # :package: option wins over the document-level java:package context.
        return self.options.get('package', self.env.temp_data.get('java:package'))

    def get_type(self):
        # :outertype: option wins over the nested-type stack in temp_data.
        return self.options.get('outertype', '.'.join(self.env.temp_data.get('java:outertype', [])))

    def add_target_and_index(self, name, sig, signode):
        package = self.get_package()
        type = self.get_type();  # NOTE(review): stray semicolon; shadows builtin "type"

        # Fully-qualified name; basename strips a trailing "(...)" signature.
        fullname = '.'.join(filter(None, (package, type, name)))
        basename = fullname.partition('(')[0]

        # note target
        if fullname not in self.state.document.ids:
            signode['names'].append(fullname)
            signode['ids'].append(fullname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)

            objects = self.env.domaindata['java']['objects']
            if fullname in objects:
                self.state_machine.reporter.warning(
                    'duplicate object description of %s, ' % fullname +
                    'other instance in ' + self.env.doc2path(objects[fullname][0]) +
                    ', use :noindex: for one of them',
                    line=self.lineno)

            objects[fullname] = (self.env.docname, self.objtype, basename)

        indextext = self.get_index_text(package, type, name)
        if indextext:
            # _create_indexnode is defined elsewhere in this module.
            self.indexnode['entries'].append(_create_indexnode(indextext, fullname))

    def before_content(self):
        # Push this type onto the outertype stack so nested members resolve.
        self.set_type = False

        if self.objtype == 'type' and self.names:
            self.set_type = True
            self.env.temp_data.setdefault('java:outertype', list()).append(self.names[0])

    def after_content(self):
        if self.set_type:
            self.env.temp_data['java:outertype'].pop()
+
class JavaMethod(JavaObject):
    """ Directive documenting a Java method (java:method). """

    doc_field_types = [
        TypedField('parameter', label=_('Parameters'),
                   names=('param', 'parameter', 'arg', 'argument'),
                   typerolename='type', typenames=('type',)),
        Field('returnvalue', label=_('Returns'), has_arg=False,
              names=('returns', 'return')),
        GroupedField('throws', names=('throws',), label=_('Throws'), rolename='type')
    ]

    def handle_method_signature(self, sig, signode):
        """ Parse a method signature with javalang, render it into signode,
        and return "name(paramtypes)" as the cross-reference target. """
        try:
            member = javalang.parse.parse_member_signature(sig)
        except javalang.parser.JavaSyntaxError:
            raise self.error("syntax error in method signature")

        if not isinstance(member, javalang.tree.MethodDeclaration):
            raise self.error("expected method declaration")

        mods = formatter.output_modifiers(member.modifiers).build()
        signode += nodes.Text(mods + ' ', mods + ' ')

        if member.type_parameters:
            type_params = formatter.output_type_params(member.type_parameters).build()
            signode += nodes.Text(type_params, type_params)
            signode += nodes.Text(' ', ' ')

        rnode = addnodes.desc_type('', '')
        rnode += self._build_type_node(member.return_type)

        signode += rnode
        signode += nodes.Text(' ', ' ')
        signode += addnodes.desc_name(member.name, member.name)

        paramlist = addnodes.desc_parameterlist()
        for parameter in member.parameters:
            param = addnodes.desc_parameter('', '', noemph=True)
            param += self._build_type_node(parameter.type)

            if parameter.varargs:
                param += nodes.Text('...', '')

            param += nodes.emphasis(' ' + parameter.name, ' ' + parameter.name)
            paramlist += param
        signode += paramlist

        # Target uses raw parameter types (no generics) to stay stable.
        param_reprs = [formatter.output_type(param.type, with_generics=False).build() for param in member.parameters]
        return member.name + '(' + ', '.join(param_reprs) + ')'

    def get_index_text(self, package, type, name):
        return _('%s (Java method)' % (name,))
+
class JavaConstructor(JavaObject):
    """ Directive documenting a Java constructor (java:constructor). """

    doc_field_types = [
        TypedField('parameter', label=_('Parameters'),
                   names=('param', 'parameter', 'arg', 'argument'),
                   typerolename='type', typenames=('type',)),
        # NOTE(review): unlike JavaMethod, no rolename on throws — confirm
        # whether that asymmetry is intentional.
        GroupedField('throws', names=('throws',), label=_('Throws'))
    ]

    def handle_constructor_signature(self, sig, signode):
        """ Parse a constructor signature, render it into signode, and return
        "name(paramtypes)" as the cross-reference target. """
        try:
            member = javalang.parse.parse_constructor_signature(sig)
        except javalang.parser.JavaSyntaxError:
            raise self.error("syntax error in constructor signature")

        if not isinstance(member, javalang.tree.ConstructorDeclaration):
            raise self.error("expected constructor declaration")

        mods = formatter.output_modifiers(member.modifiers).build()
        signode += nodes.Text(mods + ' ', mods + ' ')

        signode += addnodes.desc_name(member.name, member.name)

        paramlist = addnodes.desc_parameterlist()
        for parameter in member.parameters:
            param = addnodes.desc_parameter('', '', noemph=True)
            param += self._build_type_node(parameter.type)

            if parameter.varargs:
                param += nodes.Text('...', '')

            param += nodes.emphasis(' ' + parameter.name, ' ' + parameter.name)
            paramlist += param
        signode += paramlist

        # Target uses raw parameter types (no generics) to stay stable.
        param_reprs = [formatter.output_type(param.type, with_generics=False).build() for param in member.parameters]
        return '%s(%s)' % (member.name, ', '.join(param_reprs))

    def get_index_text(self, package, type, name):
        return _('%s (Java constructor)' % (name,))
+
class JavaType(JavaObject):
    """ Directive documenting a Java type — class, interface, enum or
    annotation (java:type). """

    doc_field_types = [
        GroupedField('parameter', names=('param',), label=_('Parameters'))
    ]

    # Set by handle_type_signature(); used by get_index_text().
    declaration_type = None

    def handle_type_signature(self, sig, signode):
        """ Parse a type signature, render it (kind keyword, name, type
        params, extends/implements) into signode, and return the type name. """
        try:
            member = javalang.parse.parse_type_signature(sig)
        except javalang.parser.JavaSyntaxError:
            # NOTE(review): message says "field" for a type signature —
            # looks like a copy/paste from JavaField.
            raise self.error("syntax error in field signature")

        if isinstance(member, javalang.tree.ClassDeclaration):
            self.declaration_type = 'class'
        elif isinstance(member, javalang.tree.InterfaceDeclaration):
            self.declaration_type = 'interface'
        elif isinstance(member, javalang.tree.EnumDeclaration):
            self.declaration_type = 'enum'
        elif isinstance(member, javalang.tree.AnnotationDeclaration):
            self.declaration_type = 'annotation'
        else:
            raise self.error("expected type declaration")

        mods = formatter.output_modifiers(member.modifiers).build()
        signode += nodes.Text(mods + ' ', mods + ' ')

        if self.declaration_type == 'class':
            signode += nodes.Text('class ', 'class ')
        elif self.declaration_type == 'interface':
            signode += nodes.Text('interface ', 'interface ')
        elif self.declaration_type == 'enum':
            signode += nodes.Text('enum ', 'enum ')
        elif self.declaration_type == 'annotation':
            signode += nodes.Text('@interface ', '@interface ')

        signode += addnodes.desc_name(member.name, member.name)

        if self.declaration_type in ('class', 'interface') and member.type_parameters:
            type_params = formatter.output_type_params(member.type_parameters).build()
            signode += nodes.Text(type_params, type_params)

        # extends/implements rendering differs per kind: a class extends one
        # type; interfaces extend a list; enums only implement.
        if self.declaration_type == 'class':
            if member.extends:
                extends = ' extends '
                signode += nodes.Text(extends, extends)
                signode += self._build_type_node(member.extends)
            if member.implements:
                implements = ' implements '
                signode += nodes.Text(implements, implements)
                signode += self._build_type_node_list(member.implements)
        elif self.declaration_type == 'interface':
            if member.extends:
                extends = ' extends '
                signode += nodes.Text(extends, extends)
                signode += self._build_type_node_list(member.extends)
        elif self.declaration_type == 'enum':
            if member.implements:
                implements = ' implements '
                signode += nodes.Text(implements, implements)
                signode += self._build_type_node_list(member.implements)

        return member.name

    def get_index_text(self, package, type, name):
        return _('%s (Java %s)' % (name, self.declaration_type))
+
class JavaField(JavaObject):
    """ Directive documenting a Java field (java:field). """

    def handle_field_signature(self, sig, signode):
        """ Parse a field signature, render it into signode, and return the
        declarator name as the cross-reference target.

        Raises a directive error for invalid signatures, non-field
        declarations, or signatures declaring more than one field. """
        try:
            member = javalang.parse.parse_member_signature(sig)
        except javalang.parser.JavaSyntaxError:
            raise self.error("syntax error in field signature")

        if not isinstance(member, javalang.tree.FieldDeclaration):
            raise self.error("expected field declaration")

        mods = formatter.output_modifiers(member.modifiers).build()
        signode += nodes.Text(mods + ' ', mods + ' ')

        tnode = addnodes.desc_type('', '')
        tnode += self._build_type_node(member.type)

        signode += tnode
        signode += nodes.Text(' ', ' ')

        if len(member.declarators) > 1:
            # Bug fix: self.error() returns a DirectiveError — it must be
            # raised (as in the sibling handlers) or the check is a no-op.
            raise self.error('only one field may be documented at a time')

        declarator = member.declarators[0]
        signode += addnodes.desc_name(declarator.name, declarator.name)

        # Array dimensions declared on the name, e.g. "int foo[]".
        dim = '[]' * len(declarator.dimensions)
        signode += nodes.Text(dim)

        # Show a literal initializer (constant value) in the signature.
        if declarator.initializer and isinstance(declarator.initializer, javalang.tree.Literal):
            signode += nodes.Text(' = ' + declarator.initializer.value)

        return declarator.name

    def get_index_text(self, package, type, name):
        return _('%s (Java field)' % (name,))
+
class JavaPackage(Directive):
    """
    Directive to mark description of a new package.
    """

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'noindex': directives.flag,
    }

    def run(self):
        """ Record the current package in temp_data, register it in the
        domain's object inventory, and (unless :noindex:) emit a target
        node plus an index entry. """
        env = self.state.document.settings.env
        package = self.arguments[0].strip()
        noindex = 'noindex' in self.options
        # Subsequent java:* directives in this document inherit this package.
        env.temp_data['java:package'] = package
        env.domaindata['java']['objects'][package] = (env.docname, 'package', package)
        ret = []

        if not noindex:
            # Target id is prefixed to avoid clashing with type ids.
            targetnode = nodes.target('', '', ids=['package-' + package], ismod=True)
            self.state.document.note_explicit_target(targetnode)

            # the platform and synopsis aren't printed; in fact, they are only
            # used in the modindex currently
            ret.append(targetnode)

            indextext = _('%s (package)') % (package,)
            # _create_indexnode is defined elsewhere in this module.
            inode = addnodes.index(entries=[_create_indexnode(indextext, 'package-' + package)])
            ret.append(inode)

        return ret
+
class JavaImport(Directive):
    """
    This directive is just to tell Sphinx the source of a referenced type.
    """

    has_content = False
    required_arguments = 2
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        """ Record "package typename" so later references in this document
        can resolve typename to its package. Produces no output nodes. """
        env = self.state.document.settings.env
        package, typename = self.arguments

        # Mapping lives in temp_data, i.e. it is scoped to this document.
        known_imports = env.temp_data.setdefault('java:imports', dict())
        known_imports[typename] = package
        return []
+
class JavaXRefRole(XRefRole):
    """Cross-reference role that stashes package/outer-type context on the
    reference node so JavaDomain.resolve_xref can try qualified lookups."""

    def process_link(self, env, refnode, has_explicit_title, title, target):
        # Record the enclosing type context (if any) for later resolution.
        refnode['java:outertype'] = '.'.join(env.temp_data.get('java:outertype', list()))

        target = target.lstrip('~')

        # Strip a method component, then the member name, to recover the base
        # type the reference points into.
        basetype = target
        if '(' in basetype:
            basetype = basetype.partition('(')[0]
        if '.' in basetype:
            basetype = basetype.rpartition('.')[0]

        # Was this base type brought in by a java:import directive?
        package = env.temp_data.get('java:imports', dict()).get(basetype, None)

        if package:
            refnode['java:imported'] = True
            refnode['java:package'] = package
        else:
            refnode['java:imported'] = False
            refnode['java:package'] = env.temp_data.get('java:package')

        if not has_explicit_title:
            # if the first character is a tilde, don't display the
            # module/class parts of the contents
            if title[0:1] == '~':
                title = title.partition('(')[0]
                title = title[1:]
                dot = title.rfind('.')
                if dot != -1:
                    title = title[dot+1:]

        return title, target
+
class JavaDomain(Domain):
    """Java language domain: registers the Java directives and roles and
    implements cross-reference resolution for documented objects."""
    name = 'java'
    label = 'Java'

    object_types = {
        'package': ObjType(_('package'), 'package', 'ref'),
        'type': ObjType(_('type'), 'type', 'ref'),
        'field': ObjType(_('field'), 'field', 'ref'),
        'constructor': ObjType(_('constructor'), 'construct', 'ref'),
        'method': ObjType(_('method'), 'meth', 'ref')
    }

    directives = {
        'package': JavaPackage,
        'type': JavaType,
        'field': JavaField,
        'constructor': JavaConstructor,
        'method': JavaMethod,
        'import': JavaImport
    }

    roles = {
        'package': JavaXRefRole(),
        'type': JavaXRefRole(),
        'field': JavaXRefRole(),
        'construct': JavaXRefRole(),
        'meth': JavaXRefRole(),
        'ref': JavaXRefRole(),
    }

    initial_data = {
        'objects': {},  # fullname -> docname, objtype, basename
    }

    def clear_doc(self, docname):
        # Iterate over a snapshot: we delete from the live mapping below.
        objects = dict(self.data['objects'])

        for fullname, (fn, _, _) in objects.items():
            if fn == docname:
                del self.data['objects'][fullname]

    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        """Resolve a reference by trying progressively looser matches:

        1. fully-qualified name,
        2. package-prefixed name,
        3. package + enclosing-type prefixed name,
        4. suffix match on full names (then on basenames),
        5. external Javadoc, including an unqualified java.lang shorthand.
        """
        objects = self.data['objects']
        package = node.get('java:package')
        imported = node.get('java:imported')
        type_context = node.get('java:outertype')

        # Partial function to make building the response easier
        make_ref = lambda fullname: make_refnode(builder, fromdocname, objects[fullname][0], fullname, contnode, fullname)

        # Check for fully qualified references
        if target in objects:
            return make_ref(target)

        # Try with package name prefixed
        if package:
            fullname = package + '.' + target
            if fullname in objects:
                return make_ref(fullname)

        # Try with package and type prefixed
        if package and type_context:
            fullname = package + '.' + type_context + '.' + target
            if fullname in objects:
                return make_ref(fullname)

        # Try to find a matching suffix; an exact full-name suffix wins
        # immediately, basename matches are kept as a fallback (last wins).
        suffix = '.' + target
        basename_match = None
        basename_suffix = suffix.partition('(')[0]

        for fullname, (_, _, basename) in objects.items():
            if fullname.endswith(suffix):
                return make_ref(fullname)
            elif basename.endswith(basename_suffix):
                basename_match = fullname

        if basename_match:
            return make_ref(basename_match)

        # Try creating an external documentation reference
        ref = extdoc.get_javadoc_ref(self.env, target, target)

        # Unqualified java.lang types (e.g. String) resolve without import.
        if not ref and target in java_dot_lang:
            fulltarget = 'java.lang.' + target
            ref = extdoc.get_javadoc_ref(self.env, fulltarget, fulltarget)

        # If the target was imported try with the package prefixed
        if not ref and imported:
            fulltarget = package + '.' + target
            ref = extdoc.get_javadoc_ref(self.env, fulltarget, fulltarget)

        if ref:
            ref.append(contnode)
            return ref
        else:
            return None

    def get_objects(self):
        # Yield (name, dispname, type, docname, anchor, priority) tuples.
        for refname, (docname, type, _) in self.data['objects'].items():
            yield (refname, refname, type, docname, refname, 1)
+
+
def _create_indexnode(indextext, fullname):
    """Build an index-entry tuple compatible with the running Sphinx version.

    See https://github.com/sphinx-doc/sphinx/issues/2673
    """
    entry = ('single', indextext, fullname, '')
    # Sphinx >= 1.4 expects a fifth element (the category key).
    return entry if version_info < (1, 4) else entry + (None,)
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import re
+
+from docutils import nodes, utils
+from sphinx.util.nodes import split_explicit_title
+
def get_javadoc_ref(app, rawtext, text):
    """Build a docutils reference node pointing at external Javadoc for *text*.

    ``text`` is a dotted Java name, optionally ending in ``method(args)``.
    Returns ``None`` when no source configured in ``javadoc_url_map`` (after
    seeding the Java SE defaults) matches the name's package prefix.
    """
    javadoc_url_map = app.config.javadoc_url_map

    # Add default Java SE sources (deduplicated from four copy-pasted blocks).
    java_se = ("http://docs.oracle.com/javase/8/docs/api", 'javadoc8')
    for prefix in ("java", "javax", "org.xml", "org.w3c"):
        if not javadoc_url_map.get(prefix):
            javadoc_url_map[prefix] = java_se

    source = None
    package = ''
    method = None

    if '(' in text:
        # If the javadoc contains a line like this:
        #   {@link #sort(List)}
        # there is no package so the text.rindex will fail
        try:
            split_point = text.rindex('.', 0, text.index('('))
            method = text[split_point + 1:]
            text = text[:split_point]
        except ValueError:
            pass

    # Longest-prefix match of the name against the configured package roots.
    for pkg, (baseurl, ext_type) in javadoc_url_map.items():
        if text.startswith(pkg + '.') and len(pkg) > len(package):
            source = baseurl, ext_type
            package = pkg

    if not source:
        return None

    baseurl, ext_type = source

    # Split the dotted name: the class part starts at the first capitalized
    # component (Java naming convention).
    package_parts = []
    cls_parts = []

    for part in text.split('.'):
        if cls_parts or part[0].isupper():
            cls_parts.append(part)
        else:
            package_parts.append(part)

    package = '.'.join(package_parts)
    cls = '.'.join(cls_parts)

    if not baseurl.endswith('/'):
        baseurl = baseurl + '/'

    if ext_type == 'javadoc':
        if not cls:
            cls = 'package-summary'
        source = baseurl + package.replace('.', '/') + '/' + cls + '.html'
        if method:
            source = source + '#' + method
    elif ext_type == 'javadoc8':
        if not cls:
            cls = 'package-summary'
        source = baseurl + package.replace('.', '/') + '/' + cls + '.html'
        if method:
            # Javadoc 8 anchors replace parentheses with dashes.
            source = source + '#' + re.sub(r'[()]', '-', method)
    elif ext_type == 'sphinx':
        if not cls:
            cls = 'package-index'
        source = baseurl + package.replace('.', '/') + '/' + cls.replace('.', '-') + '.html'
        if method:
            source = source + '#' + package + '.' + cls + '.' + method
    else:
        raise ValueError('invalid target specifier ' + ext_type)

    title = '.'.join(filter(None, (package, cls, method)))
    node = nodes.reference(rawtext, '')
    node['refuri'] = source
    node['reftitle'] = title

    return node
+
def javadoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role for linking to external Javadoc.

    Raises ValueError when no source in ``javadoc_url_map`` matches the
    target, so Sphinx reports the broken reference.
    """

    has_explicit_title, title, target = split_explicit_title(text)
    title = utils.unescape(title)
    target = utils.unescape(target)

    if not has_explicit_title:
        target = target.lstrip('~')

        # Slice instead of title[0]: avoids IndexError on empty role text and
        # matches the idiom used by JavaXRefRole.process_link.
        if title[0:1] == '~':
            title = title[1:].rpartition('.')[2]

    app = inliner.document.settings.env.app
    ref = get_javadoc_ref(app, rawtext, target)

    if not ref:
        raise ValueError("no Javadoc source found for %s in javadoc_url_map" % (target,))

    ref.append(nodes.Text(title, title))

    return [ref], []
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Convert Java syntax tree nodes to string representations.
+
+"""
+
+import javalang
+
+from .util import StringBuilder
+
# The order for displaying modifiers when rendering a declaration header;
# any modifier not listed here is dropped by output_modifiers().
__modifiers_order = ('public', 'protected', 'private', 'static', 'abstract', 'final',
                     'native', 'synchronized', 'transient', 'volatile', 'strictfp')
+
def formatter(f):
    """Decorator: make ``output`` optional, defaulting to a fresh StringBuilder.

    The wrapped function is invoked for its side effect of appending text to
    ``output``; the sink is returned so results can be built or chained.
    """
    def wrapper(node, output=None, **kwargs):
        sink = StringBuilder() if output is None else output
        f(node, sink, **kwargs)
        return sink
    return wrapper
+
def output_list(f, items, output=None, sep=', '):
    """Emit every item through formatter ``f``, separated by ``sep``.

    Does nothing for an empty or falsy ``items``.
    """
    for index, item in enumerate(items or ()):
        if index:
            output.append(sep)
        f(item, output)
+
@formatter
def output_annotation(annotation, output):
    """Render a Java annotation as ``@Name`` followed by one space."""
    for piece in ('@', annotation.name, ' '):
        output.append(piece)
+
@formatter
def output_type(type, output, with_generics=True):
    """Render a (possibly generic, possibly array) type reference.

    A falsy ``type`` renders as ``void``; array dimensions render as one
    ``[]`` pair per dimension, appended last.
    """
    if not type:
        output.append('void')
        return

    if type.dimensions:
        dim = '[]' * len(type.dimensions)
    else:
        dim = ''

    if isinstance(type, javalang.tree.BasicType):
        output.append(type.name)
    else:
        # Walk the chain of nested sub_types (e.g. Map.Entry), emitting each
        # segment with its generic arguments, joined by dots.
        while type:
            output.append(type.name)

            if with_generics:
                output_type_args(type.arguments, output)

            type = type.sub_type

            if type:
                output.append('.')
    output.append(dim)
+
@formatter
def output_exception(exception, output):
    """Render a throws-clause entry; the value is appended verbatim."""
    output.append(exception)
+
@formatter
def output_type_arg(type_arg, output):
    """Render a generic type argument, including wildcards and bounds."""
    pattern = type_arg.pattern_type

    if pattern == '?':
        # Unbounded wildcard: just '?', no type follows.
        output.append('?')
        return

    if pattern:
        # Bounded wildcard, e.g. '? extends Foo' / '? super Foo'.
        output.append('? ')
        output.append(pattern)
        output.append(' ')

    output_type(type_arg.type, output)
+
@formatter
def output_type_args(type_args, output):
    """Render a generic argument list ``<A, B>``; nothing when empty."""
    if not type_args:
        return
    output.append('<')
    output_list(output_type_arg, type_args, output, ', ')
    output.append('>')
+
@formatter
def output_type_param(type_param, output):
    """Render a type parameter with its ``extends`` bounds, if any."""
    output.append(type_param.name)

    bounds = type_param.extends
    if bounds:
        output.append(' extends ')
        # Multiple bounds are joined with '&' per Java syntax.
        output_list(output_type, bounds, output, ' & ')
+
@formatter
def output_type_params(type_params, output):
    """Render a type-parameter list ``<T, U extends V>``; nothing when empty."""
    if not type_params:
        return
    output.append('<')
    output_list(output_type_param, type_params, output, ', ')
    output.append('>')
+
@formatter
def output_declaration(declaration, output):
    """Render the header of a class/enum/interface/annotation declaration:
    annotations, modifiers, keyword, name, type parameters, and the
    extends/implements clauses."""
    for annotation in declaration.annotations:
        output_annotation(annotation, output)

    output_modifiers(declaration.modifiers, output)
    output.append(' ')

    # The declaration keyword depends on the concrete javalang node type.
    if isinstance(declaration, javalang.tree.ClassDeclaration):
        output.append('class ')
    elif isinstance(declaration, javalang.tree.EnumDeclaration):
        output.append('enum ')
    elif isinstance(declaration, javalang.tree.InterfaceDeclaration):
        output.append('interface ')
    elif isinstance(declaration, javalang.tree.AnnotationDeclaration):
        output.append('@interface ')

    output.append(declaration.name)

    if isinstance(declaration, (javalang.tree.ClassDeclaration, javalang.tree.InterfaceDeclaration)):
        output_type_params(declaration.type_parameters, output)

    # A class extends a single type; an interface may extend several.
    if isinstance(declaration, javalang.tree.ClassDeclaration) and declaration.extends:
        output.append(' extends ')
        output_type(declaration.extends, output)

    if isinstance(declaration, javalang.tree.InterfaceDeclaration) and declaration.extends:
        output.append(' extends ')
        output_list(output_type, declaration.extends, output, ', ')

    if isinstance(declaration, (javalang.tree.ClassDeclaration, javalang.tree.EnumDeclaration)) and declaration.implements:
        output.append(' implements ')
        output_list(output_type, declaration.implements, output, ', ')
+
@formatter
def output_formal_param(param, output):
    """Render a formal parameter: type, optional varargs ellipsis, name."""
    output_type(param.type, output)

    if param.varargs:
        output.append('...')

    for piece in (' ', param.name):
        output.append(piece)
+
@formatter
def output_modifiers(modifiers, output):
    """Render the given modifiers in canonical display order, space-separated.

    Modifiers not present in __modifiers_order are silently dropped.
    """
    ordered = [mod for mod in __modifiers_order if mod in modifiers]
    output_list(lambda mod, out: out.append(mod), ordered, output, ' ')
--- /dev/null
+#
+# Copyright 2013-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import unicode_literals
+from builtins import str
+
+import collections
+import re
+
+from xml.sax.saxutils import escape as html_escape
+from bs4 import BeautifulSoup
+
# A single table cell: its tag name ('td'/'th'), row/column spans, and the
# already-rendered reST contents.
Cell = collections.namedtuple('Cell', ['type', 'rowspan', 'colspan', 'contents'])
+
class Converter(object):
    """Convert Javadoc HTML fragments into reStructuredText.

    Inline Javadoc tags ({@code ...}, {@link ...}, ...) are rewritten to
    plain HTML first, the result is parsed with BeautifulSoup using the
    parser named at construction time, and the DOM is walked to emit reST.
    """

    def __init__(self, parser):
        # Tag names seen without an explicit handler (kept for diagnostics).
        self._unknown_tags = set()
        self._clear = '\n\n..\n\n'

        # Regular expressions
        self._preprocess_anchors = re.compile(r'<a\s+name\s*=\s*["\']?(.+?)["\']?\s*>')
        self._post_process_empty_lines = re.compile(r'^\s+$', re.MULTILINE)
        self._post_process_compress_lines = re.compile(r'\n{3,}')
        self._whitespace_with_newline = re.compile(r'[\s\n]+')
        self._whitespace = re.compile(r'\s+')
        self._html_tag = re.compile(r'<.*?>')

        # Matches entities written without the terminating ';' (e.g. '&nbsp ').
        self._preprocess_entity = re.compile(r'&(nbsp|lt|gt|amp)([^;]|[\n])')
        self._parser = parser

    # --------------------------------------------------------------------------
    # ---- reST Utility Methods ----

    def _unicode(self, s):
        # Python 3 fix: the original used the Py2-only name `unicode`, which
        # raises NameError under Python 3 (this module imports builtins.str).
        # Text is already str; anything else is treated as UTF-8 bytes.
        if isinstance(s, str):
            return s
        else:
            return str(s, 'utf8')

    def _separate(self, s):
        # Surround with blank lines so the fragment is its own paragraph.
        return '\n\n' + s + '\n\n'

    def _escape_inline(self, s):
        # Escaped spaces keep inline markup valid when embedded mid-word.
        return '\\ ' + s + '\\ '

    def _inline(self, tag, s):
        # Seems fishy if our inline markup spans lines. We will instead just
        # return the string as is
        if '\n' in s:
            return s

        s = s.strip()

        if not s:
            return s

        # s is already stripped above; the original stripped it a second time.
        return self._escape_inline(tag + s + tag)

    def _role(self, role, s, label=None):
        if label:
            return self._escape_inline(':%s:`%s <%s>`' % (role, label, s))
        else:
            return self._escape_inline(':%s:`%s`' % (role, s))

    def _directive(self, directive, body=None):
        header = '\n\n.. %s::\n\n' % (directive,)

        if body:
            return header + self._left_justify(body, 3) + '\n\n'
        else:
            return header + '\n'

    def _hyperlink(self, target, label):
        return self._escape_inline('`%s <%s>`_' % (label, target))

    def _listing(self, marker, items):
        # Justify each item past the marker width, then overlay the marker
        # onto the first columns of each item.
        items = [self._left_justify(item, len(marker) + 1) for item in items]
        items = [marker + item[len(marker):] for item in items]
        return self._separate('..') + self._separate('\n'.join(items))

    def _left_justify(self, s, indent=0):
        """Shift the block so its least-indented line sits at column `indent`."""
        lines = [l.rstrip() for l in s.split('\n')]
        indents = [len(l) - len(l.lstrip()) for l in lines if l]

        if not indents:
            return s

        shift = indent - min(indents)

        if shift < 0:
            return '\n'.join(l[-shift:] for l in lines)
        else:
            prefix = ' ' * shift
            return '\n'.join(prefix + l for l in lines)

    def _compress_whitespace(self, s, replace=' ', newlines=True):
        if newlines:
            return self._whitespace_with_newline.sub(replace, s)
        else:
            return self._whitespace.sub(replace, s)

    # --------------------------------------------------------------------------
    # ---- DOM Tree Processing ----

    def _process_table_cells(self, table):
        """ Compile all the table cells.

        Returns a list of rows. The rows may have different lengths because of
        column spans.

        """

        rows = []

        for i, tr in enumerate(table.find_all('tr')):
            row = []

            for c in tr.contents:
                cell_type = getattr(c, 'name', None)

                if cell_type not in ('td', 'th'):
                    continue

                rowspan = int(c.attrs.get('rowspan', 1))
                colspan = int(c.attrs.get('colspan', 1))
                contents = self._process_children(c).strip()

                # Header cells below the first row are bolded inline instead.
                if cell_type == 'th' and i > 0:
                    contents = self._inline('**', contents)

                row.append(Cell(cell_type, rowspan, colspan, contents))

            rows.append(row)

        return rows

    def _process_table(self, node):
        """Render an HTML table as a reST grid table."""
        rows = self._process_table_cells(node)

        if not rows:
            return ''

        table_num_columns = max(sum(c.colspan for c in row) for row in rows)

        normalized = []

        # Pad short rows with an empty spanning cell so all rows line up.
        for row in rows:
            row_num_columns = sum(c.colspan for c in row)

            if row_num_columns < table_num_columns:
                cell_type = row[-1].type if row else 'td'
                row.append(Cell(cell_type, 1, table_num_columns - row_num_columns, ''))

        # Compute column widths and row heights large enough for every cell,
        # distributing a spanning cell's width across its columns.
        col_widths = [0] * table_num_columns
        row_heights = [0] * len(rows)

        for i, row in enumerate(rows):
            j = 0
            for cell in row:
                current_w = sum(col_widths[j:j + cell.colspan])
                required_w = max(len(l) for l in cell.contents.split('\n'))

                if required_w > current_w:
                    additional = required_w - current_w
                    col_widths[j] += additional - (cell.colspan - 1) * (additional // cell.colspan)
                    for jj in range(j + 1, j + cell.colspan):
                        col_widths[jj] += (additional // cell.colspan)

                current_h = row_heights[i]
                required_h = len(cell.contents.split('\n'))

                if required_h > current_h:
                    row_heights[i] = required_h

                j += cell.colspan

        row_sep = '+' + '+'.join('-' * (l + 2) for l in col_widths) + '+'
        header_sep = '+' + '+'.join('=' * (l + 2) for l in col_widths) + '+'
        lines = [row_sep]

        for i, row in enumerate(rows):
            for y in range(0, row_heights[i]):
                line = []
                j = 0
                for c in row:
                    # Width of the spanned columns plus their separators.
                    w = sum(n + 3 for n in col_widths[j:j+c.colspan]) - 2
                    h = row_heights[i]

                    line.append('| ')
                    cell_lines = c.contents.split('\n')
                    content = cell_lines[y] if y < len(cell_lines) else ''
                    line.append(content.ljust(w))

                    j += c.colspan

                line.append('|')
                lines.append(''.join(line))

            # An all-'th' first row becomes the table header.
            if i == 0 and all(c.type == 'th' for c in row):
                lines.append(header_sep)
            else:
                lines.append(row_sep)

        return self._separate('\n'.join(lines))

    def _process_children(self, node):
        """Process child nodes, trimming leading space after a newline."""
        parts = []
        is_newline = False

        for c in node.contents:
            part = self._process(c)

            if is_newline:
                part = part.lstrip()

            if part:
                parts.append(part)
                is_newline = part.endswith('\n')

        return ''.join(parts)

    def _process_text(self, node):
        return ''.join(node.strings)

    def _process(self, node):
        """Dispatch one DOM node to its reST rendering."""
        # Bare text nodes: collapse whitespace.
        if isinstance(node, str):
            return self._compress_whitespace(node)

        simple_tags = {
            'b'      : lambda s: self._inline('**', s),
            'strong' : lambda s: self._inline('**', s),
            'i'      : lambda s: self._inline('*', s),
            'em'     : lambda s: self._inline('*', s),
            'tt'     : lambda s: self._inline('``', s),
            'code'   : lambda s: self._inline('``', s),
            'h1'     : lambda s: self._inline('**', s),
            'h2'     : lambda s: self._inline('**', s),
            'h3'     : lambda s: self._inline('**', s),
            'h4'     : lambda s: self._inline('**', s),
            'h5'     : lambda s: self._inline('**', s),
            'h6'     : lambda s: self._inline('**', s),
            'sub'    : lambda s: self._role('sub', s),
            'sup'    : lambda s: self._role('sup', s),
            'hr'     : lambda s: self._separate('')  # Transitions not allowed
        }

        if node.name in simple_tags:
            return simple_tags[node.name](self._process_text(node))

        if node.name == 'p':
            return self._separate(self._process_children(node).strip())

        if node.name == 'pre':
            return self._directive('parsed-literal', self._process_text(node))

        if node.name == 'a':
            if 'name' in node.attrs:
                return self._separate('.. _' + node['name'] + ':')
            elif 'href' in node.attrs:
                target = node['href']
                label = self._compress_whitespace(self._process_text(node).strip('\n'))

                # '#' targets are in-page refs, '@' targets are java refs
                # produced by the {@link} preprocessing.
                if target.startswith('#'):
                    return self._role('ref', target[1:], label)
                elif target.startswith('@'):
                    return self._role('java:ref', target[1:], label)
                else:
                    return self._hyperlink(target, label)

        if node.name == 'ul':
            items = [self._process(n) for n in node.find_all('li', recursive=False)]
            return self._listing('*', items)

        if node.name == 'ol':
            items = [self._process(n) for n in node.find_all('li', recursive=False)]
            return self._listing('#.', items)

        if node.name == 'li':
            s = self._process_children(node)
            s = s.strip()

            # If it's multiline clear the end to correctly support nested lists
            if '\n' in s:
                s = s + '\n\n'

            return s

        if node.name == 'table':
            return self._process_table(node)

        # Unknown tags: remember them and process their children transparently.
        self._unknown_tags.add(node.name)

        return self._process_children(node)

    # --------------------------------------------------------------------------
    # ---- HTML Preprocessing ----

    def _preprocess_inline_javadoc_replace(self, tag, f, s):
        """Replace every inline '{@tag ...}' occurrence with f(contents)."""
        parts = []

        start = '{@' + tag
        start_length = len(start)

        i = s.find(start)
        j = 0

        while i != -1:
            parts.append(s[j:i])

            # Find a closing bracket such that the brackets are balanced
            # between them. This is necessary since code examples containing
            # { and } are commonly wrapped in {@code ...} tags

            try:
                j = s.find('}', i + start_length) + 1
                while s.count('{', i, j) != s.count('}', i, j):
                    j = s.index('}', j) + 1
            except ValueError:
                raise ValueError('Unbalanced {} brackets in ' + tag + ' tag')

            parts.append(f(s[i + start_length:j - 1].strip()))
            i = s.find(start, j)

        parts.append(s[j:])

        return ''.join(parts)

    def _preprocess_replace_javadoc_link(self, s):
        """Turn a '{@link target label}' body into an '<a href="@...">' tag."""
        s = self._compress_whitespace(s)

        target = None
        label = ''

        if ' ' not in s:
            target = s
        else:
            # Split target from label at the first space that is not inside
            # the parentheses of a method signature.
            i = s.find(' ')

            while s.count('(', 0, i) != s.count(')', 0, i):
                i = s.find(' ', i + 1)

                if i == -1:
                    i = len(s)
                    break

            target = s[:i]
            label = s[i:]

        if target[0] == '#':
            target = target[1:]

        target = target.replace('#', '.').replace(' ', '').strip()

        # Strip HTML tags from the target
        target = self._html_tag.sub('', target)

        label = label.strip()

        return '<a href="@%s">%s</a>' % (target, label)

    def _preprocess_close_anchor_tags(self, s):
        # Add closing tags to all anchors so they are better handled by the parser
        return self._preprocess_anchors.sub(r'<a name="\1"></a>', s)

    def _preprocess_fix_entities(self, s):
        return self._preprocess_entity.sub(r'&\1;\2', s)

    def _preprocess(self, s_html):
        """Rewrite inline Javadoc tags and repair sloppy HTML before parsing."""
        to_tag = lambda t: lambda m: '<%s>%s</%s>' % (t, html_escape(m), t)
        s_html = self._preprocess_inline_javadoc_replace('code', to_tag('code'), s_html)
        s_html = self._preprocess_inline_javadoc_replace('literal', to_tag('span'), s_html)
        s_html = self._preprocess_inline_javadoc_replace('docRoot', lambda m: '', s_html)
        s_html = self._preprocess_inline_javadoc_replace('linkplain', self._preprocess_replace_javadoc_link, s_html)
        s_html = self._preprocess_inline_javadoc_replace('link', self._preprocess_replace_javadoc_link, s_html)

        # Make sure all anchor tags are closed
        s_html = self._preprocess_close_anchor_tags(s_html)

        # Fix up some entities without closing ;
        s_html = self._preprocess_fix_entities(s_html)

        return s_html

    # --------------------------------------------------------------------------
    # ---- Conversion entry point ----

    def convert(self, s_html):
        """Convert a Javadoc HTML string to cleaned-up reStructuredText."""
        if not isinstance(s_html, str):
            s_html = str(s_html, 'utf8')

        s_html = self._preprocess(s_html)

        if not s_html.strip():
            return ''

        soup = BeautifulSoup(s_html, self._parser)
        top = soup.html.body

        result = self._process_children(top)

        # Post processing: drop whitespace-only lines, collapse blank runs.
        result = self._post_process_empty_lines.sub('', result)
        result = self._post_process_compress_lines.sub('\n\n', result)
        result = result.strip()

        return result
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import unicode_literals
+from builtins import str
+
+import logging
+import re
+import sys
+
class StringBuilder(list):
    """A list of string fragments that joins itself on demand."""

    def build(self):
        # Delegate to __str__ so both entry points stay in sync.
        return str(self)

    def __str__(self):
        return ''.join(self)
+
class Directive(object):
    """Programmatic builder for a single reST directive, its option lines,
    and its indented content block."""

    def __init__(self, type, argument=''):
        self.type = type
        self.argument = argument

        self.options = []
        self.content = []

    def add_option(self, name, value=''):
        self.options.append((name, value))

    def add_content(self, o):
        assert o is not None
        self.content.append(o)

    def build(self):
        """Render '.. type:: argument', the options, then content indented
        underneath the directive header."""
        doc = Document()
        doc.add_line('.. %s:: %s' % (self.type, self.argument))

        # NOTE(review): the option/content indent widths were reconstructed
        # as three spaces (standard reST directive indent) — the original
        # source's leading whitespace was lost; confirm against upstream.
        for name, value in self.options:
            doc.add_line('   :%s: %s\n' % (name, value))

        content = Document()

        for obj in self.content:
            content.add_object(obj)

        doc.clear()
        for line in content.build().splitlines():
            doc.add_line('   ' + line)
        doc.clear()

        return doc.build()
+
class Document(object):
    """Accumulates strings, Directives, and nested Documents and renders
    them to a whitespace-normalized reST string."""

    remove_trailing_whitespace_re = re.compile('[ \t]+$', re.MULTILINE)
    # The concatenation below is just the pattern '\n{3,}' (3+ newlines).
    collapse_empty_lines_re = re.compile('\n' + '{3,}', re.DOTALL)

    def __init__(self):
        self.content = []

    def add_object(self, o):
        assert o is not None

        self.content.append(o)

    def add(self, s):
        self.add_object(s)

    def add_line(self, s):
        self.add(s)
        self.add('\n')

    def add_heading(self, s, t='-'):
        # reST heading: the underline must cover the heading text.
        self.add_line(s)
        self.add_line(t * len(s))

    def clear(self):
        # Paragraph break; runs of breaks collapse to one during build().
        self.add('\n\n')

    def build(self):
        """Render all content. Directives are isolated by blank lines; then
        trailing whitespace is stripped and 3+ newlines collapse to 2."""
        output = StringBuilder()

        for obj in self.content:
            if isinstance(obj, Directive):
                output.append('\n\n')
                output.append(obj.build())
                output.append('\n\n')
            elif isinstance(obj, Document):
                output.append(obj.build())
            else:
                output.append(str(obj))

        output.append('\n\n')

        output = str(output)
        output = self.remove_trailing_whitespace_re.sub('', output)
        output = self.collapse_empty_lines_re.sub('\n\n', output)

        return output
+
def error(s, *args, **kwargs):
    """Log an error message, then terminate the process with exit status 1."""
    logging.error(s, *args, **kwargs)
    sys.exit(1)
+
def unexpected(s, *args, **kwargs):
    """Log a message with the active exception's traceback, then exit(1)."""
    logging.exception(s, *args, **kwargs)
    sys.exit(1)
--- /dev/null
+#
+# Copyright 2012-2015 Bronto Software, Inc. and contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from setuptools import setup
+
# Packaging metadata for the javasphinx Sphinx extension (bundled copy).
setup(
    name = "javasphinx",
    packages = ["javasphinx"],
    version = "0.9.15",
    author = "Chris Thunes",
    author_email = "cthunes@brewtab.com",
    url = "http://github.com/bronto/javasphinx",
    description = "Sphinx extension for documenting Java projects",
    license = "Apache 2.0",
    classifiers = [
        "Programming Language :: Python",
        "Development Status :: 4 - Beta",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: Apache Software License",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries"
    ],
    install_requires=[
        "javalang>=0.10.1",
        "lxml",
        "beautifulsoup4",
        "future",
        "docutils",
        "sphinx"
    ],
    # Exposes the apidoc generator as the 'javasphinx-apidoc' CLI tool.
    entry_points={
        'console_scripts': [
            'javasphinx-apidoc = javasphinx.apidoc:main'
        ]
    },
    long_description = """\
==========
javasphinx
==========

javasphinx is an extension to the Sphinx documentation system which adds support
for documenting Java projects. It includes a Java domain for writing
documentation manually and a javasphinx-apidoc utility which will automatically
generate API documentation from existing Javadoc markup.
"""
)
return [par, node]
def add_assets(app):
- app.add_stylesheet(CSS_FILE)
- app.add_javascript(JS_FILE)
+ app.add_css_file(CSS_FILE)
+ app.add_js_file(JS_FILE)
def copy_assets(app, exception):
Java bindings
*************
-This section describes jMSG, the Java API to Simgrid. This API mimicks
+This section describes jMSG, the Java API to Simgrid. This API mimics
:ref:`MSG <MSG_doc>`, which is a simple yet somehow realistic interface.
The full reference documentation is provided at the end of this page.
A big difference with TCP sockets or MPI communications is that
communications do not start right away after a
-:cpp:func:`Mailbox::put() <simgrid::s4u::Mailbox::put()>`, but wait
-for the corresponding :cpp:func:`Mailbox::get() <simgrid::s4u::Mailbox::get()>`.
+:cpp:func:`Mailbox::put() <simgrid::s4u::Mailbox::put>`, but wait
+for the corresponding :cpp:func:`Mailbox::get() <simgrid::s4u::Mailbox::get>`.
You can change this by :ref:`declaring a receiving actor <s4u_receiving_actor>`.
A big difference with twitter hashtags is that SimGrid does not
on the data you want to get from the mailbox. To model such settings
in SimGrid, you'd have one mailbox per potential topic, and subscribe
to each topic individually with a
-:cpp:func:`get_async() <simgrid::s4u::Mailbox::get_async()>` on each mailbox.
-Then, use :cpp:func:`Comm::wait_any() <simgrid::s4u::Comm::wait_any()>`
+:cpp:func:`get_async() <simgrid::s4u::Mailbox::get_async>` on each mailbox.
+Then, use :cpp:func:`Comm::wait_any() <simgrid::s4u::Comm::wait_any>`
to get the first message on any of the mailbox you are subscribed onto.
The mailboxes are not located on the network, and you can access
.. autodoxymethod:: simgrid::s4u::Actor::get_host
.. autodoxymethod:: simgrid::s4u::Actor::set_host
- .. autodoxymethod:: simgrid::s4u::Actor::get_refcount()
+ .. autodoxymethod:: simgrid::s4u::Actor::get_refcount
.. autodoxymethod:: simgrid::s4u::Actor::get_impl
.. group-tab:: Python
.. autodoxymethod:: simgrid::s4u::Actor::suspend()
.. autodoxymethod:: simgrid::s4u::Actor::resume()
- .. autodoxymethod:: simgrid::s4u::Actor::is_suspended()
+ .. autodoxymethod:: simgrid::s4u::Actor::is_suspended
.. group-tab:: Python
.. autodoxymethod:: sg_actor_suspend(sg_actor_t actor)
.. autodoxymethod:: sg_actor_resume(sg_actor_t actor)
- .. autodoxymethod:: sg_actor_is_suspended(sg_actor_t actor)
+ .. autodoxymethod:: sg_actor_is_suspended(const_sg_actor_t actor)
Specifying when actors should terminate
---------------------------------------
.. autodoxymethod:: simgrid::s4u::Actor::kill()
.. autodoxymethod:: simgrid::s4u::Actor::kill_all()
.. autodoxymethod:: simgrid::s4u::Actor::set_kill_time(double time)
- .. autodoxymethod:: simgrid::s4u::Actor::get_kill_time()
+ .. autodoxymethod:: simgrid::s4u::Actor::get_kill_time
.. autodoxymethod:: simgrid::s4u::Actor::restart()
.. autodoxymethod:: simgrid::s4u::Actor::daemonize()
.. autodoxyvar:: simgrid::s4u::Actor::on_creation
.. autodoxyvar:: simgrid::s4u::Actor::on_suspend
- .. cpp:var:: xbt::signal<void(const Actor&, const Host & previous_location)> Actor::on_host_change
+ .. cpp:var:: xbt::signal<void(const simgrid::s4u::Actor&, const simgrid::s4u::Host & previous_location)> Actor::on_host_change
Signal fired when an actor is migrated from one host to another.
.. autodoxymethod:: simgrid::s4u::this_actor::is_maestro()
.. autodoxymethod:: simgrid::s4u::this_actor::get_host()
- .. autodoxymethod:: simgrid::s4u::this_actor::set_host(Host *new_host)
+ .. autodoxymethod:: simgrid::s4u::this_actor::set_host(simgrid::s4u::Host *new_host)
.. group-tab:: Python
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::this_actor::exec_async(double flops_amounts)
- .. autodoxymethod:: simgrid::s4u::this_actor::exec_init(const std::vector< s4u::Host *> &hosts, const std::vector< double > &flops_amounts, const std::vector< double > &bytes_amounts)
+ .. autodoxymethod:: simgrid::s4u::this_actor::exec_async
+ .. autodoxymethod:: simgrid::s4u::this_actor::exec_init(const std::vector< s4u::Host * > &hosts, const std::vector< double > &flops_amounts, const std::vector< double > &bytes_amounts)
.. autodoxymethod:: simgrid::s4u::this_actor::exec_init(double flops_amounts)
.. autodoxymethod:: simgrid::s4u::this_actor::execute(double flop)
.. autodoxymethod:: simgrid::s4u::this_actor::execute(double flop, double priority)
.. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, bool value)
.. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, double value)
.. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, int value)
- .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, std::string value)
+ .. autodoxymethod:: simgrid::s4u::Engine::set_config(const std::string &name, const std::string &value)
- .. autodoxymethod:: simgrid::s4u::Engine::load_deployment(const std::string &deploy)
- .. autodoxymethod:: simgrid::s4u::Engine::load_platform(const std::string &platf)
+ .. autodoxymethod:: simgrid::s4u::Engine::load_deployment
+ .. autodoxymethod:: simgrid::s4u::Engine::load_platform
.. autodoxymethod:: simgrid::s4u::Engine::register_actor(const std::string &name)
.. autodoxymethod:: simgrid::s4u::Engine::register_actor(const std::string &name, F code)
.. autodoxymethod:: simgrid::s4u::Engine::register_default(void(*code)(int, char **))
.. group-tab:: C++
.. autodoxymethod:: simgrid::s4u::Engine::get_clock()
- .. autodoxymethod:: simgrid::s4u::Engine::run()
+ .. autodoxymethod:: simgrid::s4u::Engine::run
.. group-tab:: Python
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Engine::get_actor_count()
- .. autodoxymethod:: simgrid::s4u::Engine::get_all_actors()
- .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_actors(const std::function< bool(ActorPtr)> &filter)
+ .. autodoxymethod:: simgrid::s4u::Engine::get_actor_count
+ .. autodoxymethod:: simgrid::s4u::Engine::get_all_actors
+ .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_actors
.. group-tab:: C
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Engine::get_all_hosts()
- .. autodoxymethod:: simgrid::s4u::Engine::get_host_count()
- .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_hosts(const std::function< bool(Host *)> &filter)
- .. autodoxymethod:: simgrid::s4u::Engine::host_by_name(const std::string &name)
- .. autodoxymethod:: simgrid::s4u::Engine::host_by_name_or_null(const std::string &name)
+ .. autodoxymethod:: simgrid::s4u::Engine::get_all_hosts
+ .. autodoxymethod:: simgrid::s4u::Engine::get_host_count
+ .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_hosts
+ .. autodoxymethod:: simgrid::s4u::Engine::host_by_name
+ .. autodoxymethod:: simgrid::s4u::Engine::host_by_name_or_null
.. group-tab:: Python
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Engine::get_all_links()
- .. autodoxymethod:: simgrid::s4u::Engine::get_link_count()
+ .. autodoxymethod:: simgrid::s4u::Engine::get_all_links
+ .. autodoxymethod:: simgrid::s4u::Engine::get_link_count
.. autodoxymethod:: simgrid::s4u::Engine::get_filtered_links
- .. autodoxymethod:: simgrid::s4u::Engine::link_by_name(const std::string &name)
- .. autodoxymethod:: simgrid::s4u::Engine::link_by_name_or_null(const std::string &name)
+ .. autodoxymethod:: simgrid::s4u::Engine::link_by_name
+ .. autodoxymethod:: simgrid::s4u::Engine::link_by_name_or_null
Interacting with the routing
----------------------------
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Engine::get_all_netpoints()
- .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_netzones()
+ .. autodoxymethod:: simgrid::s4u::Engine::get_all_netpoints
+ .. autodoxymethod:: simgrid::s4u::Engine::get_filtered_netzones
.. autodoxymethod:: simgrid::s4u::Engine::get_instance()
- .. autodoxymethod:: simgrid::s4u::Engine::get_netzone_root()
- .. autodoxymethod:: simgrid::s4u::Engine::netpoint_by_name_or_null(const std::string &name)
- .. autodoxymethod:: simgrid::s4u::Engine::netzone_by_name_or_null(const std::string &name)
+ .. autodoxymethod:: simgrid::s4u::Engine::get_netzone_root
+ .. autodoxymethod:: simgrid::s4u::Engine::netpoint_by_name_or_null
+ .. autodoxymethod:: simgrid::s4u::Engine::netzone_by_name_or_null
.. autodoxymethod:: simgrid::s4u::Engine::set_netzone_root(const NetZone *netzone)
Signals
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Mailbox::get_cname() const
- .. autodoxymethod:: simgrid::s4u::Mailbox::get_name() const
+ .. autodoxymethod:: simgrid::s4u::Mailbox::get_cname
+ .. autodoxymethod:: simgrid::s4u::Mailbox::get_name
.. group-tab:: Python
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Mailbox::empty()
- .. autodoxymethod:: simgrid::s4u::Mailbox::front()
+ .. autodoxymethod:: simgrid::s4u::Mailbox::empty
+ .. autodoxymethod:: simgrid::s4u::Mailbox::front
.. autodoxymethod:: simgrid::s4u::Mailbox::get()
.. autodoxymethod:: simgrid::s4u::Mailbox::get(double timeout)
.. autodoxymethod:: simgrid::s4u::Mailbox::get_async(void **data)
.. autodoxymethod:: simgrid::s4u::Mailbox::get_init()
.. autodoxymethod:: simgrid::s4u::Mailbox::iprobe(int type, bool(*match_fun)(void *, void *, kernel::activity::CommImpl *), void *data)
- .. autodoxymethod:: simgrid::s4u::Mailbox::listen()
- .. autodoxymethod:: simgrid::s4u::Mailbox::ready()
+ .. autodoxymethod:: simgrid::s4u::Mailbox::listen
+ .. autodoxymethod:: simgrid::s4u::Mailbox::ready
.. group-tab:: Python
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Mailbox::get_receiver()
+ .. autodoxymethod:: simgrid::s4u::Mailbox::get_receiver
.. autodoxymethod:: simgrid::s4u::Mailbox::set_receiver(ActorPtr actor)
.. group-tab:: C
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::Host::exec_async(double flops_amounts)
- .. autodoxymethod:: simgrid::s4u::Host::execute(double flops)
- .. autodoxymethod:: simgrid::s4u::Host::execute(double flops, double priority)
+ .. autodoxymethod:: simgrid::s4u::Host::exec_async
+ .. autodoxymethod:: simgrid::s4u::Host::execute(double flops) const
+ .. autodoxymethod:: simgrid::s4u::Host::execute(double flops, double priority) const
Platform and routing
--------------------
.. autodoxymethod:: simgrid::s4u::Host::get_englobing_zone()
.. autodoxymethod:: simgrid::s4u::Host::get_netpoint() const
- .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< Link *> &links, double *latency) const
- .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< kernel::resource::LinkImpl *> &links, double *latency) const
+ .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< Link * > &links, double *latency) const
+ .. autodoxymethod:: simgrid::s4u::Host::route_to(const Host *dest, std::vector< kernel::resource::LinkImpl * > &links, double *latency) const
.. autodoxymethod:: simgrid::s4u::Host::sendto(Host *dest, double byte_amount)
.. autodoxymethod:: simgrid::s4u::Host::sendto_async(Host *dest, double byte_amount)
.. autodoxymethod:: sg_link_latency(const_sg_link_t link)
.. autodoxymethod:: sg_link_name(const_sg_link_t link)
+Modifying characteristics
+-------------------------
+
+.. tabs::
+
+ .. group-tab:: C++
+
+ .. autodoxymethod:: simgrid::s4u::Link::set_bandwidth(double value)
+ .. autodoxymethod:: simgrid::s4u::Link::set_latency(double value)
+
User data and properties
------------------------
Dynamic profiles
----------------
+See :ref:`howto_churn` for more details.
+
.. tabs::
.. group-tab:: C++
.. autodoxymethod:: simgrid::s4u::Link::set_latency_profile(kernel::profile::Profile *profile)
.. autodoxymethod:: simgrid::s4u::Link::set_state_profile(kernel::profile::Profile *profile)
+WIFI links
+----------
+
+.. tabs::
+
+ .. group-tab:: C++
+
+ .. autodoxymethod:: simgrid::s4u::Link::set_host_wifi_rate
+
Signals
-------
.. group-tab:: C++
- .. autodoxymethod:: simgrid::s4u::NetZone::add_bypass_route(kernel::routing::NetPoint *src, kernel::routing::NetPoint *dst, kernel::routing::NetPoint *gw_src, kernel::routing::NetPoint *gw_dst, std::vector< kernel::resource::LinkImpl *> &link_list, bool symmetrical)
+ .. autodoxymethod:: simgrid::s4u::NetZone::add_bypass_route
.. autodoxymethod:: simgrid::s4u::NetZone::add_component(kernel::routing::NetPoint *elm)
- .. autodoxymethod:: simgrid::s4u::NetZone::add_route(kernel::routing::NetPoint *src, kernel::routing::NetPoint *dst, kernel::routing::NetPoint *gw_src, kernel::routing::NetPoint *gw_dst, std::vector< kernel::resource::LinkImpl *> &link_list, bool symmetrical)
+ .. autodoxymethod:: simgrid::s4u::NetZone::add_route
.. autodoxymethod:: simgrid::s4u::NetZone::get_children() const
.. autodoxymethod:: simgrid::s4u::NetZone::get_father()
.. autodoxymethod:: simgrid::s4u::Activity::wait_until(double time_limit)
.. autodoxymethod:: simgrid::s4u::Activity::vetoable_start()
+Suspending and resuming an activity
+-----------------------------------
+
+.. tabs::
+
+ .. group-tab:: C++
+
+ .. autodoxymethod:: simgrid::s4u::Activity::suspend
+ .. autodoxymethod:: simgrid::s4u::Activity::resume
+ .. autodoxymethod:: simgrid::s4u::Activity::is_suspended
+
.. _API_s4u_Comm:
=============
- In some cases, you may want to replay an execution trace in the simulator. This
trace lists the events of your application or of your workload, and
your application is decomposed as a list of event handlers that are
- fired according to the trace. SimGrid comes with a build-in support
+ fired according to the trace. SimGrid comes with a built-in support
for MPI traces (with solutions to import traces captured by several
MPI profilers). You can reuse this mechanism for any kind of trace
that you want to replay, for example to study how a P2P DHT overlay
# Setup the autodoxy extension
doxygen_xml = os.path.join(os.path.dirname(__file__), "..", "build", "xml")
+autodoxy_requalified_identifiers = [ # The first element is substituted with the second one when it starts an element or is preceded by a space
+ ("Activity", "simgrid::s4u::Activity"),
+ ("Actor", "simgrid::s4u::Actor"),
+ ("ActorPtr", "simgrid::s4u::ActorPtr"),
+ ("Barrier", "simgrid::s4u::Barrier"),
+ ("BarrierPtr", "simgrid::s4u::BarrierPtr"),
+ ("Comm", "simgrid::s4u::Comm"),
+ ("CommPtr", "simgrid::s4u::CommPtr"),
+ ("ConditionVariable", "simgrid::s4u::ConditionVariable"),
+ ("ConditionVariablePtr", "simgrid::s4u::ConditionVariablePtr"),
+ ("Disk", "simgrid::s4u::Disk"),
+ ("Exec", "simgrid::s4u::Exec"),
+ ("ExecPtr", "simgrid::s4u::ExecPtr"),
+ ("Host", "simgrid::s4u::Host"),
+ ("s4u::Host", "simgrid::s4u::Host"),
+ ("Engine", "simgrid::s4u::Engine"),
+ ("Io", "simgrid::s4u::Io"),
+ ("Link", "simgrid::s4u::Link"),
+ ("Mailbox", "simgrid::s4u::Mailbox"),
+ ("Mutex", "simgrid::s4u::Mutex"),
+ ("s4u::Mutex", "simgrid::s4u::Mutex"),
+ ("s4u::MutexPtr", "simgrid::s4u::MutexPtr"),
+ ("NetZone", "simgrid::s4u::NetZone"),
+ ("Semaphore", "simgrid::s4u::Semaphore"),
+ ("VirtualMachine", "simgrid::s4u::VirtualMachine"),
+ ]
+
+# Generate a warning for every cross-reference (such as :func:`myfunc`) that cannot be found
+nitpicky = True
+nitpick_ignore = [
+ ('cpp:identifier', 'boost'),
+ ('cpp:identifier', 'boost::intrusive_ptr<Activity>'),
+ ('cpp:identifier', 'boost::intrusive_ptr<Actor>'),
+ ('cpp:identifier', 'boost::intrusive_ptr<Barrier>'),
+ ('cpp:identifier', 'boost::intrusive_ptr<Comm>'),
+ ('cpp:identifier', 'boost::intrusive_ptr<Io>'),
+ ('cpp:identifier', 'boost::intrusive_ptr<Mutex>'),
+ ('cpp:identifier', 'kernel'),
+ ('cpp:identifier', 'kernel::activity'),
+ ('cpp:identifier', 'kernel::profile'),
+ ('cpp:identifier', 'kernel::resource'),
+ ('cpp:identifier', 'kernel::routing'),
+ ('cpp:identifier', 'simgrid'),
+ ('cpp:identifier', 'simgrid::s4u'),
+ ('cpp:identifier', 'this_actor'),
+ ('cpp:identifier', 's4u'),
+ ('cpp:identifier', 'size_t'),
+ ('cpp:identifier', 'uint64_t'),
+ ('cpp:identifier', 'xbt'),
+ ('cpp:identifier', 'xbt::string'),
+]
# For cross-ref generation
primary_domain = 'cpp'
# -- Other options
-nitpicky = True # Generate a warning for all a cross-reference (such as :func:`myfunc`) that cannot be found
validity of your simulation. Just install ns-3 and recompile SimGrid
accordingly.
+The SimGrid/ns-3 binding only contains features that are common to both systems.
+Not all ns-3 models are available from SimGrid (only the TCP and WiFi ones are),
+while not all SimGrid platform files can be used in conjunction with ns-3 (routes
+must be of length 1). Also, the platform built in ns-3 from the SimGrid
+description is very basic.
+
+
Compiling the ns-3/SimGrid binding
**********************************
recent version of both SimGrid and ns-3. While the Debian package of SimGrid
don't have the ns-3 bindings activated, you can still use the packaged version
of ns-3 by grabbing the ``libns3-dev ns3`` packages. Alternatively, you can
-install ns-3 from scratch as follows:
-
-.. code-block:: shell
-
- # Download the source
- wget http://www.nsnam.org/release/ns-allinone-3.29.tar.bz2
- tar -xf ns-allinone-3.29.tar.bz2
- cd ns-allinone-3.29/ns-3.29/
- # Configure, build and install
- ./waf configure --prefix="/opt/ns3" # or give another path if you prefer
- ./waf
- ./waf install
-
-For more information, please refer to the ns-3 documentation
-(`official website <http://www.nsnam.org>`_).
+install ns-3 from scratch (see the `ns-3 documentation <http://www.nsnam.org>`_).
Enabling ns-3 in SimGrid
========================
SimGrid must be recompiled with the ``enable_ns3`` option activated in cmake.
-Optionally, use ``NS3_HINT`` to hint cmake about where to find ns-3.
+Optionally, use ``NS3_HINT`` to tell cmake where ns3 is installed on
+your disk.
.. code-block:: shell
was correctly detected. Otherwise, explore ``CMakeFiles/CMakeOutput.log`` and
``CMakeFiles/CMakeError.log`` to diagnose the problem.
-Test your installation after compilation as follows:
+Test that ns-3 was successfully integrated with the following (from your SimGrid
+build directory). It will run all SimGrid tests that are related to the ns-3
+integration. If no test is run at all, you probably forgot to enable ns-3 in cmake.
.. code-block:: shell
Using ns-3 from SimGrid
***********************
-The SimGrid/ns-3 binding only contains features that are common to both
-systems. Also, the platform built in ns-3 from the
-SimGrid description is very basic.
-
Platform files compatibility
============================
Any route longer than one will be ignored when using ns-3. They are
harmless, but you still need to connect your hosts using one-hop routes.
The best solution is to add routers to split your route. Here is an
-example of invalid platform:
-
-.. code-block:: shell
-
- <?xml version='1.0'?><!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
- <platform version="4.1">
- <AS id="AS0" routing="Floyd">
-
- <host id="alice" speed="1Gf" />
- <host id="bob" speed="1Gf" />
-
- <link id="l1" bandwidth="1Mbps" latency="5ms" />
- <link id="l2" bandwidth="1Mbps" latency="5ms" />
-
- <route src="alice" dst="bob">
- <link_ctn id="l1"/> <!-- !!!! INVALID WITH ns-3 !!!! -->
- <link_ctn id="l2"/> <!-- !!!! length=2 IS TOO MUCH !!!! -->
- </route>
- </AS>
- </platform>
-
+example of an invalid platform:
+
+.. code-block:: xml
+
+ <?xml version='1.0'?>
+ <!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
+ <platform version="4.1">
+ <zone id="zone0" routing="Floyd">
+ <host id="alice" speed="1Gf" />
+ <host id="bob" speed="1Gf" />
+
+ <link id="l1" bandwidth="1Mbps" latency="5ms" />
+ <link id="l2" bandwidth="1Mbps" latency="5ms" />
+
+ <route src="alice" dst="bob">
+ <link_ctn id="l1"/> <!-- !!!! IGNORED WHEN USED WITH ns-3 !!!! -->
+ <link_ctn id="l2"/> <!-- !!!! ROUTES MUST CONTAIN ONE LINK ONLY !!!! -->
+ </route>
+ </zone>
+ </platform>
+
This can be reformulated as follows to make it usable with the ns-3 binding.
-There is no direct connection from alice to bob, but that's OK because
-ns-3 automatically routes from point to point.
-
-.. code-block:: shell
-
- <?xml version='1.0'?><!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
- <platform version="4.1">
- <AS id="AS0" routing="Floyd">
-
- <host id="alice" speed="1Gf"/>
- <host id="bob" speed="1Gf"/>
-
- <router id="r1"/> <!-- routers are compute-less hosts -->
-
- <link id="l1" bandwidth="1Mbps" latency="5ms"/>
- <link id="l2" bandwidth="1Mbps" latency="5ms"/>
-
- <route src="alice" dst="r1">
- <link_ctn id="l1"/>
- </route>
-
- <route src="r1" dst="bob">
- <link_ctn id="l2"/>
- </route>
- </AS>
- </platform>
+There is no direct connection from alice to bob, but that's OK because ns-3
+automatically routes from point to point (using
+``ns3::Ipv4GlobalRoutingHelper::PopulateRoutingTables``).
+
+.. code-block:: xml
+
+ <?xml version='1.0'?>
+ <!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
+ <platform version="4.1">
+ <zone id="zone0" routing="Full">
+ <host id="alice" speed="1Gf" />
+ <host id="bob" speed="1Gf" />
+
+ <router id="r1" /> <!-- routers are compute-less hosts -->
+
+ <link id="l1" bandwidth="1Mbps" latency="5ms"/>
+ <link id="l2" bandwidth="1Mbps" latency="5ms"/>
+
+ <route src="alice" dst="r1">
+ <link_ctn id="l1"/>
+ </route>
+
+ <route src="r1" dst="bob">
+ <link_ctn id="l2"/>
+ </route>
+ </zone>
+ </platform>
Once your platform is OK, just change the :ref:`network/model
-<options_model_select>`_ configuration option to "ns-3" as follows. The rest
-is unchanged.
+<options_model_select>` configuration option to `ns-3` as follows. The other
+options can be used as usual.
.. code-block:: shell
./network-ns3 --cfg=network/model:ns-3 (other parameters)
-Many other files from the ``examples/platform directory`` are usable with the
-ns-3 model.
+Many other files from the ``examples/platform`` directory are usable with the
+ns-3 model, such as `examples/platforms/dogbone.xml <https://framagit.org/simgrid/simgrid/tree/master/examples/platforms/dogbone.xml>`_.
+Check the file `examples/s4u/network-ns3/network-ns3.tesh <https://framagit.org/simgrid/simgrid/tree/master/examples/s4u/network-ns3/network-ns3.tesh>`_
+to see which ones are used in our regression tests.
-Build a wifi-compatible platform
-===================================
+WiFi platforms
+--------------
-We describe here a simple platform allowing ns3 wifi communication
-between two simgrid hosts.
+In SimGrid, WiFi networks are modeled as regular links with a specific
+attribute, and these links are then added to routes between hosts. The main
+difference when using ns-3 WiFi networks is that the network performance is not
+given by the link bandwidth and latency but by the access point WiFi
+characteristics, and the distance between the access point and the hosts (called
+stations in the WiFi world).
-First, here are the mandatory information necessary to create a
-simgrid platform:
+So, to declare a new WiFi network, simply declare a link with the ``WiFi``
+sharing policy as you would do in a pure SimGrid simulation (you must still
+provide the ``bandwidth`` and ``latency`` attributes even if they are ignored,
+because they are mandatory to the SimGrid XML parser).
-.. code-block:: shell
-
- <?xml version='1.0'?><!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
- <platform version="4.1">
- <AS id="AS0" routing="Floyd">
-
-Then, we create our access point and station hosts:
+.. code-block:: xml
-.. code-block:: shell
+ <link id="net0" bandwidth="0" latency="0" sharing_policy="WIFI"/>
- <host id="alice" speed="1Gf"/>
- <host id="bob" speed="1Gf"/>
+To declare that a given host is connected to this WiFi zone, use the
+``wifi_link`` property of that host. The property value must be the link id that
+you want to use as a WiFi zone. This is not needed when using pure SimGrid wifi,
+only when using ns-3 wifi, because the wifi performance is :ref:`configured <ns3_wifi_perf>`.
-We must specify that alice will be our access point. To do that we
-simply add the property ``wifi_link`` to the host ``alice``:
+.. code-block:: xml
-.. code-block:: shell
+ <host id="alice" speed="1Gf">
+ <prop id="wifi_link" value="net0"/>
+ </host>
- <host id="alice" speed="1Gf">
- <prop id="wifi_link" value="net0"/>
- </host>
+To connect the station node to the access point node, simply create a route
+between them:
- <host id="bob" speed="1Gf"/>
+.. code-block:: xml
-The value ``net0`` of this property defines the name of the wifi network
-generated. To generate this wifi network we create a wifi link:
+ <route src="alice" dst="bob">
+ <link_ctn id="net0" />
+ </route>
-.. code-block:: shell``
+.. _ns3_wifi_perf:
- <link id="net0" bandwidth="0" latency="0" sharing_policy="WIFI"/>
+WiFi network performance
+^^^^^^^^^^^^^^^^^^^^^^^^
-The important information here are:
- * The id of the link, ``net0``, must match the network name defined by the property ``wifi_link`` of the access point node
- * The sharing policy must be set to ``WIFI``
-Note: bandwidth and latency are mandatory by simgrid to create a link but are NOT used to create a wifi network. Instead the
-wifi network capabilities are defined by its MCS, NSS and distance from access point to station. Those properties are described in section :ref:`Optional access point node properties <optional_prop>`_
+The performance of a wifi network is controlled by 3 properties that can be
+added to the hosts connected to the wifi zone:
-To connect the station node to the access point node, we
-create a route between the hosts:
+ * ``wifi_mcs`` (`Modulation and Coding Scheme <https://en.wikipedia.org/wiki/Link_adaptation>`_)
+ Roughly speaking, it defines the speed at which the access point is
+ exchanging data with all stations. It depends on its model and configuration,
+ and the possible values are listed for example on Wikipedia.
+ |br| By default, ``wifi_mcs=3``.
+ * ``wifi_nss`` (Number of Spatial Streams, or `number of antennas <https://en.wikipedia.org/wiki/IEEE_802.11n-2009#Number_of_antennas>`_)
+ defines the amount of simultaneous data streams that the AP can sustain.
+    Not all values of MCS and NSS are valid nor compatible (cf. `802.11n standard <https://en.wikipedia.org/wiki/IEEE_802.11n-2009#Data_rates>`_).
+ |br| By default, ``wifi_nss=1``.
+ * ``wifi_distance`` is the distance from the station to the access point. Each
+ station can have a specific value.
+ |br| By default, ``wifi_distance=10``.
-.. code-block:: shell
+Here is an example of a host changing all these values:
- <route src="alice" dst="bob">
- <link_ctn id="net0" />
- </route>
+.. code-block:: xml
-Finally, we end the xml file with the missing closing tags:
-
-.. code-block:: shell
-
- </AS>
- </platform>
-
-.. _optional_prop:
-
-Optional access point node properties
---------------------------------------
-
-The MCS (`Modulation and Coding Scheme <https://en.wikipedia.org/wiki/Link_adaptation>`_) can be set with the property ``wifi_mcs``:
-
-.. code-block:: shell
-
- <host id="alice" speed="1Gf">
- <prop id="wifi_link" value="net0"/>
- <prop id="wifi_mcs" value="5"/>
- </host>
-
-Its default value is 3.
-
-The NSS (Number of Spatial Streams, also known as the `number of antennas <https://en.wikipedia.org/wiki/IEEE_802.11n-2009#Number_of_antennas>`_) can be set with the property ``wifi_nss``:
-
-.. code-block:: shell
-
- <host id="alice" speed="1Gf">
- <prop id="wifi_link" value="net0"/>
- <prop id="wifi_nss" value="2"/>
- </host>
-
-Its default value is 1.
-
-Note: not all value of MCS and NSS are valid nor compatible. Check `802.11n standard <https://en.wikipedia.org/wiki/IEEE_802.11n-2009#Data_rates>`_ for more information.
-
-Optional station node properties
----------------------------------
-
-The distance in meter at which the station is placed from the access point can
-be set with the property ``wifi_distance``.
-
-.. code-block:: shell
-
- <host id="alice" speed="100.0Mf,50.0Mf,20.0Mf" pstate="0">
- <prop id="wifi_distance" value="30" />
- </host>
-
-Its default value is 10.
+ <host id="alice" speed="100.0Mf,50.0Mf,20.0Mf" pstate="0">
+ <prop id="wifi_link" value="net0"/>
+ <prop id="wifi_mcs" value="5"/>
+ <prop id="wifi_nss" value="2"/>
+ <prop id="wifi_distance" value="30" />
+ </host>
Limitations
===========
A ns-3 platform is automatically created from the provided SimGrid
platform. However, there are some known caveats:
- * The default values (e.g., TCP parameters) are the ns-3 default values.
- * ns-3 networks are routed using the shortest path algorithm, using
- ``ns3::Ipv4GlobalRoutingHelper::PopulateRoutingTables``.
-
+ * The default values (e.g., TCP parameters) are the ns-3 default values.
+ * ns-3 networks are routed using the shortest path algorithm, using ``ns3::Ipv4GlobalRoutingHelper::PopulateRoutingTables``.
+ * End hosts cannot have more than one interface card. So, your SimGrid hosts
+ should be connected to the platform through only one link. Otherwise, your
+ SimGrid host will be considered as a router (FIXME: is it still true?).
+
Our goal is to keep the ns-3 plugin of SimGrid as easy (and hopefully readable)
as possible. If the current state does not fit your needs, you should modify
this plugin, and/or create your own plugin from the existing one. If you come up
use routes of length 1, and that any host is connected to the platform.
Arguably, SimGrid could detect this situation and report it, but unfortunately,
this is still to be done.
+
+.. |br| raw:: html
+
+ <br />
- When data is transferred from A to B, some TCP ACK messages travel in the
opposite direction. To reflect the impact of this `cross-traffic`, SimGrid
simulates a flow from B to A that represents an additional bandwidth
- consumption of `0.05`. The route from B to A is implicity declared in the
- platfrom file and uses the same link `link1` as if the two hosts were
+ consumption of `0.05`. The route from B to A is implicitly declared in the
+ platform file and uses the same link `link1` as if the two hosts were
connected through a communication bus. The bandwidth share allocated to the
flow from A to B is then the available bandwidth of `link1` (i.e., 97% of
the nominal bandwidth of 1Mb/s) divided by 1.05 (i.e., the total consumption).
Actors: the Active Entities
===========================
-Starting and Stoping Actors
----------------------------
+Starting and Stopping Actors
+----------------------------
- **Creating actors:**
Most actors are started from the deployment XML file, because this
- is a :ref:`better scientific habbit <howto_science>`, but you can
+ is a :ref:`better scientific habit <howto_science>`, but you can
also create them directly from your code.
.. tabs::
See also :cpp:func:`sg_actor_kill`, :cpp:func:`sg_actor_kill_all`, :cpp:func:`sg_actor_exit`, :cpp:func:`sg_actor_on_exit`.
- - **Controling the actor life cycle from the XML:**
+ - **Controlling the actor life cycle from the XML:**
You can specify a start time and a kill time in the deployment file.
.. tabs::
- **Specify the stack size to use**
The stack size can be specified by default on the command line,
- globally by changing the configuration with :cpp:func:`simgrid::s4u::Engine::set_config(std::string)`,
+ globally by changing the configuration with :cpp:func:`simgrid::s4u::Engine::set_config`,
or for a specific actor using :cpp:func:`simgrid::s4u::Actor::set_stacksize` before its start.
.. tabs::
.. tabs::
- .. example-tab:: examples/s4u/async-wait/s4u-async-wait.cpp
+ .. example-tab:: examples/s4u/comm-wait/s4u-comm-wait.cpp
See also :cpp:func:`simgrid::s4u::Mailbox::put_async()` and :cpp:func:`simgrid::s4u::Comm::wait()`.
- .. example-tab:: examples/python/async-wait/async-wait.py
+ .. example-tab:: examples/python/comm-wait/comm-wait.py
See also :py:func:`simgrid.Mailbox.put_async()` and :py:func:`simgrid.Comm.wait()`.
- .. example-tab:: examples/c/async-wait/async-wait.c
+ .. example-tab:: examples/c/comm-wait/comm-wait.c
See also :cpp:func:`sg_mailbox_put_async()` and :cpp:func:`sg_comm__wait()`.
+ - **Suspending communications:**
+ The ``suspend()`` and ``resume()`` functions make it possible to block
+ the progression of a given communication for a while and then unblock it.
+ ``is_suspended()`` can be used to check whether the activity is
+ currently blocked or not.
+
+ .. tabs::
+
+ .. example-tab:: examples/s4u/comm-suspend/s4u-comm-suspend.cpp
+
+ See also :cpp:func:`simgrid::s4u::Activity::suspend()`
+ :cpp:func:`simgrid::s4u::Activity::resume()` and
+ :cpp:func:`simgrid::s4u::Activity::is_suspended()`.
+
+
- **Waiting for all communications in a set:**
The ``wait_all()`` function is useful when you want to block until
all activities in a given set have completed.
.. tabs::
- .. example-tab:: examples/s4u/async-waitall/s4u-async-waitall.cpp
+ .. example-tab:: examples/s4u/comm-waitall/s4u-comm-waitall.cpp
See also :cpp:func:`simgrid::s4u::Comm::wait_all()`.
- .. example-tab:: examples/python/async-waitall/async-waitall.py
+ .. example-tab:: examples/python/comm-waitall/comm-waitall.py
See also :py:func:`simgrid.Comm.wait_all()`.
- .. example-tab:: examples/c/async-waitall/async-waitall.c
+ .. example-tab:: examples/c/comm-waitall/comm-waitall.c
See also :cpp:func:`sg_comm_wait_all()`.
.. tabs::
- .. example-tab:: examples/s4u/async-waitany/s4u-async-waitany.cpp
+ .. example-tab:: examples/s4u/comm-waitany/s4u-comm-waitany.cpp
See also :cpp:func:`simgrid::s4u::Comm::wait_any()`.
- .. example-tab:: examples/python/async-waitany/async-waitany.py
+ .. example-tab:: examples/python/comm-waitany/comm-waitany.py
See also :py:func:`simgrid.Comm.wait_any()`.
- .. example-tab:: examples/c/async-waitany/async-waitany.c
+ .. example-tab:: examples/c/comm-waitany/comm-waitany.c
See also :cpp:func:`sg_comm_wait_any`.
- **Platform Tracing:**
This program is a toy example just loading the platform, so that
- you can play with the platform visualization. Recommanded options:
+ you can play with the platform visualization. Recommended options:
``--cfg=tracing:yes --cfg=tracing/categorized:yes``
.. tabs::
.. showfile:: examples/platforms/small_platform_one_link_routes.xml
:language: xml
+
+ - **wifi links**
+
+ This demonstrates how to declare a wifi link in your platform and
+ how to use it in your simulation. The basic idea is to have a link
+ whose sharing policy is set to `WIFI`. Such links can have more
+ than one bandwidth value (separated by commas), corresponding to
+ the several SNR levels of your wifi link.
+
+ In this case, SimGrid automatically switches to validated
+ performance models of wifi networks, where the time is shared
+ between users instead of the bandwidth for wired links (the
+ corresponding publication is currently being written).
+
+ If your wifi link provides more than one SNR level, you can switch
+ the level of a given host using
+ :cpp:func:`simgrid::s4u::Link::set_host_wifi_rate`. By default,
+ the first level is used.
+
+ .. tabs::
+
+ .. example-tab:: examples/s4u/network-wifi/s4u-network-wifi.cpp
+
+ .. group-tab:: XML
+
+ **Platform files:**
+
+ .. showfile:: examples/platforms/wifi.xml
+ :language: xml
=======================
Model-Checking Examples
actor-create actor-daemon actor-exiting actor-join actor-kill actor-lifetime actor-migrate actor-stacksize
actor-suspend actor-yield
app-masterworker app-pingpong app-token-ring
- async-wait async-waitall async-waitany
+ comm-wait comm-waitall comm-waitany
cloud-capping cloud-masterworker cloud-migration cloud-simple
dht-pastry
exec-async exec-basic exec-dvfs exec-remote exec-waitany
${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/app-masterworker-multicore_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/app-masterworker-vivaldi_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/app-pingpong/app-pingpong_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait2_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait3_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/async-wait4_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-waitall/async-waitall_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-waitany/async-waitany_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait2_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait3_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/comm-wait4_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitall/comm-waitall_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitany/comm-waitany_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/dht-kademlia/dht-kademlia_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/dht-pastry/dht-pastry_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/io-file-remote/io-file-remote_d.xml
actor-create actor-daemon actor-exiting actor-join actor-kill actor-lifetime actor-migrate actor-stacksize
actor-suspend actor-yield
app-bittorrent app-chainsend app-masterworker app-pingpong app-token-ring
- async-wait async-waitall async-waitany
+ comm-wait comm-waitall comm-waitany
cloud-capping cloud-masterworker cloud-migration cloud-simple
dht-kademlia dht-pastry
exec-async exec-basic exec-dvfs exec-remote exec-waitany
*
* The first step is to declare the code of your actors (what they do exactly does not matter to this example) and then
* you ask SimGrid to start your actors. There is three ways of doing so:
- * - Directly, by instantiating your actor as paramter to Actor::create()
+ * - Directly, by instantiating your actor as parameter to Actor::create()
* - By first registering your actors before instantiating it;
* - Through the deployment file.
*
xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments");
// Build peer object
- peer_t peer = peer_init(xbt_str_parse_int(argv[1], "Invalid ID: %s"), argc == 4 ? 1 : 0);
+ peer_t peer = peer_init((int)xbt_str_parse_int(argv[1], "Invalid ID: %s"), argc == 4 ? 1 : 0);
// Retrieve deadline
peer->deadline = xbt_str_parse_double(argv[2], "Invalid deadline: %s");
// Sending peers to the requesting peer
tracker_answer_t ta = tracker_answer_new(TRACKER_QUERY_INTERVAL);
int next_peer;
- int peers_length = xbt_dynar_length(peers_list);
+ int peers_length = (int)xbt_dynar_length(peers_list);
for (int i = 0; i < MAXIMUM_PEERS && i < peers_length; i++) {
do {
next_peer = xbt_dynar_get_as(peers_list, rand() % peers_length, int);
{
XBT_DEBUG("broadcaster");
xbt_assert(argc > 2);
- unsigned int host_count = xbt_str_parse_int(argv[1], "Invalid number of peers: %s");
+ unsigned int host_count = (unsigned int)xbt_str_parse_int(argv[1], "Invalid number of peers: %s");
sg_mailbox_t* mailboxes = xbt_malloc(sizeof(sg_mailbox_t) * host_count);
free(name);
}
- unsigned int piece_count = xbt_str_parse_int(argv[2], "Invalid number of pieces: %s");
+ unsigned int piece_count = (unsigned int)xbt_str_parse_int(argv[2], "Invalid number of pieces: %s");
broadcaster_t bc = broadcaster_init(mailboxes, host_count, piece_count);
xbt_assert(argc == 5, "The master function expects 4 arguments from the XML deployment file");
long number_of_tasks = xbt_str_parse_int(argv[1], "Invalid amount of tasks: %s"); /* - Number of tasks */
double comp_size = xbt_str_parse_double(argv[2], "Invalid computational size: %s"); /* - Compute cost */
- double comm_size = xbt_str_parse_double(argv[3], "Invalid communication size: %s"); /* - Communication size */
+ long comm_size = xbt_str_parse_int(argv[3], "Invalid communication size: %s"); /* - Communication size */
long workers_count = xbt_str_parse_int(argv[4], "Invalid amount of workers: %s"); /* - Number of workers */
XBT_INFO("Got %ld workers and %ld tasks to process", workers_count, number_of_tasks);
xbt_assert(argc == 0, "The relay_runner function does not accept any parameter from the XML deployment file");
const char* name = sg_actor_self_get_name();
- int rank = xbt_str_parse_int(name, "Any actor of this example must have a numerical name, not %s");
+ int rank = (int)xbt_str_parse_int(name, "Any actor of this example must have a numerical name, not %s");
sg_mailbox_t my_mailbox = sg_mailbox_by_name(name);
{
xbt_assert(argc == 4);
double computation_amount = xbt_str_parse_double(argv[1], "Invalid computation amount: %s");
- int use_bound = xbt_str_parse_int(argv[2], "Second parameter (use_bound) should be 0 or 1 but is: %s");
+ int use_bound = !!xbt_str_parse_int(argv[2], "Second parameter (use_bound) should be 0 or 1 but is: %s");
double bound = xbt_str_parse_double(argv[3], "Invalid bound: %s");
double clock_sta = simgrid_get_clock();
#define FINALIZE 221297 /* a magic number to tell people to stop working */
const double comp_size = 10000000;
-const double comm_size = 10000000;
+const long comm_size = 10000000;
static void send_tasks(int nb_workers)
{
#include <stdio.h>
-XBT_LOG_NEW_DEFAULT_CATEGORY(async_wait, "Messages specific for this example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(comm_wait, "Messages specific for this example");
/* Main function of the Sender actor */
static void sender(int argc, char* argv[])
{
xbt_assert(argc == 5, "The sender function expects 4 arguments from the XML deployment file");
long messages_count = xbt_str_parse_int(argv[1], "Invalid amount of messages: %s"); /* - number of messages */
- double message_size = xbt_str_parse_double(argv[2], "Invalid message size: %s"); /* - communication cost */
+ long message_size = xbt_str_parse_int(argv[2], "Invalid message size: %s"); /* - communication cost */
double sleep_start_time = xbt_str_parse_double(argv[3], "Invalid sleep start time: %s"); /* - start time */
double sleep_test_time = xbt_str_parse_double(argv[4], "Invalid test time: %s"); /* - test time */
p Test1 sg_comm_test() with Sleep_sender > Sleep_receiver
-$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) sleep_start_time : 5.000000 , sleep_test_time : 0.100000
> [ 0.000000] (2:receiver@Ruby) sleep_start_time : 1.000000 , sleep_test_time : 0.100000
> [ 1.000000] (2:receiver@Ruby) Wait for my first message
p Test2 sg_comm_test() with Sleep_sender < Sleep_receiver
-$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait2_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait2_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) sleep_start_time : 1.000000 , sleep_test_time : 0.100000
> [ 0.000000] (2:receiver@Ruby) sleep_start_time : 5.000000 , sleep_test_time : 0.100000
> [ 1.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver'
p Test1 sg_comm_wait() with Sleep_sender > Sleep_receiver
-$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait3_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait3_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) sleep_start_time : 5.000000 , sleep_test_time : 0.000000
> [ 0.000000] (2:receiver@Ruby) sleep_start_time : 1.000000 , sleep_test_time : 0.000000
> [ 1.000000] (2:receiver@Ruby) Wait for my first message
p Test2 sg_comm_wait() with Sleep_sender < Sleep_receiver
-$ ${bindir:=.}/c-async-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-wait4_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/c-comm-wait ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-wait4_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) sleep_start_time : 1.000000 , sleep_test_time : 0.000000
> [ 0.000000] (2:receiver@Ruby) sleep_start_time : 5.000000 , sleep_test_time : 0.000000
> [ 1.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver'
#include <stdio.h> /* snprintf */
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_async_waitall, "Messages specific for this msg example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(comm_waitall, "Messages specific for this msg example");
static void sender(int argc, char* argv[])
{
long messages_count = xbt_str_parse_int(argv[1], "Invalid message count: %s");
long message_size = xbt_str_parse_int(argv[2], "Invalid message size: %s");
long receivers_count = xbt_str_parse_int(argv[3], "Invalid amount of receivers: %s");
+ xbt_assert(receivers_count > 0);
/* Array in which we store all ongoing communications */
sg_comm_t* pending_comms = xbt_malloc(sizeof(sg_comm_t) * (messages_count + receivers_count));
}
/* Start dispatching all messages to receivers, in a round robin fashion */
- for (int i = 0; i < messages_count; i++) {
+ for (long i = 0; i < messages_count; i++) {
char msg_content[80];
- snprintf(msg_content, 79, "Message %d", i);
+ snprintf(msg_content, 79, "Message %ld", i);
sg_mailbox_t mbox = mboxes[i % receivers_count];
XBT_INFO("Send '%s' to '%s'", msg_content, sg_mailbox_get_name(mbox));
/* Create a communication representing the ongoing communication, and store it in pending_comms */
}
/* Start sending messages to let the workers know that they should stop */
- for (int i = 0; i < receivers_count; i++) {
- XBT_INFO("Send 'finalize' to 'receiver-%d'", i);
+ for (long i = 0; i < receivers_count; i++) {
+ XBT_INFO("Send 'finalize' to 'receiver-%ld'", i);
char* end_msg = xbt_strdup("finalize");
sg_mailbox_t mbox = mboxes[i % receivers_count];
pending_comms[pending_comms_count++] = sg_mailbox_put_async(mbox, end_msg, 0);
static void receiver(int argc, char* argv[])
{
xbt_assert(argc == 2, "Expecting one parameter from the XML deployment file but got %d", argc);
- int id = xbt_str_parse_int(argv[1], "ID should be numerical, not %s");
+ int id = (int)xbt_str_parse_int(argv[1], "ID should be numerical, not %s");
char mailbox_name[80];
snprintf(mailbox_name, 79, "receiver-%d", id);
sg_mailbox_t mbox = sg_mailbox_by_name(mailbox_name);
#!/usr/bin/env tesh
! output sort 19
-$ ${bindir:=.}/c-async-waitall ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/async-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/c-comm-waitall ${platfdir:=.}/small_platform_fatpipe.xml ${srcdir:=.}/comm-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0'
> [ 0.000000] (2:receiver@Ruby) Wait for my first message
> [ 0.000000] (3:receiver@Perl) Wait for my first message
#include <stdio.h> /* snprintf */
-XBT_LOG_NEW_DEFAULT_CATEGORY(async_waitany, "Messages specific for this example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(comm_waitany, "Messages specific for this example");
static void sender(int argc, char* argv[])
{
long messages_count = xbt_str_parse_int(argv[1], "Invalid message count: %s");
long msg_size = xbt_str_parse_int(argv[2], "Invalid message size: %s");
long receivers_count = xbt_str_parse_int(argv[3], "Invalid amount of receivers: %s");
+ xbt_assert(receivers_count > 0);
/* Array in which we store all ongoing communications */
sg_comm_t* pending_comms = xbt_malloc(sizeof(sg_comm_t) * (messages_count + receivers_count));
}
/* Start dispatching all messages to receivers, in a round robin fashion */
- for (int i = 0; i < messages_count; i++) {
+ for (long i = 0; i < messages_count; i++) {
char msg_content[80];
- snprintf(msg_content, 79, "Message %d", i);
+ snprintf(msg_content, 79, "Message %ld", i);
sg_mailbox_t mbox = mboxes[i % receivers_count];
XBT_INFO("Send '%s' to '%s'", msg_content, sg_mailbox_get_name(mbox));
pending_comms[pending_comms_count++] = sg_mailbox_put_async(mbox, xbt_strdup(msg_content), msg_size);
}
/* Start sending messages to let the workers know that they should stop */
- for (int i = 0; i < receivers_count; i++) {
- XBT_INFO("Send 'finalize' to 'receiver-%d'", i);
+ for (long i = 0; i < receivers_count; i++) {
+ XBT_INFO("Send 'finalize' to 'receiver-%ld'", i);
char* end_msg = xbt_strdup("finalize");
sg_mailbox_t mbox = mboxes[i % receivers_count];
pending_comms[pending_comms_count++] = sg_mailbox_put_async(mbox, end_msg, 0);
static void receiver(int argc, char* argv[])
{
xbt_assert(argc == 2, "Expecting one parameter from the XML deployment file but got %d", argc);
- int id = xbt_str_parse_int(argv[1], "ID should be numerical, not %s");
+ int id = (int)xbt_str_parse_int(argv[1], "ID should be numerical, not %s");
char mailbox_name[80];
snprintf(mailbox_name, 79, "receiver-%d", id);
sg_mailbox_t mbox = sg_mailbox_by_name(mailbox_name);
#!/usr/bin/env tesh
! output sort 19
-$ ${bindir:=.}/c-async-waitany ${platfdir:=.}/small_platform.xml ${srcdir:=.}/async-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/c-comm-waitany ${platfdir:=.}/small_platform.xml ${srcdir:=.}/comm-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0'
> [ 0.000000] (1:sender@Tremblay) Send 'Message 1' to 'receiver-1'
> [ 0.000000] (1:sender@Tremblay) Send 'Message 2' to 'receiver-0'
double deadline;
xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments");
/* Node initialization */
- unsigned int id = strtoul(argv[1], NULL, 0);
+ unsigned int id = (unsigned int)strtoul(argv[1], NULL, 0);
node_t node = node_init(id);
if (argc == 4) {
XBT_INFO("Hi, I'm going to join the network with id %s", sg_mailbox_get_name(node->mailbox));
- unsigned int id_known = strtoul(argv[2], NULL, 0);
+ unsigned int id_known = (unsigned int)strtoul(argv[2], NULL, 0);
join_success = join(node, id_known);
deadline = strtod(argv[3], NULL);
} else {
if (prefix == 0) {
return 0;
} else {
- return (1U << ((unsigned int)(prefix - 1))) ^ id;
+ return (1U << (prefix - 1)) ^ id;
}
}
#include "xbt/str.h"
#include "xbt/sysdep.h"
-#include <math.h>
#include <stdio.h>
XBT_LOG_NEW_DEFAULT_CATEGORY(pastry, "Messages specific for this example");
static int domain(unsigned int a, unsigned int level)
{
if (domain_mask == 0)
- domain_mask = pow(2, DOMAIN_SIZE) - 1;
+ domain_mask = (1U << DOMAIN_SIZE) - 1;
unsigned int shift = (LEVELS_COUNT - level - 1) * DOMAIN_SIZE;
return (a >> shift) & domain_mask;
}
/* Update routing table */
for (i = shl(node->id, message->state->id); i < LEVELS_COUNT; i++) {
for (j = 0; j < LEVEL_SIZE; j++) {
+ // FIXME: this is a no-op!
if (node->routing_table[i][j] == -1 && message->state->routing_table[i][j] == -1)
node->routing_table[i][j] = message->state->routing_table[i][j];
}
double deadline;
xbt_assert(argc == 3 || argc == 5, "Wrong number of arguments for this node");
s_node_t node = {0};
- node.id = xbt_str_parse_int(argv[1], "Invalid ID: %s");
+ node.id = (int)xbt_str_parse_int(argv[1], "Invalid ID: %s");
node.known_id = -1;
node.ready = -1;
node.pending_messages = xbt_dynar_new(sizeof(pastry_message_t), NULL);
XBT_DEBUG("Create a new Pastry ring...");
join_success = 1;
} else {
- node.known_id = xbt_str_parse_int(argv[2], "Invalid known ID: %s");
+ node.known_id = (int)xbt_str_parse_int(argv[2], "Invalid known ID: %s");
double sleep_time = xbt_str_parse_double(argv[3], "Invalid sleep time: %s");
deadline = xbt_str_parse_double(argv[4], "Invalid deadline: %s");
while (!strncmp(options[0], "-", 1)) {
int length = strlen("-nb_bits=");
if (!strncmp(options[0], "-nb_bits=", length) && strlen(options[0]) > length) {
- nb_bits = xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s");
+ nb_bits = (int)xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s");
XBT_DEBUG("Set nb_bits to %d", nb_bits);
} else {
length = strlen("-timeout=");
if (!strncmp(options[0], "-timeout=", length) && strlen(options[0]) > length) {
- timeout = xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s");
+ timeout = (int)xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s");
XBT_DEBUG("Set timeout to %d", timeout);
} else {
xbt_die("Invalid pastry option '%s'", options[0]);
static void runner(int argc, char* argv[])
{
/* Retrieve the list of all hosts as an array of hosts */
- int host_count = sg_host_count();
+ int host_count = (int)sg_host_count();
sg_host_t* hosts = sg_host_list();
XBT_INFO("First, build a classical parallel task, with 1 Gflop to execute on each node, "
xbt_assert(argc == 5);
long number_of_tasks = xbt_str_parse_int(argv[1], "Invalid amount of tasks: %s");
double task_comp_size = xbt_str_parse_double(argv[2], "Invalid computational size: %s");
- double task_comm_size = xbt_str_parse_double(argv[3], "Invalid communication size: %s");
+ long task_comm_size = xbt_str_parse_int(argv[3], "Invalid communication size: %s");
long workers_count = xbt_str_parse_int(argv[4], "Invalid amount of workers: %s");
XBT_INFO("Got %ld workers and %ld tasks to process", workers_count, number_of_tasks);
> [ 43.774742] (1:master@Tremblay) Goodbye now!
> [ 43.774742] (0:maestro@) Simulation time 43.7747
-p NOT testing the mixure of failures and CpuTI:
+p NOT testing the mixture of failures and CpuTI:
p This test leads to a deadlock because of a bug somewhere in surf_solve.
p We should debug this instead of ignoring the issue, but it's utterly
p complex with such an integration test. One day, we will setup a set of
package async.waitall;
-/** This example demonstrates the use of the asynchrounous communications
+/** This example demonstrates the use of the asynchronous communications
*
* Task.isend() and Task.irecv() are used to start the communications in non-blocking mode.
*
public void main(String[] args) {
int yieldsCount = Integer.parseInt(args[0]);
for (int i=0; i<yieldsCount; i++)
- yield();
+ Process.yield();
Msg.info("Yielded "+yieldsCount+". Good bye now!");
}
}
Msg.debug("Caught exception: " + e);
}
}
- Msg.info(findNodeSuccedded + "/" + (findNodeSuccedded + findNodeFailed) + " FIND_NODE have succedded.");
+ Msg.info(findNodeSuccedded + "/" + (findNodeSuccedded + findNodeFailed) + " FIND_NODE have succeeded.");
}
/**
if (counts) {
findNodeSuccedded++;
}
- Msg.debug("Find node on " + destination + " succedded");
+ Msg.debug("Find node on " + destination + " succeeded");
} else {
Msg.debug("Find node on " + destination + " failed");
Msg.debug("Queried " + queries + " nodes to find " + destination);
}
/**
- * @brief Handles an incomming task
+ * @brief Handles an incoming task
* @param task The task we need to handle
*/
public void handleTask(Task task) {
> [0.000000] [java/INFO] Using regular java threads.
> [900.000000] [java/INFO] Terminating the simulation...
> [node-0.simgrid.org:dht.kademlia.Node:(1) 0.000000] [java/INFO] Hi, I'm going to create the network with the id 0!
-> [node-0.simgrid.org:dht.kademlia.Node:(1) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded.
+> [node-0.simgrid.org:dht.kademlia.Node:(1) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded.
> [node-1.simgrid.org:dht.kademlia.Node:(2) 0.000000] [java/INFO] Hi, I'm going to join the network with the id 1!
-> [node-1.simgrid.org:dht.kademlia.Node:(2) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded.
+> [node-1.simgrid.org:dht.kademlia.Node:(2) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded.
> [node-2.simgrid.org:dht.kademlia.Node:(3) 0.000000] [java/INFO] Hi, I'm going to join the network with the id 2!
-> [node-2.simgrid.org:dht.kademlia.Node:(3) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded.
+> [node-2.simgrid.org:dht.kademlia.Node:(3) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded.
> [node-3.simgrid.org:dht.kademlia.Node:(4) 0.000000] [java/INFO] Hi, I'm going to join the network with the id 4!
-> [node-3.simgrid.org:dht.kademlia.Node:(4) 900.000000] [java/INFO] 8/8 FIND_NODE have succedded.
+> [node-3.simgrid.org:dht.kademlia.Node:(4) 900.000000] [java/INFO] 8/8 FIND_NODE have succeeded.
This example declares several tracing categories
to that are used to classify its tasks. When the program is executed,
the tracing mechanism registers the resource utilization of hosts
- and links according to these categories. Recommanded options:
+ and links according to these categories. Recommended options:
@verbatim --cfg=tracing:yes --cfg=tracing/categorized:yes --cfg=tracing/uncategorized:yes
@endverbatim
This is an augmented version of our basic master/worker example
using several tracing features. It traces resource usage, sorted
out in several categories; Trace marks and user variables are also
- used. Recommanded options:
+ used. Recommended options:
@verbatim --cfg=tracing/categorized:yes --cfg=tracing/uncategorized:yes
@endverbatim
- <b>Process migration tracing</b>.
@ref examples/deprecated/msg/trace-process-migration/trace-process-migration.c \n
This version is enhanced so that the process migrations can be
- displayed as arrows in a Gantt-chart visualization. Recommanded
+ displayed as arrows in a Gantt-chart visualization. Recommended
options to that extend:
@verbatim -cfg=tracing:yes --cfg=tracing/msg/process:yes
@endverbatim
static int trace_fun(XBT_ATTRIB_UNUSED int argc, XBT_ATTRIB_UNUSED char* argv[])
{
//Set initial values for the link user variables
- //This example uses source and destination where source and destination are the name of hosts inthe platform file.
+ //This example uses source and destination where source and destination are the name of hosts in the platform file.
//The functions will set/change the value of the variable for all links in the route between source and destination.
//Set the Link_Capacity variable
<platform version="4.1">
<zone id="AS0" routing="Full">
<!-- Multiple pstate processor capacities can be defined as a list of powers specified for a given host -->
- <!-- Attribute 'pstate' specifies the initialy selected pstate (here, the lowest pstate corresponds to the highest
+ <!-- Attribute 'pstate' specifies the initially selected pstate (here, the lowest pstate corresponds to the highest
processor speed) -->
<host core="4" id="MyHost1" pstate="0" speed="100.0Mf,50.0Mf,20.0Mf">
<!-- List of Idle:Epsilon:AllCores (in Watts) corresponding to the speed consumed when the processor is idle,
(see http://grid5000.fr for more information on that platform)
It was generated manually in 2011 and manually edited since then.
- A better (and more uptodate) description file can probably be
+ A better (and more up-to-date) description file can probably be
retrieved using https://github.com/lpouillo/topo5k as described on
https://www.grid5000.fr/mediawiki/index.php/TechTeam:Topo5k
<zone id="WIFI zone" routing="Cluster">
<!-- First declare the Access Point (ie, the wifi media) -->
- <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps" latency="0ms" />
+ <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps,36Mbps,24Mbps" latency="0ms" />
<!-- Two stations in the wifi zone -->
<host id="Station 1" speed="100.0Mf,50.0Mf,20.0Mf" />
+++ /dev/null
-<?xml version='1.0'?>
-
-<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
-<platform version="4.1">
- <zone id="world" routing="Full">
-
- <zone id="WIFI zone" routing="Cluster">
- <!-- First declare the Access Point (ie, the wifi media) -->
- <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps" latency="0ms" />
-
- <!-- Two stations in the wifi zone -->
- <host id="Station 1" speed="100.0Mf,50.0Mf,20.0Mf" />
- <host id="Station 2" speed="100.0Mf,50.0Mf,20.0Mf" />
-
- <!-- Specify that stations use the WIFI link for every communication (incoming or outgoing) -->
- <host_link id="Station 1" up="AP1" down="AP1"/>
- <host_link id="Station 2" up="AP1" down="AP1"/>
-
- <router id="WIFI router"/>
- </zone>
-
-
- <!-- NODE1 AS -->
- <zone id="Wired zone" routing="Full">
- <host id="NODE1" speed="100.0Mf,50.0Mf,20.0Mf" />
- </zone>
-
-
- <!-- AS Routing -->
- <link id="Collector" sharing_policy="SHARED" bandwidth="100Mbps" latency="0ms" />
- <zoneRoute src="WIFI zone" dst="Wired zone" gw_src="WIFI router" gw_dst="NODE1">
- <link_ctn id="Collector" />
- </zoneRoute>
-
- </zone>
-</platform>
foreach(example actor-create actor-daemon actor-join actor-kill actor-migrate actor-suspend actor-yield actor-lifetime
- async-wait async-waitall async-waitany
+ comm-wait comm-waitall comm-waitany
exec-async exec-basic exec-dvfs exec-remote)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.tesh)
set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/${example}.py)
set(examples_src ${examples_src} PARENT_SCOPE)
set(tesh_files ${tesh_files} examples/python/actor-create/actor-create_d.xml
examples/python/actor-lifetime/actor-lifetime_d.xml
- examples/python/async-wait/async-wait_d.xml
- examples/python/async-waitall/async-waitall_d.xml
- examples/python/async-waitany/async-waitany_d.xml PARENT_SCOPE)
+ examples/python/comm-wait/comm-wait_d.xml
+ examples/python/comm-waitall/comm-waitall_d.xml
+ examples/python/comm-waitany/comm-waitany_d.xml PARENT_SCOPE)
#!/usr/bin/env tesh
-$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/async-wait.py ${platfdir}/small_platform_fatpipe.xml async-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/comm-wait.py ${platfdir}/small_platform_fatpipe.xml comm-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'Mailbox(receiver-0)'
> [ 0.000000] (2:receiver@Ruby) Wait for my first message
> [ 0.000000] (1:sender@Tremblay) Send 'Message 1' to 'Mailbox(receiver-0)'
#!/usr/bin/env tesh
-$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/async-waitall.py ${platfdir}/small_platform_fatpipe.xml async-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/comm-waitall.py ${platfdir}/small_platform_fatpipe.xml comm-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'Mailbox(receiver-0)'
> [ 0.000000] (2:receiver@Ruby) Wait for my first message
> [ 0.000000] (3:receiver@Perl) Wait for my first message
p Testing Comm.wait_any()
! output sort 19
-$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/async-waitany.py ${platfdir}/small_platform.xml async-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${pythoncmd:=python3} ${PYTHON_TOOL_OPTIONS:=} ${bindir:=.}/comm-waitany.py ${platfdir}/small_platform.xml comm-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'Mailbox(receiver-0)'
> [ 0.000000] (2:receiver@Fafard) Wait for my first message
> [ 0.000000] (3:receiver@Jupiter) Wait for my first message
foreach (example actor-create actor-daemon actor-exiting actor-join actor-kill
actor-lifetime actor-migrate actor-suspend actor-yield actor-stacksize
app-bittorrent app-chainsend app-pingpong app-token-ring
- async-ready async-wait async-waitany async-waitall async-waituntil
+ comm-ready comm-suspend comm-wait comm-waitany comm-waitall comm-waituntil
comm-dependent
cloud-capping cloud-migration cloud-simple
dht-chord dht-kademlia
exec-async exec-basic exec-dvfs exec-ptask exec-remote exec-waitany exec-waitfor exec-dependent
maestro-set
mc-bugged1 mc-bugged1-liveness mc-bugged2 mc-electric-fence mc-failing-assert
+ network-wifi
io-async io-file-system io-file-remote io-disk-raw io-dependent
platform-failures platform-profile platform-properties
plugin-hostload
${CMAKE_CURRENT_SOURCE_DIR}/actor-yield/s4u-actor-yield_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/app-bittorrent/s4u-app-bittorrent_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/app-masterworkers/s4u-app-masterworkers_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-wait/s4u-async-wait_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-waitany/s4u-async-waitany_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-waitall/s4u-async-waitall_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-ready/s4u-async-ready_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/async-waituntil/s4u-async-waituntil_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-suspend/s4u-comm-suspend_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-wait/s4u-comm-wait_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitany/s4u-comm-waitany_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-waitall/s4u-comm-waitall_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-ready/s4u-comm-ready_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/comm-waituntil/s4u-comm-waituntil_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u-dht-chord_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/dht-kademlia/s4u-dht-kademlia_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/energy-boot/platform_boot.xml
msg = args[1];
mbox = args[2];
}
- void operator()() /* This is the main code of the actor */
+ void operator()() const /* This is the main code of the actor */
{
XBT_INFO("Hello s4u, I have something to send");
simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(mbox);
XBT_INFO("Exiting now (done sleeping or got killed).");
});
}
- void operator()()
+ void operator()() const
{
XBT_INFO("Hello! I go to sleep.");
simgrid::s4u::this_actor::sleep_for(10);
// You can use set_config(string) to pass a size that will be parsed. That value will be used for any subsequent
// actors
- e.set_config("contexts/stack-size:16384");
+ simgrid::s4u::Engine::set_config("contexts/stack-size:16384");
simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor);
simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor);
// You can use set_config(key, value) for the same effect.
- e.set_config("contexts/stack-size", 32 * 1024);
+ simgrid::s4u::Engine::set_config("contexts/stack-size", 32 * 1024);
simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor);
simgrid::s4u::Actor::create("actor", simgrid::s4u::Host::by_name("Tremblay"), actor);
long number_of_yields;
public:
- explicit yielder(std::vector<std::string> args) { number_of_yields = std::stod(args[1]); }
- void operator()()
+ explicit yielder(std::vector<std::string> args) { number_of_yields = std::stol(args[1]); }
+ void operator()() const
{
for (int i = 0; i < number_of_yields; i++)
simgrid::s4u::this_actor::yield();
xbt_assert(remote_peer->hasPiece(piece));
int block_index = getFirstMissingBlockFrom(piece);
if (block_index != -1) {
- int block_length = std::min(BLOCKS_REQUESTED, PIECES_BLOCKS - block_index);
+ int block_length = static_cast<int>(std::min(BLOCKS_REQUESTED, PIECES_BLOCKS - block_index));
XBT_DEBUG("Sending a REQUEST to %s for piece %u (%d,%d)", remote_peer->mailbox_->get_cname(), piece, block_index,
block_length);
remote_peer->mailbox_
}
}
-std::string Peer::getStatus()
+std::string Peer::getStatus() const
{
std::string res;
for (unsigned i = 0; i < FILE_PIECES; i++)
return res;
}
-bool Peer::hasFinished()
+bool Peer::hasFinished() const
{
return bitfield_ == (1U << FILE_PIECES) - 1U;
}
pieces_count[i]++;
}
-unsigned int Peer::countPieces(unsigned int bitfield)
+unsigned int Peer::countPieces(unsigned int bitfield) const
{
unsigned int count = 0U;
unsigned int n = bitfield;
return count;
}
-int Peer::nbInterestedPeers()
+int Peer::nbInterestedPeers() const
{
int nb = 0;
for (auto const& kv : connected_peers)
do {
// We choose a random peer to unchoke.
std::unordered_map<int, Connection>::iterator chosen_peer_it = connected_peers.begin();
- std::advance(chosen_peer_it, random.uniform_int(0, connected_peers.size() - 1));
+ std::advance(chosen_peer_it, random.uniform_int(0, static_cast<int>(connected_peers.size() - 1)));
chosen_peer = &chosen_peer_it->second;
if (not chosen_peer->interested || not chosen_peer->choked_upload)
chosen_peer = nullptr;
bitfield_blocks |= (1ULL << static_cast<unsigned int>(piece * PIECES_BLOCKS + i));
}
-bool Peer::hasCompletedPiece(unsigned int piece)
+bool Peer::hasCompletedPiece(unsigned int piece) const
{
for (unsigned int i = 0; i < PIECES_BLOCKS; i++)
if (not(bitfield_blocks & 1ULL << (piece * PIECES_BLOCKS + i)))
return true;
}
-int Peer::getFirstMissingBlockFrom(int piece)
+int Peer::getFirstMissingBlockFrom(int piece) const
{
for (unsigned int i = 0; i < PIECES_BLOCKS; i++)
if (not(bitfield_blocks & 1ULL << (piece * PIECES_BLOCKS + i)))
}
/** Returns a piece that is partially downloaded and stored by the remote peer if any -1 otherwise. */
-int Peer::partiallyDownloadedPiece(const Connection* remote_peer)
+int Peer::partiallyDownloadedPiece(const Connection* remote_peer) const
{
for (unsigned int i = 0; i < FILE_PIECES; i++)
if (remotePeerHasMissingPiece(remote_peer, i) && isNotDownloadingPiece(i) && getFirstMissingBlockFrom(i) > 0)
Peer& operator=(const Peer&) = delete;
void operator()();
- std::string getStatus();
- bool hasFinished();
- int nbInterestedPeers();
+ std::string getStatus() const;
+ bool hasFinished() const;
+ int nbInterestedPeers() const;
bool isInterestedBy(const Connection* remote_peer) const;
bool isInterestedByFree(const Connection* remote_peer) const;
void updateActivePeersSet(Connection* remote_peer);
void updateChokedPeers();
bool hasNotPiece(unsigned int piece) const { return not(bitfield_ & 1U << piece); }
- bool remotePeerHasMissingPiece(const Connection* remote_peer, unsigned int piece)
+ bool remotePeerHasMissingPiece(const Connection* remote_peer, unsigned int piece) const
{
return hasNotPiece(piece) && remote_peer->hasPiece(piece);
}
- bool hasCompletedPiece(unsigned int piece);
- unsigned int countPieces(unsigned int bitfield);
+ bool hasCompletedPiece(unsigned int piece) const;
+ unsigned int countPieces(unsigned int bitfield) const;
/** Check that a piece is not currently being download by the peer. */
bool isNotDownloadingPiece(unsigned int piece) const { return not(current_pieces & 1U << piece); }
- int partiallyDownloadedPiece(const Connection* remote_peer);
+ int partiallyDownloadedPiece(const Connection* remote_peer) const;
void updatePiecesCountFromBitfield(unsigned int bitfield);
void removeCurrentPiece(Connection* remote_peer, unsigned int current_piece);
void updateBitfieldBlocks(int piece, int block_index, int block_length);
- int getFirstMissingBlockFrom(int piece);
+ int getFirstMissingBlockFrom(int piece) const;
int selectPieceToDownload(const Connection* remote_peer);
void requestNewPieceTo(Connection* remote_peer);
// Sending back peers to the requesting peer
TrackerAnswer* ta = new TrackerAnswer(TRACKER_QUERY_INTERVAL);
std::set<int>::iterator next_peer;
- int nb_known_peers = known_peers.size();
+ int nb_known_peers = static_cast<int>(known_peers.size());
int max_tries = std::min(MAXIMUM_PEERS, nb_known_peers);
int tried = 0;
while (tried < max_tries) {
public:
explicit TrackerQuery(int peer_id, simgrid::s4u::Mailbox* return_mailbox)
: peer_id(peer_id), return_mailbox(return_mailbox){};
- int getPeerId() { return peer_id; }
- simgrid::s4u::Mailbox* getReturnMailbox() { return return_mailbox; }
+ int getPeerId() const { return peer_id; }
+ simgrid::s4u::Mailbox* getReturnMailbox() const { return return_mailbox; }
};
class TrackerAnswer {
public:
explicit TrackerAnswer(int /*interval*/) /*: interval(interval)*/ {}
void addPeer(int peer) { peers.insert(peer); }
- const std::set<int>& getPeers() { return peers; }
+ const std::set<int>& getPeers() const { return peers; }
};
class Tracker {
XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_app_masterworker, "Messages specific for this s4u example");
class Master {
- long tasks_count = 0;
- double compute_cost = 0;
- double communicate_cost = 0;
+ long tasks_count = 0;
+ double compute_cost = 0;
+ long communicate_cost = 0;
std::vector<simgrid::s4u::Mailbox*> workers;
public:
tasks_count = std::stol(args[1]);
compute_cost = std::stod(args[2]);
- communicate_cost = std::stod(args[3]);
+ communicate_cost = std::stol(args[3]);
for (unsigned int i = 4; i < args.size(); i++)
workers.push_back(simgrid::s4u::Mailbox::by_name(args[i]));
{
xbt_assert(args.size() > 4, "The master function expects at least 3 arguments");
- long tasks_count = std::stol(args[1]);
- double compute_cost = std::stod(args[2]);
- double communication_cost = std::stod(args[3]);
+ long tasks_count = std::stol(args[1]);
+ double compute_cost = std::stod(args[2]);
+ long communication_cost = std::stol(args[3]);
std::vector<simgrid::s4u::Mailbox*> workers;
for (unsigned int i = 4; i < args.size(); i++)
workers.push_back(simgrid::s4u::Mailbox::by_name(args[i]));
p Testing with default compound
-$ ${bindir:=.}/s4u-comm-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:debug "--log=root.fmt:[%6.2r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-comm-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:verbose "--log=root.fmt:[%6.2r]%e(%i:%P@%h)%e%m%n"
> [ 2.00] (1:sender@Tremblay) Remove a dependency from 'exec on sender' on 'comm to receiver'
> [ 2.00] (1:sender@Tremblay) All dependencies are solved, let's start 'comm to receiver'
> [ 3.07] (2:receiver@Jupiter) Remove a dependency from 'comm from sender' on 'exec on receiver'
xbt_assert(argc == 5, "Expecting 4 parameters from the XML deployment file but got %d", argc);
int my_id = std::stoi(argv[1]); /* - my id */
long messages_count = std::stol(argv[2]); /* - number of message */
- double msg_size = std::stol(argv[3]); /* - message size in bytes */
- long peers_count = std::stod(argv[4]); /* - number of peers */
+ long msg_size = std::stol(argv[3]); /* - message size in bytes */
+ long peers_count = std::stol(argv[4]); /* - number of peers */
- /* Set myself as the persistent receiver of my mailbox so that messages start flowing to me as soon as they are put into it */
+ /* Set myself as the persistent receiver of my mailbox so that messages start flowing to me as soon as they are put
+ * into it */
simgrid::s4u::Mailbox* my_mbox = simgrid::s4u::Mailbox::by_name(std::string("peer-") + std::to_string(my_id));
my_mbox->set_receiver(simgrid::s4u::Actor::self());
for (int i = 0; i < messages_count; i++) {
for (int peer_id = 0; peer_id < peers_count; peer_id++) {
if (peer_id != my_id) {
- std::string mboxName = std::string("peer-") + std::to_string(peer_id);
- simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
- std::string msgName = std::string("Message ") + std::to_string(i) + std::string(" from peer ") + std::to_string(my_id);
- std::string* payload = new std::string(msgName); // copy the data we send:
+ std::string mboxName = std::string("peer-") + std::to_string(peer_id);
+ simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
+ std::string msgName =
+ std::string("Message ") + std::to_string(i) + std::string(" from peer ") + std::to_string(my_id);
+ std::string* payload = new std::string(msgName); // copy the data we send:
// 'msgName' is not a stable storage location
XBT_INFO("Send '%s' to '%s'", msgName.c_str(), mboxName.c_str());
/* Create a communication representing the ongoing communication */
/* Start sending messages to let peers know that they should stop */
for (int peer_id = 0; peer_id < peers_count; peer_id++) {
if (peer_id != my_id) {
- std::string mboxName = std::string("peer-") + std::to_string(peer_id);
- simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
- std::string* payload = new std::string("finalize"); // Make a copy of the data we will send
+ std::string mboxName = std::string("peer-") + std::to_string(peer_id);
+ simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
+ std::string* payload = new std::string("finalize"); // Make a copy of the data we will send
pending_comms.push_back(mbox->put_async(payload, msg_size));
XBT_INFO("Send 'finalize' to 'peer-%d'", peer_id);
}
}
XBT_INFO("Done dispatching all messages");
- /* Retrieve all the messages other peers have been sending to me until I receive all the corresponding "Finalize" messages */
- int pending_finalize_messages = peers_count - 1;
+ /* Retrieve all the messages other peers have been sending to me until I receive all the corresponding "Finalize"
+ * messages */
+ long pending_finalize_messages = peers_count - 1;
while (pending_finalize_messages > 0) {
if (my_mbox->ready()) {
- double start = simgrid::s4u::Engine::get_clock();
+ double start = simgrid::s4u::Engine::get_clock();
const std::string* received = static_cast<std::string*>(my_mbox->get());
- double waiting_time = simgrid::s4u::Engine::get_clock() - start;
- xbt_assert(waiting_time == 0, "Expecting the waiting time to be 0 because the communication was supposedly ready, but got %f instead", waiting_time);
+ double waiting_time = simgrid::s4u::Engine::get_clock() - start;
+ xbt_assert(
+ waiting_time == 0,
+ "Expecting the waiting time to be 0 because the communication was supposedly ready, but got %f instead",
+ waiting_time);
XBT_INFO("I got a '%s'.", received->c_str());
if (*received == "finalize") {
pending_finalize_messages--;
XBT_INFO("Goodbye now!");
}
-
-int main(int argc, char *argv[])
+int main(int argc, char* argv[])
{
xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]);
p Test1 Peer sending and receiving
-$ ${bindir:=.}/s4u-async-ready ${platfdir}/small_platform_fatpipe.xml s4u-async-ready_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-comm-ready ${platfdir}/small_platform_fatpipe.xml s4u-comm-ready_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:peer@Tremblay) Send 'Message 0 from peer 0' to 'peer-1'
> [ 0.000000] (2:peer@Ruby) Send 'Message 0 from peer 1' to 'peer-0'
> [ 0.000000] (1:peer@Tremblay) Send 'Message 0 from peer 0' to 'peer-2'
--- /dev/null
+/* Copyright (c) 2010-2020. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* This example shows how to suspend and resume an asynchronous communication. */
+
+#include "simgrid/s4u.hpp"
+#include <cstdlib>
+#include <iostream>
+#include <string>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_wait, "Messages specific for this s4u example");
+
+static void sender(int argc, char**)
+{
+ xbt_assert(argc == 1, "Expecting no parameter from the XML deployment file but got %d", argc - 1);
+
+ simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name("receiver");
+
+ // Copy the data we send: the 'msg_content' variable is not a stable storage location.
+// It will be destroyed when this actor leaves the loop, i.e., before the receiver gets the data
+ std::string* payload = new std::string("Sent message");
+
+ /* Create a communication representing the ongoing transfer, without starting it yet */
+ simgrid::s4u::CommPtr comm = mbox->put_init(payload, 13194230);
+ XBT_INFO("Suspend the communication before it starts (remaining: %.0f bytes) and wait a second.",
+ comm->get_remaining());
+ simgrid::s4u::this_actor::sleep_for(1);
+ XBT_INFO("Now, start the communication (remaining: %.0f bytes) and wait another second.", comm->get_remaining());
+ comm->start();
+ simgrid::s4u::this_actor::sleep_for(1);
+
+ XBT_INFO("There is still %.0f bytes to transfer in this communication. Suspend it for one second.",
+ comm->get_remaining());
+ comm->suspend();
+ XBT_INFO("Now there is %.0f bytes to transfer. Resume it and wait for its completion.", comm->get_remaining());
+ comm->resume();
+ comm->wait();
+ XBT_INFO("There is %f bytes to transfer after the communication completion.", comm->get_remaining());
+ XBT_INFO("Suspending a completed activity is a no-op.");
+ comm->suspend();
+}
+
+static void receiver(int, char**)
+{
+ simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name("receiver");
+
+ XBT_INFO("Wait for the message.");
+ void* payload = mbox->get();
+
+ const std::string* received = static_cast<std::string*>(payload);
+ XBT_INFO("I got '%s'.", received->c_str());
+
+ delete received;
+}
+
+int main(int argc, char* argv[])
+{
+ xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]);
+
+ simgrid::s4u::Engine e(&argc, argv);
+ e.register_function("sender", &sender);
+ e.register_function("receiver", &receiver);
+
+ e.load_platform(argv[1]);
+ e.load_deployment(argv[2]);
+ e.run();
+
+ return 0;
+}
--- /dev/null
+#!/usr/bin/env tesh
+
+$ ${bindir:=.}/s4u-comm-suspend ${platfdir}/small_platform.xml s4u-comm-suspend_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:sender@Tremblay) Suspend the communication before it starts (remaining: 13194230 bytes) and wait a second.
+> [ 0.000000] (2:receiver@Jupiter) Wait for the message.
+> [ 1.000000] (1:sender@Tremblay) Now, start the communication (remaining: 13194230 bytes) and wait another second.
+> [ 2.000000] (1:sender@Tremblay) There is still 6660438 bytes to transfer in this communication. Suspend it for one second.
+> [ 2.000000] (1:sender@Tremblay) Now there is 6660438 bytes to transfer. Resume it and wait for its completion.
+> [ 3.000000] (2:receiver@Jupiter) I got 'Sent message'.
+> [ 3.000000] (1:sender@Tremblay) There is 0.000000 bytes to transfer after the communication completion.
+> [ 3.000000] (1:sender@Tremblay) Suspending a completed activity is a no-op.
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
+<platform version="4.1">
+ <actor host="Tremblay" function="sender" />
+ <actor host="Jupiter" function="receiver" />
+</platform>
#include <iostream>
#include <string>
-XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_wait, "Messages specific for this s4u example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_wait, "Messages specific for this s4u example");
static void sender(int argc, char** argv)
{
xbt_assert(argc == 3, "Expecting 2 parameters from the XML deployment file but got %d", argc);
long messages_count = std::stol(argv[1]); /* - number of messages */
- double msg_size = std::stod(argv[2]); /* - message size in bytes */
+ long msg_size = std::stol(argv[2]); /* - message size in bytes */
double sleep_start_time = 5.0;
double sleep_test_time = 0;
}
}
-int main(int argc, char *argv[])
+int main(int argc, char* argv[])
{
xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]);
p Test1 Sleep_sender > Sleep_receiver
-$ ${bindir:=.}/s4u-async-wait ${platfdir}/small_platform_fatpipe.xml s4u-async-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-comm-wait ${platfdir}/small_platform_fatpipe.xml s4u-comm-wait_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) sleep_start_time : 5.000000 , sleep_test_time : 0.000000
> [ 0.000000] (2:receiver@Ruby) sleep_start_time : 1.000000 , sleep_test_time : 0.100000
> [ 1.000000] (2:receiver@Ruby) Wait for my first message
class Sender {
long messages_count; /* - number of messages */
long receivers_count; /* - number of receivers */
- double msg_size; /* - message size in bytes */
+ long msg_size; /* - message size in bytes */
public:
explicit Sender(std::vector<std::string> args)
{
xbt_assert(args.size() == 4, "Expecting 3 parameters from the XML deployment file but got %zu", args.size());
messages_count = std::stol(args[1]);
- msg_size = std::stod(args[2]);
+ msg_size = std::stol(args[2]);
receivers_count = std::stol(args[3]);
}
- void operator()()
+ void operator()() const
{
// sphinx-doc: init-begin (this line helps the doc to build; ignore it)
/* Vector in which we store all ongoing communications */
}
};
-int main(int argc, char *argv[])
+int main(int argc, char* argv[])
{
xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]);
#!/usr/bin/env tesh
-$ ${bindir:=.}/s4u-async-waitall ${platfdir}/small_platform_fatpipe.xml s4u-async-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-comm-waitall ${platfdir}/small_platform_fatpipe.xml s4u-comm-waitall_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0'
> [ 0.000000] (2:receiver@Ruby) Wait for my first message
> [ 0.000000] (3:receiver@Perl) Wait for my first message
#include <iostream>
#include <string>
-XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_waitall, "Messages specific for this s4u example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_waitall, "Messages specific for this s4u example");
class Sender {
long messages_count; /* - number of messages */
long receivers_count; /* - number of receivers */
- double msg_size; /* - message size in bytes */
+ long msg_size; /* - message size in bytes */
public:
explicit Sender(std::vector<std::string> args)
{
xbt_assert(args.size() == 4, "Expecting 3 parameters from the XML deployment file but got %zu", args.size());
messages_count = std::stol(args[1]);
- msg_size = std::stod(args[2]);
+ msg_size = std::stol(args[2]);
receivers_count = std::stol(args[3]);
}
- void operator()()
+ void operator()() const
{
/* Vector in which we store all ongoing communications */
std::vector<simgrid::s4u::CommPtr> pending_comms;
}
};
-int main(int argc, char *argv[])
+int main(int argc, char* argv[])
{
xbt_assert(argc > 2, "Usage: %s platform_file deployment_file\n", argv[0]);
p Testing this_actor->wait_any()
! output sort 19
-$ ${bindir:=.}/s4u-async-waitany ${platfdir}/small_platform.xml s4u-async-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-comm-waitany ${platfdir}/small_platform.xml s4u-comm-waitany_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0'
> [ 0.000000] (2:receiver@Fafard) Wait for my first message
> [ 0.000000] (3:receiver@Jupiter) Wait for my first message
/* This example shows how to use simgrid::s4u::Activity::wait_until() and
* simgrid::s4u::Activity::wait_for() on a given communication.
*
- * It is very similar to the async-wait example, but the sender initially
+ * It is very similar to the comm-wait example, but the sender initially
* does some waits that are too short before doing an infinite wait.
*/
#include <iostream>
#include <string>
-XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_async_waituntil, "Messages specific for this s4u example");
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_comm_waituntil, "Messages specific for this s4u example");
static void sender(int argc, char** argv)
{
xbt_assert(argc == 4, "Expecting 3 parameters from the XML deployment file but got %d", argc);
long messages_count = std::stol(argv[1]); /* - number of messages */
- double msg_size = std::stol(argv[2]); /* - message size in bytes */
- long receivers_count = std::stod(argv[3]); /* - number of receivers */
+ long msg_size = std::stol(argv[2]); /* - message size in bytes */
+ long receivers_count = std::stol(argv[3]); /* - number of receivers */
std::vector<simgrid::s4u::CommPtr> pending_comms;
/* Start dispatching all messages to receivers, in a round robin fashion */
for (int i = 0; i < messages_count; i++) {
- std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count);
- simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
- std::string msgName = std::string("Message ") + std::to_string(i);
- std::string* payload = new std::string(msgName); // copy the data we send:
+ std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count);
+ simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
+ std::string msgName = std::string("Message ") + std::to_string(i);
+ std::string* payload = new std::string(msgName); // copy the data we send:
// 'msgName' is not a stable storage location
XBT_INFO("Send '%s' to '%s'", msgName.c_str(), mboxName.c_str());
/* Start sending messages to let the workers know that they should stop */
for (int i = 0; i < receivers_count; i++) {
- std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count);
- simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
- std::string* payload = new std::string("finalize"); // Make a copy of the data we will send
+ std::string mboxName = std::string("receiver-") + std::to_string(i % receivers_count);
+ simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name(mboxName);
+ std::string* payload = new std::string("finalize"); // Make a copy of the data we will send
simgrid::s4u::CommPtr comm = mbox->put_async(payload, 0);
pending_comms.push_back(comm);
p Test1 Sleep_sender > Sleep_receiver
-$ ${bindir:=.}/s4u-async-waituntil ${platfdir}/small_platform_fatpipe.xml s4u-async-waituntil_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-comm-waituntil ${platfdir}/small_platform_fatpipe.xml s4u-comm-waituntil_d.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:sender@Tremblay) Send 'Message 0' to 'receiver-0'
> [ 0.000000] (2:receiver@Ruby) Wait for my first message
> [ 0.000000] (1:sender@Tremblay) Send 'Message 1' to 'receiver-0'
}
/* Notifies a remote node that its predecessor may have changed. */
-void Node::remoteNotify(int notify_id, int predecessor_candidate_id)
+void Node::remoteNotify(int notify_id, int predecessor_candidate_id) const
{
ChordMessage* message = new ChordMessage(NOTIFY);
message->request_id = predecessor_candidate_id;
while (not strncmp(options[0], "-", 1)) {
unsigned int length = strlen("-nb_bits=");
if (not strncmp(options[0], "-nb_bits=", length) && strlen(options[0]) > length) {
- nb_bits = xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s");
+ nb_bits = static_cast<int>(xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s"));
XBT_DEBUG("Set nb_bits to %d", nb_bits);
} else {
length = strlen("-timeout=");
if (not strncmp(options[0], "-timeout=", length) && strlen(options[0]) > length) {
- timeout = xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s");
+ timeout = static_cast<int>(xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s"));
XBT_DEBUG("Set timeout to %d", timeout);
} else {
xbt_die("Invalid chord option '%s'", options[0]);
int remoteFindSuccessor(int ask_to, int id);
void notify(int predecessor_candidate_id);
- void remoteNotify(int notify_id, int predecessor_candidate_id);
+ void remoteNotify(int notify_id, int predecessor_candidate_id) const;
void stabilize();
void handleMessage(ChordMessage* message);
namespace kademlia {
/** @brief Prints a answer_t, for debugging purposes */
-void Answer::print()
+void Answer::print() const
{
XBT_INFO("Searching %08x, size %zu", destination_id_, nodes_.size());
unsigned int i = 0;
unsigned int getDestinationId() const { return destination_id_; }
size_t getSize() const { return nodes_.size(); }
const std::vector<std::pair<unsigned int, unsigned int>>& getNodes() const { return nodes_; }
- void print();
+ void print() const;
unsigned int merge(const Answer* a);
void trim();
bool destinationFound() const;
* @param id node we are querying
* @param destination node we are trying to find.
*/
-void Node::sendFindNode(unsigned int id, unsigned int destination)
+void Node::sendFindNode(unsigned int id, unsigned int destination) const
{
/* Gets the mailbox to send to */
simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(std::to_string(id));
* Sends to the best "KADEMLIA_ALPHA" nodes in the "node_list" array a "FIND_NODE" request, to ask them for their best
* nodes
*/
-unsigned int Node::sendFindNodeToBest(const Answer* node_list)
+unsigned int Node::sendFindNodeToBest(const Answer* node_list) const
{
unsigned int i = 0;
unsigned int j = 0;
msg->answer_to_->put_init(answer, 1)->detach(kademlia::destroy);
}
-void Node::displaySuccessRate()
+void Node::displaySuccessRate() const
{
XBT_INFO("%u/%u FIND_NODE have succeeded", find_node_success, find_node_success + find_node_failed);
}
if (prefix == 0) {
return 0;
} else {
- return (1U << ((unsigned int)(prefix - 1))) ^ id;
+ return (1U << (prefix - 1)) ^ id;
}
}
explicit Node(unsigned int node_id) : id_(node_id), table(node_id) {}
Node(const Node&) = delete;
Node& operator=(const Node&) = delete;
- unsigned int getId() { return id_; }
+ unsigned int getId() const { return id_; }
bool join(unsigned int known_id);
- void sendFindNode(unsigned int id, unsigned int destination);
- unsigned int sendFindNodeToBest(const Answer* node_list);
+ void sendFindNode(unsigned int id, unsigned int destination) const;
+ unsigned int sendFindNodeToBest(const Answer* node_list) const;
void routingTableUpdate(unsigned int id);
Answer* findClosest(unsigned int destination_id);
bool findNode(unsigned int id_to_find, bool count_in_stats);
void randomLookup();
void handleFindNode(const Message* msg);
- void displaySuccessRate();
+ void displaySuccessRate() const;
};
} // namespace kademlia
// identifier functions
double deadline;
xbt_assert(argc == 3 || argc == 4, "Wrong number of arguments");
/* Node initialization */
- unsigned int node_id = strtoul(argv[1], nullptr, 0);
+ unsigned int node_id = static_cast<unsigned int>(strtoul(argv[1], nullptr, 0));
kademlia::Node node(node_id);
if (argc == 4) {
XBT_INFO("Hi, I'm going to join the network with id %u", node.getId());
- unsigned int known_id = strtoul(argv[2], NULL, 0);
+ unsigned int known_id = static_cast<unsigned int>(strtoul(argv[2], NULL, 0));
join_success = node.join(known_id);
deadline = std::stod(argv[3]);
} else {
{
sg_host_energy_plugin_init();
simgrid::s4u::Engine e(&argc, argv);
- e.set_config("host/model:ptask_L07");
+ simgrid::s4u::Engine::set_config("host/model:ptask_L07");
xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s ../platforms/energy_platform.xml\n", argv[0], argv[0]);
static void sender(std::vector<std::string> args)
{
xbt_assert(args.size() == 2, "The master function expects 2 arguments.");
- int flow_amount = std::stoi(args.at(0));
- double comm_size = std::stod(args.at(1));
- XBT_INFO("Send %.0f bytes, in %d flows", comm_size, flow_amount);
+ int flow_amount = std::stoi(args.at(0));
+ long comm_size = std::stol(args.at(1));
+ XBT_INFO("Send %ld bytes, in %d flows", comm_size, flow_amount);
simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(std::string("message"));
if (flow_amount == 1) {
/* - Send the task to the @ref worker */
- char* payload = bprintf("%f", comm_size);
+ char* payload = bprintf("%ld", comm_size);
mailbox->put(payload, comm_size);
} else {
// Start all comms in parallel, and wait for all completions in one shot
*/
class SingleCore {
public:
- bool operator()(const simgrid::s4u::Host* host) { return host->get_core_count() == 1; }
+ bool operator()(const simgrid::s4u::Host* host) const { return host->get_core_count() == 1; }
};
/* This functor is a bit more complex, as it saves the current state when created.
std::map<simgrid::s4u::Host*, int> host_list;
public:
- explicit FrequencyChanged(simgrid::s4u::Engine& e)
+ explicit FrequencyChanged(const simgrid::s4u::Engine& e)
{
std::vector<simgrid::s4u::Host*> list = e.get_all_hosts();
for (auto& host : list) {
#!/usr/bin/env tesh
! output sort
-$ ${bindir:=.}/s4u-exec-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:debug "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-exec-dependent ${platfdir}/small_platform.xml --log=s4u_activity.t:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 2.000000] (1:worker@Fafard) Remove a dependency from 'parent 1' on 'child'
> [ 2.000000] (1:worker@Fafard) Exec 'parent 1' is complete
> [ 3.000000] (1:worker@Fafard) Remove a dependency from 'parent 2' on 'child'
#!/usr/bin/env tesh
-$ ${bindir:=.}/s4u-exec-ptask ${platfdir}/energy_platform.xml --cfg=host/model:ptask_L07 --cfg=tracing:yes --cfg=tracing/uncategorized:yes --log=instr_resource.t:debug --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-exec-ptask ${platfdir}/energy_platform.xml --cfg=host/model:ptask_L07 --cfg=tracing:yes --cfg=tracing/uncategorized:yes --log=instr_resource.t:verbose --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'ptask_L07'
> [ 0.000000] (0:maestro@) Configuration change: Set 'tracing' to 'yes'
> [ 0.000000] (0:maestro@) Configuration change: Set 'tracing/uncategorized' to 'yes'
#!/usr/bin/env tesh
! output sort
-$ ${bindir:=.}/s4u-io-dependent ${platfdir}/hosts_with_disks.xml --log=s4u_activity.t:debug "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/s4u-io-dependent ${platfdir}/hosts_with_disks.xml --log=s4u_activity.t:verbose "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 1.000000] (1:bob@bob) All dependencies are solved, let's start 'bob write'
> [ 1.000000] (1:bob@bob) Remove a dependency from 'bob compute' on 'bob write'
> [ 1.100000] (1:bob@bob) All dependencies are solved, let's start 'carl read'
class MyHost {
public:
- void show_info(std::vector<simgrid::s4u::Disk*> const& disks)
+ void show_info(std::vector<simgrid::s4u::Disk*> const& disks) const
{
XBT_INFO("Storage info on %s:", simgrid::s4u::Host::current()->get_cname());
}
}
- void operator()()
+ void operator()() const
{
std::vector<simgrid::s4u::Disk*> const& disks = simgrid::s4u::Host::current()->get_disks();
> [ 0.000000] (0:maestro@) Check the liveness property promela_bugged1_liveness
> [ 0.000000] (2:client@Boivin) Ask the request
> [ 0.000000] (3:client@Fafard) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (1:coordinator@Tremblay) CS already used. Queue the request.
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (3:client@Fafard) Propositions changed : r=1, cs=0
> [ 0.000000] (1:coordinator@Tremblay) CS release. Grant to queued requests (queue size: 1)
> [ 0.000000] (2:client@Boivin) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (2:client@Boivin) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (0:maestro@) Pair 58 already reached (equal to pair 46) !
> [ 0.000000] (0:maestro@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
simgrid::s4u::Mailbox* mbox = simgrid::s4u::Mailbox::by_name("coordinator");
- while (1) {
+ while (true) {
m = static_cast<Message*>(mbox->get());
if (m->kind == Message::Kind::REQUEST) {
if (CS_used) {
requests.push(m->return_mailbox);
} else {
if (m->return_mailbox->get_name() != "1") {
- XBT_INFO("CS idle. Grant immediatly");
+ XBT_INFO("CS idle. Grant immediately");
m->return_mailbox->put(new Message(Message::Kind::GRANT, mbox), 1000);
CS_used = 1;
}
static void client(int id)
{
- int my_pid = simgrid::s4u::this_actor::get_pid();
+ aid_t my_pid = simgrid::s4u::this_actor::get_pid();
simgrid::s4u::Mailbox* my_mailbox = simgrid::s4u::Mailbox::by_name(std::to_string(id));
- while (1) {
+ while (true) {
XBT_INFO("Ask the request");
simgrid::s4u::Mailbox::by_name("coordinator")->put(new Message(Message::Kind::REQUEST, my_mailbox), 1000);
simgrid::s4u::Mailbox::by_name("coordinator")->put(new Message(Message::Kind::RELEASE, my_mailbox), 1000);
- simgrid::s4u::this_actor::sleep_for(my_pid);
+ simgrid::s4u::this_actor::sleep_for(static_cast<double>(my_pid));
if (id == 1) {
cs = 0;
> [ 0.000000] (2:client@Boivin) Ask the request
> [ 0.000000] (3:client@Fafard) Ask the request
> [ 0.000000] (2:client@Boivin) Propositions changed : r=1, cs=0
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (3:client@Fafard) 2 got the answer. Sleep a bit and release it
> [ 0.000000] (1:coordinator@Tremblay) CS release. resource now idle
> [ 0.000000] (3:client@Fafard) Ask the request
-> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediatly
+> [ 0.000000] (1:coordinator@Tremblay) CS idle. Grant immediately
> [ 0.000000] (0:maestro@) Pair 22 already reached (equal to pair 10) !
> [ 0.000000] (0:maestro@) *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
> [ 0.000000] (0:maestro@) | ACCEPTANCE CYCLE |
--- /dev/null
+/* Copyright (c) 2017-2020. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+
+/* This example demonstrates how to use wifi links in SimGrid. Most of the interesting things happen in the
+ * corresponding XML file.
+ */
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_network_wifi, "Messages specific for this s4u example");
+
+/* Actor: puts one payload of 'data_size' bytes into 'mailbox' (blocking until matched by the receiver). */
+static void sender(simgrid::s4u::Mailbox* mailbox, int data_size)
+{
+ XBT_INFO("Send a message to the other station.");
+ static char message[] = "message"; // static storage: safe to hand to put() without heap allocation
+ mailbox->put(message, data_size);
+ XBT_INFO("Done.");
+}
+/* Actor: blocks until one message arrives on 'mailbox'; the payload itself is ignored. */
+static void receiver(simgrid::s4u::Mailbox* mailbox)
+{
+ XBT_INFO("Wait for a message.");
+ mailbox->get();
+ XBT_INFO("Done.");
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine e(&argc, argv);
+
+ xbt_assert(argc > 1, "Usage: %s platform_file\n\tExample: %s platform.xml deployment.xml\n", argv[0], argv[0]);
+
+ e.load_platform(argv[1]);
+
+ // One sender and one receiver, pinned to the two wifi stations declared in the platform file.
+ // NOTE(review): 1e7 (a double) is forwarded to sender's 'int data_size' parameter — presumably
+ // narrowed to 10000000 by Actor::create's argument forwarding; confirm against the S4U API.
+ auto mailbox = simgrid::s4u::Mailbox::by_name("mailbox");
+ auto station1 = simgrid::s4u::Host::by_name("Station 1");
+ auto station2 = simgrid::s4u::Host::by_name("Station 2");
+ simgrid::s4u::Actor::create("sender", station1, sender, mailbox, 1e7);
+ simgrid::s4u::Actor::create("receiver", station2, receiver, mailbox);
+
+ auto ap = simgrid::s4u::Link::by_name("AP1");
+ ap->set_host_wifi_rate(station1, 1); // The host "Station 1" uses the second level of bandwidths on that AP
+ ap->set_host_wifi_rate(station2, 0); // This is perfectly useless as level 0 is used by default
+
+ e.run();
+
+ return 0;
+}
--- /dev/null
+#!/usr/bin/env tesh
+
+$ ${bindir:=.}/s4u-network-wifi ${platfdir}/wifi.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:sender@Station 1) Send a message to the other station.
+> [ 0.000000] (2:receiver@Station 2) Wait for a message.
+> [ 3.888889] (1:sender@Station 1) Done.
+> [ 3.888889] (2:receiver@Station 2) Done.
simgrid::s4u::Mailbox* mailbox;
long number_of_tasks = xbt_str_parse_int(argv[1], "Invalid amount of tasks: %s");
double comp_size = xbt_str_parse_double(argv[2], "Invalid computational size: %s");
- double comm_size = xbt_str_parse_double(argv[3], "Invalid communication size: %s");
+ long comm_size = xbt_str_parse_int(argv[3], "Invalid communication size: %s");
long workers_count = xbt_str_parse_int(argv[4], "Invalid amount of workers: %s");
XBT_INFO("Got %ld workers and %ld tasks to process", workers_count, number_of_tasks);
xbt_assert(argc == 2, "Expecting one parameter");
long id = xbt_str_parse_int(argv[1], "Invalid argument %s");
simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(std::string("worker-") + std::to_string(id));
- while (1) {
+ while (true) {
try {
XBT_INFO("Waiting a message on %s", mailbox->get_cname());
const double* payload = static_cast<double*>(mailbox->get());
#include "xbt/replay.hpp"
#include "xbt/str.h"
#include <boost/algorithm/string/join.hpp>
+#include <cinttypes>
#include <string>
XBT_LOG_NEW_DEFAULT_CATEGORY(replay_comm, "Messages specific for this msg example");
simgrid::xbt::replay_runner(actor_name, trace_filename);
}
- void operator()()
+ void operator()() const
{
// Nothing to do here
}
static void send(simgrid::xbt::ReplayAction& action)
{
- double size = std::stod(action[3]);
- std::string* payload = new std::string(action[3]);
- double clock = simgrid::s4u::Engine::get_clock();
+ uint64_t size = static_cast<uint64_t>(std::stod(action[3]));
+ std::string* payload = new std::string(action[3]);
+ double clock = simgrid::s4u::Engine::get_clock();
simgrid::s4u::Mailbox* to = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_name() + "_" + action[2]);
- ACT_DEBUG("Entering Send: %s (size: %g) -- Actor %s on mailbox %s", NAME.c_str(), size,
+ ACT_DEBUG("Entering Send: %s (size: %" PRIu64 ") -- Actor %s on mailbox %s", NAME.c_str(), size,
simgrid::s4u::this_actor::get_cname(), to->get_cname());
to->put(payload, size);
delete payload;
simgrid::xbt::replay_runner(actor_name, nullptr);
}
- void operator()()
+ void operator()() const
{
// Nothing to do here
}
if(timer_on != 0 && dg->numNodes+1>timers_tot){
timer_on=0;
if(my_rank==0)
- fprintf(stderr,"Not enough timers. Node timeing is off. \n");
+ fprintf(stderr,"Not enough timers. Node timing is off. \n");
}
if(dg->numNodes && dg->numNodes>comm_size){
if(my_rank==0){
1220703125.00 ), /* Random number gen mult */
1220703125.00 ); /* Random number gen mult */
-/* Do one interation for free (i.e., untimed) to guarantee initialization of
+/* Do one iteration for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables */
rank(gd, 1 );
p Test instrumentation of SMPI
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1 -np 8 ${bindir:=.}/smpi_gemm 1000 native
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1f -np 8 ${bindir:=.}/smpi_gemm 1000 native
> You requested to use 8 ranks, but there is only 5 processes in your hostfile...
> Matrix Size : 1000x1000
> Native mode
> Performance= 227.29 GFlop/s, Time= 8.799 sec, Size= 2000000000 Ops
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1 -np 8 ${bindir:=.}/smpi_gemm 1000 sampling
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/host-speed:1f -np 8 ${bindir:=.}/smpi_gemm 1000 sampling
> You requested to use 8 ranks, but there is only 5 processes in your hostfile...
> Matrix Size : 1000x1000
> Sampling mode
xbt_dynar_push(requests, &recv_buff);
}else{
if(recv_buff != size - 1){
- printf("CS idle. Grant immediatly.\n");
+ printf("CS idle. Grant immediately.\n");
MPI_Send(&rank, 1, MPI_INT, recv_buff, GRANT_TAG, MPI_COMM_WORLD);
CS_used = 1;
}
printf("CS already used. Queue the request.\n");
xbt_dynar_push(requests, &recv_buff);
}else{
- printf("CS idle. Grant immediatly.\n");
+ printf("CS idle. Grant immediately.\n");
MPI_Send(&rank, 1, MPI_INT, recv_buff, GRANT_TAG, MPI_COMM_WORLD);
CS_used = 1;
}
#!/usr/bin/env tesh
! timeout 60
-$ ../../../smpi_script/bin/smpirun -wrapper ${bindir:=.}/../../../bin/simgrid-mc -hostfile ${srcdir:=.}/hostfile_non_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --log=xbt_cfg.thresh:warning --cfg=model-check/communications-determinism:1 --cfg=smpi/send-is-detached-thresh:0 --cfg=smpi/host-speed:1e9 ./smpi_non_deterministic
+$ ../../../smpi_script/bin/smpirun -wrapper ${bindir:=.}/../../../bin/simgrid-mc -hostfile ${srcdir:=.}/hostfile_non_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --log=xbt_cfg.thresh:warning --cfg=model-check/communications-determinism:1 --cfg=smpi/send-is-detached-thresh:0 --cfg=smpi/host-speed:1Gf ./smpi_non_deterministic
> [0.000000] [mc_global/INFO] Check communication determinism
> [0.000000] [mc_comm_determinism/INFO] The communications pattern of the process 1 is different! (Different communication : 1)
> [0.000000] [mc_comm_determinism/INFO] ****************************************************
#!/usr/bin/env tesh
! timeout 60
-$ ../../../smpi_script/bin/smpirun -wrapper "${bindir:=.}/../../../bin/simgrid-mc" --log=xbt_cfg.thresh:warning -hostfile ${srcdir:=.}/hostfile_only_send_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --cfg=model-check/communications-determinism:1 --cfg=smpi/buffering:zero --cfg=smpi/host-speed:1e9 ./smpi_only_send_deterministic
+$ ../../../smpi_script/bin/smpirun -wrapper "${bindir:=.}/../../../bin/simgrid-mc" --log=xbt_cfg.thresh:warning -hostfile ${srcdir:=.}/hostfile_only_send_deterministic -platform ${srcdir:=.}/../../platforms/cluster_backbone.xml --cfg=model-check/communications-determinism:1 --cfg=smpi/buffering:zero --cfg=smpi/host-speed:1Gf ./smpi_only_send_deterministic
> [0.000000] [mc_comm_determinism/INFO] Check communication determinism
> [0.000000] [mc_comm_determinism/INFO] *******************************************************
> [0.000000] [mc_comm_determinism/INFO] **** Only-send-deterministic communication pattern ****
int main(int argc, char* argv[])
{
const char* instance_id = simgrid::s4u::Actor::self()->get_property("instance_id");
- const int rank = xbt_str_parse_int(simgrid::s4u::Actor::self()->get_property("rank"), "Cannot parse rank");
+ const int rank =
+ static_cast<int>(xbt_str_parse_int(simgrid::s4u::Actor::self()->get_property("rank"), "Cannot parse rank"));
const char* trace_filename = argv[1];
double start_delay_flops = 0;
2 smpi_replay.txt 32 125000000000
-This launchs an instance "2" that will replay file smpi_replay.txt with 32 processes, and each one of these processes
+This launches an instance "2" that will replay file smpi_replay.txt with 32 processes, and each one of these processes
will sleep for 125000000000 flops before the run.
In order to be replayed, a deployment file must be generated from this description file, and from the intended platform
if [ -z "${HOSTFILE}" ] ; then
HOSTFILETMP=1
HOSTFILE="$(mktemp tmphostXXXXXX)"
- perl -ne 'print "$1\n" if /.*<host.*?id="(.*?)".*?\/>.*/' ${PLATFORM} > ${HOSTFILE}
+ perl -ne 'print "$1\n" if /.*<host.*?id="(.*?)".*?\/>.*/' "${PLATFORM}" > "${HOSTFILE}"
fi
UNROLLEDHOSTFILETMP=0
#parse if our lines are terminated by :num_process
-multiple_processes=$(grep -c ":" $HOSTFILE)
+multiple_processes=$(grep -c ":" "$HOSTFILE")
if [ "${multiple_processes}" -gt 0 ] ; then
UNROLLEDHOSTFILETMP=1
UNROLLEDHOSTFILE="$(mktemp tmphostXXXXXX)"
- perl -ne ' do{ for ( 1 .. $2 ) { print "$1\n" } } if /(.*?):(\d+).*/' ${HOSTFILE} > ${UNROLLEDHOSTFILE}
+ perl -ne ' do{ for ( 1 .. $2 ) { print "$1\n" } } if /(.*?):(\d+).*/' "${HOSTFILE}" > "${UNROLLEDHOSTFILE}"
if [ ${HOSTFILETMP} = 1 ] ; then
- rm ${HOSTFILE}
+ rm "${HOSTFILE}"
HOSTFILETMP=0
fi
HOSTFILE=$UNROLLEDHOSTFILE
fi
# Don't use wc -l to compute it to avoid issues with trailing \n at EOF
-hostfile_procs=$(grep -c "[a-zA-Z0-9]" $HOSTFILE)
-if [ ${hostfile_procs} = 0 ] ; then
- echo "[$(basename $0)] ** error: the hostfile '${HOSTFILE}' is empty. Aborting." >&2
+hostfile_procs=$(grep -c "[a-zA-Z0-9]" "$HOSTFILE")
+if [ "${hostfile_procs}" = 0 ] ; then
+ echo "[$(basename "$0")] ** error: the hostfile '${HOSTFILE}' is empty. Aborting." >&2
exit 1
fi
##-------------------------------- DEFAULT APPLICATION --------------------------------------
-APPLICATIONTMP=$(echo ${PROC_ARGS}|cut -d' ' -f2 -s)
+APPLICATIONTMP=$(echo "${PROC_ARGS}"|cut -d' ' -f2 -s)
-cat > ${APPLICATIONTMP} <<APPLICATIONHEAD
+cat > "${APPLICATIONTMP}" <<APPLICATIONHEAD
<?xml version='1.0'?>
<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
<platform version="4.1">
APPLICATIONHEAD
##---- cache hostnames of hostfile---------------
-if [ -n "${HOSTFILE}" ] && [ -f ${HOSTFILE} ]; then
- hostnames=$(tr '\n\r' ' ' < ${HOSTFILE})
- NUMHOSTS=$(wc -l < ${HOSTFILE})
+if [ -n "${HOSTFILE}" ] && [ -f "${HOSTFILE}" ]; then
+ hostnames=$(tr '\n\r' ' ' < "${HOSTFILE}")
+ NUMHOSTS=$(wc -l < "${HOSTFILE}")
fi
-DESCRIPTIONFILE=$(echo $PROC_ARGS|cut -d' ' -f1)
+DESCRIPTIONFILE=$(echo "$PROC_ARGS"|cut -d' ' -f1)
if [ -n "${DESCRIPTIONFILE}" ] && [ -f "${DESCRIPTIONFILE}" ]; then
IFS_OLD=$IFS
if [ -n "${HOSTFILE}" ]; then
j=$(( NUMPROCS % NUMHOSTS + 1))
fi
- hostname=$(echo $hostnames|cut -d' ' -f$j)
+ hostname=$(echo "$hostnames"|cut -d' ' -f$j)
if [ -z "${hostname}" ]; then
host="host"$($j)
else
host="${hostname}"
fi
-
- echo " <actor host=\"${host}\" function=\"${instance}\"> <!-- function name used only for logging -->" >> ${APPLICATIONTMP}
- echo " <argument value=\"${instance}\"/> <!-- instance -->" >> ${APPLICATIONTMP}
- echo " <argument value=\"${i}\"/> <!-- rank -->" >> ${APPLICATIONTMP}
- echo " <argument value=\"$(echo $hosttrace|cut -d' ' -f$((i+1)))\"/>" >> ${APPLICATIONTMP}
+ {
+ echo " <actor host=\"${host}\" function=\"${instance}\"> <!-- function name used only for logging -->"
+ echo " <argument value=\"${instance}\"/> <!-- instance -->"
+ echo " <argument value=\"${i}\"/> <!-- rank -->"
+ echo " <argument value=\"$(echo "$hosttrace"|cut -d' ' -f$((i+1)))\"/>"
- echo " <argument value=\"${sleeptime}\"/> <!-- delay -->" >> ${APPLICATIONTMP}
- echo " </actor>" >> ${APPLICATIONTMP}
+ echo " <argument value=\"${sleeptime}\"/> <!-- delay -->"
+ echo " </actor>"
+ } >> "${APPLICATIONTMP}"
NUMPROCS=$(( NUMPROCS + 1))
done
# return IFS back to newline for "for" loop
IFS_OLD=$IFS
IFS=$(printf '\n_'); IFS=${IFS%_} # protect trailing \n
- done < ${DESCRIPTIONFILE}
+ done < "${DESCRIPTIONFILE}"
# return delimiter to previous value
IFS=$IFS_OLD
IFS_OLD=
else
- printf "File not found: %s\n", ${DESCRIPTIONFILE} >&2
+ printf "File not found: %s\n", "${DESCRIPTIONFILE}" >&2
exit 1
fi
-cat >> ${APPLICATIONTMP} <<APPLICATIONFOOT
+cat >> "${APPLICATIONTMP}" <<APPLICATIONFOOT
</platform>
APPLICATIONFOOT
##-------------------------------- end DEFAULT APPLICATION --------------------------------------
if [ ${HOSTFILETMP} = 1 ] ; then
- rm ${HOSTFILE}
+ rm "${HOSTFILE}"
fi
if [ ${UNROLLEDHOSTFILETMP} = 1 ] ; then
- rm ${UNROLLEDHOSTFILE}
+ rm "${UNROLLEDHOSTFILE}"
fi
exit 0
static void smpi_replay(int argc, char* argv[])
{
const char* instance_id = argv[1];
- int rank = xbt_str_parse_int(argv[2], "Cannot parse rank '%s'");
+ int rank = (int)xbt_str_parse_int(argv[2], "Cannot parse rank '%s'");
const char* trace_filename = argv[3];
double start_delay_flops = 0;
const char** line_char= xbt_dynar_to_array(elems);
instance_id = line_char[0];
- int instance_size = xbt_str_parse_int(line_char[2], "Invalid size: %s");
+ int instance_size = (int)xbt_str_parse_int(line_char[2], "Invalid size: %s");
XBT_INFO("Initializing instance %s of size %d", instance_id, instance_size);
SMPI_app_instance_register(instance_id, smpi_replay,instance_size);
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='alone', file='coll.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone')
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone')
p Workload with one coll job (with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_coll1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='alone', file='coll.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone')
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_coll2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_coll2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='coll.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='coll.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='alone', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone')
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone')
p Workload with one empty job (with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='alone', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone')
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='2,3'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
p Workload with two empty jobs (not at the same time, not on the same resources, with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='2,3'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
p Workload with two empty jobs (not at the same time, but on the same resources, with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=1000, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='2,3'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
p Workload with two empty jobs (at the same time but not on the same resources, with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='2,3'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_empty2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='empty.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='alone', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone')
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone')
p Workload with one mixed job (with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed1 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='alone', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'alone')
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'alone')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0')
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='2,3'
> [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0')
p Workload with two mixed jobs (not at the same time, not on the same resources, with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2 --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0')
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='2,3'
> [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0')
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='0,1'
> [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0')
p Workload with two mixed jobs (not at the same time, but on the same resources, with noise)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
> [ 0.000000] (job_0000@Bourassa) Executing job 0 (smpi_app 'job0')
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=1000, alloc='0,1'
> [ 0.000000] (rank_0_0@Bourassa) Replaying rank 0 of job 0 (smpi_app 'job0')
p Workload with two mixed jobs (at the same time but not on the same resources)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='2,3'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
p Workload with two mixed jobs (at the same time but not on the same resources)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='2,3'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
p Workload with two mixed jobs (at the same time and on the same resources)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
p Workload with two mixed jobs (at the same time and on the same resources)
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/workload_mixed2_same_time_and_resources --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Job read: app='job0', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (maestro@) Job read: app='job1', file='mixed.txt', size=2, start=0, alloc='0,1'
> [ 0.000000] (workload@Bourassa) Launching the job executor of job 0 (app 'job0')
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 0 0
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 0.000000] (maestro@) Simulation finished! Final time: 0
! timeout 120
! output sort 25
-$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100 "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
-> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100'
+$ ./replay_multiple_manual ${platfdir}/small_platform_with_routers.xml ${srcdir:=.}/workload_nojob --log=smpi.:info --cfg=smpi/host-speed:100f "--log=root.fmt:[%11.6r]%e(%P@%h)%e%m%n" 7 13
+> [ 0.000000] (maestro@) Configuration change: Set 'smpi/host-speed' to '100f'
> [ 7.000000] (maestro@) Simulation finished! Final time: 7
$ rm -f ${srcdir:=.}/workload_nojob
+++ /dev/null
-if(enable_smpi)
- include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
-
- add_executable (masterslave_mailbox_smpi EXCLUDE_FROM_ALL masterslave_mailbox_smpi.cpp)
- target_link_libraries(masterslave_mailbox_smpi simgrid)
- ADD_TESH_FACTORIES(smpi-s4u-masterslave "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterslave --cd ${CMAKE_BINARY_DIR}/examples/smpi/smpi_s4u_masterslave ${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterslave/s4u_smpi.tesh)
- add_dependencies(tests masterslave_mailbox_smpi)
-endif()
-
-set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/s4u_smpi.tesh PARENT_SCOPE)
-set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/deployment_masterslave_mailbox_smpi.xml PARENT_SCOPE)
-set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/masterslave_mailbox_smpi.cpp PARENT_SCOPE)
--- /dev/null
+if(enable_smpi)
+ include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
+
+ add_executable (masterworker_mailbox_smpi EXCLUDE_FROM_ALL masterworker_mailbox_smpi.cpp)
+ target_link_libraries(masterworker_mailbox_smpi simgrid)
+ ADD_TESH_FACTORIES(smpi-s4u-masterworker "thread;ucontext;raw;boost" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterworker --cd ${CMAKE_BINARY_DIR}/examples/smpi/smpi_s4u_masterworker ${CMAKE_HOME_DIRECTORY}/examples/smpi/smpi_s4u_masterworker/s4u_smpi.tesh)
+ add_dependencies(tests masterworker_mailbox_smpi)
+endif()
+
+set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/s4u_smpi.tesh PARENT_SCOPE)
+set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/deployment_masterworker_mailbox_smpi.xml PARENT_SCOPE)
+set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/masterworker_mailbox_smpi.cpp PARENT_SCOPE)
{
xbt_assert(args.size() > 4, "The master function expects at least 3 arguments");
- long tasks_count = std::stol(args[1]);
- double compute_cost = std::stod(args[2]);
- double communication_cost = std::stod(args[3]);
+ long tasks_count = std::stol(args[1]);
+ double compute_cost = std::stod(args[2]);
+ long communication_cost = std::stol(args[3]);
std::vector<simgrid::s4u::Mailbox*> workers;
for (unsigned int i = 4; i < args.size(); i++)
workers.push_back(simgrid::s4u::Mailbox::by_name(args[i]));
p Test the use of SMPI+MSG in the same file, as well as several different SMPI instances at the same time
-$ ./masterslave_mailbox_smpi ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/deployment_masterslave_mailbox_smpi.xml --log=smpi.:info --cfg=smpi/simulate-computation:no
+$ ./masterworker_mailbox_smpi ${srcdir:=.}/../../platforms/small_platform_with_routers.xml ${srcdir:=.}/deployment_masterworker_mailbox_smpi.xml --log=smpi.:info --cfg=smpi/simulate-computation:no
> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
> [0.000000] [smpi_config/INFO] You did not set the power of the host running the simulation. The timings will certainly not be accurate. Use the option "--cfg=smpi/host-speed:<flops>" to set its value. Check https://simgrid.org/doc/latest/Configuring_SimGrid.html#automatic-benchmarking-of-smpi-code for more information.
> [Tremblay:master:(1) 0.000000] [msg_test/INFO] Got 2 workers and 20 tasks to process
$ rm -f ${bindir:=.}/smpi_trace.trace
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-resource -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1 -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-resource -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1f -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning
$ rm -f ${bindir:=.}/smpi_trace.trace
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace --cfg=tracing/smpi/display-sizes:yes --cfg=tracing/smpi/computing:yes --cfg=tracing/smpi/internals:yes -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1 -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace --cfg=tracing/smpi/display-sizes:yes --cfg=tracing/smpi/computing:yes --cfg=tracing/smpi/internals:yes -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/../hostfile -platform ${platfdir:=.}/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/host-speed:1f -np 3 ${bindir:=.}/smpi_trace --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning
$ rm -f ${bindir:=.}/smpi_trace.trace
p Test SMPI with call-location tracing. This means that the binary must have
p already been compiled with the -trace-call-location switch.
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/trace-call-use-absolute-path:1 --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace -trace-file ${bindir:=.}/smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/trace-call-use-absolute-path:1 --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning
$ grep -q "12 0.* 2 1 7 .*trace_call_location\.c\" 14$" ${bindir:=.}/smpi_trace.trace
$ rm -f ${bindir:=.}/smpi_trace.trace
#the same, but with trace-ti
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti -trace-file ${bindir:=.}/smpi_trace.txt --cfg=tracing/smpi/sleeping:1 -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/cpu-threshold:0
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti -trace-file ${bindir:=.}/smpi_trace.txt --cfg=tracing/smpi/sleeping:1 -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/smpi_trace_call_location --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/cpu-threshold:0
#Test replaying the trace, without altering the time.
#We disable computation, but leave the sleep.
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no
> [Fafard:2:(3) 1000.007967] [smpi_replay/INFO] Simulation time 1000.007967
#Test replaying the trace, altering the time to have the sleep much faster (1 instead of 1000).
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/comp-adjustment-file:${srcdir:=.}/trace_call_location/adjust -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1 --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${platfdir}/small_platform.xml --cfg=smpi/comp-adjustment-file:${srcdir:=.}/trace_call_location/adjust -np 3 -replay ${bindir:=.}/smpi_trace.txt ${bindir:=.}/../replay/smpi_replay --cfg=smpi/host-speed:1f --log=smpi_config.thres:warning --log=xbt_cfg.thres:warning --cfg=smpi/simulate-computation:no
> [Fafard:2:(3) 1.007967] [smpi_replay/INFO] Simulation time 1.007967
$ rm -f ${bindir:=.}/smpi_trace.tx*
XBT_PUBLIC xbt_dict_t sg_actor_get_properties(const_sg_actor_t actor);
XBT_PUBLIC void sg_actor_suspend(sg_actor_t actor);
XBT_PUBLIC void sg_actor_resume(sg_actor_t actor);
-XBT_PUBLIC int sg_actor_is_suspended(sg_actor_t actor);
+XBT_PUBLIC int sg_actor_is_suspended(const_sg_actor_t actor);
XBT_PUBLIC sg_actor_t sg_actor_restart(sg_actor_t actor);
XBT_PUBLIC void sg_actor_set_auto_restart(sg_actor_t actor, int auto_restart);
XBT_PUBLIC void sg_actor_daemonize(sg_actor_t actor);
const char* get_cname() const { return name.c_str(); }
void set_parent(Container* parent) { parent_ = parent; }
- bool has_children() { return not children_.empty(); }
+ bool has_children() const { return not children_.empty(); }
int get_child_position(const Container* child) const;
- unsigned int get_id_by_name(const char* name) { return name2id.at(name); }
+ unsigned int get_id_by_name(const char* name) const { return name2id.at(name); }
void add_child(Container* child);
void add_resources(std::vector<sg_host_t> hosts);
FutureStateBase(FutureStateBase const&) = delete;
FutureStateBase& operator=(FutureStateBase const&) = delete;
- XBT_PUBLIC void schedule(simgrid::xbt::Task<void()>&& job);
+ XBT_PUBLIC void schedule(simgrid::xbt::Task<void()>&& job) const;
void set_exception(std::exception_ptr exception)
{
future_get_ = true;
return Future<void>(state_);
}
- void set_value()
+ void set_value() const
{
if (state_ == nullptr)
throw std::future_error(std::future_errc::no_state);
state_->set_value();
}
- void set_exception(std::exception_ptr exception)
+ void set_exception(std::exception_ptr exception) const
{
if (state_ == nullptr)
throw std::future_error(std::future_errc::no_state);
public:
enum class Type {
- latency = 100, /* this is a heap entry to warn us when the latency is payed */
+ latency = 100, /* this is a heap entry to warn us when the latency is paid */
max_duration, /* this is a heap entry to warn us when the max_duration limit (timeout) is reached */
normal, /* this is a normal heap entry stating the date to finish transmitting */
unset
virtual ~Model();
- bool is_update_lazy() { return update_algorithm_ == UpdateAlgo::LAZY; }
+ bool is_update_lazy() const { return update_algorithm_ == UpdateAlgo::LAZY; }
/** @brief Get the set of [actions](@ref Action) in *inited* state */
Action::StateSet* get_inited_action_set() { return &inited_action_set_; }
/* The pair is {link_up, link_down} */
std::unordered_map<unsigned int, std::pair<kernel::resource::LinkImpl*, kernel::resource::LinkImpl*>> private_links_;
- unsigned int node_pos(int id) { return id * num_links_per_node_; }
- unsigned int node_pos_with_loopback(int id) { return node_pos(id) + (has_loopback_ ? 1 : 0); }
- unsigned int node_pos_with_loopback_limiter(int id) { return node_pos_with_loopback(id) + (has_limiter_ ? 1 : 0); }
+ unsigned int node_pos(int id) const { return id * num_links_per_node_; }
+ unsigned int node_pos_with_loopback(int id) const { return node_pos(id) + (has_loopback_ ? 1 : 0); }
+ unsigned int node_pos_with_loopback_limiter(int id) const
+ {
+ return node_pos_with_loopback(id) + (has_limiter_ ? 1 : 0);
+ }
void* loopback_ = nullptr;
kernel::resource::LinkImpl* backbone_ = nullptr;
void parse_specific_arguments(ClusterCreationArgs* cluster) override;
void seal() override;
- void rankId_to_coords(int rank_id, unsigned int coords[4]);
+ void rankId_to_coords(int rank_id, unsigned int coords[4]) const;
private:
void generate_routers();
void generate_links();
- void create_link(const std::string& id, int numlinks, resource::LinkImpl** linkup, resource::LinkImpl** linkdown);
+ void create_link(const std::string& id, int numlinks, resource::LinkImpl** linkup,
+ resource::LinkImpl** linkdown) const;
simgrid::s4u::Link::SharingPolicy sharing_policy_;
double bw_ = 0;
void generate_labels();
void generate_switches();
int connect_node_to_parents(FatTreeNode* node);
- bool are_related(FatTreeNode* parent, FatTreeNode* child);
- bool is_in_sub_tree(FatTreeNode* root, FatTreeNode* node);
+ bool are_related(FatTreeNode* parent, FatTreeNode* child) const;
+ bool is_in_sub_tree(FatTreeNode* root, FatTreeNode* node) const;
};
} // namespace routing
} // namespace kernel
~NetPoint() = default;
// Our rank in the vertices_ array of the netzone that contains us.
- unsigned int id() { return id_; }
+ unsigned int id() const { return id_; }
const std::string& get_name() const { return name_; }
const char* get_cname() const { return name_.c_str(); }
/** @brief the NetZone in which this NetPoint is included */
*
* @param src where from
* @param dst where to
- * @param into Container into which the traversed links and gateway informations should be pushed
+ * @param into Container into which the traversed links and gateway information should be pushed
* @param latency Accumulator in which the latencies should be added (caller must set it to 0)
*/
virtual void get_local_route(NetPoint* src, NetPoint* dst, RouteCreationArgs* into, double* latency) = 0;
virtual RouteCreationArgs* new_extended_route(RoutingMode hierarchy, NetPoint* src, NetPoint* dst, NetPoint* gw_src,
NetPoint* gw_dst, std::vector<resource::LinkImpl*>& link_list,
bool symmetrical, bool change_order);
- void get_route_check_params(NetPoint* src, NetPoint* dst);
+ void get_route_check_params(NetPoint* src, NetPoint* dst) const;
void add_route_check_params(NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoint* gw_dst,
- const std::vector<resource::LinkImpl*>& link_list, bool symmetrical);
+ const std::vector<resource::LinkImpl*>& link_list, bool symmetrical) const;
};
} // namespace routing
} // namespace kernel
XBT_PUBLIC void MSG_process_suspend(msg_process_t process);
XBT_PUBLIC void MSG_process_resume(msg_process_t process);
-XBT_PUBLIC int MSG_process_is_suspended(msg_process_t process);
+XBT_PUBLIC int MSG_process_is_suspended(const_sg_actor_t process);
XBT_PUBLIC void MSG_process_restart(msg_process_t process);
/** @brief Sets the "auto-restart" flag of the process.
*
XBT_PUBLIC sg_size_t sg_file_write(sg_file_t fd, sg_size_t size);
XBT_PUBLIC void sg_file_close(const_sg_file_t fd);
-XBT_PUBLIC const char* sg_file_get_name(sg_file_t fd);
-XBT_PUBLIC sg_size_t sg_file_get_size(sg_file_t fd);
-XBT_PUBLIC void sg_file_dump(sg_file_t fd);
+XBT_PUBLIC const char* sg_file_get_name(const_sg_file_t fd);
+XBT_PUBLIC sg_size_t sg_file_get_size(const_sg_file_t fd);
+XBT_PUBLIC void sg_file_dump(const_sg_file_t fd);
XBT_PUBLIC void* sg_file_get_data(const_sg_file_t fd);
XBT_PUBLIC void sg_file_set_data(sg_file_t fd, void* data);
XBT_PUBLIC void sg_file_seek(sg_file_t fd, sg_offset_t offset, int origin);
-XBT_PUBLIC sg_size_t sg_file_tell(sg_file_t fd);
-XBT_PUBLIC void sg_file_move(sg_file_t fd, const char* fullpath);
+XBT_PUBLIC sg_size_t sg_file_tell(const_sg_file_t fd);
+XBT_PUBLIC void sg_file_move(const_sg_file_t fd, const char* fullpath);
XBT_PUBLIC void sg_file_unlink(sg_file_t fd);
XBT_PUBLIC int sg_file_rcopy(sg_file_t file, sg_host_t host, const char* fullpath);
XBT_PUBLIC int sg_file_rmove(sg_file_t file, sg_host_t host, const char* fullpath);
~File();
/** Retrieves the path to the file */
- const char* get_path() { return fullpath_.c_str(); }
+ const char* get_path() const { return fullpath_.c_str(); }
/** Simulates a local read action. Returns the size of data actually read */
sg_size_t read(sg_size_t size);
/** Retrieves the previously stored data */
XBT_ATTRIB_DEPRECATED_v329("Please use get_data()") void* get_userdata() { return get_data(); }
- sg_size_t size();
+ sg_size_t size() const;
void seek(sg_offset_t pos); /** Sets the file head to the given position. */
void seek(sg_offset_t pos, int origin); /** Sets the file head to the given position from a given origin. */
- sg_size_t tell(); /** Retrieves the current file position */
+ sg_size_t tell() const; /** Retrieves the current file position */
/** Rename a file. WARNING: It is forbidden to move the file to another mount point */
- void move(const std::string& fullpath);
+ void move(const std::string& fullpath) const;
int remote_copy(sg_host_t host, const char* fullpath);
int remote_move(sg_host_t host, const char* fullpath);
- int unlink(); /** Remove a file from the contents of a disk */
- void dump();
+ int unlink() const; /** Remove a file from the contents of a disk */
+ void dump() const;
};
class XBT_PUBLIC FileSystemDiskExt {
FileSystemDiskExt& operator=(const FileSystemDiskExt&) = delete;
std::map<std::string, sg_size_t>* parse_content(const std::string& filename);
std::map<std::string, sg_size_t>* get_content() const { return content_.get(); }
- const char* get_mount_point() { return mount_point_.c_str(); }
+ const char* get_mount_point() const { return mount_point_.c_str(); }
const char* get_mount_point(s4u::Host* remote_host) { return remote_mount_points_[remote_host].c_str(); }
void add_remote_mount(Host* host, const std::string& mount_point)
{
FileSystemStorageExt& operator=(const FileSystemStorageExt&) = delete;
std::map<std::string, sg_size_t>* parse_content(const std::string& filename);
std::map<std::string, sg_size_t>* get_content() { return content_.get(); }
- sg_size_t get_size() { return size_; }
- sg_size_t get_used_size() { return used_size_; }
+ sg_size_t get_size() const { return size_; }
+ sg_size_t get_used_size() const { return used_size_; }
void decr_used_size(sg_size_t size);
void incr_used_size(sg_size_t size);
};
{
while (not successors_.empty()) {
ActivityPtr b = successors_.back();
- XBT_CDEBUG(s4u_activity, "Remove a dependency from '%s' on '%s'", get_cname(), b->get_cname());
+ XBT_CVERB(s4u_activity, "Remove a dependency from '%s' on '%s'", get_cname(), b->get_cname());
b->dependencies_.erase(this);
if (b->dependencies_.empty()) {
b->vetoable_start();
{
state_ = State::STARTING;
if (dependencies_.empty()) {
- XBT_CDEBUG(s4u_activity, "All dependencies are solved, let's start '%s'", get_cname());
+ XBT_CVERB(s4u_activity, "All dependencies are solved, let's start '%s'", get_cname());
start();
}
}
Activity& operator=(Activity const&) = delete;
#endif
- enum class State { INITED = 0, STARTING, STARTED, CANCELED,
- // ERRORED, // FIXME: state has never been used
- FINISHED };
+ enum class State { INITED = 0, STARTING, STARTED, CANCELED, FINISHED };
/** Starts a previously created activity.
*
* This function is optional: you can call wait() even if you didn't call start()
*/
virtual Activity* start() = 0;
- /** Blocks until the activity is terminated */
+ /** Blocks the current actor until the activity is terminated */
virtual Activity* wait() = 0;
- /** Blocks until the activity is terminated, or until the timeout is elapsed
+ * Blocks the current actor until the activity is terminated, or until the timeout elapses\n
* Raises: timeout exception.*/
virtual Activity* wait_for(double timeout) = 0;
- /** Blocks until the activity is terminated, or until the time limit is reached
+ /** Blocks the current actor until the activity is terminated, or until the time limit is reached\n
* Raises: timeout exception. */
void wait_until(double time_limit);
/** Tests whether the given activity is terminated yet. */
virtual bool test();
+ /** Blocks the progression of this activity until it gets resumed */
+ virtual Activity* suspend();
+ /** Unblock the progression of this activity if it was suspended previously */
+ virtual Activity* resume();
+ /** Whether or not the progression of this activity is blocked */
+ bool is_suspended() const { return suspended_; }
+
virtual const char* get_cname() const = 0;
virtual const std::string& get_name() const = 0;
kernel::activity::ActivityImplPtr pimpl_ = nullptr;
Activity::State state_ = Activity::State::INITED;
double remains_ = 0;
+ bool suspended_ = false;
std::vector<ActivityPtr> successors_;
std::set<ActivityPtr> dependencies_;
std::atomic_int_fast32_t refcount_{0};
friend XBT_PUBLIC void intrusive_ptr_release(const Actor* actor);
#endif
/** Retrieve the amount of references on that object. Useful to debug the automatic refcounting */
- int get_refcount();
+ int get_refcount() const;
// ***** Actor creation *****
/** Retrieve a reference to myself */
static ActorPtr create(const std::string& name, s4u::Host* host, const std::function<void()>& code);
/** Create an actor, but don't start it yet.
*
- * This is usefull to set some properties or extension before actually starting it */
+ * This is useful to set some properties or extension before actually starting it */
static ActorPtr init(const std::string& name, s4u::Host* host);
ActorPtr set_stacksize(unsigned stacksize);
/** Start a previously initialized actor */
void resume();
/** Returns true if the actor is suspended. */
- bool is_suspended();
+ bool is_suspended() const;
/** If set to true, the actor will automatically restart when its host reboots */
void set_auto_restart(bool autorestart);
/** Sets the time at which that actor should be killed */
void set_kill_time(double time);
/** Retrieves the time at which that actor will be killed (or -1 if not set) */
- double get_kill_time();
+ double get_kill_time() const;
/** @brief Moves the actor to another host
*
static void shutdown();
/** Run the simulation after initialization */
- void run();
+ void run() const;
/** @brief Retrieve the simulation time (in seconds) */
static double get_clock();
/** @brief Retrieve the engine singleton */
static s4u::Engine* get_instance();
- void load_platform(const std::string& platf);
+ void load_platform(const std::string& platf) const;
XBT_ATTRIB_DEPRECATED_v330("Please change the return code of your actors to void") void register_function(
const std::string& name, int (*code)(int, char**));
register_function(name, std::move(code_factory));
}
- void load_deployment(const std::string& deploy);
+ void load_deployment(const std::string& deploy) const;
protected:
#ifndef DOXYGEN
public:
/** Returns the amount of hosts existing in the platform. */
- size_t get_host_count();
+ size_t get_host_count() const;
/** Returns a vector of all hosts found in the platform.
*
* The order is generally different from the creation/declaration order in the XML platform because we use a hash
* table internally.
*/
- std::vector<Host*> get_all_hosts();
- std::vector<Host*> get_filtered_hosts(const std::function<bool(Host*)>& filter);
- Host* host_by_name(const std::string& name);
- Host* host_by_name_or_null(const std::string& name);
+ std::vector<Host*> get_all_hosts() const;
+ std::vector<Host*> get_filtered_hosts(const std::function<bool(Host*)>& filter) const;
+ Host* host_by_name(const std::string& name) const;
+ Host* host_by_name_or_null(const std::string& name) const;
- size_t get_link_count();
- std::vector<Link*> get_all_links();
- std::vector<Link*> get_filtered_links(const std::function<bool(Link*)>& filter);
- Link* link_by_name(const std::string& name);
- Link* link_by_name_or_null(const std::string& name);
+ size_t get_link_count() const;
+ std::vector<Link*> get_all_links() const;
+ std::vector<Link*> get_filtered_links(const std::function<bool(Link*)>& filter) const;
+ Link* link_by_name(const std::string& name) const;
+ Link* link_by_name_or_null(const std::string& name) const;
- size_t get_actor_count();
- std::vector<ActorPtr> get_all_actors();
- std::vector<ActorPtr> get_filtered_actors(const std::function<bool(ActorPtr)>& filter);
+ size_t get_actor_count() const;
+ std::vector<ActorPtr> get_all_actors() const;
+ std::vector<ActorPtr> get_filtered_actors(const std::function<bool(ActorPtr)>& filter) const;
#ifndef DOXYGEN
- size_t get_storage_count();
- std::vector<Storage*> get_all_storages();
- Storage* storage_by_name(const std::string& name);
- Storage* storage_by_name_or_null(const std::string& name);
+ size_t get_storage_count() const;
+ std::vector<Storage*> get_all_storages() const;
+ Storage* storage_by_name(const std::string& name) const;
+ Storage* storage_by_name_or_null(const std::string& name) const;
#endif
- std::vector<kernel::routing::NetPoint*> get_all_netpoints();
- kernel::routing::NetPoint* netpoint_by_name_or_null(const std::string& name);
+ std::vector<kernel::routing::NetPoint*> get_all_netpoints() const;
+ kernel::routing::NetPoint* netpoint_by_name_or_null(const std::string& name) const;
- NetZone* get_netzone_root();
+ NetZone* get_netzone_root() const;
void set_netzone_root(const NetZone* netzone);
- NetZone* netzone_by_name_or_null(const std::string& name);
+ NetZone* netzone_by_name_or_null(const std::string& name) const;
/** @brief Retrieves all netzones of the type indicated by the template argument */
- template <class T> std::vector<T*> get_filtered_netzones()
+ template <class T> std::vector<T*> get_filtered_netzones() const
{
static_assert(std::is_base_of<kernel::routing::NetZoneImpl, T>::value,
"Filtering netzones is only possible for subclasses of kernel::routing::NetZoneImpl");
* @endrst
*
* Example:
- * e->set_config("host/model:ptask_L07");
+ * simgrid::s4u::Engine::set_config("host/model:ptask_L07");
*/
- void set_config(const std::string& str);
- void set_config(const std::string& name, int value);
- void set_config(const std::string& name, bool value);
- void set_config(const std::string& name, double value);
- void set_config(const std::string& name, const std::string& value);
+ static void set_config(const std::string& str);
+ static void set_config(const std::string& name, int value);
+ static void set_config(const std::string& name, bool value);
+ static void set_config(const std::string& name, double value);
+ static void set_config(const std::string& name, const std::string& value);
/** Callback fired when the platform is created (ie, the xml file parsed),
* right before the actual simulation starts. */
* @rst
* Most of them are created with :cpp:func:`simgrid::s4u::this_actor::exec_init()` or
* :cpp:func:`simgrid::s4u::Host::execute()`, and represent a classical (sequential) execution. This can be used to
- * simulate some computation occuring in another thread when the calling actor is not blocked during the execution.
+ * simulate some computation occurring in another thread when the calling actor is not blocked during the execution.
*
* You can also use :cpp:func:`simgrid::s4u::this_actor::parallel_execute()` to create *parallel* executions. These
* objects represent distributed computations involving computations on several hosts and communications between them.
* It is not a problem if the actor is not located on the called host.
* The actor will not be migrated in this case. Such remote execution are easy in simulation.
*/
- void execute(double flops);
+ void execute(double flops) const;
/** Start an asynchronous computation on that host (possibly remote) */
- ExecPtr exec_async(double flops_amounts);
+ ExecPtr exec_async(double flops_amounts) const;
/** Block the calling actor on an execution located on the called host (with explicit priority) */
- void execute(double flops, double priority);
+ void execute(double flops, double priority) const;
private:
xbt::string name_{"noname"};
/** @brief Retrieves the name of that link as a C string */
const char* get_cname() const;
- /** @brief Get the bandwidth in bytes per second of current Link */
+ /** Get the bandwidth of the current Link (in bytes per second) */
double get_bandwidth() const;
+ /** Set the bandwidth of the current Link (in bytes per second) */
void set_bandwidth(double value);
- /** @brief Get the latency in seconds of current Link */
+ /** Get the latency of the current Link (in seconds) */
double get_latency() const;
+ /** Set the latency of the current Link (in seconds) */
void set_latency(double value);
/** @brief Describes how the link is shared between flows */
SharingPolicy get_sharing_policy() const;
- /** @brief Returns the current load (in flops per second) */
+ /** @brief Set the level of communication speed of the given host on this wifi link.
+ *
+ * The bandwidth of a wifi link for a given host depends on its SNR (signal to noise ratio),
+ * which ultimately depends on the distance between the host and the station and the material between them.
+ *
+ * This is modeled in SimGrid by providing several bandwidths to wifi links, one per SNR level (just provide
+ * comma-separated values in the XML file). By default, the first level in the list is used, but you can use the
+ * current function to specify that a given host uses another level of bandwidth. This can be used to take the
+ * location of hosts into account, or even to model mobility in your SimGrid simulation.
+ *
+ * Note that this function asserts that the link is actually a wifi link */
+ void set_host_wifi_rate(const s4u::Host* host, int level) const;
+
+ /** @brief Returns the current load (in bytes per second) */
double get_usage() const;
/** @brief Check if the Link is used (at least one flow uses the link) */
static Mailbox* by_name(const std::string& name);
/** Returns whether the mailbox contains queued communications */
- bool empty();
+ bool empty() const;
/** Check if there is a communication going on in a mailbox. */
- bool listen();
+ bool listen() const;
/** Look if there is a communication going on in a mailbox and return the PID of the sender actor */
- aid_t listen_from();
+ aid_t listen_from() const;
/** Check if there is a communication ready to be consumed from a mailbox. */
- bool ready();
+ bool ready() const;
/** Gets the first element in the queue (without dequeuing it), or nullptr if none is there */
- kernel::activity::CommImplPtr front();
+ kernel::activity::CommImplPtr front() const;
/** Declare that the specified actor is a permanent receiver on that mailbox
*
void set_receiver(ActorPtr actor);
/** Return the actor declared as permanent receiver, or nullptr if none **/
- ActorPtr get_receiver();
+ ActorPtr get_receiver() const;
/** Creates (but don't start) a data transmission to that mailbox */
CommPtr put_init();
void destroy() override;
Host* get_pm() const;
- void set_pm(Host* pm);
+ VirtualMachine* set_pm(Host* pm);
size_t get_ramsize() const;
- void set_ramsize(size_t ramsize);
- void set_bound(double bound);
+ VirtualMachine* set_ramsize(size_t ramsize);
+ VirtualMachine* set_bound(double bound);
VirtualMachine::state get_state();
static xbt::signal<void(VirtualMachine const&)> on_start;
Timer(double date, simgrid::xbt::Task<void()>&& callback) : date(date), callback(std::move(callback)) {}
simgrid::xbt::Task<void()> callback;
- double get_date() { return date; }
+ double get_date() const { return date; }
void remove();
template <class F> static inline Timer* set(double date, F callback)
double precision MPI_WTIME
double precision MPI_WTICK
+
+ external smpi_execute_flops
+ external smpi_execute_flops_benched
+ external smpi_execute
+ external smpi_execute_benched
@MODULE_MPIF_OUT@
#define MPI_LOGICAL MPI_LONG
#endif
-#define MPI_Fint int
+typedef int MPI_Fint;
#define MPI_COMPLEX MPI_C_FLOAT_COMPLEX
#define MPI_DOUBLE_COMPLEX MPI_C_DOUBLE_COMPLEX
#define MPI_Win_get_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Win_get_errhandler(__VA_ARGS__))
#define MPI_Win_create_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Win_create_errhandler(__VA_ARGS__))
#define MPI_Win_call_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Win_call_errhandler(__VA_ARGS__))
+#define MPI_Type_get_contents(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_get_contents(__VA_ARGS__))
+#define MPI_Type_get_envelope(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_get_envelope(__VA_ARGS__))
#define MPI_File_call_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_File_call_errhandler(__VA_ARGS__))
#define MPI_File_create_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_File_create_errhandler(__VA_ARGS__))
#define MPI_File_set_errhandler(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_File_set_errhandler(__VA_ARGS__))
#define MPI_Irsend(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Irsend(__VA_ARGS__))
#define MPI_Get_elements(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Get_elements(__VA_ARGS__))
#define MPI_Pcontrol(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Pcontrol(__VA_ARGS__))
-#define MPI_Type_get_contents(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_get_contents(__VA_ARGS__))
#define MPI_Type_create_darray(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Type_create_darray(__VA_ARGS__))
#define MPI_Pack_external_size(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Pack_external_size(__VA_ARGS__))
#define MPI_Pack_external(...) (smpi_trace_set_call_location(__FILE__, __LINE__), MPI_Pack_external(__VA_ARGS__))
#define MPI_WIN_CREATE_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Win_create_errhandler
#define mpi_win_call_errhandler smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Win_call_errhandler
#define MPI_WIN_CALL_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Win_call_errhandler
+#define mpi_type_get_contents smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents
+#define MPI_TYPE_GET_CONTENTS smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents
+#define mpi_type_get_envelope smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_envelope
+#define MPI_TYPE_GET_ENVELOPE smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_envelope
#define mpi_file_call_errhandler smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_File_call_errhandler
#define MPI_FILE_CALL_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_File_call_errhandler
#define mpi_file_create_errhandler smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_File_create_errhandler
#define MPI_GET_ELEMENTS smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Get_elements
#define mpi_pcontrol smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Pcontrol
#define MPI_PCONTROL smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Pcontrol
-#define mpi_type_get_contents smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents
-#define MPI_TYPE_GET_CONTENTS smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_get_contents
#define mpi_type_create_darray smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_create_darray
#define MPI_TYPE_CREATE_DARRAY smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Type_create_darray
#define mpi_pack_external_size smpi_trace_set_call_location(__FILE__,__LINE__); call MPI_Pack_external_size
/** @brief Returns the full path name */
const std::string& get_name() const { return path_; }
/** @brief Returns the directory component of a path (reimplementation of POSIX dirname) */
- std::string get_dir_name();
+ std::string get_dir_name() const;
/** @brief Returns the file component of a path (reimplementation of POSIX basename) */
- std::string get_base_name();
+ std::string get_base_name() const;
private:
std::string path_;
xbt_log_priority_trace = 1, /**< enter and return of some functions */
xbt_log_priority_debug = 2, /**< crufty output */
xbt_log_priority_verbose = 3, /**< verbose output for the user wanting more */
- xbt_log_priority_info = 4, /**< output about the regular functionning */
+ xbt_log_priority_info = 4, /**< output about the regular functioning */
xbt_log_priority_warning = 5, /**< minor issue encountered */
xbt_log_priority_error = 6, /**< issue encountered */
xbt_log_priority_critical = 7, /**< major issue encountered */
namespace simgrid {
namespace xbt {
-/** Display informations about an exception
+/** Display information about an exception
*
* We display: the exception type, name, attached backtraces (if any) and
* the nested exception (if any).
--- /dev/null
+/* Copyright (c) 2007-2020. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef SIMGRID_XBT_PARSE_UNITS_HPP
+#define SIMGRID_XBT_PARSE_UNITS_HPP
+
+double xbt_parse_get_time(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name);
+double surf_parse_get_size(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name);
+double xbt_parse_get_bandwidth(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name);
+std::vector<double> xbt_parse_get_bandwidths(const std::string& filename, int lineno, const char* string,
+ const char* entity_kind, const std::string& name);
+double xbt_parse_get_speed(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name);
+std::vector<double> xbt_parse_get_all_speeds(const std::string& filename, int lineno, char* speeds,
+ const char* entity_kind, const std::string& id);
+
+#endif
/**
* @brief Write the state of the Mersenne-Twister RNG to a file
*/
- bool write_state(const std::string& filename);
+ bool write_state(const std::string& filename) const;
/**
* @brief Draws an integer number uniformly in range [min, max] (min and max included)
# Disable some rules on some files
-sonar.issue.ignore.multicriteria=j1,j2,j3,j4,j5,jni1,jni2,c1,c2a,c2b,c3,c5a,c5b,c6a,c6b,c7,c8a,c8b,c9,c10a,c10b,c10c,cex1a,cex1b,cex2a,cex2b,cex3,cex4,f1,p1,s1,s2,s3,s4,s5
+sonar.issue.ignore.multicriteria=j1,j2,j3,j4,j5,jni1,jni2,c1,c2a,c2b,c3,c4,c5a,c5b,c6a,c6b,c7,c8a,c8b,c9,c10a,c10b,c10c,cex1a,cex1b,cex2a,cex2b,cex3,cex4,f1,p1,s1,s2,s3,s4,s5
# The Object.finalize() method should not be overridden
# But we need to clean the native memory with JNI
sonar.issue.ignore.multicriteria.c3.ruleKey=c:PPMacroName
sonar.issue.ignore.multicriteria.c3.resourceKey=include/smpi/smpi_extended_traces.h
+# Concise syntax should be used for concatenatable namespaces
+# This is C++17, and we still support C++11
+sonar.issue.ignore.multicriteria.c4.ruleKey=cpp:S5812
+sonar.issue.ignore.multicriteria.c4.resourceKey=**/*
+
# Replace alternative operator "not" with "!"
# I like it better, so please leave me alone
sonar.issue.ignore.multicriteria.c5a.ruleKey=cpp:S3659
/** Helper class loading the native functions of SimGrid that we use for downcalls
*
- * Almost all org.simgrid.msg.* classes contain a static bloc (thus executed when the class is loaded)
+ * Almost all org.simgrid.msg.* classes contain a static block (thus executed when the class is loaded)
* containing a call to this.
*/
public final class NativeLib {
* declared in the deployment file (XML format).
* To create your own process you must inherit your own process from this
* class and override the method "main()". For example if you want to use
- * a process named Slave proceed as it :
+ * a process named Worker, proceed as follows:
*
* (1) import the class Process of the package simgrid.msg
* import simgrid.msg.Process;
*
- * public class Slave extends simgrid.msg.Process {
+ * public class Worker extends simgrid.msg.Process {
*
* (2) Override the method function
*
* \endverbatim
* }
* The name of your process must be declared in the deployment file of your simulation.
- * For the example, for the previous process Slave this file must contains a line :
- * <process host="Maxims" function="Slave"/>, where Maxims is the host of the process
- * Slave. All the process of your simulation are automatically launched and managed by Msg.
+ * For example, for the previous process Worker this file must contain a line:
+ * <process host="Maxims" function="Worker"/>, where Maxims is the host of the process
+ * Worker. All the processes of your simulation are automatically launched and managed by Msg.
* A process use tasks to simulate communications or computations with another process.
* For more information see Task. For more information on host concept
* see Host.
* If 0, then it cannot be executed with the execute() method.
* This value has to be ≥ 0.
*
- * @param bytesAmount A value of amount of data (in bytes) needed to transfert this task.
+ * @param bytesAmount A value of amount of data (in bytes) needed to transfer this task.
* If 0, then it cannot be transferred with the get() and put() methods.
* This value has to be ≥ 0.
*/
* @param flopsAmount A value of the processing amount (in flop) needed
* to process the task. If 0, then it cannot be executed
* with the execute() method. This value has to be >= 0.
- * @param bytesAmount A value of amount of data (in bytes) needed to transfert
+ * @param bytesAmount A value of amount of data (in bytes) needed to transfer
* this task. If 0, then it cannot be transferred this task.
* If 0, then it cannot be transferred with the get() and put()
* methods. This value has to be >= 0.
Msg.info("Migration of VM "+this.getName()+" to "+destination.getName()+" is impossible ("+e.getMessage()+")");
throw new HostFailureException(e.getMessage());
}
- // If the migration correcly returned, then we should change the currentHost value.
+ // If the migration correctly returned, then we should change the currentHost value.
this.currentHost = destination;
}
private native void nativeMigration(Host destination) throws MsgException;
*/
static int l_host_at(lua_State * L)
{
- int index = luaL_checkinteger(L, 1);
+ lua_Integer index = luaL_checkinteger(L, 1);
std::vector<sg_host_t> hosts = simgrid::s4u::Engine::get_instance()->get_all_hosts();
sg_host_t host = hosts[index - 1]; // lua indexing start by 1 (lua[1] <=> C[0])
lua_newtable(L); /* create a table, put the userdata on top of it */
#include "src/surf/network_interface.hpp"
#include "src/surf/surf_private.hpp"
#include "src/surf/xml/platf_private.hpp"
+#include "xbt/parse_units.hpp"
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
int console_add_backbone(lua_State *L) {
simgrid::kernel::routing::LinkCreationArgs link;
+ lua_Debug ar;
+ lua_getstack(L, 1, &ar);
+ lua_getinfo(L, "Sl", &ar);
link.properties = nullptr;
type = lua_gettable(L, -2);
lua_ensure(type == LUA_TSTRING || type == LUA_TNUMBER,
"Attribute 'bandwidth' must be specified for backbone and must either be a string (in the right format; see docs) or a number.");
- link.bandwidths.push_back(surf_parse_get_bandwidth(lua_tostring(L, -1), "bandwidth of backbone", link.id.c_str()));
+ link.bandwidths.push_back(xbt_parse_get_bandwidth(ar.short_src, ar.currentline, lua_tostring(L, -1),
+ "bandwidth of backbone", link.id.c_str()));
lua_pop(L, 1);
lua_pushstring(L, "lat");
type = lua_gettable(L, -2);
lua_ensure(type == LUA_TSTRING || type == LUA_TNUMBER,
"Attribute 'lat' must be specified for backbone and must either be a string (in the right format; see docs) or a number.");
- link.latency = surf_parse_get_time(lua_tostring(L, -1), "latency of backbone", link.id.c_str());
+ link.latency =
+ xbt_parse_get_time(ar.short_src, ar.currentline, lua_tostring(L, -1), "latency of backbone", link.id.c_str());
lua_pop(L, 1);
lua_pushstring(L, "sharing_policy");
int console_add_host(lua_State *L) {
simgrid::kernel::routing::HostCreationArgs host;
int type;
+ lua_Debug ar;
+ lua_getstack(L, 1, &ar);
+ lua_getinfo(L, "Sl", &ar);
// we get values from the table passed as argument
lua_ensure(lua_istable(L, -1),
lua_ensure(type == LUA_TSTRING || type == LUA_TNUMBER,
"Attribute 'speed' must be specified for host and must either be a string (in the correct format; check documentation) or a number.");
if (type == LUA_TNUMBER)
- host.speed_per_pstate.push_back(lua_tointeger(L, -1));
+ host.speed_per_pstate.push_back(lua_tonumber(L, -1));
else // LUA_TSTRING
- host.speed_per_pstate.push_back(surf_parse_get_speed(lua_tostring(L, -1), "speed of host", host.id));
+ host.speed_per_pstate.push_back(
+ xbt_parse_get_speed(ar.short_src, ar.currentline, lua_tostring(L, -1), "speed of host", host.id));
lua_pop(L, 1);
// get core
if (not lua_isnumber(L, -1))
host.core_amount = 1; // Default value
else
- host.core_amount = lua_tonumber(L, -1);
+ host.core_amount = static_cast<int>(lua_tointeger(L, -1));
if (host.core_amount == 0)
host.core_amount = 1;
lua_pop(L, 1);
int console_add_link(lua_State *L) {
simgrid::kernel::routing::LinkCreationArgs link;
+ lua_Debug ar;
+ lua_getstack(L, 1, &ar);
+ lua_getinfo(L, "Sl", &ar);
const char* policy;
if (type == LUA_TNUMBER)
link.bandwidths.push_back(lua_tonumber(L, -1));
else // LUA_TSTRING
- link.bandwidths.push_back(surf_parse_get_bandwidth(lua_tostring(L, -1), "bandwidth of link", link.id.c_str()));
+ link.bandwidths.push_back(xbt_parse_get_bandwidth(ar.short_src, ar.currentline, lua_tostring(L, -1),
+ "bandwidth of link", link.id.c_str()));
lua_pop(L, 1);
//get latency value
if (type == LUA_TNUMBER)
link.latency = lua_tonumber(L, -1);
else // LUA_TSTRING
- link.latency = surf_parse_get_time(lua_tostring(L, -1), "latency of link", link.id.c_str());
+ link.latency =
+ xbt_parse_get_time(ar.short_src, ar.currentline, lua_tostring(L, -1), "latency of link", link.id.c_str());
lua_pop(L, 1);
/*Optional Arguments */
#include <lua.hpp>
/* ********************************************************************************* */
-/* Plaftorm functions */
+/* Platform functions */
/* ********************************************************************************* */
extern "C" {
py::class_<Engine>(m, "Engine", "Simulation Engine")
.def(py::init([](std::vector<std::string> args) {
static char noarg[] = {'\0'};
- int argc = args.size();
+ int argc = static_cast<int>(args.size());
std::unique_ptr<char* []> argv(new char*[argc + 1]);
for (int i = 0; i != argc; ++i)
argv[i] = args[i].empty() ? noarg : &args[i].front();
/* Class Host */
py::class_<simgrid::s4u::Host, std::unique_ptr<Host, py::nodelete>>(m, "Host", "Simulated host")
.def("by_name", &Host::by_name, "Retrieves a host from its name, or die")
- .def("get_pstate_count", &Host::get_pstate_count, "Retrieve the cound of defined pstate levels")
+ .def("get_pstate_count", &Host::get_pstate_count, "Retrieve the count of defined pstate levels")
.def("get_pstate_speed", &Host::get_pstate_speed, "Retrieve the maximal speed at the given pstate")
.def_property(
"pstate", &Host::get_pstate,
XBT_CDEBUG(xbt_parmap, "New worker thread created");
/* Worker's main loop */
- while (1) {
+ while (true) {
round++; // New scheduling round
parmap.synchro->worker_wait(round);
if (parmap.destroying)
{
std::string str = std::string("--cfg=") + option + " ";
- int len = str.size();
+ int len = static_cast<int>(str.size());
XBT_HELP("%s%*.*s %s", str.c_str(), 30 - len, 30 - len, "", desc);
if (longdesc != nullptr) {
XBT_HELP("%s\n", longdesc);
xbt::signal<void(StateEvent&)> StateEvent::on_destruction;
xbt::signal<void(EntityValue&)> EntityValue::on_creation;
-static void on_container_creation_paje(Container& c)
+static void on_container_creation_paje(const Container& c)
{
double timestamp = SIMIX_get_clock();
std::stringstream stream;
tracing_file << stream.str() << std::endl;
}
-static void on_container_destruction_paje(Container& c)
+static void on_container_destruction_paje(const Container& c)
{
// trace my destruction, but not if user requests so or if the container is root
if (not trace_disable_destroy && &c != Container::get_root()) {
}
}
-static void on_entity_value_creation(EntityValue& value)
+static void on_entity_value_creation(const EntityValue& value)
{
std::stringstream stream;
XBT_DEBUG("%s: event_type=%u", __func__, PAJE_DefineEntityValue);
event.stream_ << event.get_type()->get_id() << " " << event.get_container()->get_id();
}
-static void on_event_destruction(PajeEvent& event)
+static void on_event_destruction(const PajeEvent& event)
{
XBT_DEBUG("Dump %s", event.stream_.str().c_str());
tracing_file << event.stream_.str() << std::endl;
}
-static void on_state_event_destruction(StateEvent& event)
+static void on_state_event_destruction(const StateEvent& event)
{
if (event.has_extra())
*tracing_files.at(event.get_container()) << event.stream_.str() << std::endl;
}
-static void on_type_creation(Type& type, e_event_type event_type)
+static void on_type_creation(const Type& type, e_event_type event_type)
{
if (event_type == PAJE_DefineLinkType)
return; // this kind of type has to be handled differently
tracing_file << stream.str() << std::endl;
}
-static void on_link_type_creation(Type& type, Type& source, Type& dest)
+static void on_link_type_creation(const Type& type, const Type& source, const Type& dest)
{
std::stringstream stream;
XBT_DEBUG("%s: event_type=%u, timestamp=%.*f", __func__, PAJE_DefineLinkType, trace_precision, 0.);
static xbt::signal<void(StateEvent&)> on_destruction;
StateEvent(Container* container, Type* type, e_event_type event_type, EntityValue* value, TIData* extra);
~StateEvent() { on_destruction(*this); }
- bool has_extra() { return extra_ != nullptr; }
+ bool has_extra() const { return extra_ != nullptr; }
void print() override;
};
Type* father);
virtual ~Type() = default;
- long long int get_id() { return id_; }
+ long long int get_id() const { return id_; }
const std::string& get_name() const { return name_; }
- const char* get_cname() { return name_.c_str(); }
+ const char* get_cname() const { return name_.c_str(); }
const std::string& get_color() const { return color_; }
Type* get_father() const { return father_; }
- const std::map<std::string, std::unique_ptr<Type>>& get_children() { return children_; }
- bool is_colored() { return not color_.empty(); }
+ const std::map<std::string, std::unique_ptr<Type>>& get_children() const { return children_; }
+ bool is_colored() const { return not color_.empty(); }
Type* by_name(const std::string& name);
LinkType* by_name_or_create(const std::string& name, Type* source, Type* dest);
// find the lowest ancestor
p = nullptr;
- int i = ancestors_a1.size() - 1;
- int j = ancestors_a2.size() - 1;
+ int i = static_cast<int>(ancestors_a1.size()) - 1;
+ int j = static_cast<int>(ancestors_a2.size()) - 1;
while (i >= 0 && j >= 0) {
container_t a1p = ancestors_a1.at(i);
const simgrid::instr::Container* a2p = ancestors_a2.at(j);
}
if (TRACE_needs_platform()) {
- NetZoneContainer* container = new NetZoneContainer(id, currentContainer.size(), currentContainer.back());
+ unsigned level = static_cast<unsigned>(currentContainer.size());
+ NetZoneContainer* container = new NetZoneContainer(id, level, currentContainer.back());
currentContainer.push_back(container);
}
}
static void on_action_state_change(kernel::resource::Action const& action,
kernel::resource::Action::State /* previous */)
{
- int n = action.get_variable()->get_number_of_constraint();
+ unsigned n = static_cast<unsigned>(action.get_variable()->get_number_of_constraint());
- for (int i = 0; i < n; i++) {
+ for (unsigned i = 0; i < n; i++) {
double value = action.get_variable()->get_value() * action.get_variable()->get_constraint_weight(i);
/* Beware of composite actions: ptasks put links and cpus together. Extra pb: we cannot dynamic_cast from void* */
kernel::resource::Resource* resource = action.get_variable()->get_constraint(i)->get_id();
{
// only trace resource utilization if resource is known by tracing mechanism
container_t container = Container::by_name_or_null(resource);
- if (not container || not value)
+ if (container == nullptr || value == 0.0)
return;
// trace uncategorized resource utilization
if (TRACE_uncategorized()){
- XBT_DEBUG("UNCAT %s [%f - %f] %s %s %f", type, now, now + delta, resource, name, value);
+ XBT_VERB("UNCAT %s [%f - %f] %s %s %f", type, now, now + delta, resource, name, value);
container->get_variable(name)->instr_event(now, delta, resource, value);
}
std::string previous_filename;
int previous_linenumber;
- std::string get_composed_key()
+ std::string get_composed_key() const
{
return previous_filename + ':' + std::to_string(previous_linenumber) + ':' + filename + ':' +
std::to_string(linenumber);
{
auto it = std::find_if(begin(children_), end(children_),
[&child](const std::unique_ptr<Container>& c) { return c.get() == child; });
- return it == end(children_) ? -1 : std::distance(begin(children_), it);
+ return it == end(children_) ? -1 : static_cast<int>(std::distance(begin(children_), it));
}
std::vector<int> Container::get_hierarchy()
std::vector<int> heir_list = this->get_hierarchy();
- unsigned int length = heir_list.size();
+ unsigned int length = static_cast<unsigned int>(heir_list.size());
unsigned int i = 0;
for (auto const& id : heir_list) {
output += std::to_string(id);
unsigned int i=0;
xbt_assert(not this->resource_list.empty());
- unsigned int res_nb = this->resource_list.size();
+ unsigned int res_nb = static_cast<unsigned int>(this->resource_list.size());
std::string resid = this->get_hierarchy_as_string();
fprintf(jed_file, " <rset id=\"%s\" nb=\"%u\" names=\"", resid.c_str(), res_nb);
for (auto const& host_name : hostgroup) {
xbt_assert( host_name != nullptr );
- jed_container_t parent_cont = host2_simgrid_parent_container.at(host_name);
+ const simgrid::jedule::Container* parent_cont = host2_simgrid_parent_container.at(host_name);
unsigned int id = parent_cont->get_id_by_name(host_name);
id_list.push_back(id);
}
- unsigned int nb_ids = id_list.size();
+ unsigned int nb_ids = static_cast<unsigned int>(id_list.size());
std::sort(id_list.begin(), id_list.end());
if( nb_ids > 0 ) {
kv.second->destroy();
}
-void EngineImpl::load_deployment(const std::string& file)
+void EngineImpl::load_deployment(const std::string& file) const
{
sg_platf_exit();
sg_platf_init();
EngineImpl& operator=(const EngineImpl&) = delete;
virtual ~EngineImpl();
- void load_deployment(const std::string& file);
+ void load_deployment(const std::string& file) const;
void register_function(const std::string& name, const actor::ActorCodeFactory& code);
void register_default(const actor::ActorCodeFactory& code);
} // namespace kernel
} // namespace simgrid
-#endif
\ No newline at end of file
+#endif
}
public:
- const std::string& get_name() { return name_; }
- const char* get_cname() { return name_.c_str(); }
+ const std::string& get_name() const { return name_; }
+ const char* get_cname() const { return name_.c_str(); }
bool test();
void wait_for(actor::ActorImpl* issuer, double timeout);
tracing_category_ = category;
return static_cast<AnyActivityImpl&>(*this);
}
- const std::string& get_tracing_category() { return tracing_category_; }
+ const std::string& get_tracing_category() const { return tracing_category_; }
};
} // namespace activity
void* data, double timeout)
{
simgrid::kernel::activity::ActivityImplPtr comm = simcall_HANDLER_comm_isend(
- simcall, src, mbox, task_size, rate, src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, data, 0);
+ simcall, src, mbox, task_size, rate, src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, data, false);
SIMCALL_SET_MC_VALUE(*simcall, 0);
simcall_HANDLER_comm_wait(simcall, static_cast<simgrid::kernel::activity::CommImpl*>(comm.get()), timeout);
}
void lock(actor::ActorImpl* issuer);
bool try_lock(actor::ActorImpl* issuer);
void unlock(actor::ActorImpl* issuer);
- bool is_locked() { return locked_; }
+ bool is_locked() const { return locked_; }
MutexImpl* ref();
void unref();
void acquire(actor::ActorImpl* issuer, double timeout);
void release();
- bool would_block() { return (value_ == 0); }
+ bool would_block() const { return (value_ == 0); }
void remove_sleeping_actor(actor::ActorImpl& actor) { xbt::intrusive_erase(sleeping_, actor); }
- unsigned int get_capacity() { return value_; }
- bool is_used() { return not sleeping_.empty(); }
+ unsigned int get_capacity() const { return value_; }
+ bool is_used() const { return not sleeping_.empty(); }
friend void intrusive_ptr_add_ref(SemaphoreImpl* sem)
{
this->throw_exception(std::make_exception_ptr(ForcefulKillException(host_->is_on() ? "exited" : "host failed")));
}
-void ActorImpl::kill(ActorImpl* actor)
+void ActorImpl::kill(ActorImpl* actor) const
{
xbt_assert(actor != simix_global->maestro_, "Killing maestro is a rather bad idea");
if (actor->finished_) {
}
}
-void ActorImpl::kill_all()
+void ActorImpl::kill_all() const
{
for (auto const& kv : simix_global->process_list)
if (kv.second != this)
});
}
-double ActorImpl::get_kill_time()
+double ActorImpl::get_kill_time() const
{
return kill_timer_ ? kill_timer_->get_date() : 0;
}
dest->pimpl_->add_actor(this);
}
-ActorImplPtr ActorImpl::init(const std::string& name, s4u::Host* host)
+ActorImplPtr ActorImpl::init(const std::string& name, s4u::Host* host) const
{
ActorImpl* actor = new ActorImpl(xbt::string(name), host);
actor->set_ppid(this->pid_);
}
ActorImplPtr ActorImpl::create(const std::string& name, const ActorCode& code, void* data, s4u::Host* host,
- const std::unordered_map<std::string, std::string>* properties, ActorImpl* parent_actor)
+ const std::unordered_map<std::string, std::string>* properties,
+ const ActorImpl* parent_actor)
{
XBT_DEBUG("Start actor %s@'%s'", name.c_str(), host->get_cname());
~ActorImpl();
static ActorImpl* self();
- double get_kill_time();
+ double get_kill_time() const;
void set_kill_time(double kill_time);
boost::intrusive::list_member_hook<> host_actor_list_hook; /* simgrid::simix::Host::process_list */
boost::intrusive::list_member_hook<> smx_destroy_list_hook; /* simix_global->actors_to_destroy */
aid_t get_pid() const { return pid_; }
aid_t get_ppid() const { return ppid_; }
void set_ppid(aid_t ppid) { ppid_ = ppid; }
- bool is_daemon() { return daemon_; } /** Whether this actor has been daemonized */
- bool has_to_auto_restart() { return auto_restart_; }
+ bool is_daemon() const { return daemon_; } /** Whether this actor has been daemonized */
+ bool has_to_auto_restart() const { return auto_restart_; }
void set_auto_restart(bool autorestart) { auto_restart_ = autorestart; }
void set_stacksize(unsigned stacksize) { stacksize_ = stacksize; }
- unsigned get_stacksize() { return stacksize_; }
+ unsigned get_stacksize() const { return stacksize_; }
std::unique_ptr<context::Context> context_; /* the context (uctx/raw/thread) that executes the user function */
std::atomic_int_fast32_t refcount_{0};
public:
- int get_refcount() { return refcount_; }
+ int get_refcount() const { return refcount_; }
friend void intrusive_ptr_add_ref(ActorImpl* actor)
{
// This whole memory consistency semantic drives me nuts.
s4u::ActorPtr iface() { return s4u::ActorPtr(&piface_); }
s4u::Actor* ciface() { return &piface_; }
- ActorImplPtr init(const std::string& name, s4u::Host* host);
+ ActorImplPtr init(const std::string& name, s4u::Host* host) const;
ActorImpl* start(const ActorCode& code);
static ActorImplPtr create(const std::string& name, const ActorCode& code, void* data, s4u::Host* host,
- const std::unordered_map<std::string, std::string>* properties, ActorImpl* parent_actor);
+ const std::unordered_map<std::string, std::string>* properties,
+ const ActorImpl* parent_actor);
static ActorImplPtr attach(const std::string& name, void* data, s4u::Host* host,
const std::unordered_map<std::string, std::string>* properties);
static void detach();
void cleanup();
void exit();
- void kill(ActorImpl* actor);
- void kill_all();
+ void kill(ActorImpl* actor) const;
+ void kill_all() const;
void yield();
void daemonize();
- bool is_suspended() { return suspended_; }
+ bool is_suspended() const { return suspended_; }
s4u::Actor* restart();
void suspend();
void resume();
bool wannadie() const { return iwannadie_; }
void set_wannadie(bool value = true) { iwannadie_ = value; }
- void operator()() { code_(); }
+ void operator()() const { code_(); }
bool has_code() const { return static_cast<bool>(code_); }
actor::ActorImpl* get_actor() const { return this->actor_; }
namespace simgrid {
namespace kernel {
-void FutureStateBase::schedule(simgrid::xbt::Task<void()>&& job)
+void FutureStateBase::schedule(simgrid::xbt::Task<void()>&& job) const
{
simix_global->tasks.push_back(std::move(job));
}
elem.constraint = cnst;
elem.variable = var;
- if (var->sharing_penalty_) {
+ if (var->sharing_penalty_ != 0.0) {
elem.constraint->enabled_element_set_.push_front(elem);
elem.increase_concurrency();
} else
std::find_if(begin(var->cnsts_), end(var->cnsts_), [&cnst](Element const& x) { return x.constraint == cnst; });
if (elem_it != end(var->cnsts_)) {
Element& elem = *elem_it;
- if (var->sharing_penalty_)
+ if (var->sharing_penalty_ != 0.0)
elem.decrease_concurrency();
if (cnst->sharing_policy_ != s4u::Link::SharingPolicy::FATPIPE)
elem.consumption_weight = std::max(elem.consumption_weight, value);
// We need to check that increasing value of the element does not cross the concurrency limit
- if (var->sharing_penalty_) {
+ if (var->sharing_penalty_ != 0.0) {
if (cnst->get_concurrency_slack() < elem.get_concurrency()) {
double penalty = var->sharing_penalty_;
disable_var(var);
cnst.usage_ = elem.consumption_weight / elem.variable->sharing_penalty_;
elem.make_active();
- resource::Action* action = static_cast<resource::Action*>(elem.variable->id_);
+ resource::Action* action = elem.variable->id_;
if (modified_set_ && not action->is_within_modified_set())
modified_set_->push_back(*action);
}
int Constraint::get_variable_amount() const
{
- return std::count_if(std::begin(enabled_element_set_), std::end(enabled_element_set_),
- [](const Element& elem) { return elem.consumption_weight > 0; });
+ return static_cast<int>(std::count_if(std::begin(enabled_element_set_), std::end(enabled_element_set_),
+ [](const Element& elem) { return elem.consumption_weight > 0; }));
}
} // namespace lmm
Constraint* get_constraint(unsigned num) const { return num < cnsts_.size() ? cnsts_[num].constraint : nullptr; }
/**
- * @brief Get the weigth of the numth constraint associated to the variable
+ * @brief Get the weight of the numth constraint associated to the variable
* @param num The rank of constraint we want to get
* @return The numth constraint
*/
s4u::Host* get_host() const { return host_; }
void set_host(s4u::Host* host) { host_ = host; }
- double get_read_bandwidth() { return read_bw_; }
- double get_write_bandwidth() { return write_bw_; }
+ double get_read_bandwidth() const { return read_bw_; }
+ double get_write_bandwidth() const { return write_bw_; }
lmm::Constraint* get_read_constraint() const { return constraint_read_; }
lmm::Constraint* get_write_constraint() const { return constraint_write_; }
for (auto const& link : e_route->link_list) {
route->link_list.insert(route->link_list.begin(), link);
if (lat)
- *lat += static_cast<resource::LinkImpl*>(link)->get_latency();
+ *lat += link->get_latency();
}
}
for (auto const& link : e_route->link_list) {
route->link_list.insert(route->link_list.begin(), link);
if (lat)
- *lat += static_cast<resource::LinkImpl*>(link)->get_latency();
+ *lat += link->get_latency();
}
}
{
add_route_check_params(src, dst, gw_src, gw_dst, link_list, symmetrical);
- new_edge(src->id(), dst->id(), new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, 1));
+ new_edge(src->id(), dst->id(),
+ new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, true));
if (symmetrical == true)
- new_edge(dst->id(), src->id(), new_extended_route(hierarchy_, dst, src, gw_dst, gw_src, link_list, symmetrical, 0));
+ new_edge(dst->id(), src->id(),
+ new_extended_route(hierarchy_, dst, src, gw_dst, gw_src, link_list, symmetrical, false));
}
void DijkstraZone::new_edge(int src_id, int dst_id, RouteCreationArgs* route)
{
}
-void DragonflyZone::rankId_to_coords(int rankId, unsigned int coords[4])
+void DragonflyZone::rankId_to_coords(int rankId, unsigned int coords[4]) const
{
// coords : group, chassis, blade, node
coords[0] = rankId / (num_chassis_per_group_ * num_blades_per_chassis_ * num_nodes_per_blade_);
}
void DragonflyZone::create_link(const std::string& id, int numlinks, resource::LinkImpl** linkup,
- resource::LinkImpl** linkdown)
+ resource::LinkImpl** linkdown) const
{
*linkup = nullptr;
*linkdown = nullptr;
}
}
-bool FatTreeZone::is_in_sub_tree(FatTreeNode* root, FatTreeNode* node)
+bool FatTreeZone::is_in_sub_tree(FatTreeNode* root, FatTreeNode* node) const
{
XBT_DEBUG("Is %d(%u,%u) in the sub tree of %d(%u,%u) ?", node->id, node->level, node->position, root->id, root->level,
root->position);
return connectionsNumber;
}
-bool FatTreeZone::are_related(FatTreeNode* parent, FatTreeNode* child)
+bool FatTreeZone::are_related(FatTreeNode* parent, FatTreeNode* child) const
{
std::stringstream msgBuffer;
: id(id), level(level), position(position)
{
LinkCreationArgs linkTemplate;
- if (cluster->limiter_link) {
+ if (cluster->limiter_link != 0.0) {
linkTemplate.bandwidths.push_back(cluster->limiter_link);
linkTemplate.latency = 0;
linkTemplate.policy = s4u::Link::SharingPolicy::SHARED;
sg_platf_new_link(&linkTemplate);
this->limiter_link_ = s4u::Link::by_name(linkTemplate.id)->get_impl();
}
- if (cluster->loopback_bw || cluster->loopback_lat) {
+ if (cluster->loopback_bw != 0.0 || cluster->loopback_lat != 0.0) {
linkTemplate.bandwidths.push_back(cluster->loopback_bw);
linkTemplate.latency = cluster->loopback_lat;
linkTemplate.policy = s4u::Link::SharingPolicy::FATPIPE;
dst->get_cname());
TO_FLOYD_LINK(src->id(), dst->id()) =
- new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, 1);
+ new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, true);
TO_FLOYD_PRED(src->id(), dst->id()) = src->id();
TO_FLOYD_COST(src->id(), dst->id()) = (TO_FLOYD_LINK(src->id(), dst->id()))->link_list.size();
src->get_cname(), gw_dst->get_cname());
TO_FLOYD_LINK(dst->id(), src->id()) =
- new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, 0);
+ new_extended_route(hierarchy_, src, dst, gw_src, gw_dst, link_list, symmetrical, false);
TO_FLOYD_PRED(dst->id(), src->id()) = dst->id();
TO_FLOYD_COST(dst->id(), src->id()) =
(TO_FLOYD_LINK(dst->id(), src->id()))->link_list.size(); /* count of links, old model assume 1 */
NetZoneImpl* current = src->get_englobing_zone();
while (current != nullptr) {
path_src.push_back(current);
- current = static_cast<NetZoneImpl*>(current->get_father());
+ current = current->get_father();
}
std::vector<NetZoneImpl*> path_dst;
current = dst->get_englobing_zone();
while (current != nullptr) {
path_dst.push_back(current);
- current = static_cast<NetZoneImpl*>(current->get_father());
+ current = current->get_father();
}
/* (3) find the common father.
std::vector<NetZoneImpl*> path_src;
NetZoneImpl* current = src->get_englobing_zone();
while (current != nullptr) {
- path_src.push_back(static_cast<NetZoneImpl*>(current));
+ path_src.push_back(current);
current = current->father_;
}
std::vector<NetZoneImpl*> path_dst;
current = dst->get_englobing_zone();
while (current != nullptr) {
- path_dst.push_back(static_cast<NetZoneImpl*>(current));
+ path_dst.push_back(current);
current = current->father_;
}
return result;
}
-void RoutedZone::get_route_check_params(NetPoint* src, NetPoint* dst)
+void RoutedZone::get_route_check_params(NetPoint* src, NetPoint* dst) const
{
xbt_assert(src, "Cannot find a route from nullptr to %s", dst->get_cname());
xbt_assert(dst, "Cannot find a route from %s to nullptr", src->get_cname());
src->get_cname(), dst->get_cname(), src_as->get_cname(), dst_as->get_cname(), get_cname());
}
void RoutedZone::add_route_check_params(NetPoint* src, NetPoint* dst, NetPoint* gw_src, NetPoint* gw_dst,
- const std::vector<resource::LinkImpl*>& link_list, bool symmetrical)
+ const std::vector<resource::LinkImpl*>& link_list, bool symmetrical) const
{
const char* srcName = src->get_cname();
const char* dstName = dst->get_cname();
int current_dimension = dimensions_[j]; // which dimension are we currently in?
// we need to iterate over all dimensions and create all links there
// The other node the link connects
- int neighbor_rank_id = ((static_cast<int>(rank) / dim_product) % current_dimension == current_dimension - 1)
+ int neighbor_rank_id = ((rank / dim_product) % current_dimension == current_dimension - 1)
? rank - (current_dimension - 1) * dim_product
: rank + dim_product;
// name of neighbor is not right for non contiguous cluster radicals (as id != rank in this case)
/** The process of this address space
*
- * This is where we can get debug informations, memory layout, etc.
+ * This is where we can get debug information, memory layout, etc.
*/
simgrid::mc::RemoteSimulation* get_remote_simulation() const { return remote_simulation_; }
void ModelChecker::setup_ignore()
{
- RemoteSimulation& process = this->get_remote_simulation();
+ const RemoteSimulation& process = this->get_remote_simulation();
for (std::pair<const char*, const char*> const& var :
ignored_local_variables)
process.ignore_local_variable(var.first, var.second);
void ModelChecker::shutdown()
{
- XBT_DEBUG("Shuting down model-checker");
+ XBT_DEBUG("Shutting down model-checker");
RemoteSimulation* process = &this->get_remote_simulation();
if (process->running()) {
case MC_MESSAGE_IGNORE_HEAP:
{
s_mc_message_ignore_heap_t message;
- xbt_assert(size == sizeof(message), "Broken messsage");
+ xbt_assert(size == sizeof(message), "Broken message");
memcpy(&message, buffer, sizeof(message));
IgnoredHeapRegion region;
case MC_MESSAGE_UNIGNORE_HEAP:
{
s_mc_message_ignore_memory_t message;
- xbt_assert(size == sizeof(message), "Broken messsage");
+ xbt_assert(size == sizeof(message), "Broken message");
memcpy(&message, buffer, sizeof(message));
get_remote_simulation().unignore_heap((void*)(std::uintptr_t)message.addr, message.size);
break;
case MC_MESSAGE_IGNORE_MEMORY:
{
s_mc_message_ignore_memory_t message;
- xbt_assert(size == sizeof(message), "Broken messsage");
+ xbt_assert(size == sizeof(message), "Broken message");
memcpy(&message, buffer, sizeof(message));
this->get_remote_simulation().ignore_region(message.addr, message.size);
break;
case MC_MESSAGE_STACK_REGION:
{
s_mc_message_stack_region_t message;
- xbt_assert(size == sizeof(message), "Broken messsage");
+ xbt_assert(size == sizeof(message), "Broken message");
memcpy(&message, buffer, sizeof(message));
this->get_remote_simulation().stack_areas().push_back(message.stack_region);
}
initial_snapshot_ = std::make_shared<simgrid::mc::Snapshot>(0);
}
-void Session::execute(Transition const& transition)
+void Session::execute(Transition const& transition) const
{
model_checker_->handle_simcall(transition);
model_checker_->wait_for_requests();
}
-void Session::restore_initial_state()
+void Session::restore_initial_state() const
{
this->initial_snapshot_->restore(&model_checker_->get_remote_simulation());
}
-void Session::log_state()
+void Session::log_state() const
{
model_checker_->getChecker()->log_state();
}
}
-bool Session::actor_is_enabled(aid_t pid)
+bool Session::actor_is_enabled(aid_t pid) const
{
s_mc_message_actor_enabled_t msg{MC_MESSAGE_ACTOR_ENABLED, pid};
model_checker_->channel().send(msg);
void close();
void initialize();
- void execute(Transition const& transition);
- void log_state();
+ void execute(Transition const& transition) const;
+ void log_state() const;
- void restore_initial_state();
- bool actor_is_enabled(aid_t pid);
+ void restore_initial_state() const;
+ bool actor_is_enabled(aid_t pid) const;
};
// Temporary :)
if (diff != NONE_DIFF) {
if (comm->type == PatternCommunicationType::send) {
- this->send_deterministic = 0;
+ this->send_deterministic = false;
if (this->send_diff != nullptr)
xbt_free(this->send_diff);
this->send_diff = print_determinism_result(diff, process, comm, list.index_comm + 1);
} else {
- this->recv_deterministic = 0;
+ this->recv_deterministic = false;
if (this->recv_diff != nullptr)
xbt_free(this->recv_diff);
this->recv_diff = print_determinism_result(diff, process, comm, list.index_comm + 1);
pattern->comm_addr = static_cast<kernel::activity::CommImpl*>(simcall_comm_isend__getraw__result(request));
Remote<kernel::activity::CommImpl> temp_synchro;
- mc_model_checker->get_remote_simulation().read(
- temp_synchro, remote(static_cast<kernel::activity::CommImpl*>(pattern->comm_addr)));
- const kernel::activity::CommImpl* synchro = static_cast<kernel::activity::CommImpl*>(temp_synchro.get_buffer());
+ mc_model_checker->get_remote_simulation().read(temp_synchro, remote(pattern->comm_addr));
+ const kernel::activity::CommImpl* synchro = temp_synchro.get_buffer();
char* remote_name = mc_model_checker->get_remote_simulation().read<char*>(RemotePtr<char*>(
(uint64_t)(synchro->get_mailbox() ? &synchro->get_mailbox()->name_ : &synchro->mbox_cpy->name_)));
#endif
Remote<kernel::activity::CommImpl> temp_comm;
- mc_model_checker->get_remote_simulation().read(
- temp_comm, remote(static_cast<kernel::activity::CommImpl*>(pattern->comm_addr)));
+ mc_model_checker->get_remote_simulation().read(temp_comm, remote(pattern->comm_addr));
const kernel::activity::CommImpl* comm = temp_comm.get_buffer();
char* remote_name;
case xbt_automaton_exp_label::AUT_ONE:
return true;
default:
- xbt_die("Unexpected vaue for automaton");
+ xbt_die("Unexpected value for automaton");
}
}
Pair::Pair(unsigned long expanded_pairs) : num(expanded_pairs)
{}
-std::shared_ptr<const std::vector<int>> LivenessChecker::get_proposition_values()
+std::shared_ptr<const std::vector<int>> LivenessChecker::get_proposition_values() const
{
std::vector<int> values;
unsigned int cursor = 0;
// For each enabled transition in the property automaton, push a
// (application_state, automaton_state) pair to the exploration stack:
for (int i = xbt_dynar_length(current_pair->automaton_state->out) - 1; i >= 0; i--) {
- const xbt_automaton_transition* transition_succ = (xbt_automaton_transition_t)xbt_dynar_get_as(
- current_pair->automaton_state->out, i, xbt_automaton_transition_t);
+ const xbt_automaton_transition* transition_succ =
+ xbt_dynar_get_as(current_pair->automaton_state->out, i, xbt_automaton_transition_t);
if (evaluate_label(transition_succ->label, *prop_values))
exploration_stack_.push_back(this->create_pair(current_pair.get(), transition_succ->dst, prop_values));
}
void log_state() override;
private:
- std::shared_ptr<const std::vector<int>> get_proposition_values();
+ std::shared_ptr<const std::vector<int>> get_proposition_values() const;
std::shared_ptr<VisitedPair> insert_acceptance_pair(Pair* pair);
int insert_visited_pair(std::shared_ptr<VisitedPair> visited_pair, Pair* pair);
void show_acceptance_cycle(std::size_t depth);
continue;
}
- xbt_assert(heapinfo1->type >= 0, "Unkown mmalloc block type: %d", heapinfo1->type);
+ xbt_assert(heapinfo1->type >= 0, "Unknown mmalloc block type: %d", heapinfo1->type);
void* addr_block1 = ((void*)(((ADDR2UINT(i1)) - 1) * BLOCKSIZE + (char*)state.std_heap_copy.heapbase));
continue;
}
- xbt_assert(heapinfo2b->type >= 0, "Unkown mmalloc block type: %d", heapinfo2b->type);
+ xbt_assert(heapinfo2b->type >= 0, "Unknown mmalloc block type: %d", heapinfo2b->type);
for (size_t j2 = 0; j2 < (size_t)(BLOCKSIZE >> heapinfo2b->type); j2++) {
if (i2 == i1 && j2 == j1)
} else if ((heapinfo1->type > 0) && (heapinfo2->type > 0)) { /* Fragmented block */
// Fragment number:
- ssize_t frag1 = ((uintptr_t)(ADDR2UINT(area1) % (BLOCKSIZE))) >> heapinfo1->type;
- ssize_t frag2 = ((uintptr_t)(ADDR2UINT(area2) % (BLOCKSIZE))) >> heapinfo2->type;
+ ssize_t frag1 = (ADDR2UINT(area1) % BLOCKSIZE) >> heapinfo1->type;
+ ssize_t frag2 = (ADDR2UINT(area2) % BLOCKSIZE) >> heapinfo2->type;
// Process address of the fragment_:
void* real_addr_frag1 = (void*)((char*)real_addr_block1 + (frag1 << heapinfo1->type));
// If the variable is not in this object, skip it:
// We do not expect to find a pointer to something which is not reachable
// by the global variables.
- if ((char *) current_var.address < (char *) object_info->start_rw
- || (char *) current_var.address > (char *) object_info->end_rw)
+ if ((char*)current_var.address < object_info->start_rw || (char*)current_var.address > object_info->end_rw)
continue;
const simgrid::mc::Type* bvariable_type = current_var.type;
/** Context of evaluation of a DWARF expression
*
* Some DWARF instructions need to read the CPU registers,
- * the process memory, etc. All those informations are gathered in
+ * the process memory, etc. All this information is gathered in
* the evaluation context.
*/
struct ExpressionContext {
{
LocationList locations;
std::ptrdiff_t offset = 0;
- while (1) {
+ while (true) {
Dwarf_Addr base;
Dwarf_Addr start;
Dwarf_Addr end;
public:
ObjectInformation() = default;
- // Not copyable:
+ // Not copiable:
ObjectInformation(ObjectInformation const&) = delete;
ObjectInformation& operator=(ObjectInformation const&) = delete;
XBT_PRIVATE std::shared_ptr<ObjectInformation> createObjectInformation(std::vector<simgrid::xbt::VmMap> const& maps,
const char* name);
-/** Augment the current module with informations about the other ones */
+/** Augment the current module with information about the other ones */
XBT_PRIVATE void postProcessObjectInformation(const simgrid::mc::RemoteSimulation* process,
simgrid::mc::ObjectInformation* info);
} // namespace mc
/** @brief Process a DIE
*
- * @param info the resulting object fot the library/binary file (output)
+ * @param info the resulting object for the library/binary file (output)
* @param die the current DIE
* @param unit the DIE of the compile unit of the current DIE
* @param frame containing frame if any
/** @brief Calls MC_dwarf_handle_die on all children of the given die
*
- * @param info the resulting object fot the library/binary file (output)
+ * @param info the resulting object for the library/binary file (output)
* @param die the current DIE
* @param unit the DIE of the compile unit of the current DIE
* @param frame containing frame if any
/** @brief Handle a variable (DW_TAG_variable or other)
*
- * @param info the resulting object fot the library/binary file (output)
+ * @param info the resulting object for the library/binary file (output)
* @param die the current DIE
* @param unit the DIE of the compile unit of the current DIE
* @param frame containing frame if any
// TODO, support DW_AT_ranges
uint64_t low_pc = MC_dwarf_attr_integrate_addr(die, DW_AT_low_pc);
- frame.range.begin() = low_pc ? (std::uint64_t)base + low_pc : 0;
+ frame.range.begin() = low_pc ? base + low_pc : 0;
if (low_pc) {
// DW_AT_high_pc:
Dwarf_Attribute attr;
memcmp((char*)data->d_buf + name_pos, "GNU", sizeof("GNU")) == 0) {
XBT_DEBUG("Found GNU/NT_GNU_BUILD_ID note");
char* start = (char*)data->d_buf + desc_pos;
- char* end = (char*)start + nhdr.n_descsz;
+ char* end = start + nhdr.n_descsz;
return std::vector<char>(start, end);
}
}
return std::string();
}
-/** @brief Populate the debugging informations of the given ELF object
+/** @brief Populate the debugging information of the given ELF object
*
* Read the DWARf information of the EFFL object and populate the
* lists of types, variables, functions.
dwarf_end(dwarf);
// If there was no DWARF in the file, try to find it in a separate file.
- // Different methods might be used to store the DWARF informations:
+ // Different methods might be used to store the DWARF information:
// * GNU NT_GNU_BUILD_ID
// * .gnu_debuglink
// See https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
namespace simgrid {
namespace mc {
-/** @brief Finds informations about a given shared object/executable */
+/** @brief Finds information about a given shared object/executable */
std::shared_ptr<ObjectInformation> createObjectInformation(std::vector<xbt::VmMap> const& maps, const char* name)
{
std::shared_ptr<ObjectInformation> result = std::make_shared<ObjectInformation>();
return 0;
}
-/** Find informations about a function (libunwind method)
+/** Find information about a function (libunwind method)
*/
int UnwindContext::get_proc_name(unw_addr_space_t /*as*/, unw_word_t addr, char* bufp, size_t buf_len, unw_word_t* offp,
void* arg) noexcept
* Libunwind implementation for the model-checker
*
* Libunwind provides an pluggable stack unwinding API: the way the current
- * registers and memory is accessed, the way unwinding informations is found
+ * registers and memory are accessed, the way unwinding information is found
* is pluggable.
*
* This component implements the libunwind API for he model-checker:
case SIMCALL_COMM_WAIT: {
/* FIXME: check also that src and dst processes are not suspended */
- const kernel::activity::CommImpl* act =
- static_cast<kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
+ const kernel::activity::CommImpl* act = simcall_comm_wait__getraw__comm(req);
if (act->src_timeout_ || act->dst_timeout_) {
/* If it has a timeout it will be always be enabled (regardless of who declared the timeout),
{
simgrid::mc::RemotePtr<simgrid::kernel::activity::CommImpl> comm_addr{nullptr};
if (call_type == MC_CALL_TYPE_WAIT)
- comm_addr = remote(static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req)));
+ comm_addr = remote(simcall_comm_wait__getraw__comm(req));
else {
simgrid::kernel::activity::ActivityImpl* addr;
{
state_ = (state_ << 5) + state_ + x;
}
- hash_type value()
- {
- return state_;
- }
+ hash_type value() const { return state_; }
};
}
namespace mc {
struct DerefAndCompareByActorsCountAndUsedHeap {
- template <class X, class Y> bool operator()(X const& a, Y const& b)
+ template <class X, class Y> bool operator()(X const& a, Y const& b) const
{
return std::make_pair(a->actors_count, a->heap_bytes_used) < std::make_pair(b->actors_count, b->heap_bytes_used);
}
/** \file mc_record.hpp
*
- * This file contains the MC replay/record functionnality.
+ * This file contains the MC replay/record functionality.
* The recorded path is written in the log output and can be replayed with MC disabled
* (even with an non-MC build) using `--cfg=model-check/replay:$replayPath`.
*
{
switch (r->call_) {
case SIMCALL_COMM_WAIT:
- return static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(r));
+ return simcall_comm_wait__getraw__comm(r);
case SIMCALL_COMM_TEST:
- return static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_test__getraw__comm(r));
+ return simcall_comm_test__getraw__comm(r);
default:
return nullptr;
}
}
case SIMCALL_COMM_WAIT: {
- simgrid::kernel::activity::CommImpl* remote_act =
- static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
+ simgrid::kernel::activity::CommImpl* remote_act = simcall_comm_wait__getraw__comm(req);
char* p;
if (value == -1) {
type = "WaitTimeout";
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_synchro;
const simgrid::kernel::activity::CommImpl* act;
if (use_remote_comm) {
- mc_model_checker->get_remote_simulation().read(
- temp_synchro, remote(static_cast<simgrid::kernel::activity::CommImpl*>(remote_act)));
+ mc_model_checker->get_remote_simulation().read(temp_synchro, remote(remote_act));
act = temp_synchro.get_buffer();
} else
act = remote_act;
}
case SIMCALL_COMM_TEST: {
- simgrid::kernel::activity::CommImpl* remote_act =
- static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_test__getraw__comm(req));
+ simgrid::kernel::activity::CommImpl* remote_act = simcall_comm_test__getraw__comm(req);
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_synchro;
const simgrid::kernel::activity::CommImpl* act;
if (use_remote_comm) {
- mc_model_checker->get_remote_simulation().read(
- temp_synchro, remote(static_cast<simgrid::kernel::activity::CommImpl*>(remote_act)));
+ mc_model_checker->get_remote_simulation().read(temp_synchro, remote(remote_act));
act = temp_synchro.get_buffer();
} else
act = remote_act;
case SIMCALL_COMM_WAIT: {
simgrid::mc::RemotePtr<simgrid::kernel::activity::CommImpl> remote_act =
- remote(static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(&actor->simcall_)));
+ remote(simcall_comm_wait__getraw__comm(&actor->simcall_));
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_act;
mc_model_checker->get_remote_simulation().read(temp_act, remote_act);
const simgrid::kernel::activity::CommImpl* act = temp_act.get_buffer();
return instance_.get();
}
-void AppSide::handle_deadlock_check(const s_mc_message_t*)
+void AppSide::handle_deadlock_check(const s_mc_message_t*) const
{
bool deadlock = false;
if (not simix_global->process_list.empty()) {
s_mc_message_int_t answer{MC_MESSAGE_DEADLOCK_CHECK_REPLY, deadlock};
xbt_assert(channel_.send(answer) == 0, "Could not send response");
}
-void AppSide::handle_continue(const s_mc_message_t*)
+void AppSide::handle_continue(const s_mc_message_t*) const
{
/* Nothing to do */
}
-void AppSide::handle_simcall(const s_mc_message_simcall_handle_t* message)
+void AppSide::handle_simcall(const s_mc_message_simcall_handle_t* message) const
{
smx_actor_t process = SIMIX_process_from_PID(message->pid);
xbt_assert(process != nullptr, "Invalid pid %lu", message->pid);
xbt_die("Could not send MESSAGE_WAITING to model-checker");
}
-void AppSide::handle_actor_enabled(const s_mc_message_actor_enabled_t* msg)
+void AppSide::handle_actor_enabled(const s_mc_message_actor_enabled_t* msg) const
{
bool res = simgrid::mc::actor_is_enabled(SIMIX_process_from_PID(msg->aid));
s_mc_message_int_t answer{MC_MESSAGE_ACTOR_ENABLED_REPLY, res};
xbt_assert(received_size == sizeof(_type_), "Unexpected size for " _name_ " (%zd != %zu)", received_size, \
sizeof(_type_))
-void AppSide::handle_messages()
+void AppSide::handle_messages() const
{
- while (1) {
+ while (true) {
XBT_DEBUG("Waiting messages from model-checker");
char message_buffer[MC_MESSAGE_LENGTH];
}
}
-void AppSide::main_loop()
+void AppSide::main_loop() const
{
- while (1) {
+ while (true) {
simgrid::mc::wait_for_requests();
xbt_assert(channel_.send(MC_MESSAGE_WAITING) == 0, "Could not send WAITING message to model-checker");
this->handle_messages();
}
}
-void AppSide::report_assertion_failure()
+void AppSide::report_assertion_failure() const
{
if (channel_.send(MC_MESSAGE_ASSERTION_FAILED))
xbt_die("Could not send assertion to model-checker");
this->handle_messages();
}
-void AppSide::ignore_memory(void* addr, std::size_t size)
+void AppSide::ignore_memory(void* addr, std::size_t size) const
{
s_mc_message_ignore_memory_t message;
message.type = MC_MESSAGE_IGNORE_MEMORY;
message.addr = (std::uintptr_t)addr;
message.size = size;
if (channel_.send(message))
- xbt_die("Could not send IGNORE_MEMORY mesage to model-checker");
+ xbt_die("Could not send IGNORE_MEMORY message to model-checker");
}
-void AppSide::ignore_heap(void* address, std::size_t size)
+void AppSide::ignore_heap(void* address, std::size_t size) const
{
const s_xbt_mheap_t* heap = mmalloc_get_current_heap();
message.fragment = -1;
heap->heapinfo[message.block].busy_block.ignore++;
} else {
- message.fragment = ((uintptr_t)(ADDR2UINT(address) % (BLOCKSIZE))) >> heap->heapinfo[message.block].type;
+ message.fragment = (ADDR2UINT(address) % BLOCKSIZE) >> heap->heapinfo[message.block].type;
heap->heapinfo[message.block].busy_frag.ignore[message.fragment]++;
}
xbt_die("Could not send ignored region to MCer");
}
-void AppSide::unignore_heap(void* address, std::size_t size)
+void AppSide::unignore_heap(void* address, std::size_t size) const
{
s_mc_message_ignore_memory_t message;
message.type = MC_MESSAGE_UNIGNORE_HEAP;
xbt_die("Could not send IGNORE_HEAP message to model-checker");
}
-void AppSide::declare_symbol(const char* name, int* value)
+void AppSide::declare_symbol(const char* name, int* value) const
{
s_mc_message_register_symbol_t message;
message.type = MC_MESSAGE_REGISTER_SYMBOL;
xbt_die("Could send REGISTER_SYMBOL message to model-checker");
}
-void AppSide::declare_stack(void* stack, size_t size, ucontext_t* context)
+void AppSide::declare_stack(void* stack, size_t size, ucontext_t* context) const
{
const s_xbt_mheap_t* heap = mmalloc_get_current_heap();
public:
AppSide();
explicit AppSide(int fd) : channel_(fd) {}
- void handle_messages();
+ void handle_messages() const;
private:
- void handle_deadlock_check(const s_mc_message_t* msg);
- void handle_continue(const s_mc_message_t* msg);
- void handle_simcall(const s_mc_message_simcall_handle_t* message);
- void handle_actor_enabled(const s_mc_message_actor_enabled_t* msg);
+ void handle_deadlock_check(const s_mc_message_t* msg) const;
+ void handle_continue(const s_mc_message_t* msg) const;
+ void handle_simcall(const s_mc_message_simcall_handle_t* message) const;
+ void handle_actor_enabled(const s_mc_message_actor_enabled_t* msg) const;
public:
Channel const& get_channel() const { return channel_; }
Channel& get_channel() { return channel_; }
- XBT_ATTRIB_NORETURN void main_loop();
- void report_assertion_failure();
- void ignore_memory(void* addr, std::size_t size);
- void ignore_heap(void* addr, std::size_t size);
- void unignore_heap(void* addr, std::size_t size);
- void declare_symbol(const char* name, int* value);
+ XBT_ATTRIB_NORETURN void main_loop() const;
+ void report_assertion_failure() const;
+ void ignore_memory(void* addr, std::size_t size) const;
+ void ignore_heap(void* addr, std::size_t size) const;
+ void unignore_heap(void* addr, std::size_t size) const;
+ void declare_symbol(const char* name, int* value) const;
#if HAVE_UCONTEXT_H
- void declare_stack(void* stack, size_t size, ucontext_t* context);
+ void declare_stack(void* stack, size_t size, ucontext_t* context) const;
#endif
// Singleton :/
"libboost_system",
"libboost_thread",
"libboost_timer",
+ "libbrotlicommon",
+ "libbrotlidec",
"libbz2",
"libc",
"libc++",
"libcdt",
"libcgraph",
+ "libcom_err",
+ "libcrypt",
"libcrypto",
+ "libcurl",
"libcxxrt",
+ "libdebuginfod",
"libdl",
"libdw",
"libelf",
"libflangrti",
"libgcc_s",
"libgfortran",
+ "libgssapi_krb5",
+ "libidn2",
"libimf",
"libintlc",
"libirng",
+ "libk5crypto",
+ "libkeyutils",
+ "libkrb5",
+ "libkrb5support", /*odd behaviour on fedora rawhide ... remove these when fixed*/
+ "liblber",
+ "libldap",
"liblua5.1",
"liblua5.3",
"liblzma",
"libm",
+ "libnghttp2",
"libomp",
"libpapi",
"libpcre2",
"libpfm",
"libpgmath",
+ "libpsl",
"libpthread",
"libquadmath",
+ "libresolv",
"librt",
+ "libsasl2",
+ "libselinux",
+ "libssh",
+ "libssh2",
+ "libssl",
"libstdc++",
"libsvml",
"libtsan", /* gcc sanitizers */
"libubsan", /* gcc sanitizers */
+ "libunistring",
"libunwind",
"libunwind-ptrace",
"libunwind-x86",
"libunwind-x86_64",
"libz",
- "libkrb5support", /*odd behaviour on fedora rawhide ... remove these when fixed*/
- "libkeyutils",
- "libunistring",
- "libbrotlidec",
- "liblber",
- "libldap",
- "libcom_err",
- "libk5crypto",
- "libkrb5",
- "libgssapi_krb5",
- "libssl",
- "libpsl",
- "libssh",
- "libssh2",
- "libidn2",
- "libnghttp2",
- "libcurl",
- "libdebuginfod",
- "libbrotlicommon",
- "libsasl2",
- "libresolv",
- "libcrypt",
- "libselinux"};
+ "libzstd"};
static bool is_simgrid_lib(const std::string& libname)
{
std::vector<char> res(128);
off_t off = 0;
- while (1) {
+ while (true) {
ssize_t c = pread(this->memory_file, res.data() + off, res.size() - off, (off_t)address.address() + off);
if (c == -1 && errno == EINTR)
continue;
* @param len data size
* @param address target process memory address (target)
*/
-void RemoteSimulation::write_bytes(const void* buffer, size_t len, RemotePtr<void> address)
+void RemoteSimulation::write_bytes(const void* buffer, size_t len, RemotePtr<void> address) const
{
if (pwrite_whole(this->memory_file, buffer, len, (size_t)address.address()) < 0)
xbt_die("Write to process %lli failed", (long long)this->pid_);
}
-void RemoteSimulation::clear_bytes(RemotePtr<void> address, size_t len)
+void RemoteSimulation::clear_bytes(RemotePtr<void> address, size_t len) const
{
pthread_once(&zero_buffer_flag, zero_buffer_init);
while (len) {
}
}
-void RemoteSimulation::ignore_local_variable(const char* var_name, const char* frame_name)
+void RemoteSimulation::ignore_local_variable(const char* var_name, const char* frame_name) const
{
if (frame_name != nullptr && strcmp(frame_name, "*") == 0)
frame_name = nullptr;
return smx_dead_actors_infos;
}
-void RemoteSimulation::dump_stack()
+void RemoteSimulation::dump_stack() const
{
unw_addr_space_t as = unw_create_addr_space(&_UPT_accessors, BYTE_ORDER);
if (as == nullptr) {
using AddressSpace::read_string;
// Write memory:
- void write_bytes(const void* buffer, size_t len, RemotePtr<void> address);
- void clear_bytes(RemotePtr<void> address, size_t len);
+ void write_bytes(const void* buffer, size_t len, RemotePtr<void> address) const;
+ void clear_bytes(RemotePtr<void> address, size_t len) const;
// Debug information:
std::shared_ptr<ObjectInformation> find_object_info(RemotePtr<void> addr) const;
void terminate() { running_ = false; }
- void ignore_global_variable(const char* name)
+ void ignore_global_variable(const char* name) const
{
for (std::shared_ptr<ObjectInformation> const& info : this->object_infos)
info->remove_global_variable(name);
void ignore_heap(IgnoredHeapRegion const& region);
void unignore_heap(void* address, size_t size);
- void ignore_local_variable(const char* var_name, const char* frame_name);
+ void ignore_local_variable(const char* var_name, const char* frame_name) const;
std::vector<ActorInformation>& actors();
std::vector<ActorInformation>& dead_actors();
return nullptr;
}
- void dump_stack();
+ void dump_stack() const;
private:
void init_memory_map_info();
/** @brief Storage for snapshot memory pages
*
* The first (lower) layer of the per-page snapshot mechanism is a page store:
- * its responsibility is to store immutable sharable reference-counted memory
+ * its responsibility is to store immutable shareable reference-counted memory
* pages independently of the snapshotting logic. Snapshot management and
* representation is handled to an higher layer. READMORE
*
// Debug/test methods
/** @brief Get the number of references for a page */
- std::size_t get_ref(std::size_t pageno);
+ std::size_t get_ref(std::size_t pageno) const;
/** @brief Get the number of used pages */
- std::size_t size();
+ std::size_t size() const;
/** @brief Get the capacity of the page store
*
* The capacity is expanded by a system call (mremap).
* */
- std::size_t capacity();
+ std::size_t capacity() const;
};
XBT_ALWAYS_INLINE void PageStore::unref_page(std::size_t pageno)
return (void*)simgrid::mc::mmu::join(pageno, (std::uintptr_t)this->memory_);
}
-XBT_ALWAYS_INLINE std::size_t PageStore::get_ref(std::size_t pageno)
+XBT_ALWAYS_INLINE std::size_t PageStore::get_ref(std::size_t pageno) const
{
return this->page_counts_[pageno];
}
-XBT_ALWAYS_INLINE std::size_t PageStore::size()
+XBT_ALWAYS_INLINE std::size_t PageStore::size() const
{
return this->top_index_ - this->free_pages_.size();
}
-XBT_ALWAYS_INLINE std::size_t PageStore::capacity()
+XBT_ALWAYS_INLINE std::size_t PageStore::capacity() const
{
return this->capacity_;
}
*
* @param region Target region
*/
-void Region::restore()
+void Region::restore() const
{
xbt_assert(((start().address()) & (xbt_pagesize - 1)) == 0, "Not at the beginning of a page");
xbt_assert(simgrid::mc::mmu::chunk_count(size()) == get_chunks().page_count());
bool contain(RemotePtr<void> p) const { return p >= start() && p < end(); }
/** @brief Restore a region from a snapshot */
- void restore();
+ void restore() const;
/** @brief Read memory that was snapshotted in this region
*
// TODO, check condition check (unw_init_local==0 means end of frame)
- while (1) {
+ while (true) {
s_mc_stack_frame_t stack_frame;
stack_frame.unw_cursor = c;
return get_region(addr);
}
-void Snapshot::restore(RemoteSimulation* process)
+void Snapshot::restore(RemoteSimulation* process) const
{
XBT_DEBUG("Restore snapshot %i", num_state_);
ReadOptions options = ReadOptions::none()) const override;
Region* get_region(const void* addr) const;
Region* get_region(const void* addr, Region* hinted_region) const;
- void restore(RemoteSimulation* get_remote_simulation);
+ void restore(RemoteSimulation* get_remote_simulation) const;
// To be private
int num_state_;
{
sg_actor_resume(actor);
}
-int MSG_process_is_suspended(sg_actor_t actor)
+int MSG_process_is_suspended(const_sg_actor_t actor)
{
return sg_actor_is_suspended(actor);
}
public:
void start_tracking();
void stop_tracking() { dp_tracking_ = false; }
- bool is_tracking() { return dp_tracking_; }
+ bool is_tracking() const { return dp_tracking_; }
void track(kernel::activity::ExecImpl const* exec, double amount) { dp_objs_.insert({exec, amount}); }
void untrack(kernel::activity::ExecImpl const* exec) { dp_objs_.erase(exec); }
double get_stored_remains(kernel::activity::ExecImpl const* exec) { return dp_objs_.at(exec); }
void update_dirty_page_count(double delta) { dp_updated_by_deleted_tasks_ += delta; }
double computed_flops_lookup();
- double get_intensity() { return dp_intensity_; }
+ double get_intensity() const { return dp_intensity_; }
void set_intensity(double intensity) { dp_intensity_ = intensity; }
- double get_working_set_memory() { return working_set_memory_; }
+ sg_size_t get_working_set_memory() const { return working_set_memory_; }
void set_working_set_memory(sg_size_t size) { working_set_memory_ = size; }
void set_migration_speed(double speed) { mig_speed_ = speed; }
- double get_migration_speed() { return mig_speed_; }
- double get_max_downtime() { return max_downtime_; }
+ double get_migration_speed() const { return mig_speed_; }
+ double get_max_downtime() const { return max_downtime_; }
static simgrid::xbt::Extension<VirtualMachineImpl, DirtyPageTrackingExt> EXTENSION_ID;
virtual ~DirtyPageTrackingExt() = default;
#include "src/surf/HostImpl.hpp"
#include "src/surf/xml/platf_private.hpp"
#include "xbt/config.hpp"
+#include "xbt/parse_units.hpp"
#include <algorithm>
#include <boost/algorithm/string.hpp>
kernel::actor::simcall([this, desc_table] { desc_table->push_back(this->desc_id); });
}
-void File::dump()
+void File::dump() const
{
if (local_storage_)
XBT_INFO("File Descriptor information:\n"
return 0;
}
-sg_size_t File::size()
+sg_size_t File::size() const
{
return size_;
}
}
}
-sg_size_t File::tell()
+sg_size_t File::tell() const
{
return current_position_;
}
-void File::move(const std::string& fullpath)
+void File::move(const std::string& fullpath) const
{
/* Check if the new full path is on the same mount point */
if (fullpath.compare(0, mount_point_.length(), mount_point_) == 0) {
}
}
-int File::unlink()
+int File::unlink() const
{
/* Check if the file is on local storage */
std::map<std::string, sg_size_t>* content = nullptr;
FileSystemDiskExt::FileSystemDiskExt(const Disk* ptr)
{
const char* size_str = ptr->get_property("size");
+ std::string dummyfile;
if (size_str)
- size_ = surf_parse_get_size(size_str, "disk size", ptr->get_name());
+ size_ = surf_parse_get_size(dummyfile, -1, size_str, "disk size", ptr->get_name());
const char* current_mount_str = ptr->get_property("mount");
if (current_mount_str)
/** Retrieves the path to the file
* @ingroup plugin_filesystem
*/
-const char* sg_file_get_name(sg_file_t fd)
+const char* sg_file_get_name(const_sg_file_t fd)
{
xbt_assert((fd != nullptr), "Invalid file descriptor");
return fd->get_path();
/** Retrieves the size of the file
* @ingroup plugin_filesystem
*/
-sg_size_t sg_file_get_size(sg_file_t fd)
+sg_size_t sg_file_get_size(const_sg_file_t fd)
{
return fd->size();
}
-void sg_file_dump(sg_file_t fd)
+void sg_file_dump(const_sg_file_t fd)
{
fd->dump();
}
fd->seek(offset, origin);
}
-sg_size_t sg_file_tell(sg_file_t fd)
+sg_size_t sg_file_tell(const_sg_file_t fd)
{
return fd->tell();
}
-void sg_file_move(sg_file_t fd, const char* fullpath)
+void sg_file_move(const_sg_file_t fd, const char* fullpath)
{
fd->move(fullpath);
}
}
}();
- while (1) {
+ while (true) {
// Sleep *before* updating; important for startup (i.e., t = 0).
// In the beginning, we want to go with the pstates specified in the platform file
// (so we sleep first)
~HostEnergy();
double get_current_watts_value();
- double get_current_watts_value(double cpu_load);
+ double get_current_watts_value(double cpu_load) const;
double get_consumed_energy();
- double get_watt_idle_at(int pstate);
- double get_watt_min_at(int pstate);
- double get_watt_max_at(int pstate);
- double get_power_range_slope_at(int pstate);
- double get_last_update_time() { return last_updated_; }
+ double get_watt_idle_at(int pstate) const;
+ double get_watt_min_at(int pstate) const;
+ double get_watt_max_at(int pstate) const;
+ double get_power_range_slope_at(int pstate) const;
+ double get_last_update_time() const { return last_updated_; }
void update();
};
HostEnergy::~HostEnergy() = default;
-double HostEnergy::get_watt_idle_at(int pstate)
+double HostEnergy::get_watt_idle_at(int pstate) const
{
xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s",
host_->get_cname());
return power_range_watts_list_[pstate].idle_;
}
-double HostEnergy::get_watt_min_at(int pstate)
+double HostEnergy::get_watt_min_at(int pstate) const
{
xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s",
host_->get_cname());
return power_range_watts_list_[pstate].epsilon_;
}
-double HostEnergy::get_watt_max_at(int pstate)
+double HostEnergy::get_watt_max_at(int pstate) const
{
xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s",
host_->get_cname());
return power_range_watts_list_[pstate].max_;
}
-double HostEnergy::get_power_range_slope_at(int pstate)
+double HostEnergy::get_power_range_slope_at(int pstate) const
{
xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s",
host_->get_cname());
*
* Whether the host is ON or OFF is not taken into account.
*/
-double HostEnergy::get_current_watts_value(double cpu_load)
+double HostEnergy::get_current_watts_value(double cpu_load) const
{
xbt_assert(not power_range_watts_list_.empty(), "No power range properties specified for host %s",
host_->get_cname());
explicit HostLoad(simgrid::s4u::Host& ptr) = delete;
explicit HostLoad(simgrid::s4u::Host&& ptr) = delete;
- double get_current_load();
+ double get_current_load() const;
/** Get the the average load since last reset(), as a ratio
*
* That's the ratio (amount of flops that were actually computed) / (amount of flops that could have been computed at full speed)
* But still, if you call this function between the two events (in the simulator course), it
* returns 0 although there is no time (in the simulated time) where this value is valid.
*/
-double HostLoad::get_current_load()
+double HostLoad::get_current_load() const
{
// We don't need to call update() here because it is called every time an action terminates or starts
return current_flops_ / (host_->get_speed() * host_->get_core_count());
// If SimGrid is already initialized, we need to attach an extension to each existing host
if (simgrid::s4u::Engine::is_initialized()) {
- simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
+ const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
for (auto& host : e->get_all_hosts()) {
host->extension_set(new HostLoad(host));
}
void update();
private:
- double get_power();
+ double get_power() const;
s4u::Link* link_{};
}
}
-double LinkEnergy::get_power()
+double LinkEnergy::get_power() const
{
if (!inited_)
return 0.0;
/* iterate for all virtual machines */
for (s4u::VirtualMachine* const& ws_vm : VirtualMachineImpl::allVms_) {
+ if (ws_vm->get_state() == s4u::VirtualMachine::state::SUSPENDED) // Ignore suspended VMs
+ continue;
+
const kernel::resource::Cpu* cpu = ws_vm->pimpl_cpu;
// solved_value below is X1 in comment above: what this VM got in the sharing on the PM
/* update net_elm with that of the destination physical host */
piface_->set_netpoint(destination->get_netpoint());
+ /* Adapt the speed, pstate and other physical characteristics to the one of our new physical CPU */
+ piface_->pimpl_cpu->reset_vcpu(destination->pimpl_cpu);
+
physical_host_ = destination;
/* Update vcpu's action for the new pm */
/* create a cpu action bound to the pm model at the destination. */
- kernel::resource::CpuAction* new_cpu_action =
- static_cast<kernel::resource::CpuAction*>(destination->pimpl_cpu->execution_start(0, this->core_amount_));
+ kernel::resource::CpuAction* new_cpu_action = destination->pimpl_cpu->execution_start(0, this->core_amount_);
if (action_->get_remains_no_update() > 0)
XBT_CRITICAL("FIXME: need copy the state(?), %f", action_->get_remains_no_update());
/** @brief Change the physical host on which the given VM is running */
virtual void set_physical_host(s4u::Host* dest);
/** @brief Get the physical host on which the given VM is running */
- s4u::Host* get_physical_host() { return physical_host_; }
+ s4u::Host* get_physical_host() const { return physical_host_; }
sg_size_t get_ramsize() const { return ramsize_; }
void set_ramsize(sg_size_t ramsize) { ramsize_ = ramsize; }
s4u::VirtualMachine::state get_state() const { return vm_state_; }
void set_state(s4u::VirtualMachine::state state) { vm_state_ = state; }
- unsigned int get_core_amount() { return core_amount_; }
- kernel::resource::Action* get_action() { return action_; }
+ unsigned int get_core_amount() const { return core_amount_; }
+ kernel::resource::Action* get_action() const { return action_; }
virtual void set_bound(double bound);
double clock_end = s4u::Engine::get_clock();
double duration = clock_end - clock_sta;
- double actual_speed = size / duration;
+ double actual_speed = static_cast<double>(size) / duration;
if (stage == 2)
XBT_DEBUG("mig-stage%d.%d: sent %llu duration %f actual_speed %f (target %f)", stage, stage2_round, size, duration,
double host_speed = vm_->get_pm()->get_speed();
const sg_size_t ramsize = vm_->get_ramsize();
const double dp_rate =
- host_speed ? (sg_vm_get_migration_speed(vm_) * sg_vm_get_dirty_page_intensity(vm_)) / host_speed : 1;
+ host_speed != 0.0 ? (sg_vm_get_migration_speed(vm_) * sg_vm_get_dirty_page_intensity(vm_)) / host_speed : 1;
const sg_size_t dp_cap = sg_vm_get_working_set_memory(vm_);
const double mig_speed = sg_vm_get_migration_speed(vm_);
double max_downtime = sg_vm_get_max_downtime(vm_);
return pimpl_vm_->get_physical_host();
}
-void VirtualMachine::set_pm(simgrid::s4u::Host* pm)
+VirtualMachine* VirtualMachine::set_pm(simgrid::s4u::Host* pm)
{
kernel::actor::simcall([this, pm]() { pimpl_vm_->set_physical_host(pm); });
+ return this;
}
VirtualMachine::state VirtualMachine::get_state()
return pimpl_vm_->get_ramsize();
}
-void VirtualMachine::set_ramsize(size_t ramsize)
+VirtualMachine* VirtualMachine::set_ramsize(size_t ramsize)
{
pimpl_vm_->set_ramsize(ramsize);
+ return this;
}
/** @brief Set a CPU bound for a given VM.
* @ingroup msg_VMs
* 2. Note that bound == 0 means no bound (i.e., unlimited). But, if a host has multiple CPU cores, the CPU share of a
* computation task (or a VM) never exceeds the capacity of a CPU core.
*/
-void VirtualMachine::set_bound(double bound)
+VirtualMachine* VirtualMachine::set_bound(double bound)
{
kernel::actor::simcall([this, bound]() { pimpl_vm_->set_bound(bound); });
+ return this;
}
} // namespace simgrid
return false;
}
+Activity* Activity::suspend()
+{
+ if (suspended_)
+ return this; // Already suspended
+ suspended_ = true;
+
+ if (state_ == State::STARTED)
+ pimpl_->suspend();
+
+ return this;
+}
+
+Activity* Activity::resume()
+{
+ if (not suspended_)
+ return this; // nothing to restore when it's not suspended
+
+ if (state_ == State::STARTED)
+ pimpl_->resume();
+
+ return this;
+}
+
double Activity::get_remaining() const
{
- return remains_;
+ if (state_ == State::INITED || state_ == State::STARTING)
+ return remains_;
+ else
+ return pimpl_->get_remaining();
}
Activity* Activity::set_remaining(double remains)
{
intrusive_ptr_release(actor->pimpl_);
}
-int Actor::get_refcount()
+int Actor::get_refcount() const
{
return pimpl_->get_refcount();
}
s4u::Actor::on_resume(*this);
}
-bool Actor::is_suspended()
+bool Actor::is_suspended() const
{
return pimpl_->is_suspended();
}
}
/** @brief Get the kill time of an actor(or 0 if unset). */
-double Actor::get_kill_time()
+double Actor::get_kill_time() const
{
return pimpl_->get_kill_time();
}
sg_actor_t* sg_actor_list()
{
- simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
+ const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
size_t actor_count = e->get_actor_count();
xbt_assert(actor_count > 0, "There is no actor!");
std::vector<simgrid::s4u::ActorPtr> actors = e->get_all_actors();
*
* This checks whether an actor is suspended or not by inspecting the task on which it was waiting for the completion.
*/
-int sg_actor_is_suspended(sg_actor_t actor)
+int sg_actor_is_suspended(const_sg_actor_t actor)
{
return actor->is_suspended();
}
} else {
xbt_die("Cannot start a communication before specifying whether we are the sender or the receiver");
}
+
+ if (suspended_)
+ pimpl_->suspend();
+
state_ = State::STARTED;
return this;
}
* See also: :ref:`platform`.
* \endrst
*/
-void Engine::load_platform(const std::string& platf)
+void Engine::load_platform(const std::string& platf) const
{
double start = xbt_os_time();
parse_platform_file(platf);
* See also: :ref:`deploy`.
* \endrst
*/
-void Engine::load_deployment(const std::string& deploy)
+void Engine::load_deployment(const std::string& deploy) const
{
pimpl->load_deployment(deploy);
}
/** Returns the amount of hosts in the platform */
-size_t Engine::get_host_count()
+size_t Engine::get_host_count() const
{
return pimpl->hosts_.size();
}
-std::vector<Host*> Engine::get_all_hosts()
+std::vector<Host*> Engine::get_all_hosts() const
{
std::vector<Host*> res;
for (auto const& kv : pimpl->hosts_)
return res;
}
-std::vector<Host*> Engine::get_filtered_hosts(const std::function<bool(Host*)>& filter)
+std::vector<Host*> Engine::get_filtered_hosts(const std::function<bool(Host*)>& filter) const
{
std::vector<Host*> hosts;
for (auto const& kv : pimpl->hosts_) {
*
* @throw std::invalid_argument if the searched host does not exist.
*/
-Host* Engine::host_by_name(const std::string& name)
+Host* Engine::host_by_name(const std::string& name) const
{
if (pimpl->hosts_.find(name) == pimpl->hosts_.end())
throw std::invalid_argument(std::string("Host not found: '") + name + std::string("'"));
}
/** @brief Find a host from its name (or nullptr if that host does not exist) */
-Host* Engine::host_by_name_or_null(const std::string& name)
+Host* Engine::host_by_name_or_null(const std::string& name) const
{
auto host = pimpl->hosts_.find(name);
return host == pimpl->hosts_.end() ? nullptr : host->second;
*
* @throw std::invalid_argument if the searched link does not exist.
*/
-Link* Engine::link_by_name(const std::string& name)
+Link* Engine::link_by_name(const std::string& name) const
{
if (pimpl->links_.find(name) == pimpl->links_.end())
throw std::invalid_argument(std::string("Link not found: ") + name);
}
/** @brief Find an link from its name (or nullptr if that link does not exist) */
-Link* Engine::link_by_name_or_null(const std::string& name)
+Link* Engine::link_by_name_or_null(const std::string& name) const
{
auto link = pimpl->links_.find(name);
return link == pimpl->links_.end() ? nullptr : link->second->get_iface();
}
/** @brief Returns the amount of storages in the platform */
-size_t Engine::get_storage_count()
+size_t Engine::get_storage_count() const
{
return pimpl->storages_.size();
}
/** @brief Returns the list of all storages found in the platform */
-std::vector<Storage*> Engine::get_all_storages()
+std::vector<Storage*> Engine::get_all_storages() const
{
std::vector<Storage*> res;
for (auto const& kv : pimpl->storages_)
*
* @throw std::invalid_argument if the searched storage does not exist.
*/
-Storage* Engine::storage_by_name(const std::string& name)
+Storage* Engine::storage_by_name(const std::string& name) const
{
if (pimpl->storages_.find(name) == pimpl->storages_.end())
throw std::invalid_argument(std::string("Storage not found: ") + name);
}
/** @brief Find a storage from its name (or nullptr if that storage does not exist) */
-Storage* Engine::storage_by_name_or_null(const std::string& name)
+Storage* Engine::storage_by_name_or_null(const std::string& name) const
{
auto storage = pimpl->storages_.find(name);
return storage == pimpl->storages_.end() ? nullptr : storage->second->get_iface();
}
/** @brief Returns the amount of links in the platform */
-size_t Engine::get_link_count()
+size_t Engine::get_link_count() const
{
return pimpl->links_.size();
}
/** @brief Returns the list of all links found in the platform */
-std::vector<Link*> Engine::get_all_links()
+std::vector<Link*> Engine::get_all_links() const
{
std::vector<Link*> res;
for (auto const& kv : pimpl->links_)
return res;
}
-std::vector<Link*> Engine::get_filtered_links(const std::function<bool(Link*)>& filter)
+std::vector<Link*> Engine::get_filtered_links(const std::function<bool(Link*)>& filter) const
{
std::vector<Link*> filtered_list;
for (auto const& kv : pimpl->links_) {
return filtered_list;
}
-size_t Engine::get_actor_count()
+size_t Engine::get_actor_count() const
{
return simix_global->process_list.size();
}
-std::vector<ActorPtr> Engine::get_all_actors()
+std::vector<ActorPtr> Engine::get_all_actors() const
{
std::vector<ActorPtr> actor_list;
for (auto const& kv : simix_global->process_list) {
return actor_list;
}
-std::vector<ActorPtr> Engine::get_filtered_actors(const std::function<bool(ActorPtr)>& filter)
+std::vector<ActorPtr> Engine::get_filtered_actors(const std::function<bool(ActorPtr)>& filter) const
{
std::vector<ActorPtr> actor_list;
for (auto const& kv : simix_global->process_list) {
return actor_list;
}
-void Engine::run()
+void Engine::run() const
{
/* Clean IO before the run */
fflush(stdout);
}
/** @brief Retrieve the root netzone, containing all others */
-s4u::NetZone* Engine::get_netzone_root()
+s4u::NetZone* Engine::get_netzone_root() const
{
return pimpl->netzone_root_->get_iface();
}
}
/** @brief Retrieve the NetZone of the given name (or nullptr if not found) */
-NetZone* Engine::netzone_by_name_or_null(const std::string& name)
+NetZone* Engine::netzone_by_name_or_null(const std::string& name) const
{
return netzone_by_name_recursive(get_netzone_root(), name);
}
/** @brief Retrieve the netpoint of the given name (or nullptr if not found) */
-kernel::routing::NetPoint* Engine::netpoint_by_name_or_null(const std::string& name)
+kernel::routing::NetPoint* Engine::netpoint_by_name_or_null(const std::string& name) const
{
auto netp = pimpl->netpoints_.find(name);
return netp == pimpl->netpoints_.end() ? nullptr : netp->second;
}
-std::vector<kernel::routing::NetPoint*> Engine::get_all_netpoints()
+std::vector<kernel::routing::NetPoint*> Engine::get_all_netpoints() const
{
std::vector<kernel::routing::NetPoint*> res;
for (auto const& kv : pimpl->netpoints_)
.set_flops_amount(flops_amounts_.front())
.start();
});
+
+ if (suspended_)
+ pimpl_->suspend();
+
state_ = State::STARTED;
on_start(*Actor::self(), *this);
return this;
return *mounts_;
}
-ExecPtr Host::exec_async(double flops)
+ExecPtr Host::exec_async(double flops) const
{
return this_actor::exec_init(flops);
}
-void Host::execute(double flops)
+void Host::execute(double flops) const
{
execute(flops, 1.0 /* priority */);
}
-void Host::execute(double flops, double priority)
+void Host::execute(double flops, double priority) const
{
this_actor::exec_init(flops)->set_priority(1 / priority)->start()->wait();
}
}
sg_host_t* sg_host_list()
{
- simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
+ const simgrid::s4u::Engine* e = simgrid::s4u::Engine::get_instance();
size_t host_count = e->get_host_count();
xbt_assert(host_count > 0, "There is no host!");
std::vector<simgrid::s4u::Host*> hosts = e->get_all_hosts();
.start();
}
});
+
+ if (suspended_)
+ pimpl_->suspend();
+
state_ = State::STARTED;
return this;
}
#include "simgrid/simix.hpp"
#include "src/kernel/lmm/maxmin.hpp"
#include "src/surf/network_interface.hpp"
+#include "src/surf/network_wifi.hpp"
#include "xbt/log.h"
namespace simgrid {
return this->pimpl_->get_sharing_policy();
}
+void Link::set_host_wifi_rate(const s4u::Host* host, int level) const
+{
+ xbt_assert(pimpl_->get_sharing_policy() == Link::SharingPolicy::WIFI, "Link %s does not seem to be a wifi link.",
+ get_cname());
+ auto* wlink = dynamic_cast<kernel::resource::NetworkWifiLink*>(pimpl_);
+ xbt_assert(wlink != nullptr, "Cannot convert link %s into a wifi link.", get_cname());
+ wlink->set_host_rate(host, level);
+}
+
double Link::get_usage() const
{
return this->pimpl_->get_constraint()->get_usage();
return &mbox->piface_;
}
-bool Mailbox::empty()
+bool Mailbox::empty() const
{
return pimpl_->comm_queue_.empty();
}
-bool Mailbox::listen()
+bool Mailbox::listen() const
{
return not this->empty() || (pimpl_->permanent_receiver_ && not pimpl_->done_comm_queue_.empty());
}
-aid_t Mailbox::listen_from()
+aid_t Mailbox::listen_from() const
{
kernel::activity::CommImplPtr comm = front();
if (comm && comm->src_actor_)
return -1;
}
-bool Mailbox::ready()
+bool Mailbox::ready() const
{
bool comm_ready = false;
if (not pimpl_->comm_queue_.empty()) {
return comm_ready;
}
-kernel::activity::CommImplPtr Mailbox::front()
+kernel::activity::CommImplPtr Mailbox::front() const
{
return pimpl_->comm_queue_.empty() ? nullptr : pimpl_->comm_queue_.front();
}
}
/** @brief get the receiver (process associated to the mailbox) */
-ActorPtr Mailbox::get_receiver()
+ActorPtr Mailbox::get_receiver() const
{
if (pimpl_->permanent_receiver_ == nullptr)
return ActorPtr();
if (previous_task && not SD_task_dependency_exists(previous_task, cur_task))
SD_task_dependency_add(previous_task, cur_task);
- SD_task_schedulel(cur_task, 1, hosts[std::stod(elm.first)]);
+ SD_task_schedulel(cur_task, 1, hosts[std::stoi(elm.first)]);
previous_task = cur_task;
}
}
task->state= SD_NOT_SCHEDULED;
sd_global->initial_tasks.insert(task);
- task->marked = 0;
+ task->marked = false;
task->start_time = -1.0;
task->finish_time = -1.0;
task->surf_action = nullptr;
*/
int SD_task_get_workstation_count(const_SD_task_t task)
{
- return task->allocation->size();
+ return static_cast<int>(task->allocation->size());
}
/**
if (dst) {
return (src->successors->find(dst) != src->successors->end() || src->outputs->find(dst) != src->outputs->end());
} else {
- return src->successors->size() + src->outputs->size();
+ return static_cast<int>(src->successors->size() + src->outputs->size());
}
} else {
- return dst->predecessors->size() + dst->inputs->size();
+ return static_cast<int>(dst->predecessors->size() + dst->inputs->size());
}
}
/* Iterate over all inputs and outputs to say where I am located (and start them if runnable) */
for (auto const& input : *task->inputs) {
- int src_nb = input->allocation->size();
+ int src_nb = static_cast<int>(input->allocation->size());
int dst_nb = count;
if (input->allocation->empty())
XBT_VERB("Sender side of '%s' not scheduled. Set receiver side to '%s''s allocation", input->name, task->name);
for (auto const& output : *task->outputs) {
int src_nb = count;
- int dst_nb = output->allocation->size();
+ int dst_nb = static_cast<int>(output->allocation->size());
if (output->allocation->empty())
XBT_VERB("Receiver side of '%s' not scheduled. Set sender side to '%s''s allocation", output->name, task->name);
/* the model-checker wants two separate simcalls */
simgrid::kernel::activity::ActivityImplPtr comm =
nullptr; /* MC needs the comm to be set to nullptr during the simcall */
- comm = simcall_comm_isend(sender, mbox, task_size, rate,
- src_buff, src_buff_size, match_fun, nullptr, copy_data_fun, data, 0);
+ comm = simcall_comm_isend(sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, nullptr, copy_data_fun,
+ data, false);
simcall_comm_wait(comm.get(), timeout);
comm = nullptr;
}
inline static int simcall_BODY_execution_waitany_for(simgrid::kernel::activity::ExecImpl** execs, size_t count, double timeout)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_execution_waitany_for(&SIMIX_process_self()->simcall_, execs, count, timeout);
return simcall<int, simgrid::kernel::activity::ExecImpl**, size_t, double>(SIMCALL_EXECUTION_WAITANY_FOR, execs, count, timeout);
}
inline static void simcall_BODY_comm_send(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, unsigned char* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_send(&SIMIX_process_self()->simcall_, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, copy_data_fun, data, timeout);
return simcall<void, smx_actor_t, smx_mailbox_t, double, double, unsigned char*, size_t, simix_match_func_t, simix_copy_data_func_t, void*, double>(SIMCALL_COMM_SEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, copy_data_fun, data, timeout);
}
inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> simcall_BODY_comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, unsigned char* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data, bool detached)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_isend(&SIMIX_process_self()->simcall_, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun, data, detached);
return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, smx_actor_t, smx_mailbox_t, double, double, unsigned char*, size_t, simix_match_func_t, simix_clean_func_t, simix_copy_data_func_t, void*, bool>(SIMCALL_COMM_ISEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun, data, detached);
}
inline static void simcall_BODY_comm_recv(smx_actor_t receiver, smx_mailbox_t mbox, unsigned char* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout, double rate)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_recv(&SIMIX_process_self()->simcall_, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, timeout, rate);
return simcall<void, smx_actor_t, smx_mailbox_t, unsigned char*, size_t*, simix_match_func_t, simix_copy_data_func_t, void*, double, double>(SIMCALL_COMM_RECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, timeout, rate);
}
inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> simcall_BODY_comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, unsigned char* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_irecv(&SIMIX_process_self()->simcall_, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate);
return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, smx_actor_t, smx_mailbox_t, unsigned char*, size_t*, simix_match_func_t, simix_copy_data_func_t, void*, double>(SIMCALL_COMM_IRECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate);
}
inline static int simcall_BODY_comm_waitany(simgrid::kernel::activity::CommImpl** comms, size_t count, double timeout)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_waitany(&SIMIX_process_self()->simcall_, comms, count, timeout);
return simcall<int, simgrid::kernel::activity::CommImpl**, size_t, double>(SIMCALL_COMM_WAITANY, comms, count, timeout);
}
inline static void simcall_BODY_comm_wait(simgrid::kernel::activity::CommImpl* comm, double timeout)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_wait(&SIMIX_process_self()->simcall_, comm, timeout);
return simcall<void, simgrid::kernel::activity::CommImpl*, double>(SIMCALL_COMM_WAIT, comm, timeout);
}
inline static bool simcall_BODY_comm_test(simgrid::kernel::activity::CommImpl* comm)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_test(&SIMIX_process_self()->simcall_, comm);
return simcall<bool, simgrid::kernel::activity::CommImpl*>(SIMCALL_COMM_TEST, comm);
}
inline static int simcall_BODY_comm_testany(simgrid::kernel::activity::CommImpl** comms, size_t count)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_comm_testany(&SIMIX_process_self()->simcall_, comms, count);
return simcall<int, simgrid::kernel::activity::CommImpl**, size_t>(SIMCALL_COMM_TESTANY, comms, count);
}
inline static void simcall_BODY_mutex_lock(smx_mutex_t mutex)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_mutex_lock(&SIMIX_process_self()->simcall_, mutex);
return simcall<void, smx_mutex_t>(SIMCALL_MUTEX_LOCK, mutex);
}
inline static int simcall_BODY_mutex_trylock(smx_mutex_t mutex)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_mutex_trylock(&SIMIX_process_self()->simcall_, mutex);
return simcall<int, smx_mutex_t>(SIMCALL_MUTEX_TRYLOCK, mutex);
}
inline static void simcall_BODY_mutex_unlock(smx_mutex_t mutex)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_mutex_unlock(&SIMIX_process_self()->simcall_, mutex);
return simcall<void, smx_mutex_t>(SIMCALL_MUTEX_UNLOCK, mutex);
}
inline static void simcall_BODY_cond_wait(smx_cond_t cond, smx_mutex_t mutex)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_cond_wait(&SIMIX_process_self()->simcall_, cond, mutex);
return simcall<void, smx_cond_t, smx_mutex_t>(SIMCALL_COND_WAIT, cond, mutex);
}
inline static int simcall_BODY_cond_wait_timeout(smx_cond_t cond, smx_mutex_t mutex, double timeout)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_cond_wait_timeout(&SIMIX_process_self()->simcall_, cond, mutex, timeout);
return simcall<int, smx_cond_t, smx_mutex_t, double>(SIMCALL_COND_WAIT_TIMEOUT, cond, mutex, timeout);
}
inline static void simcall_BODY_sem_acquire(smx_sem_t sem)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_sem_acquire(&SIMIX_process_self()->simcall_, sem);
return simcall<void, smx_sem_t>(SIMCALL_SEM_ACQUIRE, sem);
}
inline static int simcall_BODY_sem_acquire_timeout(smx_sem_t sem, double timeout)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_sem_acquire_timeout(&SIMIX_process_self()->simcall_, sem, timeout);
return simcall<int, smx_sem_t, double>(SIMCALL_SEM_ACQUIRE_TIMEOUT, sem, timeout);
}
inline static int simcall_BODY_mc_random(int min, int max)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
simcall_HANDLER_mc_random(&SIMIX_process_self()->simcall_, min, max);
return simcall<int, int, int>(SIMCALL_MC_RANDOM, min, max);
}
inline static void simcall_BODY_run_kernel(std::function<void()> const* code)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
SIMIX_run_kernel(code);
return simcall<void, std::function<void()> const*>(SIMCALL_RUN_KERNEL, code);
}
inline static void simcall_BODY_run_blocking(std::function<void()> const* code)
{
- if (0) /* Go to that function to follow the code flow through the simcall barrier */
+ if (false) /* Go to that function to follow the code flow through the simcall barrier */
SIMIX_run_blocking(code);
return simcall<void, std::function<void()> const*>(SIMCALL_RUN_BLOCKING, code);
}
smx_timer_t timeout_cb_ = nullptr; // Callback to timeouts
simgrid::mc::SimcallInspector* inspector_ = nullptr; // makes that simcall observable by the MC
int mc_value_ = 0;
- u_smx_scalar args_[11] = {{0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}};
- u_smx_scalar result_ = {0};
+ u_smx_scalar args_[11] = {};
+ u_smx_scalar result_ = {};
};
#define SIMCALL_SET_MC_VALUE(simcall, value) ((simcall).mc_value_ = (value))
def case(self):
res = []
indent = ' '
- args = ["simgrid::simix::unmarshal<%s>(simcall.args_[%d])" % (arg.rettype(), i)
+ args = ["simgrid::simix::unmarshal<%s>(simcall_.args_[%d])" % (arg.rettype(), i)
for i, arg in enumerate(self.args)]
res.append(indent + 'case SIMCALL_%s:' % (self.name.upper()))
if self.need_handler:
- call = "simcall_HANDLER_%s(&simcall%s%s)" % (self.name,
+ call = "simcall_HANDLER_%s(&simcall_%s%s)" % (self.name,
", " if args else "",
', '.join(args))
else:
call = "SIMIX_%s(%s)" % (self.name, ', '.join(args))
if self.call_kind == 'Func':
- res.append(indent + " simgrid::simix::marshal<%s>(simcall.result_, %s);" % (self.res.rettype(), call))
+ res.append(indent + " simgrid::simix::marshal<%s>(simcall_.result_, %s);" % (self.res.rettype(), call))
else:
res.append(indent + " " + call + ";")
if self.call_kind != 'Blck':
self.name,
', '.join('%s %s' % (arg.rettype(), arg.name) for arg in self.args)))
res.append('{')
- res.append(' if (0) /* Go to that function to follow the code flow through the simcall barrier */')
+ res.append(' if (false) /* Go to that function to follow the code flow through the simcall barrier */')
if self.need_handler:
res.append(' simcall_HANDLER_%s(%s);' % (self.name,
- ', '.join(["&SIMIX_process_self()->simcall"] + [arg.name for arg in self.args])))
+ ', '.join(["&SIMIX_process_self()->simcall_"] + [arg.name for arg in self.args])))
else:
res.append(' SIMIX_%s(%s);' % (self.name,
', '.join(arg.name for arg in self.args)))
fd.write(
'void simgrid::kernel::actor::ActorImpl::simcall_handle(int value) {\n')
fd.write(
- ' XBT_DEBUG("Handling simcall %p: %s", &simcall, SIMIX_simcall_name(simcall.call_));\n')
- fd.write(' SIMCALL_SET_MC_VALUE(simcall, value);\n')
+ ' XBT_DEBUG("Handling simcall %p: %s", &simcall_, SIMIX_simcall_name(simcall_.call_));\n')
+ fd.write(' SIMCALL_SET_MC_VALUE(simcall_, value);\n')
fd.write(
' if (context_->wannadie())\n')
fd.write(' return;\n')
- fd.write(' switch (simcall.call_) {\n')
+ fd.write(' switch (simcall_.call_) {\n')
handle(fd, Simcall.case, simcalls, simcalls_dict)
inline static R simcall(e_smx_simcall_t call, T const&... t)
{
smx_actor_t self = SIMIX_process_self();
- simgrid::simix::marshal(&self->simcall, call, t...);
+ simgrid::simix::marshal(&self->simcall_, call, t...);
if (self != simix_global->maestro_) {
- XBT_DEBUG("Yield process '%s' on simcall %s (%d)", self->get_cname(), SIMIX_simcall_name(self->simcall.call_),
- (int)self->simcall.call_);
+ XBT_DEBUG("Yield process '%s' on simcall %s (%d)", self->get_cname(), SIMIX_simcall_name(self->simcall_.call_),
+ (int)self->simcall_.call_);
self->yield();
} else {
self->simcall_handle(0);
}
- return simgrid::simix::unmarshal<R>(self->simcall.result_);
+ return simgrid::simix::unmarshal<R>(self->simcall_.result_);
}
''')
handle(fd, Simcall.body, simcalls, simcalls_dict)
}
/** Wake up all actors waiting for a Surf action to finish */
-void Global::wake_all_waiting_actors()
+void Global::wake_all_waiting_actors() const
{
for (auto const& model : all_existing_models) {
kernel::resource::Action* action;
}
}
-void Global::display_all_actor_status()
+void Global::display_all_actor_status() const
{
XBT_INFO("%zu actors are still running, waiting for something.", process_list.size());
/* List the actors and their state */
*/
void empty_trash();
void run_all_actors();
- void wake_all_waiting_actors();
- void display_all_actor_status();
+ void wake_all_waiting_actors() const;
+ void display_all_actor_status() const;
smx_context_factory_t context_factory = nullptr;
std::vector<kernel::actor::ActorImpl*> actors_to_run;
void mpi_win_allocate_(MPI_Aint* size, int* disp_unit, int* info, int* comm, void* base, int* win, int* ierr)
{
MPI_Win tmp;
- *ierr = MPI_Win_allocate( *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),static_cast<void*>(base),&tmp);
+ *ierr =
+ MPI_Win_allocate(*size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm), base, &tmp);
if(*ierr == MPI_SUCCESS) {
*win = tmp->add_f();
}
MPI_Errhandler PMPI_Errhandler_f2c(MPI_Fint errhan){
if(errhan==-1)
return MPI_ERRHANDLER_NULL;
- return static_cast<MPI_Errhandler>(simgrid::smpi::Errhandler::f2c(errhan));
+ return simgrid::smpi::Errhandler::f2c(errhan);
}
MPI_Fint PMPI_Errhandler_c2f(MPI_Errhandler errhan){
MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){
if(comm==-1)
return MPI_COMM_NULL;
- return static_cast<MPI_Comm>(simgrid::smpi::Comm::f2c(comm));
+ return simgrid::smpi::Comm::f2c(comm);
}
MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){
MPI_Info PMPI_Info_f2c(MPI_Fint info){
if(info==-1)
return MPI_INFO_NULL;
- return static_cast<MPI_Info>(simgrid::smpi::Info::f2c(info));
+ return simgrid::smpi::Info::f2c(info);
}
MPI_Fint PMPI_Info_c2f(MPI_Info info){
MPI_Op PMPI_Op_f2c(MPI_Fint op){
if(op==-1)
return MPI_OP_NULL;
- return static_cast<MPI_Op>(simgrid::smpi::Op::f2c(op));
+ return simgrid::smpi::Op::f2c(op);
}
MPI_Fint PMPI_Op_c2f(MPI_Op op){
TRACE_smpi_comm_in(my_proc_id, __func__, new simgrid::instr::NoOpTIData("Startall"));
if (not TRACE_smpi_view_internals())
for (int i = 0; i < count; i++) {
- MPI_Request req = requests[i];
+ const simgrid::smpi::Request* req = requests[i];
if (req->flags() & MPI_REQ_SEND)
TRACE_smpi_send(my_proc_id, my_proc_id, getPid(req->comm(), req->dst()), req->tag(), req->size());
}
if (not TRACE_smpi_view_internals())
for (int i = 0; i < count; i++) {
- MPI_Request req = requests[i];
+ const simgrid::smpi::Request* req = requests[i];
if (req->flags() & MPI_REQ_RECV)
TRACE_smpi_recv(getPid(req->comm(), req->src()), my_proc_id, req->tag());
}
}
// TODO: cheinrich: Move declaration to other file? Rename this function - it's used for PMPI_Wait*?
-static void trace_smpi_recv_helper(MPI_Request* request, MPI_Status* status);
static void trace_smpi_recv_helper(MPI_Request* request, MPI_Status* status)
{
- MPI_Request req = *request;
+ const simgrid::smpi::Request* req = *request;
if (req != MPI_REQUEST_NULL) { // Received requests become null
int src_traced = req->src();
// the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
MPI_Request savedreq = *request;
if (savedreq != MPI_REQUEST_NULL && not(savedreq->flags() & MPI_REQ_FINISHED)
&& not(savedreq->flags() & MPI_REQ_GENERALIZED))
- savedreq->ref();//don't erase te handle in Request::wait, we'll need it later
+ savedreq->ref();//don't erase the handle in Request::wait, we'll need it later
else
savedreq = MPI_REQUEST_NULL;
MPI_Request PMPI_Request_f2c(MPI_Fint request){
if(request==-1)
return MPI_REQUEST_NULL;
- return static_cast<MPI_Request>(simgrid::smpi::Request::f2c(request));
+ return simgrid::smpi::Request::f2c(request);
}
MPI_Fint PMPI_Request_c2f(MPI_Request request) {
CHECK_COMM(1)
CHECK_NULL(1, MPI_ERR_TOPOLOGY, comm->topo())
CHECK_NULL(2, MPI_ERR_ARG, ndims)
- MPIR_Cart_Topology topo = static_cast<MPIR_Cart_Topology>(comm->topo().get());
+ const simgrid::smpi::Topo_Cart* topo = static_cast<MPIR_Cart_Topology>(comm->topo().get());
if (topo==nullptr) {
return MPI_ERR_ARG;
}
MPI_Win PMPI_Win_f2c(MPI_Fint win){
if(win==-1)
return MPI_WIN_NULL;
- return static_cast<MPI_Win>(simgrid::smpi::Win::f2c(win));
+ return simgrid::smpi::Win::f2c(win);
}
MPI_Fint PMPI_Win_c2f(MPI_Win win){
MPI_Request* rrequest_array = new MPI_Request[size];
MPI_Request* srequest_array = new MPI_Request[size];
- // irregular case use default MPI fucntions
+ // irregular case use default MPI functions
if (scount * sextent != rcount * rextent) {
XBT_WARN("MPI_allgather_NTSLR_NB use default MPI_allgather.");
allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
rextent = rtype->get_extent();
sextent = stype->get_extent();
- // irregular case use default MPI fucntions
+ // irregular case use default MPI functions
if (scount * sextent != rcount * rextent) {
XBT_WARN("MPI_allgather_NTSLR use default MPI_allgather.");
allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
- Rest of the steps:
update recv_data_from according to offset, and
exchange two blocks with appropriate neighbor.
- the send location becomes previous receve location.
+ the send location becomes previous receive location.
*/
tmprecv = (char*)rbuf + neighbor[0] * rcount * rext;
tmpsend = (char*)rbuf + rank * rcount * rext;
- Rest of the steps:
update recv_data_from according to offset, and
exchange two blocks with appropriate neighbor.
- the send location becomes previous receve location.
+ the send location becomes previous receive location.
Note, we need to create indexed datatype to send and receive these
blocks properly.
*/
}
}
- /* Broadcasting the mesage from leader to the rest */
+ /* Broadcasting the message from leader to the rest */
/* Note: shared memory broadcast could improve the performance */
mpi_errno = colls::bcast(recvbuf, count, datatype, 0, shmem_comm);
* [02b] [12b] [22b]
*
* COMPUTATION PHASE 0 (a)
- * Step 0: rank r sends block ra to rank (r+1) and receives bloc (r-1)a
+ * Step 0: rank r sends block ra to rank (r+1) and receives block (r-1)a
* from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [20a]
* [22a+02a] [12a] [22a]
* [02b] [12b] [22b]
*
- * Step 1: rank r sends block (r-1)a to rank (r+1) and receives bloc
+ * Step 1: rank r sends block (r-1)a to rank (r+1) and receives block
* (r-2)a from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [00a+10a+20a]
* [02b] [12b] [22b]
*
* COMPUTATION PHASE 1 (b)
- * Step 0: rank r sends block rb to rank (r+1) and receives bloc (r-1)b
+ * Step 0: rank r sends block rb to rank (r+1) and receives block (r-1)b
* from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [20a]
* [22a+02a] [12a] [22a]
* [22b+02b] [12b] [22b]
*
- * Step 1: rank r sends block (r-1)b to rank (r+1) and receives bloc
+ * Step 1: rank r sends block (r-1)b to rank (r+1) and receives block
* (r-2)b from rank (r-1) [with wraparound].
* # 0 1 2
* [00a] [00a+10a] [00a+10a+20a]
*/
/*
-This fucntion performs all-reduce operation as follow. ** in a pipeline fashion **
+This function performs the all-reduce operation as follows. ** in a pipeline fashion **
1) binomial_tree reduce inside each SMP node
2) binomial_tree reduce intra-communication between root of each SMP node
3) binomial_tree bcast intra-communication between root of each SMP node
//#include <star-reduction.c>
/*
-This fucntion performs all-reduce operation as follow.
+This function performs the all-reduce operation as follows.
1) binomial_tree reduce inside each SMP node
2) binomial_tree reduce intra-communication between root of each SMP node
3) binomial_tree bcast intra-communication between root of each SMP node
//#include <star-reduction.c>
/*
-This fucntion performs all-reduce operation as follow.
+This function performs the all-reduce operation as follows.
1) binomial_tree reduce inside each SMP node
2) Recursive doubling intra-communication between root of each SMP node
3) binomial_tree bcast inside each SMP node
//#include <star-reduction.c>
/*
-This fucntion performs all-reduce operation as follow.
+This function performs the all-reduce operation as follows.
1) binomial_tree reduce inside each SMP node
2) reduce-scatter -inter between root of each SMP node
3) allgather - inter between root of each SMP node
/*
-This fucntion performs all-reduce operation as follow.
+This function performs the all-reduce operation as follows.
1) binomial_tree reduce inside each SMP node
2) reduce-scatter -inter between root of each SMP node
3) allgather - inter between root of each SMP node
#include "../colls_private.hpp"
/*
-This fucntion performs all-reduce operation as follow.
+This function performs the all-reduce operation as follows.
1) binomial_tree reduce inside each SMP node
2) reduce-scatter -inter between root of each SMP node
3) allgather - inter between root of each SMP node
#include "../colls_private.hpp"
/*
- * Barrier is ment to be a synchronous operation, as some BTLs can mark
+ * Barrier is meant to be a synchronous operation, as some BTLs can mark
* a request done before its passed to the NIC and progress might not be made
* elsewhere we cannot allow a process to exit the barrier until its last
* [round of] sends are completed.
int segindex, i, lr, pair;
int segcount[2]; /* Number ompi_request_wait_allof elements sent with each segment */
uint32_t counts[2];
- int num_segments[2]; /* Number of segmenets */
+ int num_segments[2]; /* Number of segments */
int sendcount[2]; /* the same like segcount, except for the last segment */
size_t realsegsize[2];
char *tmpbuf[2];
Request::send(tmpbuf[lr], segcount[lr], datatype, tree->tree_next[i], COLL_TAG_BCAST, comm);
} /* end of for each child */
- /* upate the base request */
+ /* update the base request */
base_req = new_req;
/* go to the next buffer (ie. the one corresponding to the next recv) */
tmpbuf[lr] += realsegsize[lr];
}
// This process is responsible for all processes that have bits
- // set from the LSB upto (but not including) mask. Because of
+ // set from the LSB up to (but not including) mask. Because of
// the "not including", we start by shifting mask back down
// one.
}
/* This process is responsible for all processes that have bits
- set from the LSB upto (but not including) mask. Because of
+ set from the LSB up to (but not including) mask. Because of
the "not including", we start by shifting mask back down
one. */
} else if (!(vrank % 2)) {
/* other non-leaf nodes, allocate temp buffer for data received from
* children, the most we need is half of the total data elements due
- * to the property of binimoal tree */
+ * to the property of binomial tree */
tempbuf = smpi_get_tmp_sendbuffer(strue_extent + (scount * size - 1) * sextent);
if (NULL == tempbuf) {
err = MPI_ERR_OTHER;
*/
else {
- /* If the number of segments is less than a maximum number of oustanding
+ /* If the number of segments is less than a maximum number of outstanding
requests or there is no limit on the maximum number of outstanding
requests, we send data to the parent using blocking send */
if ((0 == max_outstanding_reqs) ||
otherwise the new protocol is used (see variable Ldb).
3) These lines show the bandwidth (= buffer length / execution time)
for both protocols.
- 4) This line shows that the limit is choosen well if the ratio is
- between 0.95 (loosing 5% for buffer length near and >=limit)
+ 4) This line shows that the limit is chosen well if the ratio is
+ between 0.95 (losing 5% for buffer length near and >=limit)
and 1.10 (not gaining 10% for buffer length near and <limit).
5) This line shows that the new protocol is 2..7 times faster
for long counts.
ActorExt& operator=(const ActorExt&) = delete;
~ActorExt();
void finalize();
- int finalized();
- int initializing();
- int initialized();
+ int finalized() const;
+ int initializing() const;
+ int initialized() const;
void mark_as_initialized();
void set_replaying(bool value);
- bool replaying();
+ bool replaying() const;
void set_tracing_category(const std::string& category) { tracing_category_ = category; }
- const std::string& get_tracing_category() { return tracing_category_; }
+ const std::string& get_tracing_category() const { return tracing_category_; }
smpi_trace_call_location_t* call_location();
void set_privatized_region(smpi_privatization_region_t region);
- smpi_privatization_region_t privatized_region();
- s4u::Mailbox* mailbox() { return mailbox_; }
- s4u::Mailbox* mailbox_small() { return mailbox_small_; }
- s4u::MutexPtr mailboxes_mutex();
+ smpi_privatization_region_t privatized_region() const;
+ s4u::Mailbox* mailbox() const { return mailbox_; }
+ s4u::Mailbox* mailbox_small() const { return mailbox_small_; }
+ s4u::MutexPtr mailboxes_mutex() const;
#if HAVE_PAPI
- int papi_event_set();
+ int papi_event_set() const;
papi_counter_t& papi_counters();
#endif
xbt_os_timer_t timer();
void simulated_start();
- double simulated_elapsed();
- MPI_Comm comm_world();
+ double simulated_elapsed() const;
+ MPI_Comm comm_world() const;
MPI_Comm comm_self();
MPI_Comm comm_intra();
void set_comm_intra(MPI_Comm comm);
void set_sampling(int s);
- int sampling();
+ int sampling() const;
static void init();
s4u::ActorPtr get_actor();
- int get_optind();
+ int get_optind() const;
void set_optind(int optind);
MPI_Info info_env();
void bsend_buffer(void** buf, int* size);
int dup(MPI_Comm* newcomm);
int dup_with_info(MPI_Info info, MPI_Comm* newcomm);
MPI_Group group();
- MPI_Topology topo() { return topo_; }
+ MPI_Topology topo() const { return topo_; }
void set_topo(MPI_Topology topo){topo_=topo;}
- int size();
- int rank();
- int id();
- void get_name(char* name, int* len);
+ int size() const;
+ int rank() const;
+ int id() const;
+ void get_name(char* name, int* len) const;
void set_name(const char* name);
MPI_Info info();
void set_info( MPI_Info info);
void set_errhandler( MPI_Errhandler errhandler);
void set_leaders_comm(MPI_Comm leaders);
void set_intra_comm(MPI_Comm leaders) { intra_comm_ = leaders; };
- int* get_non_uniform_map();
- int* get_leaders_map();
- MPI_Comm get_leaders_comm();
- MPI_Comm get_intra_comm();
+ int* get_non_uniform_map() const;
+ int* get_leaders_map() const;
+ MPI_Comm get_leaders_comm() const;
+ MPI_Comm get_intra_comm() const;
MPI_Comm find_intra_comm(int* leader);
- bool is_uniform();
- bool is_blocked();
- bool is_smp_comm();
+ bool is_uniform() const;
+ bool is_blocked() const;
+ bool is_smp_comm() const;
MPI_Comm split(int color, int key);
void cleanup_smp();
void ref();
void add_rma_win(MPI_Win win);
void remove_rma_win(MPI_Win win);
- void finish_rma_calls();
+ void finish_rma_calls() const;
MPI_Comm split_type(int type, int key, const Info* info);
};
XBT_PRIVATE void smpi_check_options();
/********************************** Configuration of SMPI **************************************/
-extern XBT_PRIVATE simgrid::config::Flag<double> _smpi_cfg_host_speed;
+extern XBT_PRIVATE simgrid::config::Flag<std::string> _smpi_cfg_host_speed_string;
extern XBT_PRIVATE simgrid::config::Flag<bool> _smpi_cfg_simulate_computation;
extern XBT_PRIVATE simgrid::config::Flag<std::string> _smpi_cfg_shared_malloc_string;
extern XBT_PRIVATE simgrid::config::Flag<double> _smpi_cfg_cpu_thresh;
#include "smpi_f2c.hpp"
#include "smpi_keyvals.hpp"
#include <string>
+#include <vector>
constexpr unsigned DT_FLAG_DESTROYED = 0x0001; /**< user destroyed but some other layers still have a reference */
constexpr unsigned DT_FLAG_COMMITED = 0x0002; /**< ready to be used for a send/recv operation */
constexpr unsigned DT_FLAG_NO_GAPS = 0x0080; /**< no gaps around the datatype */
constexpr unsigned DT_FLAG_DATA = 0x0100; /**< data or control structure */
constexpr unsigned DT_FLAG_ONE_SIDED = 0x0200; /**< datatype can be used for one sided operations */
-constexpr unsigned DT_FLAG_UNAVAILABLE = 0x0400; /**< datatypes unavailable on the build (OS or compiler dependant) */
+constexpr unsigned DT_FLAG_UNAVAILABLE = 0x0400; /**< datatypes unavailable on the build (OS or compiler dependent) */
constexpr unsigned DT_FLAG_DERIVED = 0x0800; /**< is the datatype derived ? */
/*
* We should make the difference here between the predefined contiguous and non contiguous
class Datatype_contents {
public:
int combiner_;
- int number_of_integers_;
- int* integers_;
- int number_of_addresses_;
- MPI_Aint* addresses_;
- int number_of_datatypes_;
- MPI_Datatype* datatypes_;
+ std::vector<int> integers_;
+ std::vector<MPI_Aint> addresses_;
+ std::vector<MPI_Datatype> datatypes_;
Datatype_contents(int combiner,
int number_of_integers, const int* integers,
int number_of_addresses, const MPI_Aint* addresses,
int number_of_datatypes, const MPI_Datatype* datatypes);
- Datatype_contents(const Datatype_contents&) = delete;
- Datatype_contents& operator=(const Datatype_contents&) = delete;
- ~Datatype_contents();
};
class Datatype : public F2C, public Keyval{
Datatype& operator=(const Datatype&) = delete;
virtual ~Datatype();
- char* name() { return name_; }
- size_t size() { return size_; }
- MPI_Aint lb() { return lb_; }
- MPI_Aint ub() { return ub_; }
- int flags() { return flags_; }
- int refcount() { return refcount_; }
+ char* name() const { return name_; }
+ size_t size() const { return size_; }
+ MPI_Aint lb() const { return lb_; }
+ MPI_Aint ub() const { return ub_; }
+ int flags() const { return flags_; }
+ int refcount() const { return refcount_; }
void ref();
static void unref(MPI_Datatype datatype);
void commit();
int copy_attrs(Datatype* datatype);
- bool is_valid();
- bool is_basic();
+ bool is_valid() const;
+ bool is_basic() const;
static const char* encode(const Datatype* dt) { return dt->id.c_str(); }
static MPI_Datatype decode(const std::string& datatype_id);
- bool is_replayable();
+ bool is_replayable() const;
void addflag(int flag);
- int extent(MPI_Aint* lb, MPI_Aint* extent);
- MPI_Aint get_extent() { return ub_ - lb_; };
- void get_name(char* name, int* length);
+ int extent(MPI_Aint* lb, MPI_Aint* extent) const;
+ MPI_Aint get_extent() const { return ub_ - lb_; };
+ void get_name(char* name, int* length) const;
void set_name(const char* name);
static int copy(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
MPI_Datatype recvtype);
static int keyval_free(int* keyval);
int pack(const void* inbuf, int incount, void* outbuf, int outcount, int* position, const Comm* comm);
int unpack(const void* inbuf, int insize, int* position, void* outbuf, int outcount, const Comm* comm);
- int get_contents(int max_integers, int max_addresses,
- int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
- MPI_Datatype *array_of_datatypes);
- int get_envelope(int* num_integers, int* num_addresses,
- int* num_datatypes, int* combiner);
+ int get_contents(int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
+ MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes) const;
+ int get_envelope(int* num_integers, int* num_addresses, int* num_datatypes, int* combiner) const;
static int create_contiguous(int count, MPI_Datatype old_type, MPI_Aint lb, MPI_Datatype* new_type);
static int create_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type);
static int create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type);
MPI_Aint block_stride_;
MPI_Datatype old_type_;
-public:
Type_Hvector(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, int block_length, MPI_Aint block_stride,
MPI_Datatype old_type);
Type_Hvector(const Type_Hvector&) = delete;
MPI_Aint* block_indices_;
MPI_Datatype old_type_;
-public:
Type_Hindexed(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, const int* block_lengths,
const MPI_Aint* block_indices, MPI_Datatype old_type);
Type_Hindexed(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, const int* block_lengths, const int* block_indices,
explicit Errhandler(MPI_File_errhandler_fn *function):file_func_(function){};
explicit Errhandler(MPI_Win_errhandler_fn *function):win_func_(function){};
void ref();
- void call(MPI_Comm comm, int errorcode);
- void call(MPI_Win win, int errorcode);
- void call(MPI_File file, int errorcode);
+ void call(MPI_Comm comm, int errorcode) const;
+ void call(MPI_Win win, int errorcode) const;
+ void call(MPI_File file, int errorcode) const;
static void unref(Errhandler* errhandler);
static Errhandler* f2c(int id);
};
-/* Handle Fortan - C conversion for MPI Types*/
+/* Handle Fortran - C conversion for MPI Types*/
/* Copyright (c) 2010-2020. The SimGrid Team.
* All rights reserved. */
File(const File&) = delete;
File& operator=(const File&) = delete;
~File();
- int size();
- int get_position(MPI_Offset* offset);
- int get_position_shared(MPI_Offset* offset);
- int flags();
- MPI_Comm comm();
+ int size() const;
+ int get_position(MPI_Offset* offset) const;
+ int get_position_shared(MPI_Offset* offset) const;
+ int flags() const;
+ MPI_Comm comm() const;
int sync();
int seek(MPI_Offset offset, int whence);
int seek_shared(MPI_Offset offset, int whence);
int set_view(MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char* datarep, const Info* info);
- int get_view(MPI_Offset *disp, MPI_Datatype *etype, MPI_Datatype *filetype, char *datarep);
+ int get_view(MPI_Offset* disp, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const;
MPI_Info info();
void set_info( MPI_Info info);
- static int read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
- static int read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
- static int read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
- static int write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
- static int write_shared(MPI_File fh, const void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
- static int write_ordered(MPI_File fh, const void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
- template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)> int op_all(void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int read(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
+ static int read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
+ static int read_ordered(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
+ static int write(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
+ static int write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status);
+ static int write_ordered(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status);
+ template <int (*T)(MPI_File, void*, int, const Datatype*, MPI_Status*)>
+ int op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status);
static int close(MPI_File *fh);
static int del(const char* filename, const Info* info);
MPI_Errhandler errhandler();
static File* f2c(int id);
};
- /* Read_all, Write_all : loosely based on */
- /* @article{Thakur:1996:ETM:245875.245879,*/
- /* author = {Thakur, Rajeev and Choudhary, Alok},*/
- /* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/
- /* journal = {Sci. Program.},*/
- /* issue_date = {Winter 1996},*/
- /* pages = {301--317},*/
- /* }*/
- template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
- int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
- //get min and max offsets from everyone.
- int size = comm_->size();
- int rank = comm_-> rank();
- MPI_Offset min_offset = file_->tell();
- MPI_Offset max_offset = min_offset + count * datatype->get_extent();//cheating, as we don't care about exact data location, we can skip extent
- MPI_Offset* min_offsets = new MPI_Offset[size];
- MPI_Offset* max_offsets = new MPI_Offset[size];
- simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
- simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
- MPI_Offset min=min_offset;
- MPI_Offset max=max_offset;
- MPI_Offset tot= 0;
- int empty=1;
- for(int i=0;i<size;i++){
- if(min_offsets[i]!=max_offsets[i])
- empty=0;
- tot+=(max_offsets[i]-min_offsets[i]);
- if(min_offsets[i]<min)
- min=min_offsets[i];
- if(max_offsets[i]>max)
- max=max_offsets[i];
- }
-
- XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max);
- if(empty==1){
- delete[] min_offsets;
- delete[] max_offsets;
- status->count=0;
- return MPI_SUCCESS;
- }
- MPI_Offset total = max-min;
- if(total==tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){
- delete[] min_offsets;
- delete[] max_offsets;
- //contiguous. Just have each proc perform its read
- if(status != MPI_STATUS_IGNORE)
- status->count=count * datatype->size();
- return T(this,buf,count,datatype, status);
- }
+/* Read_all, Write_all : loosely based on */
+/* @article{Thakur:1996:ETM:245875.245879,*/
+/* author = {Thakur, Rajeev and Choudhary, Alok},*/
+/* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/
+/* journal = {Sci. Program.},*/
+/* issue_date = {Winter 1996},*/
+/* pages = {301--317},*/
+/* }*/
+template <int (*T)(MPI_File, void*, int, const Datatype*, MPI_Status*)>
+int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status)
+{
+ // get min and max offsets from everyone.
+ int size = comm_->size();
+ int rank = comm_->rank();
+ MPI_Offset min_offset = file_->tell();
+ MPI_Offset max_offset =
+ min_offset +
+ count * datatype->get_extent(); // cheating, as we don't care about exact data location, we can skip extent
+ MPI_Offset* min_offsets = new MPI_Offset[size];
+ MPI_Offset* max_offsets = new MPI_Offset[size];
+ simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
+ simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
+ MPI_Offset min = min_offset;
+ MPI_Offset max = max_offset;
+ MPI_Offset tot = 0;
+ int empty = 1;
+ for (int i = 0; i < size; i++) {
+ if (min_offsets[i] != max_offsets[i])
+ empty = 0;
+ tot += (max_offsets[i] - min_offsets[i]);
+ if (min_offsets[i] < min)
+ min = min_offsets[i];
+ if (max_offsets[i] > max)
+ max = max_offsets[i];
+ }
- //Interleaved case : How much do I need to read, and whom to send it ?
- MPI_Offset my_chunk_start=(max-min+1)/size*rank;
- MPI_Offset my_chunk_end=((max-min+1)/size*(rank+1));
- XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
- int* send_sizes = new int[size];
- int* recv_sizes = new int[size];
- int* send_disps = new int[size];
- int* recv_disps = new int[size];
- int total_sent=0;
- for(int i=0;i<size;i++){
- send_sizes[i]=0;
- send_disps[i]=0;//cheat to avoid issues when send>recv as we use recv buffer
- if((my_chunk_start>=min_offsets[i] && my_chunk_start < max_offsets[i])||
- ((my_chunk_end<=max_offsets[i]) && my_chunk_end> min_offsets[i])){
- send_sizes[i]=(std::min(max_offsets[i]-1, my_chunk_end-1)-std::max(min_offsets[i], my_chunk_start));
- // store min and max offset to actually read
- min_offset=std::min(min_offset, min_offsets[i]);
- total_sent+=send_sizes[i];
- XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i);
- }
+ XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min,
+ max);
+ if (empty == 1) {
+ delete[] min_offsets;
+ delete[] max_offsets;
+ status->count = 0;
+ return MPI_SUCCESS;
+ }
+ MPI_Offset total = max - min;
+ if (total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)) {
+ delete[] min_offsets;
+ delete[] max_offsets;
+ // contiguous. Just have each proc perform its read
+ if (status != MPI_STATUS_IGNORE)
+ status->count = count * datatype->size();
+ return T(this, buf, count, datatype, status);
+ }
+
+ // Interleaved case : How much do I need to read, and whom to send it ?
+ MPI_Offset my_chunk_start = (max - min + 1) / size * rank;
+ MPI_Offset my_chunk_end = ((max - min + 1) / size * (rank + 1));
+ XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
+ int* send_sizes = new int[size];
+ int* recv_sizes = new int[size];
+ int* send_disps = new int[size];
+ int* recv_disps = new int[size];
+ int total_sent = 0;
+ for (int i = 0; i < size; i++) {
+ send_sizes[i] = 0;
+ send_disps[i] = 0; // cheat to avoid issues when send>recv as we use recv buffer
+ if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
+ ((my_chunk_end <= max_offsets[i]) && my_chunk_end > min_offsets[i])) {
+ send_sizes[i] = (std::min(max_offsets[i] - 1, my_chunk_end - 1) - std::max(min_offsets[i], my_chunk_start));
+ // store min and max offset to actually read
+ min_offset = std::min(min_offset, min_offsets[i]);
+ total_sent += send_sizes[i];
+ XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i);
}
- min_offset=std::max(min_offset, my_chunk_start);
+ }
+ min_offset = std::max(min_offset, my_chunk_start);
- //merge the ranges of every process
- std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
- for(int i=0; i<size; ++i)
- ranges.push_back(std::make_pair(min_offsets[i],max_offsets[i]));
- std::sort(ranges.begin(), ranges.end());
- std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
- chunks.push_back(ranges[0]);
+ // merge the ranges of every process
+ std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
+ for (int i = 0; i < size; ++i)
+ ranges.push_back(std::make_pair(min_offsets[i], max_offsets[i]));
+ std::sort(ranges.begin(), ranges.end());
+ std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
+ chunks.push_back(ranges[0]);
- unsigned int nchunks=0;
- unsigned int i=1;
- while(i < ranges.size()){
- if(ranges[i].second>chunks[nchunks].second){
- // else range included - ignore
- if(ranges[i].first>chunks[nchunks].second){
- //new disjoint range
- chunks.push_back(ranges[i]);
- nchunks++;
- } else {
- //merge ranges
- chunks[nchunks].second=ranges[i].second;
- }
+ unsigned int nchunks = 0;
+ unsigned int i = 1;
+ while (i < ranges.size()) {
+ if (ranges[i].second > chunks[nchunks].second) {
+ // else range included - ignore
+ if (ranges[i].first > chunks[nchunks].second) {
+ // new disjoint range
+ chunks.push_back(ranges[i]);
+ nchunks++;
+ } else {
+ // merge ranges
+ chunks[nchunks].second = ranges[i].second;
}
- i++;
- }
- //what do I need to read ?
- MPI_Offset totreads=0;
- for(i=0; i<chunks.size();i++){
- if(chunks[i].second < my_chunk_start)
- continue;
- else if (chunks[i].first > my_chunk_end)
- continue;
- else
- totreads += (std::min(chunks[i].second, my_chunk_end-1)-std::max(chunks[i].first, my_chunk_start));
}
- XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads);
+ i++;
+ }
+ // what do I need to read ?
+ MPI_Offset totreads = 0;
+ for (i = 0; i < chunks.size(); i++) {
+ if (chunks[i].second < my_chunk_start)
+ continue;
+ else if (chunks[i].first > my_chunk_end)
+ continue;
+ else
+ totreads += (std::min(chunks[i].second, my_chunk_end - 1) - std::max(chunks[i].first, my_chunk_start));
+ }
+ XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads);
- unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent);
+ unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent);
- if(totreads>0){
- seek(min_offset, MPI_SEEK_SET);
- T(this,sendbuf,totreads/datatype->size(),datatype, status);
- }
- simgrid::smpi::colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
- int total_recv=0;
- for(int i=0;i<size;i++){
- recv_disps[i]=total_recv;
- total_recv+=recv_sizes[i];
- }
- //Set buf value to avoid copying dumb data
- simgrid::smpi::colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE, buf, recv_sizes, recv_disps, MPI_BYTE,
- comm_);
- if(status!=MPI_STATUS_IGNORE)
- status->count=count * datatype->size();
- smpi_free_tmp_buffer(sendbuf);
- delete[] send_sizes;
- delete[] recv_sizes;
- delete[] send_disps;
- delete[] recv_disps;
- delete[] min_offsets;
- delete[] max_offsets;
- return MPI_SUCCESS;
+ if (totreads > 0) {
+ seek(min_offset, MPI_SEEK_SET);
+ T(this, sendbuf, totreads / datatype->size(), datatype, status);
+ }
+ simgrid::smpi::colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
+ int total_recv = 0;
+ for (int i = 0; i < size; i++) {
+ recv_disps[i] = total_recv;
+ total_recv += recv_sizes[i];
}
+ // Set buf value to avoid copying dumb data
+ simgrid::smpi::colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE, buf, recv_sizes, recv_disps, MPI_BYTE,
+ comm_);
+ if (status != MPI_STATUS_IGNORE)
+ status->count = count * datatype->size();
+ smpi_free_tmp_buffer(sendbuf);
+ delete[] send_sizes;
+ delete[] recv_sizes;
+ delete[] send_disps;
+ delete[] recv_disps;
+ delete[] min_offsets;
+ delete[] max_offsets;
+ return MPI_SUCCESS;
+}
}
}
public:
Group() = default;
explicit Group(int size) : size_(size), rank_to_actor_map_(size, nullptr), index_to_rank_map_(size, MPI_UNDEFINED) {}
- explicit Group(Group* origin);
+ explicit Group(const Group* origin);
void set_mapping(s4u::Actor* actor, int rank);
int rank(int index);
int rank(s4u::Actor* process);
void ref();
static void unref(MPI_Group group);
- int size() { return size_; }
+ int size() const { return size_; }
int compare(MPI_Group group2);
int incl(int n, const int* ranks, MPI_Group* newgroup);
int excl(int n, const int* ranks, MPI_Group* newgroup);
void ref();
static void unref(MPI_Info info);
void set(const char* key, const char* value) { map_[key] = value; }
- int get(const char* key, int valuelen, char* value, int* flag);
+ int get(const char* key, int valuelen, char* value, int* flag) const;
int remove(const char* key);
- int get_nkeys(int* nkeys);
- int get_nthkey(int n, char* key);
- int get_valuelen(const char* key, int* valuelen, int* flag);
+ int get_nkeys(int* nkeys) const;
+ int get_nthkey(int n, char* key) const;
+ int get_valuelen(const char* key, int* valuelen, int* flag) const;
static Info* f2c(int id);
};
public:
Op(MPI_User_function* function, bool commutative, bool predefined=false) : func_(function), is_commutative_(commutative), predefined_(predefined) {}
- bool is_commutative() { return is_commutative_; }
- bool is_fortran_op() { return is_fortran_op_; }
+ bool is_commutative() const { return is_commutative_; }
+ bool is_fortran_op() const { return is_fortran_op_; }
// tell that we were created from fortran, so we need to translate the type to fortran when called
void set_fortran_op() { is_fortran_op_ = true; }
- void apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype);
+ void apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype) const;
static Op* f2c(int id);
void ref();
static void unref(MPI_Op* op);
public:
Request() = default;
Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op = MPI_REPLACE);
- MPI_Comm comm() { return comm_; }
- size_t size() { return size_; }
- size_t real_size() { return real_size_; }
- int src() { return src_; }
- int dst() { return dst_; }
- int tag() { return tag_; }
- int flags() { return flags_; }
- bool detached() { return detached_; }
- MPI_Datatype type() { return old_type_; }
- void print_request(const char* message);
+ MPI_Comm comm() const { return comm_; }
+ size_t size() const { return size_; }
+ size_t real_size() const { return real_size_; }
+ int src() const { return src_; }
+ int dst() const { return dst_; }
+ int tag() const { return tag_; }
+ int flags() const { return flags_; }
+ bool detached() const { return detached_; }
+ MPI_Datatype type() const { return old_type_; }
+ void print_request(const char* message) const;
void start();
void cancel();
void init_buffer(int count);
void ref();
void set_nbc_requests(MPI_Request* reqs, int size);
- int get_nbc_requests_size();
- MPI_Request* get_nbc_requests();
+ int get_nbc_requests_size() const;
+ MPI_Request* get_nbc_requests() const;
static void finish_wait(MPI_Request* request, MPI_Status* status);
static void unref(MPI_Request* request);
static int wait(MPI_Request* req, MPI_Status* status);
static int cancelled (const MPI_Status * status);
static void set_cancelled (MPI_Status * status, int flag);
static void set_elements(MPI_Status* status, const Datatype*, int count);
-static int get_count(const MPI_Status * status, MPI_Datatype datatype);
+static int get_count(const MPI_Status* status, const Datatype* datatype);
};
int get(int maxdims, int* dims, int* periods, int* coords);
int rank(const int* coords, int* rank);
int shift(int direction, int disp, int* rank_source, int* rank_dest);
- int dim_get(int* ndims);
+ int dim_get(int* ndims) const;
static int Dims_create(int nnodes, int ndims, int dims[]);
};
~Win();
int attach (void *base, MPI_Aint size);
int detach (const void *base);
- void get_name( char* name, int* length);
+ void get_name(char* name, int* length) const;
void get_group( MPI_Group* group);
void set_name(const char* name);
- int rank();
- int dynamic();
+ int rank() const;
+ int dynamic() const;
int start(MPI_Group group, int assert);
int post(MPI_Group group, int assert);
int complete();
MPI_Info info();
void set_info( MPI_Info info);
int wait();
- MPI_Aint size();
- void* base();
- int disp_unit();
+ MPI_Aint size() const;
+ void* base() const;
+ int disp_unit() const;
int fence(int assert);
int put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request=nullptr);
int flush_local_all();
int finish_comms();
int finish_comms(int rank);
- int shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr);
+ int shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const;
MPI_Errhandler errhandler();
void set_errhandler( MPI_Errhandler errhandler);
};
}
/** @brief Check if a process is finalized */
-int ActorExt::finalized()
+int ActorExt::finalized() const
{
return (state_ == SmpiProcessState::FINALIZED);
}
/** @brief Check if a process is partially initialized already */
-int ActorExt::initializing()
+int ActorExt::initializing() const
{
return (state_ == SmpiProcessState::INITIALIZING);
}
/** @brief Check if a process is initialized */
-int ActorExt::initialized()
+int ActorExt::initialized() const
{
// TODO cheinrich: Check if we still need this. This should be a global condition, not for a
// single process ... ?
replaying_ = value;
}
-bool ActorExt::replaying()
+bool ActorExt::replaying() const
{
return replaying_;
}
privatized_region_ = region;
}
-smpi_privatization_region_t ActorExt::privatized_region()
+smpi_privatization_region_t ActorExt::privatized_region() const
{
return privatized_region_;
}
-MPI_Comm ActorExt::comm_world()
+MPI_Comm ActorExt::comm_world() const
{
return comm_world_ == nullptr ? MPI_COMM_NULL : *comm_world_;
}
-s4u::MutexPtr ActorExt::mailboxes_mutex()
+s4u::MutexPtr ActorExt::mailboxes_mutex() const
{
return mailboxes_mutex_;
}
#if HAVE_PAPI
-int ActorExt::papi_event_set()
+int ActorExt::papi_event_set() const
{
return papi_event_set_;
}
simulated_ = SIMIX_get_clock();
}
-double ActorExt::simulated_elapsed()
+double ActorExt::simulated_elapsed() const
{
return SIMIX_get_clock() - simulated_;
}
sampling_ = s;
}
-int ActorExt::sampling()
+int ActorExt::sampling() const
{
return sampling_;
}
XBT_DEBUG("<%ld> SMPI process has been initialized: %p", ext->actor_->get_pid(), ext->actor_);
}
-int ActorExt::get_optind()
+int ActorExt::get_optind() const
{
return optind_;
}
double smpi_adjust_comp_speed(){
double speedup=1;
if (smpi_cfg_comp_adjustment_file()[0] != '\0') {
- smpi_trace_call_location_t* loc = smpi_process()->call_location();
+ const smpi_trace_call_location_t* loc = smpi_process()->call_location();
std::string key = loc->get_composed_key();
std::unordered_map<std::string, double>::const_iterator it = location2speedup.find(key);
if (it != location2speedup.end()) {
unsigned long long sec = static_cast<unsigned long long>(now);
unsigned long long pre = (now - sec) * smpi_rastro_resolution();
smpi_bench_begin();
- return static_cast<unsigned long long>(sec) * smpi_rastro_resolution() + pre;
+ return sec * smpi_rastro_resolution() + pre;
}
/* ****************************** Functions related to the SMPI_SAMPLE_ macros ************************************/
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "mc/mc.h"
+#include "smpi_config.hpp"
#include "include/xbt/config.hpp"
+#include "mc/mc.h"
#include "private.hpp"
#include "smpi_coll.hpp"
-#include "smpi_config.hpp"
#include "src/simix/smx_private.hpp"
+#include "xbt/parse_units.hpp"
+
#include <cfloat> /* DBL_MAX */
#include <boost/algorithm/string.hpp> /* trim */
#include <boost/tokenizer.hpp>
bool _smpi_options_initialized=false;
SharedMallocType _smpi_cfg_shared_malloc = SharedMallocType::GLOBAL;
SmpiPrivStrategies _smpi_cfg_privatization = SmpiPrivStrategies::NONE;
+double _smpi_cfg_host_speed;
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_config, smpi, "Logging specific to SMPI (config)");
-simgrid::config::Flag<double> _smpi_cfg_host_speed{
- "smpi/host-speed", "Speed of the host running the simulation (in flop/s). "
- "Used to bench the operations.", 20000.0,
- [](const double& val) { xbt_assert(val > 0.0, "Invalid value (%f) for 'smpi/host-speed': it must be positive.", val); }};
+simgrid::config::Flag<std::string> _smpi_cfg_host_speed_string{
+ "smpi/host-speed",
+ "Speed of the host running the simulation (in flop/s). "
+ "Used to bench the operations.",
+ "20000f", [](const std::string& str) {
+ _smpi_cfg_host_speed = xbt_parse_get_speed("smpi/host-speed", 1, str.c_str(), "option", "smpi/host-speed");
+ xbt_assert(_smpi_cfg_host_speed > 0.0, "Invalid value (%s) for 'smpi/host-speed': it must be positive.",
+ _smpi_cfg_host_speed_string.get().c_str());
+ }};
simgrid::config::Flag<bool> _smpi_cfg_simulate_computation{
"smpi/simulate-computation", "Whether the computational part of the simulated application should be simulated.",
Instance(int max_no_processes, MPI_Comm comm) : size_(max_no_processes), comm_world_(comm)
{
MPI_Group group = new simgrid::smpi::Group(size_);
- comm_world_ = new simgrid::smpi::Comm(group, nullptr, 0, -1);
+ comm_world_ = new simgrid::smpi::Comm(group, nullptr, false, -1);
// FIXME : using MPI_Attr_put with MPI_UNIVERSE_SIZE is forbidden and we make it a no-op (which triggers a warning
// as MPI_ERR_ARG is returned). Directly calling Comm::attr_put breaks for now, as MPI_UNIVERSE_SIZE,is <0
// instance.comm_world->attr_put<simgrid::smpi::Comm>(MPI_UNIVERSE_SIZE, reinterpret_cast<void*>(instance.size));
(static_cast<char*>(buff) < smpi_data_exe_start + smpi_data_exe_size)) {
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
smpi_switch_data_segment(comm->src_actor_->iface());
- tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
+ tmpbuff = xbt_malloc(buff_size);
memcpy_private(tmpbuff, buff, private_blocks);
}
#if HAVE_PRIVATIZATION
// FIXME, cross-process support (mmap across process when necessary)
XBT_DEBUG("Switching data frame to the one of process %ld", actor->get_pid());
- simgrid::smpi::ActorExt* process = smpi_process_remote(actor);
+ const simgrid::smpi::ActorExt* process = smpi_process_remote(actor);
int current = process->privatized_region()->file_descriptor;
const void* tmp = mmap(TOPAGE(smpi_data_exe_start), smpi_data_exe_size, PROT_RW, MAP_FIXED | MAP_SHARED, current, 0);
if (tmp != TOPAGE(smpi_data_exe_start))
public:
RequestStorage() {}
- int size()
- {
- return store.size();
- }
+ int size() const { return store.size(); }
req_storage_t& get_store()
{
return store;
}
- void get_requests(std::vector<MPI_Request>& vec)
+ void get_requests(std::vector<MPI_Request>& vec) const
{
for (auto const& pair : store) {
auto& req = pair.second;
return (it == store.end()) ? MPI_REQUEST_NULL : it->second;
}
- void remove(MPI_Request req)
+ void remove(const Request* req)
{
if (req == MPI_REQUEST_NULL) return;
arg_size = status.count;
}
- bool is_recv = false; // Help analyzers understanding that status is not used unintialized
+ bool is_recv = false; // Help analyzers understanding that status is not used uninitialized
if (get_name() == "recv") {
is_recv = true;
Request::recv(nullptr, arg_size, args.datatype1, args.partner, args.tag, MPI_COMM_WORLD, &status);
}
// Align functions, from http://stackoverflow.com/questions/4840410/how-to-align-a-pointer-in-c
-#define ALIGN_UP(n, align) (((n) + (align)-1) & -(align))
-#define ALIGN_DOWN(n, align) ((n) & -(align))
+#define ALIGN_UP(n, align) (((int64_t)(n) + (int64_t)(align) - 1) & -(int64_t)(align))
+#define ALIGN_DOWN(n, align) ((int64_t)(n) & -(int64_t)(align))
constexpr unsigned PAGE_SIZE = 0x1000;
constexpr unsigned HUGE_PAGE_SIZE = 1U << 21;
"to allow big allocations.\n",
size >> 20);
if(use_huge_page)
- mem = (void*)ALIGN_UP((int64_t)allocated_ptr, HUGE_PAGE_SIZE);
+ mem = (void*)ALIGN_UP(allocated_ptr, HUGE_PAGE_SIZE);
else
mem = allocated_ptr;
if(i_block < nb_shared_blocks-1)
xbt_assert(stop_offset < shared_block_offsets[2*i_block+2],
"stop_offset (%zu) should be lower than its successor start offset (%zu)", stop_offset, shared_block_offsets[2*i_block+2]);
- size_t start_block_offset = ALIGN_UP((int64_t)start_offset, smpi_shared_malloc_blocksize);
- size_t stop_block_offset = ALIGN_DOWN((int64_t)stop_offset, smpi_shared_malloc_blocksize);
+ size_t start_block_offset = ALIGN_UP(start_offset, smpi_shared_malloc_blocksize);
+ size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
void* pos = (void*)((unsigned long)mem + offset);
"and that the directory you are passing is mounted correctly (mount /path/to/huge -t hugetlbfs -o rw,mode=0777).",
strerror(errno));
}
- size_t low_page_start_offset = ALIGN_UP((int64_t)start_offset, PAGE_SIZE);
- size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN((int64_t)stop_offset, (int64_t)PAGE_SIZE);
+ size_t low_page_start_offset = ALIGN_UP(start_offset, PAGE_SIZE);
+ size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
if(low_page_start_offset < low_page_stop_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
void* pos = (void*)((unsigned long)mem + low_page_start_offset);
}
if(low_page_stop_offset <= stop_block_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
- size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE);
+ size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
if(high_page_stop_offset > stop_block_offset) {
void* pos = (void*)((unsigned long)mem + stop_block_offset);
const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE,
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "smpi_utils.hpp"
+
+#include "src/surf/xml/platf_private.hpp"
#include "xbt/log.h"
+#include "xbt/parse_units.hpp"
#include "xbt/sysdep.h"
#include <boost/tokenizer.hpp>
-#include "src/surf/xml/platf_private.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_utils, smpi, "Logging specific to SMPI (utils)");
+extern std::string surf_parsed_filename;
+extern int surf_parse_lineno;
+
std::vector<s_smpi_factor_t> parse_factor(const std::string& smpi_coef_string)
{
std::vector<s_smpi_factor_t> smpi_factor;
}
} else {
try {
- fact.values.push_back(surf_parse_get_time((*factor_iter).c_str(), "smpi factor", ""));
+ fact.values.push_back(
+ xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, (*factor_iter).c_str(), "smpi factor", ""));
} catch (const std::invalid_argument&) {
throw std::invalid_argument(std::string("Invalid factor value ") + std::to_string(iteration) + " in chunk " +
std::to_string(smpi_factor.size() + 1) + ": " + *factor_iter);
return group_;
}
-int Comm::size()
+int Comm::size() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->size();
return group_->size();
}
-int Comm::rank()
+int Comm::rank() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->rank();
return group_->rank(s4u::Actor::self());
}
-int Comm::id()
+int Comm::id() const
{
return id_;
}
-void Comm::get_name (char* name, int* len)
+void Comm::get_name(char* name, int* len) const
{
if (this == MPI_COMM_UNINITIALIZED){
smpi_process()->comm_world()->get_name(name, len);
leaders_comm_=leaders;
}
-int* Comm::get_non_uniform_map(){
+int* Comm::get_non_uniform_map() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_non_uniform_map();
return non_uniform_map_;
}
-int* Comm::get_leaders_map(){
+int* Comm::get_leaders_map() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_leaders_map();
return leaders_map_;
}
-MPI_Comm Comm::get_leaders_comm(){
+MPI_Comm Comm::get_leaders_comm() const
+{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->get_leaders_comm();
return leaders_comm_;
}
-MPI_Comm Comm::get_intra_comm(){
+MPI_Comm Comm::get_intra_comm() const
+{
if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD)
return smpi_process()->comm_intra();
else return intra_comm_;
}
-bool Comm::is_uniform()
+bool Comm::is_uniform() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_uniform();
return is_uniform_ != 0;
}
-bool Comm::is_blocked()
+bool Comm::is_blocked() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_blocked();
return is_blocked_ != 0;
}
-bool Comm::is_smp_comm()
+bool Comm::is_smp_comm() const
{
if (this == MPI_COMM_UNINITIALIZED)
return smpi_process()->comm_world()->is_smp_comm();
}
}
*leader=min_index;
- return new Comm(group_intra, nullptr, 1);
+ return new Comm(group_intra, nullptr, true);
}
void Comm::init_smp(){
//create leader_communicator
for (i=0; i< leader_group_size;i++)
leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
- leader_comm = new Comm(leaders_group, nullptr,1);
+ leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
this->set_intra_comm(comm_intra);
leaders_group->set_mapping(s4u::Actor::by_pid(leader_list[i]).get(), i);
if(this->get_leaders_comm()==MPI_COMM_NULL){
- leader_comm = new Comm(leaders_group, nullptr,1);
+ leader_comm = new Comm(leaders_group, nullptr, true);
this->set_leaders_comm(leader_comm);
}else{
leader_comm=this->get_leaders_comm();
rma_wins_.remove(win);
}
-void Comm::finish_rma_calls(){
+void Comm::finish_rma_calls() const
+{
for (auto const& it : rma_wins_) {
if(it->rank()==this->rank()){//is it ours (for MPI_COMM_WORLD)?
int finished = it->finish_comms();
#include "src/instr/instr_private.hpp"
#include "src/smpi/include/smpi_actor.hpp"
+#include <algorithm>
+#include <functional>
#include <string>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi, "Logging specific to SMPI (datatype)");
flags_ |= DT_FLAG_COMMITED;
}
-bool Datatype::is_valid(){
+bool Datatype::is_valid() const
+{
return (flags_ & DT_FLAG_COMMITED);
}
-bool Datatype::is_basic()
+bool Datatype::is_basic() const
{
return (flags_ & DT_FLAG_BASIC);
}
-bool Datatype::is_replayable()
+bool Datatype::is_replayable() const
{
return (simgrid::instr::trace_format == simgrid::instr::TraceFormat::Ti) &&
((this == MPI_BYTE) || (this == MPI_DOUBLE) || (this == MPI_INT) || (this == MPI_CHAR) ||
flags_ &= flag;
}
-int Datatype::extent(MPI_Aint * lb, MPI_Aint * extent){
+int Datatype::extent(MPI_Aint* lb, MPI_Aint* extent) const
+{
*lb = lb_;
*extent = ub_ - lb_;
return MPI_SUCCESS;
}
-void Datatype::get_name(char* name, int* length){
+void Datatype::get_name(char* name, int* length) const
+{
if(name_!=nullptr){
*length = strlen(name_);
strncpy(name, name_, *length+1);
return MPI_SUCCESS;
}
-int Datatype::get_contents (int max_integers, int max_addresses,
- int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
- MPI_Datatype *array_of_datatypes)
+int Datatype::get_contents(int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
+ MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes) const
{
if(contents_==nullptr)
return MPI_ERR_ARG;
- if(max_integers<contents_->number_of_integers_)
+ if (static_cast<unsigned>(max_integers) < contents_->integers_.size())
return MPI_ERR_COUNT;
- for(int i=0; i<contents_->number_of_integers_; i++){
- array_of_integers[i]=contents_->integers_[i];
- }
- if(max_addresses<contents_->number_of_addresses_)
+ std::copy(begin(contents_->integers_), end(contents_->integers_), array_of_integers);
+ if (static_cast<unsigned>(max_addresses) < contents_->addresses_.size())
return MPI_ERR_COUNT;
- for(int i=0; i<contents_->number_of_addresses_; i++){
- array_of_addresses[i]=contents_->addresses_[i];
- }
- if(max_datatypes<contents_->number_of_datatypes_)
+ std::copy(begin(contents_->addresses_), end(contents_->addresses_), array_of_addresses);
+ if (static_cast<unsigned>(max_datatypes) < contents_->datatypes_.size())
return MPI_ERR_COUNT;
- for(int i=0; i<contents_->number_of_datatypes_; i++){
- array_of_datatypes[i]=contents_->datatypes_[i];
- contents_->datatypes_[i]->ref();
- }
+ std::copy(begin(contents_->datatypes_), end(contents_->datatypes_), array_of_datatypes);
+ std::for_each(begin(contents_->datatypes_), end(contents_->datatypes_), std::mem_fn(&Datatype::ref));
return MPI_SUCCESS;
}
-int Datatype::get_envelope (int* num_integers, int* num_addresses,
- int* num_datatypes, int* combiner)
+int Datatype::get_envelope(int* num_integers, int* num_addresses, int* num_datatypes, int* combiner) const
{
if(contents_==nullptr){
*num_integers = 0;
*num_datatypes = 0;
*combiner = MPI_COMBINER_NAMED;
}else{
- *num_integers = contents_->number_of_integers_;
- *num_addresses = contents_->number_of_addresses_;
- *num_datatypes = contents_->number_of_datatypes_;
+ *num_integers = contents_->integers_.size();
+ *num_addresses = contents_->addresses_.size();
+ *num_datatypes = contents_->datatypes_.size();
*combiner = contents_->combiner_;
}
return MPI_SUCCESS;
namespace simgrid{
namespace smpi{
-
-Datatype_contents::Datatype_contents(int combiner,
- int number_of_integers, const int* integers,
- int number_of_addresses, const MPI_Aint* addresses,
- int number_of_datatypes, const MPI_Datatype* datatypes)
-: combiner_(combiner), number_of_integers_(number_of_integers),
- number_of_addresses_(number_of_addresses),
- number_of_datatypes_(number_of_datatypes)
+Datatype_contents::Datatype_contents(int combiner, int number_of_integers, const int* integers, int number_of_addresses,
+ const MPI_Aint* addresses, int number_of_datatypes, const MPI_Datatype* datatypes)
+ : combiner_(combiner)
+ , integers_(integers, integers + number_of_integers)
+ , addresses_(addresses, addresses + number_of_addresses)
+ , datatypes_(datatypes, datatypes + number_of_datatypes)
{
- integers_=new int[number_of_integers_];
- for(int i=0; i<number_of_integers_; i++){
- integers_[i]=integers[i];
- }
- addresses_=new MPI_Aint[number_of_addresses_];
- for(int i=0; i<number_of_addresses_; i++){
- addresses_[i]=addresses[i];
- }
- datatypes_=new MPI_Datatype[number_of_datatypes_];
- for(int i=0; i<number_of_datatypes_; i++){
- datatypes_[i]=datatypes[i];
- }
-};
-Datatype_contents::~Datatype_contents(){
- delete[] integers_;
- delete[] addresses_;
- delete[] datatypes_;
}
Type_Contiguous::Type_Contiguous(int size, MPI_Aint lb, MPI_Aint ub, int flags, int block_count, MPI_Datatype old_type)
char key[KEY_SIZE];
return static_cast<MPI_Errhandler>(F2C::f2c_lookup()->at(get_key(key, id)));
} else {
- return static_cast<MPI_Errhandler>(MPI_ERRHANDLER_NULL);
+ return MPI_ERRHANDLER_NULL;
}
}
-void Errhandler::call(MPI_Comm comm, int errorcode){
+void Errhandler::call(MPI_Comm comm, int errorcode) const
+{
comm_func_(&comm, &errorcode);
}
-void Errhandler::call(MPI_Win win, int errorcode){
+void Errhandler::call(MPI_Win win, int errorcode) const
+{
win_func_(&win, &errorcode);
}
-void Errhandler::call(MPI_File file, int errorcode){
+void Errhandler::call(MPI_File file, int errorcode) const
+{
file_func_(&file, &errorcode);
}
return MPI_SUCCESS;
}
- int File::get_position(MPI_Offset* offset){
+ int File::get_position(MPI_Offset* offset) const
+ {
*offset=file_->tell();
return MPI_SUCCESS;
}
- int File::get_position_shared(MPI_Offset* offset){
+ int File::get_position_shared(MPI_Offset* offset) const
+ {
shared_mutex_->lock();
*offset=*shared_file_pointer_;
shared_mutex_->unlock();
return MPI_SUCCESS;
}
- int File::read(MPI_File fh, void* /*buf*/, int count, MPI_Datatype datatype, MPI_Status* status)
+ int File::read(MPI_File fh, void* /*buf*/, int count, const Datatype* datatype, MPI_Status* status)
{
//get position first as we may be doing non contiguous reads and it will probably be updated badly
MPI_Offset position = fh->file_->tell();
/* address="Berlin, Heidelberg",*/
/* pages="84--93"*/
/* }*/
- int File::read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ int File::read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status)
+ {
fh->shared_mutex_->lock();
fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);
read(fh, buf, count, datatype, status);
return MPI_SUCCESS;
}
- int File::read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ int File::read_ordered(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status)
+ {
//0 needs to get the shared pointer value
MPI_Offset val;
if(fh->comm_->rank()==0){
return ret;
}
- int File::write(MPI_File fh, void* /*buf*/, int count, MPI_Datatype datatype, MPI_Status* status)
+ int File::write(MPI_File fh, void* /*buf*/, int count, const Datatype* datatype, MPI_Status* status)
{
//get position first as we may be doing non contiguous reads and it will probably be updated badly
MPI_Offset position = fh->file_->tell();
return MPI_SUCCESS;
}
- int File::write_shared(MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ int File::write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status)
+ {
fh->shared_mutex_->lock();
XBT_DEBUG("Write shared on %s - Shared ptr before : %lld",fh->file_->get_path(), *(fh->shared_file_pointer_));
fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);
return MPI_SUCCESS;
}
- int File::write_ordered(MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ int File::write_ordered(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status)
+ {
//0 needs to get the shared pointer value
MPI_Offset val;
if(fh->comm_->rank()==0){
return MPI_SUCCESS;
}
- int File::get_view(MPI_Offset* /*disp*/, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep)
+ int File::get_view(MPI_Offset* /*disp*/, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const
{
*etype=etype_;
*filetype=filetype_;
return MPI_SUCCESS;
}
- int File::size(){
+ int File::size() const
+ {
return file_->size();
}
- int File::flags(){
+ int File::flags() const
+ {
return flags_;
}
info_->ref();
}
- MPI_Comm File::comm(){
+ MPI_Comm File::comm() const
+ {
return comm_;
}
namespace simgrid{
namespace smpi{
-Group::Group(Group* origin)
+Group::Group(const Group* origin)
{
if (origin != MPI_GROUP_NULL && origin != MPI_GROUP_EMPTY) {
size_ = origin->size();
char key[KEY_SIZE];
return static_cast<MPI_Group>(F2C::f2c_lookup()->at(get_key(key, id)));
} else {
- return static_cast<MPI_Group>(MPI_GROUP_NULL);
+ return MPI_GROUP_NULL;
}
}
}
}
-int Info::get(const char *key, int valuelen, char *value, int *flag){
+int Info::get(const char* key, int valuelen, char* value, int* flag) const
+{
*flag=false;
auto val = map_.find(key);
if (val != map_.end()) {
return MPI_SUCCESS;
}
-int Info::get_nkeys(int *nkeys){
+int Info::get_nkeys(int* nkeys) const
+{
*nkeys = map_.size();
return MPI_SUCCESS;
}
-int Info::get_nthkey(int n, char *key){
+int Info::get_nthkey(int n, char* key) const
+{
int num=0;
for (auto const& elm : map_) {
if (num == n) {
return MPI_ERR_ARG;
}
-int Info::get_valuelen(const char *key, int *valuelen, int *flag){
+int Info::get_valuelen(const char* key, int* valuelen, int* flag) const
+{
*flag=false;
auto val = map_.find(key);
if (val != map_.end()) {
namespace simgrid{
namespace smpi{
-void Op::apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype)
+void Op::apply(const void* invec, void* inoutvec, const int* len, MPI_Datatype datatype) const
{
if (smpi_cfg_privatization() == SmpiPrivStrategies::MMAP) {
// we need to switch as the called function may silently touch global variables
namespace simgrid{
namespace smpi{
-Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags, MPI_Op op)
- : buf_(const_cast<void*>(buf)), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags), op_(op)
+Request::Request(const void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm,
+ unsigned flags, MPI_Op op)
+ : buf_(const_cast<void*>(buf))
+ , old_type_(datatype)
+ , size_(datatype->size() * count)
+ , src_(src)
+ , dst_(dst)
+ , tag_(tag)
+ , comm_(comm)
+ , flags_(flags)
+ , op_(op)
{
- size_ = datatype->size() * count;
datatype->ref();
comm_->ref();
if(op != MPI_REPLACE && op != MPI_OP_NULL)
// This part handles the problem of non-contiguous memory (for the unserialization at the reception)
if ((((flags_ & MPI_REQ_RECV) != 0) && ((flags_ & MPI_REQ_ACCUMULATE) != 0)) || (old_type_->flags() & DT_FLAG_DERIVED)) {
// This part handles the problem of non-contiguous memory
- old_buf = const_cast<void*>(buf_);
+ old_buf = buf_;
if (count==0){
buf_ = nullptr;
}else {
return match_common(req, ref, req);
}
-void Request::print_request(const char *message)
+void Request::print_request(const char* message) const
{
XBT_VERB("%s request %p [buf = %p, size = %zu, src = %d, dst = %d, tag = %d, flags = %x]",
message, this, buf_, size_, src_, dst_, tag_, flags_);
if (smpi_cfg_async_small_thresh() != 0 || (flags_ & MPI_REQ_RMA) != 0)
mut->unlock();
} else { /* the RECV flag was not set, so this is a send */
- simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
+ const simgrid::smpi::ActorExt* process = smpi_process_remote(simgrid::s4u::Actor::by_pid(dst_));
xbt_assert(process, "Actor pid=%d is gone??", dst_);
int rank = src_;
if (TRACE_smpi_view_internals()) {
return index;
}
-static int sort_accumulates(MPI_Request a, MPI_Request b)
+static int sort_accumulates(const Request* a, const Request* b)
{
return (a->tag() > b->tag());
}
wait(&requests[c],pstat);
index = c;
} else {
- index = waitany(count, (MPI_Request*)requests, pstat);
-
+ index = waitany(count, requests, pstat);
+
if (index == MPI_UNDEFINED)
break;
int index = 0;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
- index = waitany(incount, (MPI_Request*)requests, pstat);
+ index = waitany(incount, requests, pstat);
if(index==MPI_UNDEFINED) return MPI_UNDEFINED;
if(status != MPI_STATUSES_IGNORE) {
status[count] = *pstat;
{
char key[KEY_SIZE];
if(id==MPI_FORTRAN_REQUEST_NULL)
- return static_cast<MPI_Request>(MPI_REQUEST_NULL);
+ return MPI_REQUEST_NULL;
return static_cast<MPI_Request>(F2C::f2c_lookup()->at(get_key(key,id)));
}
}
}
-int Request::get_nbc_requests_size(){
+int Request::get_nbc_requests_size() const
+{
return nbc_requests_size_;
}
-MPI_Request* Request::get_nbc_requests(){
+MPI_Request* Request::get_nbc_requests() const
+{
return nbc_requests_;
}
-
}
}
status->count=count;
}
-int Status::get_count(const MPI_Status * status, MPI_Datatype datatype)
+int Status::get_count(const MPI_Status* status, const Datatype* datatype)
{
return status->count / datatype->size();
}
return MPI_SUCCESS;
}
-int Topo_Cart::dim_get(int *ndims) {
+int Topo_Cart::dim_get(int* ndims) const
+{
*ndims =ndims_;
return MPI_SUCCESS;
}
return MPI_SUCCESS;
}
-void Win::get_name(char* name, int* length){
+void Win::get_name(char* name, int* length) const
+{
if(name_==nullptr){
*length=0;
name=nullptr;
return info_;
}
-int Win::rank(){
+int Win::rank() const
+{
return rank_;
}
-MPI_Aint Win::size(){
+MPI_Aint Win::size() const
+{
return size_;
}
-void* Win::base(){
+void* Win::base() const
+{
return base_;
}
-int Win::disp_unit(){
+int Win::disp_unit() const
+{
return disp_unit_;
}
-int Win::dynamic(){
+int Win::dynamic() const
+{
return dynamic_;
}
return size;
}
-int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr)
+int Win::shared_query(int rank, MPI_Aint* size, int* disp_unit, void* baseptr) const
{
const Win* target_win = rank != MPI_PROC_NULL ? connected_wins_[rank] : nullptr;
for (int i = 0; not target_win && i < comm_->size(); i++) {
void LoadBalancer::run()
{
- s4u::Engine* engine = s4u::Engine::get_instance();
+ const s4u::Engine* engine = s4u::Engine::get_instance();
std::vector<s4u::Host*> available_hosts =
engine->get_filtered_hosts([](const s4u::Host* host) { return host->is_on(); });
xbt_assert(available_hosts.size() > 0, "No hosts available; are they all switched off?");
// after a host got another actor assigned (or moved from).
// We can't use std::priorityQueue here because we modify *two* elements: The top element, which
// we can access and which has the lowest load, gets a new actor assigned.
- // However, the host loosing that actor must be updated as well.
+ // However, the host losing that actor must be updated as well.
// std::priorityQueue is immutable and hence doesn't work for us.
//
// This heap contains the least loaded host at the top
host_to_actors.insert({host, actor});
}
- s4u::Host* get_host(s4u::ActorPtr actor) { return actor_to_host[actor]; }
+ s4u::Host* get_host(s4u::ActorPtr actor) const { return actor_to_host.at(actor); }
- unsigned int count_actors(s4u::Host* host)
+ unsigned int count_actors(s4u::Host* host) const
{
return host_to_actors.count(host); // TODO This is linear in the size of the map. Maybe replace by constant lookup through another map?
}
@SMPITOOLS_SH@
# Don't use -Wl,-z-defs with Clang and address sanitizer
-if [ "@CMAKE_C_COMPILER_ID@" = "Clang" -a "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then
+if [ "@CMAKE_C_COMPILER_ID@" = "Clang" ] && [ "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then
LINKER_UNDEFINED_ERROR=""
else
LINKER_UNDEFINED_ERROR="1"
list_add CMDARGS "-c"
;;
*.c)
- SRCFILE="$(readlink -f ${ARG} 2>/dev/null)"
+ SRCFILE="$(readlink -f "${ARG}" 2>/dev/null)"
if [ -z "$SRCFILE" ] ; then
SRCFILE="$ARG"
fi
list_set CMDLINE "${CC}"
list_add_not_empty CMDLINE "${CFLAGS}"
list_add_not_empty CMDLINE ${INCLUDEARGS}
-list_add_not_empty CMDLINE ${CMAKE_LINKARGS}
+list_add_not_empty CMDLINE "${CMAKE_LINKARGS}"
list_add_not_empty CMDLINE "${CMDARGS}"
list_add_not_empty CMDLINE "${LINKARGS}"
-eval $(list_get CMDLINE)
-if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then
+eval "$(list_get CMDLINE)"
+if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then
echo "$@"
[ "x$show" = x1 ] && exit 0
fi
list_set CXXFLAGS "-std=gnu++11" @SMPI_CXX_FLAGS@
list_set LINKARGS
-if [ "@CMAKE_C_COMPILER_ID@" = "Clang" -a "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then
+if [ "@CMAKE_C_COMPILER_ID@" = "Clang" ] && [ "@HAVE_SANITIZER_ADDRESS@" = "TRUE" ]; then
LINKER_UNDEFINED_ERROR=""
else
LINKER_UNDEFINED_ERROR="1"
list_add CMDARGS "-c"
;;
*.c)
- SRCFILE="$(readlink -f ${ARG} 2>/dev/null)"
+ SRCFILE="$(readlink -f "${ARG}" 2>/dev/null)"
if [ -z "$SRCFILE" ] ; then
SRCFILE="$ARG"
fi
list_set CMDLINE "${CXX}"
list_add_not_empty CMDLINE "${CXXFLAGS}"
list_add_not_empty CMDLINE ${INCLUDEARGS}
-list_add_not_empty CMDLINE ${CMAKE_LINKARGS}
+list_add_not_empty CMDLINE "${CMAKE_LINKARGS}"
list_add_not_empty CMDLINE "${CMDARGS}"
list_add_not_empty CMDLINE "${LINKARGS}"
eval $(list_get CMDLINE)
-if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then
+if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then
echo "$@"
[ "x$show" = x1 ] && exit 0
fi
echo "Warning: smpif90 pretends to be a regular compiler (SMPI_PRETEND_CC is set). Produced binaries will not be usable in SimGrid."
fi
list_set TMPFILES
-main_name=main
cleanup () {
eval $(list_get TMPFILES)
list_add CMDLINE "-c"
;;
*.f90)
- FILENAME=`basename ${ARG}`
+ FILENAME=$(basename "${ARG}")
TMPFILE=$(mymktemp "${ARG}" ".f90")
ORIGFILE="${FILENAME%.f90}"
filter_and_compile
;;
*.F90)
- FILENAME=`basename ${ARG}`
+ FILENAME=$(basename "${ARG}")
TMPFILE=$(mymktemp "${ARG}" ".F90")
ORIGFILE="${FILENAME%.F90}"
filter_and_compile
fi
list_add_not_empty CMDLINE ${INCLUDEARGS}
-list_add_not_empty CMDLINE ${CMAKE_LINKARGS}
+list_add_not_empty CMDLINE "${CMAKE_LINKARGS}"
list_add_not_empty CMDLINE "${LINKARGS}"
eval $(list_get CMDLINE)
-if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then
+if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then
echo "$@"
[ "x$show" = x1 ] && exit 0
fi
echo "Warning: smpiff pretends to be a regular compiler (SMPI_PRETEND_CC is set). Produced binaries will not be usable in SimGrid."
fi
list_set TMPFILES
-main_name=main
cleanup () {
eval $(list_get TMPFILES)
list_add TMPFILES "${TMPFILE}"
if [ "x${SMPI_PRETEND_CC}" = "x" ]; then
#replace "program main_name by subroutine user_main (and the end clause as well)"
- if [ $TRACE_CALL_LOCATION -gt 0 ]; then
- echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > ${TMPFILE}
- echo "#line 1 \"${ARG}\"" >> ${TMPFILE}
+ if [ "$TRACE_CALL_LOCATION" -gt 0 ]; then
+ echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > "${TMPFILE}"
+ echo "#line 1 \"${ARG}\"" >> "${TMPFILE}"
fi
sed 's/^[[:space:]]\{6\}[[:space:]]*\([eE][nN][dD] \)\{0,1\}[pP][rR][oO][gG][rR][aA][mM][^a-zA-Z0-9]*\([a-zA-Z0-9_]*\)/ \1subroutine user_main /g' "${ARG}" >> "${TMPFILE}"
SRCFILE="${TMPFILE}"
filter_and_compile_f90() {
list_add TMPFILES "${TMPFILE}"
#replace "program main_name by subroutine user_main (and the end clause as well)"
- if [ $TRACE_CALL_LOCATION -gt 0 ]; then
- echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > ${TMPFILE}
- echo "#line 1 \"${ARG}\"" >> ${TMPFILE}
+ if [ "$TRACE_CALL_LOCATION" -gt 0 ]; then
+ echo "#include \"@includedir@/smpi/smpi_extended_traces_fortran.h\"" > "${TMPFILE}"
+ echo "#line 1 \"${ARG}\"" >> "${TMPFILE}"
fi
sed 's/^\([[:space:]]*\)\([eE][nN][dD] \)\{0,1\}[pP][rR][oO][gG][rR][aA][mM][^a-zA-Z0-9]*\([a-zA-Z0-9_]*\)/\1\2subroutine user_main /g' "${ARG}" >> "${TMPFILE}"
SRCFILE="${TMPFILE}"
list_add CMDLINE "-c"
;;
*.f)
- FILENAME=`basename ${ARG}`
+ FILENAME=$(basename "${ARG}")
TMPFILE=$(mymktemp "${ARG}" ".f")
ORIGFILE="${FILENAME%.f}"
filter_and_compile_f77
;;
 *.F)
- FILENAME=`basename ${ARG}`
+ FILENAME=$(basename "${ARG}")
TMPFILE=$(mymktemp "${ARG}" ".F")
ORIGFILE="${FILENAME%.F}"
filter_and_compile_f77
;;
*.f90)
- FILENAME=`basename ${ARG}`
+ FILENAME=$(basename "${ARG}")
TMPFILE=$(mymktemp "${ARG}" ".f90")
ORIGFILE="${FILENAME%.f90}"
filter_and_compile_f90
;;
 *.F90)
- FILENAME=`basename ${ARG}`
+ FILENAME=$(basename "${ARG}")
TMPFILE=$(mymktemp "${ARG}" ".F90")
ORIGFILE="${FILENAME%.F90}"
filter_and_compile_f90
fi
list_add_not_empty CMDLINE ${INCLUDEARGS}
-list_add_not_empty CMDLINE ${CMAKE_LINKARGS}
+list_add_not_empty CMDLINE "${CMAKE_LINKARGS}"
list_add_not_empty CMDLINE "${LINKARGS}"
eval $(list_get CMDLINE)
-if [ "x$VERBOSE" = x1 -o "x$show" = x1 ] ; then
+if [ "x$VERBOSE" = x1 ] || [ "x$show" = x1 ] ; then
echo "$@"
[ "x$show" = x1 ] && exit 0
fi
DEFAULT_LOOPBACK_LATENCY="0.000004s"
DEFAULT_NETWORK_BANDWIDTH="$((26 * 1024 * 1024))Bps"
DEFAULT_NETWORK_LATENCY="0.000005s"
-DEFAULT_NUMPROCS="4"
DEFAULT_SPEED="100flops"
LOOPBACK_BANDWIDTH="${DEFAULT_LOOPBACK_BANDWIDTH}"
trapped_signals="HUP INT QUIT ILL ABRT SEGV FPE ALRM TERM USR1 USR2 BUS"
die () {
- printf '[%s] ** error: %s. Aborting.\n' "$(basename $0)" "$*" >&2
+ printf '[%s] ** error: %s. Aborting.\n' "$(basename "$0")" "$*" >&2
exit 1
}
smpirun_cleanup()
{
if [ -z "${KEEP}" ] ; then
- if [ -z "${PLATFORM}" -a -n "$PLATFORMTMP" ]; then
- rm -f ${PLATFORMTMP}
+ if [ -z "${PLATFORM}" ] && [ -n "$PLATFORMTMP" ]; then
+ rm -f "${PLATFORMTMP}"
PLATFORMTMP=""
fi
- if [ ${HOSTFILETMP} = 1 -a -n "$HOSTFILE" ] ; then
- rm -f ${HOSTFILE}
+ if [ ${HOSTFILETMP} = 1 ] && [ -n "$HOSTFILE" ] ; then
+ rm -f "${HOSTFILE}"
HOSTFILE=""
fi
- if [ ${UNROLLEDHOSTFILETMP} = 1 -a -n "$UNROLLEDHOSTFILE" ] ; then
- rm -f ${UNROLLEDHOSTFILE}
+ if [ "${UNROLLEDHOSTFILETMP}" = 1 ] && [ -n "$UNROLLEDHOSTFILE" ] ; then
+ rm -f "${UNROLLEDHOSTFILE}"
UNROLLEDHOSTFILE=""
fi
- if [ -n ${APPLICATIONTMP} ]; then
- rm -f ${APPLICATIONTMP}
+ if [ -n "${APPLICATIONTMP}" ]; then
+ rm -f "${APPLICATIONTMP}"
APPLICATIONTMP=""
fi
fi
# Cleanup and kill the child process:
smpirun_cleanup
- if ! [ -z "$pid" ]; then
- kill -TERM $pid
+ if [ -n "$pid" ]; then
+ kill -TERM "$pid"
fi
unset pid
# Raise the same signal again (remove the traps first):
trap - $trapped_signals
- kill -$sig $$
+ kill -"$sig" $$
# This should never happen:
kill -ABRT $$
}
for s in $trapped_signals; do
- trap "smpirun_trap $s" $s
+ trap "smpirun_trap $s" "$s"
done
while true; do
print(prefix + str(i) + suffix)
else:
print(prefix + r + suffix)
- ' < ${PLATFORM} > ${HOSTFILE}
+ ' < "${PLATFORM}" > "${HOSTFILE}"
fi
UNROLLEDHOSTFILETMP=0
# parse if our lines are terminated by :num_process
-if grep -q ':' $HOSTFILE ; then
+if grep -q ':' "$HOSTFILE" ; then
UNROLLEDHOSTFILETMP=1
UNROLLEDHOSTFILE="$(mktemp smpitmp-hostfXXXXXX)"
@PYTHON_EXECUTABLE@ -c '
print(m.group(1))
else:
print(line.strip())
-' < ${HOSTFILE} > ${UNROLLEDHOSTFILE}
+' < "${HOSTFILE}" > "${UNROLLEDHOSTFILE}"
if [ ${HOSTFILETMP} = 1 ] ; then
- rm ${HOSTFILE}
+ rm "${HOSTFILE}"
HOSTFILETMP=0
fi
HOSTFILE=$UNROLLEDHOSTFILE
fi
# Don't use wc -l to compute it to avoid issues with trailing \n at EOF
-hostfile_procs=$(grep -c "[a-zA-Z0-9]" $HOSTFILE)
-if [ ${hostfile_procs} = 0 ] ; then
+hostfile_procs=$(grep -c "[a-zA-Z0-9]" "$HOSTFILE")
+if [ "${hostfile_procs}" = 0 ] ; then
die "the hostfile '${HOSTFILE}' is empty"
fi
NUMPROCS=$hostfile_procs
fi
-if [ ${NUMPROCS} -gt ${hostfile_procs} ] ; then
+if [ "${NUMPROCS}" -gt "${hostfile_procs}" ] ; then
echo "You requested to use ${NUMPROCS} ranks, but there is only ${hostfile_procs} processes in your hostfile..." >&2
fi
if [ -z "${PLATFORM}" ]; then
PLATFORMTMP="$(mktemp smpitmp-platfXXXXXX)"
- cat > ${PLATFORMTMP} <<PLATFORMHEAD
+ cat > "${PLATFORMTMP}" <<PLATFORMHEAD
<?xml version='1.0'?>
<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
<platform version="4.1">
PLATFORMHEAD
i=${NUMPROCS}
- while [ $i -gt 0 ]; do
- echo " <host id=\"host$i\" speed=\"${SPEED}\"/>" >> ${PLATFORMTMP}
- echo " <link id=\"loop$i\" bandwidth=\"${LOOPBACK_BANDWIDTH}\" latency=\"${LOOPBACK_LATENCY}\"/>" >> ${PLATFORMTMP}
- echo " <link id=\"link$i\" bandwidth=\"${NETWORK_BANDWIDTH}\" latency=\"${NETWORK_LATENCY}\"/>" >> ${PLATFORMTMP}
+ while [ "$i" -gt 0 ]; do
+ {
+ echo " <host id=\"host$i\" speed=\"${SPEED}\"/>"
+ echo " <link id=\"loop$i\" bandwidth=\"${LOOPBACK_BANDWIDTH}\" latency=\"${LOOPBACK_LATENCY}\"/>"
+ echo " <link id=\"link$i\" bandwidth=\"${NETWORK_BANDWIDTH}\" latency=\"${NETWORK_LATENCY}\"/>"
+ } >> "${PLATFORMTMP}"
i=$((i - 1))
done
i=${NUMPROCS}
- while [ $i -gt 0 ]; do
+ while [ "$i" -gt 0 ]; do
j=${NUMPROCS}
- while [ $j -gt 0 ]; do
- if [ $i -eq $j ]; then
- echo " <route src=\"host$i\" dst=\"host$j\"><link_ctn id=\"loop$i\"/></route>" >> ${PLATFORMTMP}
+ while [ "$j" -gt 0 ]; do
+ if [ "$i" -eq "$j" ]; then
+ echo " <route src=\"host$i\" dst=\"host$j\"><link_ctn id=\"loop$i\"/></route>" >> "${PLATFORMTMP}"
else
- echo " <route src=\"host$i\" dst=\"host$j\"><link_ctn id=\"link$i\"/><link_ctn id=\"link$j\"/></route>" >> ${PLATFORMTMP}
+ echo " <route src=\"host$i\" dst=\"host$j\"><link_ctn id=\"link$i\"/><link_ctn id=\"link$j\"/></route>" >> "${PLATFORMTMP}"
fi
j=$((j - 1))
done
i=$((i - 1))
done
- cat >> ${PLATFORMTMP} <<PLATFORMFOOT
+ cat >> "${PLATFORMTMP}" <<PLATFORMFOOT
</zone>
</platform>
PLATFORMFOOT
APPLICATIONTMP="$(mktemp smpitmp-appXXXXXX)"
#APPLICATIONTMP="app.xml"
-cat > ${APPLICATIONTMP} <<APPLICATIONHEAD
+cat > "${APPLICATIONTMP}" <<APPLICATIONHEAD
<?xml version='1.0'?>
<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
<platform version="4.1">
APPLICATIONHEAD
##---- cache hostnames of hostfile---------------
-if [ -n "${HOSTFILE}" ] && [ -f ${HOSTFILE} ]; then
- hostnames=$(cat ${HOSTFILE} | tr '\n\r' ' ')
+if [ -n "${HOSTFILE}" ] && [ -f "${HOSTFILE}" ]; then
+ hostnames=$(< "${HOSTFILE}" tr '\n\r' ' ')
fi
if [ -n "${APP_TRACES}" ]; then
if [ -f "${APP_TRACES}" ]; then
- hosttraces=$(cat ${APP_TRACES} | tr '\n\r' ' ' )
- NUMTRACES=$(cat ${APP_TRACES} | wc -l)
+ hosttraces=$(< "${APP_TRACES}" tr '\n\r' ' ' )
+ NUMTRACES=$(< "${APP_TRACES}" wc -l)
REPLAY=1
else
printf "File not found: %s\n" "${APP_TRACES:-\${APP_TRACES\}}" >&2
SEQ=$(${HAVE_SEQ} 0 $(( NUMPROCS - 1)))
else
cnt=0
- while [ $cnt -lt ${NUMPROCS} ] ; do
+ while [ $cnt -lt "${NUMPROCS}" ] ; do
SEQ="$SEQ $cnt"
cnt=$((cnt + 1));
done
if [ ${MAPOPT} = 1 ]; then
echo "[rank $i] -> $host"
fi
-
+ {
echo " <actor host=\"${host}\" function=\"$i\"> <!-- function name used only for logging -->
<prop id=\"instance_id\" value=\"smpirun\"/>
- <prop id=\"rank\" value=\"$i\"/>" >> ${APPLICATIONTMP}
+ <prop id=\"rank\" value=\"$i\"/>"
if [ ${REPLAY} = 1 ]; then
- echo " <prop id=\"smpi_replay\" value=\"true\"/>" >> ${APPLICATIONTMP}
- if [ ${NUMTRACES} -gt 1 ]; then
- echo " <argument value=\"$(echo $hosttraces|cut -d' ' -f$j)\"/>" >> ${APPLICATIONTMP}
+ echo " <prop id=\"smpi_replay\" value=\"true\"/>"
+ if [ "${NUMTRACES}" -gt 1 ]; then
+ echo " <argument value=\"$(echo "$hosttraces"|cut -d' ' -f$j)\"/>"
else
- echo " <argument value=\"$(echo $hosttraces|cut -d' ' -f1)\"/>" >> ${APPLICATIONTMP}
+ echo " <argument value=\"$(echo "$hosttraces"|cut -d' ' -f1)\"/>"
fi
else
- echo ${XML_ARGS} >> ${APPLICATIONTMP}
+ echo "${XML_ARGS}"
fi
- echo " </actor>" >> ${APPLICATIONTMP}
+ echo " </actor>"
+ } >> "${APPLICATIONTMP}"
done
-cat >> ${APPLICATIONTMP} <<APPLICATIONFOOT
+cat >> "${APPLICATIONTMP}" <<APPLICATIONFOOT
</platform>
APPLICATIONFOOT
##-------------------------------- end DEFAULT APPLICATION --------------------------------------
# Do not remove, this variable may be used by user code (e.g. StarPU)
export SMPI_GLOBAL_SIZE=${NUMPROCS}
-if [ -n "${KEEP}" -a -z "${QUIET}" ] ; then
- echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
+if [ -n "${KEEP}" ] && [ -z "${QUIET}" ] ; then
+ echo "${EXEC}" ${PRIVATIZE} "${TRACEOPTIONS}" "${SIMOPTS}" "${PLATFORMTMP}" "${APPLICATIONTMP}"
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} kept."
fi
# * The FD 3 is used to temporarily store FD 1. This is because the shell connects FD 1 to /dev/null when the command
# is launched in the background: this can be overridden in bash but not in standard bourne shell.
exec 3<&0
-${WRAPPER} "@SMPIMAIN@" ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} <&3 3>&- &
+${WRAPPER} "@SMPIMAIN@" "${EXEC}" ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} "${PLATFORMTMP}" "${APPLICATIONTMP}" <&3 3>&- &
pid=$!
exec 3>&-
wait $pid
# Keep temporary files on failures to help debugging
#
if [ ${status} -ne 0 ] ; then
- if [ -z "${KEEP}" -a -z "${QUIET}" ]; then
- echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
+ if [ -z "${KEEP}" ] && [ -z "${QUIET}" ]; then
+ echo "${EXEC}" ${PRIVATIZE} "${TRACEOPTIONS}" "${SIMOPTS}" "${PLATFORMTMP}" "${APPLICATIONTMP}"
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} kept."
fi
*
* Weird things will happen if you turn on a host that is already on. S4U is fool-proof, not this.
*/
-void HostImpl::turn_on()
+void HostImpl::turn_on() const
{
for (auto const& arg : actors_at_boot_) {
XBT_DEBUG("Booting Actor %s(%s) right now", arg->name.c_str(), arg->host->get_cname());
}
/** Kill all actors hosted here */
-void HostImpl::turn_off(kernel::actor::ActorImpl* issuer)
+void HostImpl::turn_off(const kernel::actor::ActorImpl* issuer)
{
for (auto& actor : actor_list_) {
XBT_DEBUG("Killing Actor %s@%s on behalf of %s which turned off that host.", actor.get_cname(),
res.push_back(actor.ciface());
return res;
}
-size_t HostImpl::get_actor_count()
+size_t HostImpl::get_actor_count() const
{
return actor_list_.size();
}
-std::vector<s4u::Disk*> HostImpl::get_disks()
+std::vector<s4u::Disk*> HostImpl::get_disks() const
{
std::vector<s4u::Disk*> disks;
for (auto const& d : disks_)
explicit HostImpl(s4u::Host* host);
virtual ~HostImpl();
- std::vector<s4u::Disk*> get_disks();
+ std::vector<s4u::Disk*> get_disks() const;
void set_disks(const std::vector<kernel::resource::DiskImpl*>& disks, s4u::Host* host);
void add_disk(const s4u::Disk* disk);
void remove_disk(const std::string& disk_name);
s4u::Host* get_iface() { return piface_; }
- void turn_on();
- void turn_off(kernel::actor::ActorImpl* issuer);
+ void turn_on() const;
+ void turn_off(const kernel::actor::ActorImpl* issuer);
std::vector<s4u::ActorPtr> get_all_actors();
- size_t get_actor_count();
+ size_t get_actor_count() const;
void add_actor(kernel::actor::ActorImpl* actor) { actor_list_.push_back(*actor); }
void remove_actor(kernel::actor::ActorImpl* actor) { xbt::intrusive_erase(actor_list_, *actor); }
void add_actor_at_boot(kernel::actor::ProcessArg* arg) { actors_at_boot_.emplace_back(arg); }
StorageModel& operator=(const StorageModel&) = delete;
~StorageModel();
- virtual StorageImpl* createStorage(const std::string& id, const std::string& type_id, const std::string& content_name,
+ virtual StorageImpl* createStorage(std::string& filename, int lineno, const std::string& id,
+ const std::string& type_id, const std::string& content_name,
const std::string& attach) = 0;
};
~StorageImpl() override;
s4u::Storage* get_iface() { return &piface_; }
- const char* get_type() { return typeId_.c_str(); }
+ const char* get_type() const { return typeId_.c_str(); }
lmm::Constraint* get_read_constraint() const { return constraint_read_; }
lmm::Constraint* get_write_constraint() const { return constraint_write_; }
/** @brief Check if the Storage is used (if an action currently uses its resources) */
get_model()->get_maxmin_system()->update_constraint_bound(get_constraint(),
get_core_count() * speed_.scale * speed_.peak);
while ((var = get_constraint()->get_variable(&elem))) {
- auto* action = static_cast<CpuCas01Action*>(var->get_id());
+ const CpuCas01Action* action = static_cast<CpuCas01Action*>(var->get_id());
get_model()->get_maxmin_system()->update_variable_bound(action->get_variable(),
action->requested_core() * speed_.scale * speed_.peak);
get_host()->turn_off();
while ((var = cnst->get_variable(&elem))) {
- auto* action = static_cast<Action*>(var->get_id());
+ Action* action = var->get_id();
if (action->get_state() == Action::State::INITED || action->get_state() == Action::State::STARTED ||
action->get_state() == Action::State::IGNORED) {
{
}
-int CpuCas01Action::requested_core()
+int CpuCas01Action::requested_core() const
{
return requested_core_;
}
CpuCas01Action(const CpuCas01Action&) = delete;
CpuCas01Action& operator=(const CpuCas01Action&) = delete;
~CpuCas01Action() override;
- int requested_core();
+ int requested_core() const;
private:
int requested_core_ = 1;
} // namespace kernel
} // namespace simgrid
-#endif
\ No newline at end of file
+#endif
xbt_assert(speed_.scale > 0, "Speed of host %s must be >0", host->get_cname());
}
+void Cpu::reset_vcpu(Cpu* that)
+{
+ this->pstate_ = that->pstate_;
+ this->speed_ = that->speed_;
+ this->speed_per_pstate_.clear();
+ this->speed_per_pstate_.assign(that->speed_per_pstate_.begin(), that->speed_per_pstate_.end());
+}
+
int Cpu::get_pstate_count() const
{
return speed_per_pstate_.size();
class XBT_PUBLIC Cpu : public Resource {
int core_count_ = 1;
s4u::Host* host_;
- int pstate_ = 0; /*< Current pstate (index in the speed_per_pstate_)*/
- const std::vector<double> speed_per_pstate_; /*< List of supported CPU capacities (pstate related) */
+ int pstate_ = 0; /*< Current pstate (index in the speed_per_pstate_)*/
+ std::vector<double> speed_per_pstate_; /*< List of supported CPU capacities (pstate related). Not 'const' because VCPU
+ get modified on migration */
+ friend simgrid::vm::VirtualMachineImpl; // Resets the VCPU
public:
/**
/** @brief Take speed changes (either load or max) into account */
virtual void on_speed_change();
+ /** Reset most characteristics of this CPU to the one of that CPU.
+ *
+ * Used to reset a VCPU when its VM migrates to another host, so it only resets the fields that should be in this
+ *case.
+ **/
+ virtual void reset_vcpu(Cpu* that);
+
public:
/** @brief Get the available speed ratio, between 0 and 1.
*
* @param b End of interval
* @return the integrate value. -1 if an error occurs.
*/
-double CpuTiTmgr::integrate(double a, double b)
+double CpuTiTmgr::integrate(double a, double b) const
{
if ((a < 0.0) || (a > b)) {
xbt_die("Error, invalid integration interval [%.2f,%.2f]. "
* @param a Initial point
* @param b Final point
*/
-double CpuTiProfile::integrate_simple(double a, double b)
+double CpuTiProfile::integrate_simple(double a, double b) const
{
return integrate_simple_point(b) - integrate_simple_point(a);
}
* @brief Auxiliary function to compute the integral at point a.
* @param a point
*/
-double CpuTiProfile::integrate_simple_point(double a)
+double CpuTiProfile::integrate_simple_point(double a) const
{
double integral = 0;
double a_aux = a;
* @param amount Amount to be executed
* @return End time
*/
-double CpuTiTmgr::solve(double a, double amount)
+double CpuTiTmgr::solve(double a, double amount) const
{
/* Fix very small negative numbers */
if ((a < 0.0) && (a > -EPSILON)) {
* @param amount Amount of flops
* @return The date when amount is available.
*/
-double CpuTiProfile::solve_simple(double a, double amount)
+double CpuTiProfile::solve_simple(double a, double amount) const
{
double integral_a = integrate_simple_point(a);
int ind = binary_search(integral_, integral_a + amount);
* @param a Time
* @return CPU speed scale
*/
-double CpuTiTmgr::get_power_scale(double a)
+double CpuTiTmgr::get_power_scale(double a) const
{
double reduced_a = a - floor(a / last_time_) * last_time_;
int point = CpuTiProfile::binary_search(profile_->time_points_, reduced_a);
public:
explicit CpuTiProfile(const profile::Profile* profile);
- double integrate_simple(double a, double b);
- double integrate_simple_point(double a);
- double solve_simple(double a, double amount);
+ double integrate_simple(double a, double b) const;
+ double integrate_simple_point(double a) const;
+ double solve_simple(double a, double amount) const;
std::vector<double> time_points_;
std::vector<double> integral_;
CpuTiTmgr(const CpuTiTmgr&) = delete;
CpuTiTmgr& operator=(const CpuTiTmgr&) = delete;
- double integrate(double a, double b);
- double solve(double a, double amount);
- double get_power_scale(double a);
+ double integrate(double a, double b) const;
+ double solve(double a, double amount) const;
+ double get_power_scale(double a) const;
private:
Type type_ = Type::FIXED;
for (auto it = std::begin(*get_started_action_set()); it != std::end(*get_started_action_set());) {
auto& action = *it;
++it; // increment iterator here since the following calls to action.finish() may invalidate it
- action.update_remains(lrint(action.get_variable()->get_value() * delta));
+ action.update_remains(rint(action.get_variable()->get_value() * delta));
action.update_max_duration(delta);
if (((action.get_remains_no_update() <= 0) && (action.get_variable()->get_penalty() > 0)) ||
DiskAction* DiskS19::io_start(sg_size_t size, s4u::Io::OpType type)
{
- return new DiskS19Action(get_model(), size, not is_on(), this, type);
+ return new DiskS19Action(get_model(), static_cast<double>(size), not is_on(), this, type);
}
DiskAction* DiskS19::read(sg_size_t size)
{
- return new DiskS19Action(get_model(), size, not is_on(), this, s4u::Io::OpType::READ);
+ return new DiskS19Action(get_model(), static_cast<double>(size), not is_on(), this, s4u::Io::OpType::READ);
}
DiskAction* DiskS19::write(sg_size_t size)
{
- return new DiskS19Action(get_model(), size, not is_on(), this, s4u::Io::OpType::WRITE);
+ return new DiskS19Action(get_model(), static_cast<double>(size), not is_on(), this, s4u::Io::OpType::WRITE);
}
/**********
if (action->latency_ > 0) {
action->set_variable(get_maxmin_system()->variable_new(action, 0.0, -1.0, constraints_per_variable));
if (is_update_lazy()) {
- // add to the heap the event when the latency is payed
+ // add to the heap the event when the latency is paid
double date = action->latency_ + action->get_last_update();
ActionHeap::Type type = route.empty() ? ActionHeap::Type::normal : ActionHeap::Type::latency;
// WIFI links are handled manually just above, so skip them now
if (link->get_sharing_policy() == s4u::Link::SharingPolicy::WIFI) {
xbt_assert(link == src_wifi_link || link == dst_wifi_link,
- "Wifi links can only occure at the beginning of the route (meaning that it's attached to the src) or "
+ "Wifi links can only occur at the beginning of the route (meaning that it's attached to the src) or "
"at its end (meaning that it's attached to the dst");
} else {
get_maxmin_system()->expand(link->get_constraint(), action->get_variable(), 1.0);
}
}
-void NetworkIBModel::computeIBfactors(IBNode* root)
+void NetworkIBModel::computeIBfactors(IBNode* root) const
{
double num_comm_out = root->ActiveCommsUp.size();
double max_penalty_out = 0.0;
XBT_DEBUG("Finished computing IB penalties");
}
-void NetworkIBModel::updateIBfactors_rec(IBNode* root, std::vector<bool>& updatedlist)
+void NetworkIBModel::updateIBfactors_rec(IBNode* root, std::vector<bool>& updatedlist) const
{
if (not updatedlist[root->id]) {
XBT_DEBUG("IB - Updating rec %d", root->id);
}
}
-void NetworkIBModel::updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove)
+void NetworkIBModel::updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove) const
{
if (from == to) // disregard local comms (should use loopback)
return;
double Bs;
double Be;
double ys;
- void updateIBfactors_rec(IBNode* root, std::vector<bool>& updatedlist);
- void computeIBfactors(IBNode* root);
+ void updateIBfactors_rec(IBNode* root, std::vector<bool>& updatedlist) const;
+ void computeIBfactors(IBNode* root) const;
public:
NetworkIBModel();
explicit NetworkIBModel(const char* name);
NetworkIBModel(const NetworkIBModel&) = delete;
NetworkIBModel& operator=(const NetworkIBModel&) = delete;
- void updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove);
+ void updateIBfactors(NetworkAction* action, IBNode* from, IBNode* to, int remove) const;
std::unordered_map<std::string, IBNode> active_nodes;
std::unordered_map<NetworkAction*, std::pair<IBNode*, IBNode*>> active_comms;
const kernel::lmm::Element* elem = nullptr;
double now = surf_get_clock();
while ((var = get_constraint()->get_variable(&elem))) {
- Action* action = static_cast<Action*>(var->get_id());
+ Action* action = var->get_id();
if (action->get_state() == Action::State::INITED || action->get_state() == Action::State::STARTED) {
action->set_finish_time(now);
action->set_state(Action::State::FAILED);
}
}
-void LinkImpl::on_bandwidth_change()
+void LinkImpl::on_bandwidth_change() const
{
s4u::Link::on_bandwidth_change(this->piface_);
}
void turn_on() override;
void turn_off() override;
- void on_bandwidth_change();
+ void on_bandwidth_change() const;
virtual void
set_bandwidth_profile(kernel::profile::Profile* profile); /*< setup the profile file with bandwidth events
#include <ns3/application-container.h>
#include <ns3/event-id.h>
-#include "ns3/wifi-module.h"
#include "ns3/mobility-module.h"
+#include "ns3/wifi-module.h"
#include "network_ns3.hpp"
#include "ns3/ns3_simulator.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(ns3, surf, "Logging specific to the SURF network ns-3 module");
-std::vector<std::string> IPV4addr;
-static std::string transformIpv4Address(ns3::Ipv4Address from);
-
/*****************
* Crude globals *
*****************/
extern std::map<std::string, ns3::ApplicationContainer> sink_from_sock;
static ns3::InternetStackHelper stack;
-static ns3::NodeContainer nodes;
-static ns3::NodeContainer Cluster_nodes;
-static ns3::Ipv4InterfaceContainer interfaces;
-static int number_of_nodes = 0;
-static int number_of_clusters_nodes = 0;
static int number_of_links = 1;
static int number_of_networks = 1;
/* wifi globals */
static ns3::WifiHelper wifi;
-static ns3::YansWifiPhyHelper wifiPhy = ns3::YansWifiPhyHelper::Default ();
-static ns3::YansWifiChannelHelper wifiChannel = ns3::YansWifiChannelHelper::Default ();
+static ns3::YansWifiPhyHelper wifiPhy = ns3::YansWifiPhyHelper::Default();
+static ns3::YansWifiChannelHelper wifiChannel = ns3::YansWifiChannelHelper::Default();
static ns3::WifiMacHelper wifiMac;
static ns3::MobilityHelper mobility;
simgrid::xbt::Extension<simgrid::kernel::routing::NetPoint, NetPointNs3> NetPointNs3::EXTENSION_ID;
+static std::string transformIpv4Address(ns3::Ipv4Address from)
+{
+ std::stringstream sstream;
+ sstream << from;
+ return sstream.str();
+}
+
NetPointNs3::NetPointNs3() : ns3_node_(ns3::CreateObject<ns3::Node>(0))
{
stack.Install(ns3_node_);
- nodes.Add(ns3_node_);
- node_num = number_of_nodes++;
}
WifiZone::WifiZone(std::string name_, simgrid::s4u::Host* host_, ns3::Ptr<ns3::Node> ap_node_,
- ns3::Ptr<ns3::YansWifiChannel> channel_, int mcs_, int nss_, int network_, int link_) :
- name(name_), host(host_), ap_node(ap_node_), channel(channel_), mcs(mcs_), nss(nss_),
- network(network_), link(link_) {
- n_sta_nodes = 0;
- wifi_zones[name_] = this;
+ ns3::Ptr<ns3::YansWifiChannel> channel_, int mcs_, int nss_, int network_, int link_)
+ : name(name_)
+ , host(host_)
+ , ap_node(ap_node_)
+ , channel(channel_)
+ , mcs(mcs_)
+ , nss(nss_)
+ , network(network_)
+ , link(link_)
+{
+ n_sta_nodes = 0;
+ wifi_zones[name_] = this;
}
-bool WifiZone::is_ap(ns3::Ptr<ns3::Node> node){
- for (std::pair<std::string, WifiZone*> zone : wifi_zones)
- if (zone.second->get_ap_node() == node)
- return true;
- return false;
+bool WifiZone::is_ap(ns3::Ptr<ns3::Node> node)
+{
+ for (std::pair<std::string, WifiZone*> zone : wifi_zones)
+ if (zone.second->get_ap_node() == node)
+ return true;
+ return false;
}
-WifiZone* WifiZone::by_name(std::string name) {
- WifiZone* zone;
- try {
- zone = wifi_zones.at(name);
- }
- catch (const std::out_of_range& oor) {
- return nullptr;
- }
- return zone;
+WifiZone* WifiZone::by_name(std::string name)
+{
+ WifiZone* zone;
+ try {
+ zone = wifi_zones.at(name);
+ } catch (const std::out_of_range& oor) {
+ return nullptr;
+ }
+ return zone;
}
std::unordered_map<std::string, WifiZone*> WifiZone::wifi_zones;
-static void initialize_ns3_wifi() {
- wifi.SetStandard (ns3::WIFI_PHY_STANDARD_80211n_5GHZ);
+static void initialize_ns3_wifi()
+{
+ wifi.SetStandard(ns3::WIFI_PHY_STANDARD_80211n_5GHZ);
- for (auto host : simgrid::s4u::Engine::get_instance()->get_all_hosts()) {
- const char* wifi_link = host->get_property("wifi_link");
- const char* wifi_mcs = host->get_property("wifi_mcs");
- const char* wifi_nss = host->get_property("wifi_nss");
+ for (auto host : simgrid::s4u::Engine::get_instance()->get_all_hosts()) {
+ const char* wifi_link = host->get_property("wifi_link");
+ const char* wifi_mcs = host->get_property("wifi_mcs");
+ const char* wifi_nss = host->get_property("wifi_nss");
- if (wifi_link)
- new WifiZone(wifi_link, host, host->get_netpoint()->extension<NetPointNs3>()->ns3_node_,
- wifiChannel.Create(), wifi_mcs ? atoi(wifi_mcs) : 3, wifi_nss ? atoi(wifi_nss) : 1, 0, 0);
- }
+ if (wifi_link)
+ new WifiZone(wifi_link, host, host->get_netpoint()->extension<NetPointNs3>()->ns3_node_, wifiChannel.Create(),
+ wifi_mcs ? atoi(wifi_mcs) : 3, wifi_nss ? atoi(wifi_nss) : 1, 0, 0);
+ }
}
/*************
static void clusterCreation_cb(simgrid::kernel::routing::ClusterCreationArgs const& cluster)
{
- for (int const& i : *cluster.radicals) {
- // Routers don't create a router on the other end of the private link by themselves.
- // We just need this router to be given an ID so we create a temporary NetPointNS3 so that it gets one
- auto* host_dst = new NetPointNs3();
+ ns3::NodeContainer Nodes;
+ for (int const& i : *cluster.radicals) {
// Create private link
std::string host_id = cluster.prefix + std::to_string(i) + cluster.suffix;
- auto* host_src = simgrid::s4u::Host::by_name(host_id)->get_netpoint()->extension<NetPointNs3>();
- xbt_assert(host_src, "Cannot find a ns-3 host of name %s", host_id.c_str());
+ auto* src = simgrid::s4u::Host::by_name(host_id)->get_netpoint();
+ auto* dst = simgrid::s4u::Engine::get_instance()->netpoint_by_name_or_null(cluster.router_id);
+ xbt_assert(dst != nullptr, "No router named %s", cluster.router_id.c_str());
- // Any ns-3 route is symmetrical
- ns3_add_direct_route(host_src, host_dst, cluster.bw, cluster.lat, cluster.id, cluster.sharing_policy);
+ ns3_add_direct_route(src, dst, cluster.bw, cluster.lat, cluster.id,
+ cluster.sharing_policy); // Any ns-3 route is symmetrical
- delete host_dst;
+ // Also add the host to the list of hosts that will be connected to the backbone
+ Nodes.Add(src->extension<NetPointNs3>()->ns3_node_);
}
- //Create link backbone
- ns3_add_cluster(cluster.id.c_str(), cluster.bb_bw, cluster.bb_lat);
+ // Create link backbone
+
+ xbt_assert(Nodes.GetN() <= 65000, "Cluster with ns-3 is limited to 65000 nodes");
+ ns3::CsmaHelper csma;
+ csma.SetChannelAttribute("DataRate",
+ ns3::DataRateValue(ns3::DataRate(cluster.bb_bw * 8))); // ns-3 takes bps, but we provide Bps
+ csma.SetChannelAttribute("Delay", ns3::TimeValue(ns3::Seconds(cluster.bb_lat)));
+ ns3::NetDeviceContainer devices = csma.Install(Nodes);
+ XBT_DEBUG("Create CSMA");
+
+ std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links);
+ XBT_DEBUG("Assign IP Addresses %s to CSMA.", addr.c_str());
+ ns3::Ipv4AddressHelper ipv4;
+ ipv4.SetBase(addr.c_str(), "255.255.0.0");
+ ipv4.Assign(devices);
+
+ if (number_of_links == 255) {
+ xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255");
+ number_of_links = 1;
+ number_of_networks++;
+ } else {
+ number_of_links++;
+ }
}
static void routeCreation_cb(bool symmetrical, simgrid::kernel::routing::NetPoint* src,
// XBT_DEBUG("src (%s), dst (%s), src_id = %d, dst_id = %d",src,dst, src_id, dst_id);
XBT_DEBUG("\tLink (%s) bw:%fbps lat:%fs", link->get_cname(), link->get_bandwidth(), link->get_latency());
- // create link ns3
- auto* host_src = src->extension<NetPointNs3>();
- auto* host_dst = dst->extension<NetPointNs3>();
-
- host_src->set_name(src->get_name());
- host_dst->set_name(dst->get_name());
-
- xbt_assert(host_src != nullptr, "Network element %s does not seem to be ns-3-ready", src->get_cname());
- xbt_assert(host_dst != nullptr, "Network element %s does not seem to be ns-3-ready", dst->get_cname());
-
- ns3_add_direct_route(host_src, host_dst, link->get_bandwidth(), link->get_latency(), link->get_name(), link->get_sharing_policy());
+ ns3_add_direct_route(src, dst, link->get_bandwidth(), link->get_latency(), link->get_name(),
+ link->get_sharing_policy());
} else {
static bool warned_about_long_routes = false;
}
}
-/* Create the ns3 topology based on routing strategy */
-static void postparse_cb()
-{
- IPV4addr.shrink_to_fit();
- ns3::GlobalRouteManager::BuildGlobalRoutingDatabase();
- ns3::GlobalRouteManager::InitializeRoutes();
-}
-
/*********
* Model *
*********/
NetPointNs3::EXTENSION_ID = routing::NetPoint::extension_create<NetPointNs3>();
- ns3_initialize(ns3_tcp_model.get());
+ ns3::Config::SetDefault("ns3::TcpSocket::SegmentSize", ns3::UintegerValue(1000));
+ ns3::Config::SetDefault("ns3::TcpSocket::DelAckCount", ns3::UintegerValue(1));
+ ns3::Config::SetDefault("ns3::TcpSocketBase::Timestamp", ns3::BooleanValue(false));
+
+ auto TcpProtocol = ns3_tcp_model.get();
+ if (TcpProtocol == "default") {
+ /* nothing to do */
+
+ } else if (TcpProtocol == "Reno" || TcpProtocol == "NewReno" || TcpProtocol == "Tahoe") {
+ XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str());
+ ns3::Config::SetDefault("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::Tcp" + TcpProtocol));
+
+ } else {
+ xbt_die("The ns3/TcpModel must be: NewReno or Reno or Tahoe");
+ }
routing::NetPoint::on_creation.connect([](routing::NetPoint& pt) {
pt.extension_set<NetPointNs3>(new NetPointNs3());
- XBT_VERB("SimGrid's %s is known as node %d within ns-3", pt.get_cname(), pt.extension<NetPointNs3>()->node_num);
+ XBT_VERB("Declare SimGrid's %s within ns-3", pt.get_cname());
});
- routing::on_cluster_creation.connect(&clusterCreation_cb);
- s4u::Engine::on_platform_created.connect(&postparse_cb);
+ s4u::Engine::on_platform_created.connect([]() {
+ /* Create the ns3 topology based on routing strategy */
+ ns3::GlobalRouteManager::BuildGlobalRoutingDatabase();
+ ns3::GlobalRouteManager::InitializeRoutes();
+ });
+ routing::on_cluster_creation.connect(&clusterCreation_cb);
s4u::NetZone::on_route_creation.connect(&routeCreation_cb);
}
-NetworkNS3Model::~NetworkNS3Model() {
- IPV4addr.clear();
-}
-
LinkImpl* NetworkNS3Model::create_link(const std::string& name, const std::vector<double>& bandwidths, double latency,
s4u::Link::SharingPolicy policy)
{
LinkNS3::LinkNS3(NetworkNS3Model* model, const std::string& name, double bandwidth, double latency,
s4u::Link::SharingPolicy policy)
- : LinkImpl(model, name, nullptr)
+ : LinkImpl(model, name, nullptr), sharing_policy_(policy)
{
bandwidth_.peak = bandwidth;
latency_.peak = latency;
- sharing_policy_ = policy;
+
+ /* If wifi, create the wifizone now. If not, don't do anything: the links will be created in routeCreate_cb */
if (policy == simgrid::s4u::Link::SharingPolicy::WIFI) {
- static bool wifi_init = false;
- if (!wifi_init) {
- initialize_ns3_wifi();
- wifi_init = true;
- }
-
- ns3::NetDeviceContainer netA;
- WifiZone* zone = WifiZone::by_name(name);
- xbt_assert(zone != 0, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str());
-
- wifi.SetRemoteStationManager ("ns3::ConstantRateWifiManager",
- "ControlMode", ns3::StringValue ("HtMcs0"),
- "DataMode", ns3::StringValue ("HtMcs" + std::to_string(zone->get_mcs())));
-
- wifiPhy.SetChannel (zone->get_channel());
- wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss()));
- wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
- wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
- wifiMac.SetType("ns3::ApWifiMac",
- "Ssid", ns3::SsidValue(name));
-
- netA.Add(wifi.Install (wifiPhy, wifiMac, zone->get_ap_node()));
-
- ns3::Ptr<ns3::ListPositionAllocator> positionAllocS = ns3::CreateObject<ns3::ListPositionAllocator> ();
- positionAllocS->Add(ns3::Vector(0, 0, 0));
- mobility.SetPositionAllocator(positionAllocS);
- mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel");
- mobility.Install(zone->get_ap_node());
-
- ns3::Ipv4AddressHelper address;
- std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links);
- address.SetBase(addr.c_str(), "255.255.0.0");
- XBT_DEBUG("\tInterface stack '%s'", addr.c_str());
- interfaces.Add(address.Assign (netA));
- zone->set_network(number_of_networks);
- zone->set_link(number_of_links);
-
- NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();
- int nodeNum = netpoint_ns3->node_num;
- if (IPV4addr.size() <= (unsigned)nodeNum)
- IPV4addr.resize(nodeNum + 1);
- IPV4addr[nodeNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 1));
-
- if (number_of_links == 255){
- xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255");
- number_of_links = 1;
- number_of_networks++;
- } else {
- number_of_links++;
- }
+ static bool wifi_init = false;
+ if (!wifi_init) {
+ initialize_ns3_wifi();
+ wifi_init = true;
+ }
+
+ ns3::NetDeviceContainer netA;
+ WifiZone* zone = WifiZone::by_name(name);
+ xbt_assert(zone != 0, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str());
+ NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();
+
+ wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"), "DataMode",
+ ns3::StringValue("HtMcs" + std::to_string(zone->get_mcs())));
+
+ wifiPhy.SetChannel(zone->get_channel());
+ wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss()));
+ wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
+ wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
+
+ wifiMac.SetType("ns3::ApWifiMac");
+
+ netA.Add(wifi.Install(wifiPhy, wifiMac, zone->get_ap_node()));
+
+ ns3::Ptr<ns3::ListPositionAllocator> positionAllocS = ns3::CreateObject<ns3::ListPositionAllocator>();
+ positionAllocS->Add(ns3::Vector(0, 0, 0));
+ mobility.SetPositionAllocator(positionAllocS);
+ mobility.SetMobilityModel("ns3::ConstantPositionMobilityModel");
+ mobility.Install(zone->get_ap_node());
+
+ ns3::Ipv4AddressHelper address;
+ std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links);
+ address.SetBase(addr.c_str(), "255.255.0.0");
+ XBT_DEBUG("\tInterface stack '%s'", addr.c_str());
+ auto addresses = address.Assign(netA);
+ zone->set_network(number_of_networks);
+ zone->set_link(number_of_links);
+
+ netpoint_ns3->ipv4_address_ = transformIpv4Address(addresses.GetAddress(addresses.GetN() - 1));
+
+ if (number_of_links == 255) {
+ xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255");
+ number_of_links = 1;
+ number_of_networks++;
+ } else {
+ number_of_links++;
+ }
}
s4u::Link::on_creation(*this->get_iface());
}
static int port_number = 1025; // Port number is limited from 1025 to 65 000
- unsigned int node1 = src->get_netpoint()->extension<NetPointNs3>()->node_num;
- unsigned int node2 = dst->get_netpoint()->extension<NetPointNs3>()->node_num;
-
ns3::Ptr<ns3::Node> src_node = src->get_netpoint()->extension<NetPointNs3>()->ns3_node_;
ns3::Ptr<ns3::Node> dst_node = dst->get_netpoint()->extension<NetPointNs3>()->ns3_node_;
- xbt_assert(node2 < IPV4addr.size(), "Element %s is unknown to ns-3. Is it connected to any one-hop link?",
- dst->get_netpoint()->get_cname());
- std::string& addr = IPV4addr[node2];
+ std::string& addr = dst->get_netpoint()->extension<NetPointNs3>()->ipv4_address_;
xbt_assert(not addr.empty(), "Element %s is unknown to ns-3. Is it connected to any one-hop link?",
dst->get_netpoint()->get_cname());
- XBT_DEBUG("ns3: Create flow of %.0f Bytes from %u to %u with Interface %s", totalBytes, node1, node2, addr.c_str());
+ XBT_DEBUG("ns3: Create flow of %.0f Bytes from %s to %s with Interface %s", totalBytes, src->get_cname(),
+ dst->get_cname(), addr.c_str());
ns3::PacketSinkHelper sink("ns3::TcpSocketFactory", ns3::InetSocketAddress(ns3::Ipv4Address::GetAny(), port_number));
ns3::ApplicationContainer apps = sink.Install(dst_node);
port_number = 1025;
XBT_WARN("Too many connections! Port number is saturated. Trying to use the oldest ports.");
}
- xbt_assert(port_number <= 65000, "Too many connections! Port number is saturated.");
s4u::Link::on_communicate(*this);
}
id.Cancel();
}
-// initialize the ns-3 interface and environment
-void ns3_initialize(std::string TcpProtocol)
+void ns3_add_direct_route(simgrid::kernel::routing::NetPoint* src, simgrid::kernel::routing::NetPoint* dst, double bw,
+ double lat, const std::string& link_name, simgrid::s4u::Link::SharingPolicy policy)
{
- // tcpModel are:
- // "ns3::TcpNewReno"
- // "ns3::TcpReno"
- // "ns3::TcpTahoe"
-
- ns3::Config::SetDefault ("ns3::TcpSocket::SegmentSize", ns3::UintegerValue (1000));
- ns3::Config::SetDefault ("ns3::TcpSocket::DelAckCount", ns3::UintegerValue (1));
- ns3::Config::SetDefault ("ns3::TcpSocketBase::Timestamp", ns3::BooleanValue (false));
-
- if (TcpProtocol == "default") {
- /* nothing to do */
-
- } else if (TcpProtocol == "Reno") {
- XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str());
- ns3::Config::SetDefault ("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::TcpReno"));
-
- } else if (TcpProtocol == "NewReno") {
- XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str());
- ns3::Config::SetDefault ("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::TcpNewReno"));
+ ns3::Ipv4AddressHelper address;
+ ns3::NetDeviceContainer netA;
- } else if (TcpProtocol == "Tahoe") {
- XBT_INFO("Switching Tcp protocol to '%s'", TcpProtocol.c_str());
- ns3::Config::SetDefault ("ns3::TcpL4Protocol::SocketType", ns3::StringValue("ns3::TcpTahoe"));
+ // create link ns3
+ auto* host_src = src->extension<NetPointNs3>();
+ auto* host_dst = dst->extension<NetPointNs3>();
- } else {
- xbt_die("The ns3/TcpModel must be: NewReno or Reno or Tahoe");
- }
-}
+ xbt_assert(host_src != nullptr, "Network element %s does not seem to be ns-3-ready", src->get_cname());
+ xbt_assert(host_dst != nullptr, "Network element %s does not seem to be ns-3-ready", dst->get_cname());
-void ns3_add_cluster(const char* /*id*/, double bw, double lat)
-{
- ns3::NodeContainer Nodes;
+ if (policy == simgrid::s4u::Link::SharingPolicy::WIFI) {
+ auto a = host_src->ns3_node_;
+ auto b = host_dst->ns3_node_;
+ xbt_assert(WifiZone::is_ap(a) != WifiZone::is_ap(b),
+ "A wifi route can only exist between an access point node and a station node.");
- for (unsigned int i = number_of_clusters_nodes; i < Cluster_nodes.GetN(); i++) {
- Nodes.Add(Cluster_nodes.Get(i));
- XBT_DEBUG("Add node %u to cluster", i);
- }
- number_of_clusters_nodes = Cluster_nodes.GetN();
+ ns3::Ptr<ns3::Node> apNode = WifiZone::is_ap(a) ? a : b;
+ ns3::Ptr<ns3::Node> staNode = apNode == a ? b : a;
- XBT_DEBUG("Add router %u to cluster", nodes.GetN() - Nodes.GetN() - 1);
- Nodes.Add(nodes.Get(nodes.GetN()-Nodes.GetN()-1));
+ WifiZone* zone = WifiZone::by_name(link_name);
- xbt_assert(Nodes.GetN() <= 65000, "Cluster with ns-3 is limited to 65000 nodes");
- ns3::CsmaHelper csma;
- csma.SetChannelAttribute("DataRate", ns3::DataRateValue(ns3::DataRate(bw * 8))); // ns-3 takes bps, but we provide Bps
- csma.SetChannelAttribute("Delay", ns3::TimeValue(ns3::Seconds(lat)));
- ns3::NetDeviceContainer devices = csma.Install(Nodes);
- XBT_DEBUG("Create CSMA");
+ wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"), "DataMode",
+ ns3::StringValue("HtMcs" + std::to_string(zone->get_mcs())));
- std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links);
- XBT_DEBUG("Assign IP Addresses %s to CSMA.", addr.c_str());
- ns3::Ipv4AddressHelper ipv4;
- ipv4.SetBase(addr.c_str(), "255.255.0.0");
- interfaces.Add(ipv4.Assign (devices));
-
- if(number_of_links == 255){
- xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255");
- number_of_links = 1;
- number_of_networks++;
- }else{
- number_of_links++;
- }
- XBT_DEBUG("Number of nodes in Cluster_nodes: %u", Cluster_nodes.GetN());
-}
+ wifiPhy.SetChannel(zone->get_channel());
+ wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss()));
+ wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
+ wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
-static std::string transformIpv4Address(ns3::Ipv4Address from)
-{
- std::stringstream sstream;
- sstream << from ;
- return sstream.str();
-}
+ wifiMac.SetType("ns3::StaWifiMac");
-void ns3_add_direct_route(NetPointNs3* src, NetPointNs3* dst, double bw, double lat, std::string link_name,
- simgrid::s4u::Link::SharingPolicy policy)
-{
- ns3::Ipv4AddressHelper address;
- ns3::NetDeviceContainer netA;
+ netA.Add(wifi.Install(wifiPhy, wifiMac, staNode));
- int srcNum = src->node_num;
- int dstNum = dst->node_num;
+ ns3::Config::Set("/NodeList/*/DeviceList/*/$ns3::WifiNetDevice/Phy/ChannelWidth", ns3::UintegerValue(40));
- ns3::Ptr<ns3::Node> a = src->ns3_node_;
- ns3::Ptr<ns3::Node> b = dst->ns3_node_;
+ NetPointNs3* sta_netpointNs3 = WifiZone::is_ap(host_src->ns3_node_) ? host_src : host_dst;
+ const char* wifi_distance = simgrid::s4u::Host::by_name(sta_netpointNs3->name_)->get_property("wifi_distance");
+ ns3::Ptr<ns3::ListPositionAllocator> positionAllocS = ns3::CreateObject<ns3::ListPositionAllocator>();
+ positionAllocS->Add(ns3::Vector(wifi_distance ? atof(wifi_distance) : 10.0, 0, 0));
+ mobility.SetPositionAllocator(positionAllocS);
+ mobility.SetMobilityModel("ns3::ConstantPositionMobilityModel");
+ mobility.Install(staNode);
- if (policy == simgrid::s4u::Link::SharingPolicy::WIFI) {
- xbt_assert(WifiZone::is_ap(a) != WifiZone::is_ap(b), "A wifi route can only exist between an access point node and a station node.");
-
- ns3::Ptr<ns3::Node> apNode = WifiZone::is_ap(a) ? a : b;
- ns3::Ptr<ns3::Node> staNode = apNode == a ? b : a;
-
- WifiZone* zone = WifiZone::by_name(link_name);
-
- wifi.SetRemoteStationManager ("ns3::ConstantRateWifiManager",
- "ControlMode", ns3::StringValue ("HtMcs0"),
- "DataMode", ns3::StringValue ("HtMcs" + std::to_string(zone->get_mcs())));
-
- wifiPhy.SetChannel (zone->get_channel());
- wifiPhy.Set("Antennas", ns3::UintegerValue(zone->get_nss()));
- wifiPhy.Set("MaxSupportedTxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
- wifiPhy.Set("MaxSupportedRxSpatialStreams", ns3::UintegerValue(zone->get_nss()));
-
- wifiMac.SetType ("ns3::StaWifiMac",
- "Ssid", ns3::SsidValue(link_name),
- "ActiveProbing", ns3::BooleanValue(false));
-
- netA.Add(wifi.Install (wifiPhy, wifiMac, staNode));
-
- ns3::Config::Set ("/NodeList/*/DeviceList/*/$ns3::WifiNetDevice/Phy/ChannelWidth", ns3::UintegerValue (40));
-
- NetPointNs3* sta_netpointNs3 = WifiZone::is_ap(src->ns3_node_) ? dst : src;
- const char* wifi_distance = simgrid::s4u::Host::by_name(sta_netpointNs3->name_)->get_property("wifi_distance");
- ns3::Ptr<ns3::ListPositionAllocator> positionAllocS = ns3::CreateObject<ns3::ListPositionAllocator> ();
- positionAllocS->Add(ns3::Vector( wifi_distance ? atof(wifi_distance) : 10.0 , 0, 0));
- mobility.SetPositionAllocator(positionAllocS);
- mobility.SetMobilityModel ("ns3::ConstantPositionMobilityModel");
- mobility.Install(staNode);
-
- std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", zone->get_network(), zone->get_link());
- address.SetBase(addr.c_str(), "255.255.0.0", ("0.0.0." + std::to_string(zone->get_n_sta_nodes() + 2)).c_str());
- zone->add_sta_node();
- XBT_DEBUG("\tInterface stack '%s'", addr.c_str());
- interfaces.Add(address.Assign (netA));
- if (IPV4addr.size() <= (unsigned)dstNum)
- IPV4addr.resize(dstNum + 1);
- IPV4addr[dstNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 1));
+ std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", zone->get_network(), zone->get_link());
+ address.SetBase(addr.c_str(), "255.255.0.0", ("0.0.0." + std::to_string(zone->get_n_sta_nodes() + 2)).c_str());
+ zone->add_sta_node();
+ XBT_DEBUG("\tInterface stack '%s'", addr.c_str());
+ auto addresses = address.Assign(netA);
+ host_dst->ipv4_address_ = transformIpv4Address(addresses.GetAddress(addresses.GetN() - 1));
} else {
ns3::PointToPointHelper pointToPoint;
- XBT_DEBUG("\tAdd PTP from %d to %d bw:'%f Bps' lat:'%fs'", srcNum, dstNum, bw, lat);
+
+ XBT_DEBUG("\tAdd PTP from %s to %s bw:'%f Bps' lat:'%fs'", src->get_cname(), dst->get_cname(), bw, lat);
pointToPoint.SetDeviceAttribute("DataRate",
ns3::DataRateValue(ns3::DataRate(bw * 8))); // ns-3 takes bps, but we provide Bps
pointToPoint.SetChannelAttribute("Delay", ns3::TimeValue(ns3::Seconds(lat)));
- netA.Add(pointToPoint.Install(a, b));
+ netA.Add(pointToPoint.Install(host_src->ns3_node_, host_dst->ns3_node_));
std::string addr = simgrid::xbt::string_printf("%d.%d.0.0", number_of_networks, number_of_links);
address.SetBase(addr.c_str(), "255.255.0.0");
XBT_DEBUG("\tInterface stack '%s'", addr.c_str());
- interfaces.Add(address.Assign (netA));
-
- if (IPV4addr.size() <= (unsigned)srcNum)
- IPV4addr.resize(srcNum + 1);
- IPV4addr[srcNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 2));
+ auto addresses = address.Assign(netA);
- if (IPV4addr.size() <= (unsigned)dstNum)
- IPV4addr.resize(dstNum + 1);
- IPV4addr[dstNum] = transformIpv4Address(interfaces.GetAddress(interfaces.GetN() - 1));
+ host_src->ipv4_address_ = transformIpv4Address(addresses.GetAddress(0));
+ host_dst->ipv4_address_ = transformIpv4Address(addresses.GetAddress(1));
- if (number_of_links == 255){
- xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255");
- number_of_links = 1;
- number_of_networks++;
+ if (number_of_links == 255) {
+ xbt_assert(number_of_networks < 255, "Number of links and networks exceed 255*255");
+ number_of_links = 1;
+ number_of_networks++;
} else {
- number_of_links++;
+ number_of_links++;
}
}
}
class NetworkNS3Model : public NetworkModel {
public:
NetworkNS3Model();
- ~NetworkNS3Model();
+ ~NetworkNS3Model() = default;
LinkImpl* create_link(const std::string& name, const std::vector<double>& bandwidth, double latency,
s4u::Link::SharingPolicy policy) override;
Action* communicate(s4u::Host* src, s4u::Host* dst, double size, double rate) override;
return -1;
int rate_id = host_rates_it->second;
- xbt_assert(rate_id >= 0 && rate_id < (int)bandwidths_.size(), "Host '%s' has an invalid rate '%d' on wifi link '%s'",
- host->get_name().c_str(), rate_id, this->get_cname());
+ xbt_assert(rate_id >= 0,
+ "Negative host wifi rate levels are invalid but host '%s' uses %d as a rate level on link '%s'",
+ host->get_cname(), rate_id, this->get_cname());
+ xbt_assert(rate_id < (int)bandwidths_.size(),
+ "Link '%s' only has %zu wifi rate levels, so the provided level %d is invalid for host '%s'.",
+ this->get_cname(), bandwidths_.size(), rate_id, host->get_cname());
Metric rate = use_decay_model_ ? decay_bandwidths_[rate_id] : bandwidths_[rate_id];
return rate.peak * rate.scale;
void NetworkWifiLink::refresh_decay_bandwidths(){
// Compute number of STAtion on the Access Point
- int nSTA=host_rates_.size();
-
+ int nSTA = static_cast<int>(host_rates_.size());
+
std::vector<Metric> new_bandwidths;
for (auto bandwidth : bandwidths_){
- // Instanciate decay model relatively to the actual bandwidth
+ // Instantiate decay model relatively to the actual bandwidth
double max_bw=bandwidth.peak;
double min_bw=bandwidth.peak-(wifi_max_rate_-wifi_min_rate_);
double model_rate=bandwidth.peak-(wifi_max_rate_-model_rate_);
/** @brief Hold every rates association between host and links (host name, rates id) */
std::map<xbt::string, int> host_rates_;
- /** @brief A link can have several bandwith attach to it (mostly use by wifi model) */
+ /** @brief A link can have several bandwidths attached to it (mostly use by wifi model) */
std::vector<Metric> bandwidths_;
/** @brief Should we use the decay model ? */
#include "simgrid/s4u/Host.hpp"
#include "src/surf/network_ns3.hpp"
+#include "ns3/wifi-module.h"
#include <ns3/node.h>
#include <ns3/tcp-socket-factory.h>
-#include "ns3/wifi-module.h"
#include <cstdint>
-class NetPointNs3 {
+class XBT_PRIVATE NetPointNs3 {
public:
static simgrid::xbt::Extension<simgrid::kernel::routing::NetPoint, NetPointNs3> EXTENSION_ID;
- void set_name(std::string name) {name_ = name;}
+ void set_name(std::string name) { name_ = name; }
explicit NetPointNs3();
std::string name_;
- int node_num;
ns3::Ptr<ns3::Node> ns3_node_;
-
+ std::string ipv4_address_;
};
-XBT_PUBLIC void ns3_initialize(std::string TcpProtocol);
-XBT_PUBLIC void ns3_simulator(double max_seconds);
-XBT_PUBLIC void ns3_add_direct_route(NetPointNs3* src, NetPointNs3* dst, double bw, double lat, std::string link_name,
- simgrid::s4u::Link::SharingPolicy policy);
-XBT_PUBLIC void ns3_add_cluster(const char* id, double bw, double lat);
+XBT_PRIVATE void ns3_initialize(std::string TcpProtocol);
+XBT_PRIVATE void ns3_simulator(double max_seconds);
+XBT_PRIVATE void ns3_add_direct_route(simgrid::kernel::routing::NetPoint* src, simgrid::kernel::routing::NetPoint* dst,
+ double bw, double lat, const std::string& link_name,
+ simgrid::s4u::Link::SharingPolicy policy);
class XBT_PRIVATE SgFlow {
public:
simgrid::kernel::resource::NetworkNS3Action* action_;
};
-void start_flow(ns3::Ptr<ns3::Socket> sock, const char* to, uint16_t port_number);
+XBT_PRIVATE void start_flow(ns3::Ptr<ns3::Socket> sock, const char* to, uint16_t port_number);
static inline std::string transform_socket_ptr(ns3::Ptr<ns3::Socket> local_socket)
{
WifiZone(std::string name_, simgrid::s4u::Host* host_, ns3::Ptr<ns3::Node> ap_node_,
ns3::Ptr<ns3::YansWifiChannel> channel_, int mcs_, int nss_, int network_, int link_);
- const char* get_cname() {return name.c_str();}
- simgrid::s4u::Host* get_host(){return host;}
- ns3::Ptr<ns3::Node> get_ap_node() {return ap_node;}
- ns3::Ptr<ns3::YansWifiChannel> get_channel() {return channel;}
- int get_mcs() {return mcs;}
- int get_nss() {return nss;}
- int get_network() {return network;}
- int get_link() {return link;}
- int get_n_sta_nodes() {return n_sta_nodes;}
-
- void set_network(int network_) {network = network_;}
- void set_link(int link_) {link = link_;}
- void add_sta_node() {n_sta_nodes++;}
+ const char* get_cname() { return name.c_str(); }
+ simgrid::s4u::Host* get_host() { return host; }
+ ns3::Ptr<ns3::Node> get_ap_node() { return ap_node; }
+ ns3::Ptr<ns3::YansWifiChannel> get_channel() { return channel; }
+ int get_mcs() { return mcs; }
+ int get_nss() { return nss; }
+ int get_network() { return network; }
+ int get_link() { return link; }
+ int get_n_sta_nodes() { return n_sta_nodes; }
+
+ void set_network(int network_) { network = network_; }
+ void set_link(int link_) { link = link_; }
+ void add_sta_node() { n_sta_nodes++; }
static bool is_ap(ns3::Ptr<ns3::Node> node);
static WifiZone* by_name(std::string name);
L07Action::L07Action(kernel::resource::Model* model, const std::vector<s4u::Host*>& host_list,
const double* flops_amount, const double* bytes_amount, double rate)
- : CpuAction(model, 1, 0), computationAmount_(flops_amount), communicationAmount_(bytes_amount), rate_(rate)
+ : CpuAction(model, 1.0, false), computationAmount_(flops_amount), communicationAmount_(bytes_amount), rate_(rate)
{
size_t link_nb = 0;
size_t used_host_nb = 0; /* Only the hosts with something to compute (>0 flops) are counted) */
get_model()->get_maxmin_system()->update_constraint_bound(get_constraint(), speed_.peak * speed_.scale);
while ((var = get_constraint()->get_variable(&elem))) {
- const kernel::resource::Action* action = static_cast<kernel::resource::Action*>(var->get_id());
+ const kernel::resource::Action* action = var->get_id();
get_model()->get_maxmin_system()->update_variable_bound(action->get_variable(), speed_.scale * speed_.peak);
}
linkUp = simgrid::s4u::Link::by_name_or_null(tmp_link);
linkDown = simgrid::s4u::Link::by_name_or_null(tmp_link);
- auto* as_cluster = static_cast<ClusterZone*>(current_as);
+ ClusterZone* as_cluster = current_as;
as_cluster->private_links_.insert({as_cluster->node_pos(rankId), {linkUp->get_impl(), linkDown->get_impl()}});
}
// Add a router.
XBT_DEBUG(" ");
XBT_DEBUG("<router id=\"%s\"/>", cluster->router_id.c_str());
- if (cluster->router_id.empty()) {
- std::string newid = std::string(cluster->prefix) + cluster->id + "_router" + cluster->suffix;
- current_as->router_ = sg_platf_new_router(newid, NULL);
- } else {
- current_as->router_ = sg_platf_new_router(cluster->router_id, NULL);
- }
+ if (cluster->router_id.empty())
+ cluster->router_id = std::string(cluster->prefix) + cluster->id + "_router" + cluster->suffix;
+ current_as->router_ = sg_platf_new_router(cluster->router_id, NULL);
//Make the backbone
if ((cluster->bb_bw > 0) || (cluster->bb_lat > 0)) {
storage->id.c_str(), stype->model.c_str(), stype->id.c_str(), storage->content.c_str(),
storage->properties);
- auto s = surf_storage_model->createStorage(storage->id, stype->id, storage->content, storage->attach);
+ auto s = surf_storage_model->createStorage(storage->filename, storage->lineno, storage->id, stype->id,
+ storage->content, storage->attach);
if (storage->properties) {
s->set_properties(*storage->properties);
xbt_assert(current_routing, "Cannot seal the current AS: none under construction");
current_routing->seal();
simgrid::s4u::NetZone::on_seal(*current_routing->get_iface());
- current_routing = static_cast<simgrid::kernel::routing::NetZoneImpl*>(current_routing->get_father());
+ current_routing = current_routing->get_father();
}
/** @brief Add a link connecting a host to the rest of its AS (which must be cluster or vivaldi) */
void sg_platf_new_hostlink(const simgrid::kernel::routing::HostLinkCreationArgs* hostlink)
{
- simgrid::kernel::routing::NetPoint* netpoint = simgrid::s4u::Host::by_name(hostlink->id)->get_netpoint();
+ const simgrid::kernel::routing::NetPoint* netpoint = simgrid::s4u::Host::by_name(hostlink->id)->get_netpoint();
xbt_assert(netpoint, "Host '%s' not found!", hostlink->id.c_str());
xbt_assert(dynamic_cast<simgrid::kernel::routing::ClusterZone*>(current_routing),
"Only hosts from Cluster and Vivaldi ASes can get a host_link.");
#include "src/kernel/lmm/maxmin.hpp"
#include "src/surf/xml/platf.hpp"
#include "surf/surf.hpp"
+#include "xbt/parse_units.hpp"
XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(surf_storage);
all_existing_models.push_back(this);
}
-StorageImpl* StorageN11Model::createStorage(const std::string& id, const std::string& type_id,
- const std::string& content_name, const std::string& attach)
+StorageImpl* StorageN11Model::createStorage(std::string& filename, int lineno, const std::string& id,
+ const std::string& type_id, const std::string& content_name,
+ const std::string& attach)
{
const StorageType* storage_type = storage_types.at(type_id);
- double Bread =
- surf_parse_get_bandwidth(storage_type->model_properties->at("Bread").c_str(), "property Bread, storage", type_id);
- double Bwrite = surf_parse_get_bandwidth(storage_type->model_properties->at("Bwrite").c_str(),
- "property Bwrite, storage", type_id);
+ double Bread = xbt_parse_get_bandwidth(filename, lineno, storage_type->model_properties->at("Bread").c_str(),
+ "property Bread, storage", type_id);
+ double Bwrite = xbt_parse_get_bandwidth(filename, lineno, storage_type->model_properties->at("Bwrite").c_str(),
+ "property Bwrite, storage", type_id);
XBT_DEBUG("SURF storage create resource\n\t\tid '%s'\n\t\ttype '%s'\n\t\tBread '%f'\n", id.c_str(), type_id.c_str(),
Bread);
for (auto it = std::begin(*get_started_action_set()); it != std::end(*get_started_action_set());) {
auto& action = *it;
++it; // increment iterator here since the following calls to action.finish() may invalidate it
- action.update_remains(lrint(action.get_variable()->get_value() * delta));
+ action.update_remains(rint(action.get_variable()->get_value() * delta));
action.update_max_duration(delta);
if (((action.get_remains_no_update() <= 0) && (action.get_variable()->get_penalty() > 0)) ||
StorageAction* StorageN11::io_start(sg_size_t size, s4u::Io::OpType type)
{
- return new StorageN11Action(get_model(), size, not is_on(), this, type);
+ return new StorageN11Action(get_model(), static_cast<double>(size), not is_on(), this, type);
}
StorageAction* StorageN11::read(sg_size_t size)
{
- return new StorageN11Action(get_model(), size, not is_on(), this, s4u::Io::OpType::READ);
+ return new StorageN11Action(get_model(), static_cast<double>(size), not is_on(), this, s4u::Io::OpType::READ);
}
StorageAction* StorageN11::write(sg_size_t size)
{
- return new StorageN11Action(get_model(), size, not is_on(), this, s4u::Io::OpType::WRITE);
+ return new StorageN11Action(get_model(), static_cast<double>(size), not is_on(), this, s4u::Io::OpType::WRITE);
}
/**********
class StorageN11Model : public StorageModel {
public:
StorageN11Model();
- StorageImpl* createStorage(const std::string& id, const std::string& type_id, const std::string& content_name,
- const std::string& attach) override;
+ StorageImpl* createStorage(std::string& filename, int lineno, const std::string& id, const std::string& type_id,
+ const std::string& content_name, const std::string& attach) override;
double next_occurring_event(double now) override;
void update_actions_state(double now, double delta) override;
};
XBT_DEBUG("Looking for next trace event");
- while (1) { // Handle next occurring events until none remains
+ while (true) { // Handle next occurring events until none remains
double next_event_date = simgrid::kernel::profile::future_evt_set.next_date();
XBT_DEBUG("Next TRACE event: %f", next_event_date);
auto pos = std::find_if(table.begin(), table.end(),
[&name](const surf_model_description_t& item) { return item.name == name; });
if (pos != table.end())
- return std::distance(table.begin(), pos);
+ return static_cast<int>(std::distance(table.begin(), pos));
if (table.empty())
xbt_die("No model is valid! This is a bug.");
XBT_PUBLIC double surf_parse_get_double(const std::string& s);
XBT_PUBLIC int surf_parse_get_int(const std::string& s);
-XBT_PUBLIC double surf_parse_get_time(const char* string, const char* entity_kind, const std::string& name);
-XBT_PUBLIC double surf_parse_get_size(const char* string, const char* entity_kind, const std::string& name);
-XBT_PUBLIC double surf_parse_get_bandwidth(const char* string, const char* entity_kind, const std::string& name);
-XBT_PUBLIC std::vector<double> surf_parse_get_bandwidths(const char* string, const char* entity_kind,
- const std::string& name);
-XBT_PUBLIC double surf_parse_get_speed(const char* string, const char* entity_kind, const std::string& name);
XBT_PUBLIC void surf_parse(); /* Entry-point to the parser */
class StorageCreationArgs {
public:
+ std::string filename;
+ int lineno;
std::string id;
std::string type_id;
std::string content;
void parse_platform_file(const std::string& file)
{
const char* cfile = file.c_str();
- int len = strlen(cfile);
+ size_t len = strlen(cfile);
bool is_lua = len > 3 && file[len - 3] == 'l' && file[len - 2] == 'u' && file[len - 1] == 'a';
sg_platf_init();
#include "src/surf/xml/platf_private.hpp"
#include "surf/surf.hpp"
#include "xbt/file.hpp"
+#include "xbt/parse_units.hpp"
-#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <string>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_parse, surf, "Logging specific to the SURF parsing module");
-static std::string surf_parsed_filename; // Currently parsed file (for the error messages)
+std::string surf_parsed_filename; // Currently parsed file (for the error messages)
std::vector<simgrid::kernel::resource::LinkImpl*>
parsed_link_list; /* temporary store of current link list of a route */
std::vector<simgrid::kernel::resource::DiskImpl*> parsed_disk_list; /* temporary store of current disk list of a host */
}
}
-namespace {
-
/* Turn something like "1-4,6,9-11" into the vector {1,2,3,4,6,9,10,11} */
-std::vector<int>* explodesRadical(const std::string& radicals)
+static std::vector<int>* explodesRadical(const std::string& radicals)
{
std::vector<int>* exploded = new std::vector<int>();
return exploded;
}
-class unit_scale : public std::unordered_map<std::string, double> {
-public:
- using std::unordered_map<std::string, double>::unordered_map;
- // tuples are : <unit, value for unit, base (2 or 10), true if abbreviated>
- explicit unit_scale(std::initializer_list<std::tuple<const std::string, double, int, bool>> generators);
-};
-
-unit_scale::unit_scale(std::initializer_list<std::tuple<const std::string, double, int, bool>> generators)
-{
- for (const auto& gen : generators) {
- const std::string& unit = std::get<0>(gen);
- double value = std::get<1>(gen);
- const int base = std::get<2>(gen);
- const bool abbrev = std::get<3>(gen);
- double mult;
- std::vector<std::string> prefixes;
- switch (base) {
- case 2:
- mult = 1024.0;
- prefixes = abbrev ? std::vector<std::string>{"Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"}
- : std::vector<std::string>{"kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"};
- break;
- case 10:
- mult = 1000.0;
- prefixes = abbrev ? std::vector<std::string>{"k", "M", "G", "T", "P", "E", "Z", "Y"}
- : std::vector<std::string>{"kilo", "mega", "giga", "tera", "peta", "exa", "zeta", "yotta"};
- break;
- default:
- THROW_IMPOSSIBLE;
- }
- emplace(unit, value);
- for (const auto& prefix : prefixes) {
- value *= mult;
- emplace(prefix + unit, value);
- }
- }
-}
-
-/* Note: no warning is issued for unit-less values when `name' is empty. */
-double surf_parse_get_value_with_unit(const char* string, const unit_scale& units, const char* entity_kind,
- const std::string& name, const char* error_msg, const char* default_unit)
-{
- char* endptr;
- errno = 0;
- double res = strtod(string, &endptr);
- const char* ptr = endptr; // for const-correctness
- if (errno == ERANGE)
- surf_parse_error(std::string("value out of range: ") + string);
- if (ptr == string)
- surf_parse_error(std::string("cannot parse number:") + string);
- if (ptr[0] == '\0') {
- // Ok, 0 can be unit-less
- if (res != 0 && not name.empty())
- XBT_WARN("Deprecated unit-less value '%s' for %s %s. %s", string, entity_kind, name.c_str(), error_msg);
- ptr = default_unit;
- }
- auto u = units.find(ptr);
- if (u == units.end())
- surf_parse_error(std::string("unknown unit: ") + ptr);
- return res * u->second;
-}
-}
-
-double surf_parse_get_time(const char* string, const char* entity_kind, const std::string& name)
-{
- static const unit_scale units{std::make_pair("w", 7 * 24 * 60 * 60),
- std::make_pair("d", 24 * 60 * 60),
- std::make_pair("h", 60 * 60),
- std::make_pair("m", 60),
- std::make_pair("s", 1.0),
- std::make_pair("ms", 1e-3),
- std::make_pair("us", 1e-6),
- std::make_pair("ns", 1e-9),
- std::make_pair("ps", 1e-12)};
- return surf_parse_get_value_with_unit(string, units, entity_kind, name,
- "Append 's' to your time to get seconds", "s");
-}
-
-double surf_parse_get_size(const char* string, const char* entity_kind, const std::string& name)
-{
- static const unit_scale units{std::make_tuple("b", 0.125, 2, true), std::make_tuple("b", 0.125, 10, true),
- std::make_tuple("B", 1.0, 2, true), std::make_tuple("B", 1.0, 10, true)};
- return surf_parse_get_value_with_unit(string, units, entity_kind, name,
- "Append 'B' to get bytes (or 'b' for bits but 1B = 8b).", "B");
-}
-
-double surf_parse_get_bandwidth(const char* string, const char* entity_kind, const std::string& name)
-{
- static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true),
- std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)};
- return surf_parse_get_value_with_unit(string, units, entity_kind, name,
- "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", "Bps");
-}
-
-std::vector<double> surf_parse_get_bandwidths(const char* string, const char* entity_kind, const std::string& name)
-{
- static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true),
- std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)};
-
- std::vector<double> bandwidths;
- std::vector<std::string> tokens;
- boost::split(tokens, string, boost::is_any_of(";"));
- for (auto token : tokens) {
- bandwidths.push_back(surf_parse_get_value_with_unit(
- token.c_str(), units, entity_kind, name,
- "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", "Bps"));
- }
-
- return bandwidths;
-}
-
-double surf_parse_get_speed(const char* string, const char* entity_kind, const std::string& name)
-{
- static const unit_scale units{std::make_tuple("f", 1.0, 10, true), std::make_tuple("flops", 1.0, 10, false)};
- return surf_parse_get_value_with_unit(string, units, entity_kind, name,
- "Append 'f' or 'flops' to your speed to get flop per second", "f");
-}
-
-static std::vector<double> surf_parse_get_all_speeds(char* speeds, const char* entity_kind, const std::string& id)
-{
- std::vector<double> speed_per_pstate;
-
- if (strchr(speeds, ',') == nullptr){
- double speed = surf_parse_get_speed(speeds, entity_kind, id);
- speed_per_pstate.push_back(speed);
- } else {
- std::vector<std::string> pstate_list;
- boost::split(pstate_list, speeds, boost::is_any_of(","));
- for (auto speed_str : pstate_list) {
- boost::trim(speed_str);
- double speed = surf_parse_get_speed(speed_str.c_str(), entity_kind, id);
- speed_per_pstate.push_back(speed);
- XBT_DEBUG("Speed value: %f", speed);
- }
- }
- return speed_per_pstate;
-}
/*
* All the callback lists that can be overridden anywhere.
storage.properties = property_sets.back();
property_sets.pop_back();
+ storage.filename = surf_parsed_filename;
+ storage.lineno = surf_parse_lineno;
storage.id = A_surfxml_storage_id;
storage.type_id = A_surfxml_storage_typeId;
storage.content = A_surfxml_storage_content;
storage_type.id = A_surfxml_storage___type_id;
storage_type.model = A_surfxml_storage___type_model;
storage_type.size =
- surf_parse_get_size(A_surfxml_storage___type_size, "size of storage type", storage_type.id.c_str());
+ static_cast<sg_size_t>(surf_parse_get_size(surf_parsed_filename, surf_parse_lineno, A_surfxml_storage___type_size,
+ "size of storage type", storage_type.id.c_str()));
sg_platf_new_storage_type(&storage_type);
}
host.id = A_surfxml_host_id;
- host.speed_per_pstate = surf_parse_get_all_speeds(A_surfxml_host_speed, "speed of host", host.id);
+ host.speed_per_pstate =
+ xbt_parse_get_all_speeds(surf_parsed_filename, surf_parse_lineno, A_surfxml_host_speed, "speed of host", host.id);
XBT_DEBUG("pstate: %s", A_surfxml_host_pstate);
host.core_amount = surf_parse_get_int(A_surfxml_host_core);
property_sets.pop_back();
disk.id = A_surfxml_disk_id;
- disk.read_bw = surf_parse_get_bandwidth(A_surfxml_disk_read___bw, "read_bw of disk ", disk.id);
- disk.write_bw = surf_parse_get_bandwidth(A_surfxml_disk_write___bw, "write_bw of disk ", disk.id);
+ disk.read_bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_disk_read___bw,
+ "read_bw of disk ", disk.id);
+ disk.write_bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_disk_write___bw,
+ "write_bw of disk ", disk.id);
parsed_disk_list.push_back(sg_platf_new_disk(&disk));
}
cluster.prefix = A_surfxml_cluster_prefix;
cluster.suffix = A_surfxml_cluster_suffix;
cluster.radicals = explodesRadical(A_surfxml_cluster_radical);
- cluster.speeds = surf_parse_get_all_speeds(A_surfxml_cluster_speed, "speed of cluster", cluster.id);
+ cluster.speeds = xbt_parse_get_all_speeds(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_speed,
+ "speed of cluster", cluster.id);
cluster.core_amount = surf_parse_get_int(A_surfxml_cluster_core);
- cluster.bw = surf_parse_get_bandwidth(A_surfxml_cluster_bw, "bw of cluster", cluster.id);
- cluster.lat = surf_parse_get_time(A_surfxml_cluster_lat, "lat of cluster", cluster.id);
+ cluster.bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_bw, "bw of cluster",
+ cluster.id);
+ cluster.lat =
+ xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_lat, "lat of cluster", cluster.id);
if(strcmp(A_surfxml_cluster_bb___bw,""))
- cluster.bb_bw = surf_parse_get_bandwidth(A_surfxml_cluster_bb___bw, "bb_bw of cluster", cluster.id);
+ cluster.bb_bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_bb___bw,
+ "bb_bw of cluster", cluster.id);
if(strcmp(A_surfxml_cluster_bb___lat,""))
- cluster.bb_lat = surf_parse_get_time(A_surfxml_cluster_bb___lat, "bb_lat of cluster", cluster.id);
+ cluster.bb_lat = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_bb___lat,
+ "bb_lat of cluster", cluster.id);
if(strcmp(A_surfxml_cluster_limiter___link,""))
- cluster.limiter_link = surf_parse_get_bandwidth(A_surfxml_cluster_limiter___link, "limiter_link of cluster", cluster.id);
+ cluster.limiter_link =
+ xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_limiter___link,
+ "limiter_link of cluster", cluster.id);
if(strcmp(A_surfxml_cluster_loopback___bw,""))
- cluster.loopback_bw = surf_parse_get_bandwidth(A_surfxml_cluster_loopback___bw, "loopback_bw of cluster", cluster.id);
+ cluster.loopback_bw = xbt_parse_get_bandwidth(
+ surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_loopback___bw, "loopback_bw of cluster", cluster.id);
if(strcmp(A_surfxml_cluster_loopback___lat,""))
- cluster.loopback_lat = surf_parse_get_time(A_surfxml_cluster_loopback___lat, "loopback_lat of cluster", cluster.id);
+ cluster.loopback_lat = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cluster_loopback___lat,
+ "loopback_lat of cluster", cluster.id);
switch(AX_surfxml_cluster_topology){
case A_surfxml_cluster_topology_FLAT:
cabinet.id = A_surfxml_cabinet_id;
cabinet.prefix = A_surfxml_cabinet_prefix;
cabinet.suffix = A_surfxml_cabinet_suffix;
- cabinet.speed = surf_parse_get_speed(A_surfxml_cabinet_speed, "speed of cabinet", cabinet.id.c_str());
- cabinet.bw = surf_parse_get_bandwidth(A_surfxml_cabinet_bw, "bw of cabinet", cabinet.id.c_str());
- cabinet.lat = surf_parse_get_time(A_surfxml_cabinet_lat, "lat of cabinet", cabinet.id.c_str());
+ cabinet.speed = xbt_parse_get_speed(surf_parsed_filename, surf_parse_lineno, A_surfxml_cabinet_speed,
+ "speed of cabinet", cabinet.id.c_str());
+ cabinet.bw = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_cabinet_bw, "bw of cabinet",
+ cabinet.id.c_str());
+ cabinet.lat = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_cabinet_lat, "lat of cabinet",
+ cabinet.id.c_str());
cabinet.radicals = explodesRadical(A_surfxml_cabinet_radical);
sg_platf_new_cabinet(&cabinet);
simgrid::kernel::routing::PeerCreationArgs peer;
peer.id = std::string(A_surfxml_peer_id);
- peer.speed = surf_parse_get_speed(A_surfxml_peer_speed, "speed of peer", peer.id.c_str());
- peer.bw_in = surf_parse_get_bandwidth(A_surfxml_peer_bw___in, "bw_in of peer", peer.id.c_str());
- peer.bw_out = surf_parse_get_bandwidth(A_surfxml_peer_bw___out, "bw_out of peer", peer.id.c_str());
+ peer.speed = xbt_parse_get_speed(surf_parsed_filename, surf_parse_lineno, A_surfxml_peer_speed, "speed of peer",
+ peer.id.c_str());
+ peer.bw_in = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_peer_bw___in, "bw_in of peer",
+ peer.id.c_str());
+ peer.bw_out = xbt_parse_get_bandwidth(surf_parsed_filename, surf_parse_lineno, A_surfxml_peer_bw___out,
+ "bw_out of peer", peer.id.c_str());
peer.coord = A_surfxml_peer_coordinates;
peer.speed_trace = nullptr;
if (A_surfxml_peer_availability___file[0] != '\0') {
property_sets.pop_back();
link.id = std::string(A_surfxml_link_id);
- link.bandwidths = surf_parse_get_bandwidths(A_surfxml_link_bandwidth, "bandwidth of link", link.id.c_str());
+ link.bandwidths = xbt_parse_get_bandwidths(surf_parsed_filename, surf_parse_lineno, A_surfxml_link_bandwidth,
+ "bandwidth of link", link.id.c_str());
link.bandwidth_trace = A_surfxml_link_bandwidth___file[0]
? simgrid::kernel::profile::Profile::from_file(A_surfxml_link_bandwidth___file)
: nullptr;
- link.latency = surf_parse_get_time(A_surfxml_link_latency, "latency of link", link.id.c_str());
+ link.latency = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_link_latency, "latency of link",
+ link.id.c_str());
link.latency_trace = A_surfxml_link_latency___file[0]
? simgrid::kernel::profile::Profile::from_file(A_surfxml_link_latency___file)
: nullptr;
link.properties = nullptr;
link.id = std::string(A_surfxml_backbone_id);
- link.bandwidths.push_back(
- surf_parse_get_bandwidth(A_surfxml_backbone_bandwidth, "bandwidth of backbone", link.id.c_str()));
- link.latency = surf_parse_get_time(A_surfxml_backbone_latency, "latency of backbone", link.id.c_str());
+ link.bandwidths.push_back(xbt_parse_get_bandwidth(
+ surf_parsed_filename, surf_parse_lineno, A_surfxml_backbone_bandwidth, "bandwidth of backbone", link.id.c_str()));
+ link.latency = xbt_parse_get_time(surf_parsed_filename, surf_parse_lineno, A_surfxml_backbone_latency,
+ "latency of backbone", link.id.c_str());
link.policy = simgrid::s4u::Link::SharingPolicy::SHARED;
sg_platf_new_link(&link);
int xbt_automaton_propositional_symbols_compare_value(const_xbt_dynar_t s1, const_xbt_dynar_t s2)
{
- unsigned int nb_elem = xbt_dynar_length(s1);
+ unsigned long nb_elem = xbt_dynar_length(s1);
- for (unsigned int cursor = 0; cursor < nb_elem; cursor++) {
+ for (unsigned long cursor = 0; cursor < nb_elem; cursor++) {
const int* iptr1 = xbt_dynar_get_ptr(s1, cursor);
const int* iptr2 = xbt_dynar_get_ptr(s2, cursor);
if(*iptr1 != *iptr2)
template <> class ConfigType<int> {
public:
static constexpr const char* type_name = "int";
- static inline double parse(const char* value)
+ static inline int parse(const char* value)
{
- return parse_long(value);
+ long val = parse_long(value);
+ if (val < INT_MIN)
+ throw std::range_error("underflow");
+ if (val > INT_MAX)
+ throw std::range_error("overflow");
+ return static_cast<int>(val);
}
};
template <> class ConfigType<double> {
}
// Debug:
- void dump(const char *name, const char *indent);
- void show_aliases();
- void help();
+ void dump(const char* name, const char* indent) const;
+ void show_aliases() const;
+ void help() const;
protected:
ConfigurationElement* get_dict_element(const std::string& name);
* @param name The name to give to this config set
* @param indent what to write at the beginning of each line (right number of spaces)
*/
-void Config::dump(const char *name, const char *indent)
+void Config::dump(const char* name, const char* indent) const
{
if (name)
XBT_CVERB(xbt_help, "%s>> Dumping of the config set '%s':", indent, name);
}
/** @brief Displays the declared aliases and their replacement */
-void Config::show_aliases()
+void Config::show_aliases() const
{
for (auto const& elm : aliases)
XBT_HELP(" %-40s %s", elm.first.c_str(), elm.second->get_key().c_str());
}
/** @brief Displays the declared options and their description */
-void Config::help()
+void Config::help() const
{
for (auto const& elm : options) {
simgrid::config::ConfigurationElement* variable = elm.second.get();
*/
unsigned long xbt_dynar_length(const_xbt_dynar_t dynar)
{
- return (dynar ? (unsigned long) dynar->used : (unsigned long) 0);
+ return (dynar ? dynar->used : 0UL);
}
/**@brief check if a dynar is empty
_sanity_check_dynar(dynar);
for (i = 0; i < used; i++) {
- char* elm = (char*) data + i * elmsize;
+ char* elm = data + i * elmsize;
op(elm);
}
}
xbt_assert(static_cast<size_t>(ev->priority) < sizeof(xbt_log_priority_names)/sizeof(xbt_log_priority_names[0]),
"Priority %d is greater than the biggest allowed value", ev->priority);
- while (1) {
+ while (true) {
const s_xbt_log_appender_t* appender = cat->appender;
if (appender != nullptr) {
va_start(ev->ap, fmt);
done = cat->layout->do_layout(cat->layout, ev, fmt);
va_end(ev->ap);
- ev->buffer = nullptr; // Calm down, static analyzers, this pointer to local array wont leak out of the scope.
+ ev->buffer = nullptr; // Calm down, static analyzers, this pointer to local array won't leak out of the scope.
if (done) {
appender->do_append(appender, buff);
} else {
/* The static buffer was too small, use a dynamically expanded one */
ev->buffer_size = XBT_LOG_DYNAMIC_BUFFER_SIZE;
ev->buffer = static_cast<char*>(xbt_malloc(ev->buffer_size));
- while (1) {
+ while (true) {
va_start(ev->ap, fmt);
done = cat->layout->do_layout(cat->layout, ev, fmt);
va_end(ev->ap);
/* Mark all my ex-blocks as free */
for (it=0; it<mdp->heapinfo[block].busy_block.size; it++) {
if (mdp->heapinfo[block+it].type < 0) {
- fprintf(stderr,"Internal Error: Asked to free a block already marked as free (block=%lu it=%d type=%lu). Please report this bug.\n",
- (unsigned long)block,it,(unsigned long)mdp->heapinfo[block].type);
+ fprintf(stderr,
+ "Internal Error: Asked to free a block already marked as free (block=%zu it=%d type=%d). "
+ "Please report this bug.\n",
+ block, it, mdp->heapinfo[block].type);
abort();
}
mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE;
/* Mark all my ex-blocks as free */
for (it=0; it<mdp->heapinfo[block].free_block.size; it++) {
if (mdp->heapinfo[block+it].type <0) {
- fprintf(stderr,"Internal error: Asked to free a block already marked as free (block=%lu it=%d/%lu type=%lu). Please report this bug.\n",
- (unsigned long)block,it,(unsigned long)mdp->heapinfo[block].free_block.size,(unsigned long)mdp->heapinfo[block].type);
+ fprintf(stderr,
+ "Internal error: Asked to free a block already marked as free (block=%zu it=%d/%zu type=%d). "
+ "Please report this bug.\n",
+ block, it, mdp->heapinfo[block].free_block.size, mdp->heapinfo[block].type);
abort();
}
mdp->heapinfo[block+it].type = MMALLOC_TYPE_FREE;
default:
if (type < 0) {
- fprintf(stderr, "Unkown mmalloc block type.\n");
+ fprintf(stderr, "Unknown mmalloc block type.\n");
abort();
}
mdp -> heapstats.chunks_free -= BLOCKSIZE >> type;
mdp -> heapstats.bytes_free -= BLOCKSIZE;
- mfree((void *) mdp, (void *) ADDRESS(block));
+ mfree(mdp, ADDRESS(block));
} else if (mdp->heapinfo[block].busy_frag.nfree != 0) {
/* If some fragments of this block are free, you know what? I'm already happy. */
++mdp->heapinfo[block].busy_frag.nfree;
{
// Update heapinfo about the heapinfo pages (!):
xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
- int block = BLOCK(mdp->heapinfo);
+ size_t block = BLOCK(mdp->heapinfo);
size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
// Mark them as free:
for (size_t j=0; j!=nblocks; ++j) {
result = mmalloc(mdp, BLOCKSIZE); // does not return NULL
block = BLOCK(result);
- mdp->heapinfo[block].type = log;
+ mdp->heapinfo[block].type = (int)log;
/* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags */
size_t i;
for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) {
block = MALLOC_SEARCH_START;
while (mdp->heapinfo[block].free_block.size < blocks) {
if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool!
- fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
+ fprintf(stderr,
+ "Internal error: found a free block not marked as such (block=%zu type=%d). Please report this bug.\n",
+ block, mdp->heapinfo[block].type);
abort();
}
/* We are deallocating memory. If the amount requested would cause us to try to deallocate back past the base of
* the mmap'd region then die verbosely. Otherwise, deallocate the memory and return the old break value. */
if (((char*)mdp->breakval) + size >= (char*)mdp->base) {
- result = (void*)mdp->breakval;
+ result = mdp->breakval;
mdp->breakval = (char*)mdp->breakval + size;
moveto = PAGE_ALIGN(mdp->breakval);
munmap(moveto, (size_t)(((char*)mdp->top) - ((char*)moveto)) - 1);
mdp->base = mdp->breakval = mapto;
mdp->top = PAGE_ALIGN((char*)mdp->breakval + size);
- result = (void *) mdp->breakval;
+ result = mdp->breakval;
mdp->breakval = (char*)mdp->breakval + size;
} else {
/* Memory is already mapped, we only need to increase the breakval: */
- result = (void *) mdp->breakval;
+ result = mdp->breakval;
mdp->breakval = (char*)mdp->breakval + size;
}
}
/* Full blocks -> Full blocks; see if we can hold it in place. */
blocks = BLOCKIFY(size);
if (blocks < mdp->heapinfo[block].busy_block.size) {
- int it;
+ size_t it;
/* The new size is smaller; return excess memory to the free list. */
for (it= block+blocks; it< mdp->heapinfo[block].busy_block.size ; it++){
mdp->heapinfo[it].type = MMALLOC_TYPE_UNFRAGMENTED; // FIXME that should be useless, type should already be 0 here
default: /* Fragment -> ??; type=logarithm to base two of the fragment size. */
if (type < 0) {
- fprintf(stderr, "Unkown mmalloc block type.\n");
+ fprintf(stderr, "Unknown mmalloc block type.\n");
abort();
}
/* The new size is the same kind of fragment. */
result = ptr;
- int frag_nb = RESIDUAL(result, BLOCKSIZE) >> type;
+ uintptr_t frag_nb = RESIDUAL(result, BLOCKSIZE) >> type;
mdp->heapinfo[block].busy_frag.frag_size[frag_nb] = requested_size;
mdp->heapinfo[block].busy_frag.ignore[frag_nb] = 0;
return not file.fail();
}
-bool Random::write_state(const std::string& filename)
+bool Random::write_state(const std::string& filename) const
{
std::ofstream file(filename);
file << mt19937_gen;
do {
value = mt19937_gen();
} while (value >= decltype(mt19937_gen)::max() - decltype(mt19937_gen)::max() % range);
- return value % range + min;
+ return static_cast<int>(value % range + min);
}
double XbtRandom::uniform_real(double min, double max)
do {
numerator = mt19937_gen() - decltype(mt19937_gen)::min();
} while (numerator == divisor);
- return min + (max - min) * numerator / divisor;
+ return min + (max - min) * static_cast<double>(numerator) / divisor;
}
double XbtRandom::exponential(double lambda)
} else \
return 0
-#define set_sz_from_precision() \
- if (1) { \
- sz = rem_size; \
- if (precision != -1) { \
- if (precision < sz) \
- sz = precision + 1; /* +1 for the final '\0' */ \
- precision = -1; \
- } \
- } else (void)0
+#define set_sz_from_precision() \
+ if (true) { \
+ sz = rem_size; \
+ if (precision != -1) { \
+ if (precision < sz) \
+ sz = precision + 1; /* +1 for the final '\0' */ \
+ precision = -1; \
+ } \
+ } else \
+ (void)0
#define show_it(data, letter) \
- if (1) { \
+ if (true) { \
int len; \
int wd; \
if (length == -1) { \
} \
check_overflow(len); \
} else \
- (void)0
+ (void)0
-#define show_string(data) \
- if (1) { \
- const char *show_string_data = (data); \
- show_it(show_string_data ? show_string_data : "(null)", "s"); \
- } else (void)0
+#define show_string(data) \
+ if (true) { \
+ const char* show_string_data = (data); \
+ show_it(show_string_data ? show_string_data : "(null)", "s"); \
+ } else \
+ (void)0
#define show_int(data) show_it((data), "d")
#define show_double(data) show_it((data), "f")
check_overflow(1);
break;
case '.': /* precision specifier */
- precision = strtol(q + 1, &q, 10);
+ precision = static_cast<int>(strtol(q + 1, &q, 10));
continue; /* conversion specifier still not found, continue reading */
case '0':
case '1':
case '7':
case '8':
case '9': /* length modifier */
- length = strtol(q, &q, 10);
+ length = static_cast<int>(strtol(q, &q, 10));
continue; /* conversion specifier still not found, continue reading */
case 'c': /* category name; LOG4J compliant
should accept a precision postfix to show the hierarchy */
xbt_abort();
}
break; /* done, continue normally */
- } while (1);
+ } while (true);
} else {
*p = *q;
check_overflow(1);
GetSystemInfo(&si);
xbt_pagesize = si.dwPageSize;
#elif HAVE_SYSCONF
- xbt_pagesize = sysconf(_SC_PAGESIZE);
+ xbt_pagesize = static_cast<int>(sysconf(_SC_PAGESIZE));
#else
# error Cannot get page size.
#endif
- xbt_pagebits = log2(xbt_pagesize);
+ xbt_pagebits = static_cast<int>(log2(xbt_pagesize));
#ifdef _TWO_DIGIT_EXPONENT
/* Even printf behaves differently on Windows... */
#endif
}
-std::string simgrid::xbt::Path::get_dir_name()
+std::string simgrid::xbt::Path::get_dir_name() const
{
std::string p(path_);
const char* res = dirname(&p[0]);
return std::string(res, strlen(res));
}
-std::string simgrid::xbt::Path::get_base_name()
+std::string simgrid::xbt::Path::get_base_name() const
{
std::string p(path_);
const char* res = basename(&p[0]);
return (double) (time(NULL));
#endif /* HAVE_GETTIMEOFDAY? */
- return (double) (tv.tv_sec + tv.tv_usec / 1000000.0);
+ return (double)tv.tv_sec + (double)tv.tv_usec / 1e6;
}
void xbt_os_sleep(double sec)
#elif HAVE_NANOSLEEP
struct timespec ts;
- ts.tv_sec = sec;
- ts.tv_nsec = (sec - floor(sec)) * 1e9;
+ ts.tv_sec = (time_t)sec;
+ ts.tv_nsec = (long)((sec - floor(sec)) * 1e9);
nanosleep (&ts, NULL);
#else /* don't have nanosleep. Use select to sleep less than one second */
struct timeval timeout;
- timeout.tv_sec = (unsigned long) (sec);
- timeout.tv_usec = (sec - floor(sec)) * 1000000;
+ timeout.tv_sec = (long)sec;
+ timeout.tv_usec = (long)((sec - floor(sec)) * 1e6);
select(0, NULL, NULL, NULL, &timeout);
#endif
--- /dev/null
+#include "simgrid/Exception.hpp"
+#include "xbt/ex.h"
+#include "xbt/log.h"
+
+#include "xbt/parse_units.hpp"
+
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/classification.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(parse, xbt, "Parsing functions");
+
+class unit_scale : public std::unordered_map<std::string, double> {
+public:
+ using std::unordered_map<std::string, double>::unordered_map;
+ // tuples are : <unit, value for unit, base (2 or 10), true if abbreviated>
+ explicit unit_scale(std::initializer_list<std::tuple<const std::string, double, int, bool>> generators);
+};
+
+unit_scale::unit_scale(std::initializer_list<std::tuple<const std::string, double, int, bool>> generators)
+{
+ for (const auto& gen : generators) {
+ const std::string& unit = std::get<0>(gen);
+ double value = std::get<1>(gen);
+ const int base = std::get<2>(gen);
+ const bool abbrev = std::get<3>(gen);
+ double mult;
+ std::vector<std::string> prefixes;
+ switch (base) {
+ case 2:
+ mult = 1024.0;
+ prefixes = abbrev ? std::vector<std::string>{"Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"}
+ : std::vector<std::string>{"kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi"};
+ break;
+ case 10:
+ mult = 1000.0;
+ prefixes = abbrev ? std::vector<std::string>{"k", "M", "G", "T", "P", "E", "Z", "Y"}
+ : std::vector<std::string>{"kilo", "mega", "giga", "tera", "peta", "exa", "zeta", "yotta"};
+ break;
+ default:
+ THROW_IMPOSSIBLE;
+ }
+ emplace(unit, value);
+ for (const auto& prefix : prefixes) {
+ value *= mult;
+ emplace(prefix + unit, value);
+ }
+ }
+}
+
+/* Note: no warning is issued for unit-less values when `name' is empty. */
+static double surf_parse_get_value_with_unit(const std::string& filename, int lineno, const char* string,
+ const unit_scale& units, const char* entity_kind, const std::string& name,
+ const char* error_msg, const char* default_unit)
+{
+ char* endptr;
+ errno = 0;
+ double res = strtod(string, &endptr);
+ const char* ptr = endptr; // for const-correctness
+ if (errno == ERANGE)
+ throw simgrid::ParseError(filename, lineno, std::string("value out of range: ") + string);
+ if (ptr == string)
+ throw simgrid::ParseError(filename, lineno, std::string("cannot parse number:") + string);
+ if (ptr[0] == '\0') {
+ // Ok, 0 can be unit-less
+ if (res != 0 && not name.empty())
+ XBT_WARN("Deprecated unit-less value '%s' for %s %s. %s", string, entity_kind, name.c_str(), error_msg);
+ ptr = default_unit;
+ }
+ auto u = units.find(ptr);
+ if (u == units.end())
+ throw simgrid::ParseError(filename, lineno, std::string("unknown unit: ") + ptr);
+ return res * u->second;
+}
+
+double xbt_parse_get_time(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name)
+{
+ static const unit_scale units{std::make_pair("w", 7 * 24 * 60 * 60),
+ std::make_pair("d", 24 * 60 * 60),
+ std::make_pair("h", 60 * 60),
+ std::make_pair("m", 60),
+ std::make_pair("s", 1.0),
+ std::make_pair("ms", 1e-3),
+ std::make_pair("us", 1e-6),
+ std::make_pair("ns", 1e-9),
+ std::make_pair("ps", 1e-12)};
+ return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name,
+ "Append 's' to your time to get seconds", "s");
+}
+
+double surf_parse_get_size(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name)
+{
+ static const unit_scale units{std::make_tuple("b", 0.125, 2, true), std::make_tuple("b", 0.125, 10, true),
+ std::make_tuple("B", 1.0, 2, true), std::make_tuple("B", 1.0, 10, true)};
+ return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name,
+ "Append 'B' to get bytes (or 'b' for bits but 1B = 8b).", "B");
+}
+
+double xbt_parse_get_bandwidth(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name)
+{
+ static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true),
+ std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)};
+ return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name,
+ "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)",
+ "Bps");
+}
+
+std::vector<double> xbt_parse_get_bandwidths(const std::string& filename, int lineno, const char* string,
+ const char* entity_kind, const std::string& name)
+{
+ static const unit_scale units{std::make_tuple("bps", 0.125, 2, true), std::make_tuple("bps", 0.125, 10, true),
+ std::make_tuple("Bps", 1.0, 2, true), std::make_tuple("Bps", 1.0, 10, true)};
+
+ std::vector<double> bandwidths;
+ std::vector<std::string> tokens;
+ boost::split(tokens, string, boost::is_any_of(";,"));
+ for (auto token : tokens) {
+ bandwidths.push_back(surf_parse_get_value_with_unit(
+ filename, lineno, token.c_str(), units, entity_kind, name,
+ "Append 'Bps' to get bytes per second (or 'bps' for bits but 1Bps = 8bps)", "Bps"));
+ }
+
+ return bandwidths;
+}
+
+double xbt_parse_get_speed(const std::string& filename, int lineno, const char* string, const char* entity_kind,
+ const std::string& name)
+{
+ static const unit_scale units{std::make_tuple("f", 1.0, 10, true), std::make_tuple("flops", 1.0, 10, false)};
+ return surf_parse_get_value_with_unit(filename, lineno, string, units, entity_kind, name,
+ "Append 'f' or 'flops' to your speed to get flop per second", "f");
+}
+
+std::vector<double> xbt_parse_get_all_speeds(const std::string& filename, int lineno, char* speeds,
+ const char* entity_kind, const std::string& id)
+{
+ std::vector<double> speed_per_pstate;
+
+ if (strchr(speeds, ',') == nullptr) {
+ double speed = xbt_parse_get_speed(filename, lineno, speeds, entity_kind, id);
+ speed_per_pstate.push_back(speed);
+ } else {
+ std::vector<std::string> pstate_list;
+ boost::split(pstate_list, speeds, boost::is_any_of(","));
+ for (auto speed_str : pstate_list) {
+ boost::trim(speed_str);
+ double speed = xbt_parse_get_speed(filename, lineno, speed_str.c_str(), entity_kind, id);
+ speed_per_pstate.push_back(speed);
+ XBT_DEBUG("Speed value: %f", speed);
+ }
+ }
+ return speed_per_pstate;
+}
test_split_quoted("Useless backslashes", "\\t\\o\\t\\o \\t\\u\\t\\u", {"toto", "tutu"});
test_split_quoted("Protected space", "toto\\ tutu", {"toto tutu"});
test_split_quoted("Several spaces", "toto tutu", {"toto", "tutu"});
- test_split_quoted("LTriming", " toto tatu", {"toto", "tatu"});
- test_split_quoted("Triming", " toto tutu ", {"toto", "tutu"});
+ test_split_quoted("LTrimming", " toto tatu", {"toto", "tatu"});
+ test_split_quoted("Trimming", " toto tutu ", {"toto", "tutu"});
test_split_quoted("Single quotes", "'toto tutu' tata", {"toto tutu", "tata"});
test_split_quoted("Double quotes", "\"toto tutu\" tata", {"toto tutu", "tata"});
test_split_quoted("Mixed quotes", "\"toto' 'tutu\" tata", {"toto' 'tutu", "tata"});
int xbt_getpid()
{
const simgrid::kernel::actor::ActorImpl* self = SIMIX_process_self();
- return self == nullptr ? 0 : self->get_pid();
+ return self == nullptr ? 0 : static_cast<int>(self->get_pid());
}
const char* xbt_procname(void)
IF(SIMGRID_HAVE_LUA)
- # Tests testing simulation from C but using lua for platform files. Executed like this
- # ~$ ./masterslave platform.lua deploy.lua
- ADD_TESH(lua-platform-masterslave --setenv srcdir=${CMAKE_HOME_DIRECTORY} --setenv bindir=${CMAKE_BINARY_DIR} --cd ${CMAKE_BINARY_DIR} ${CMAKE_HOME_DIRECTORY}/teshsuite/lua/lua_platforms.tesh)
- SET_TESTS_PROPERTIES(lua-platform-masterslave PROPERTIES ENVIRONMENT "LUA_CPATH=${CMAKE_BINARY_DIR}/lib/lib?.${LIB_EXE}")
+ # Tests testing simulation from C++ but using lua for platform files.
+ ADD_TESH(lua-platform-masterworker --setenv srcdir=${CMAKE_HOME_DIRECTORY} --setenv bindir=${CMAKE_BINARY_DIR} --cd ${CMAKE_BINARY_DIR} ${CMAKE_HOME_DIRECTORY}/teshsuite/lua/lua_platforms.tesh)
+ SET_TESTS_PROPERTIES(lua-platform-masterworker PROPERTIES ENVIRONMENT "LUA_CPATH=${CMAKE_BINARY_DIR}/lib/lib?.${LIB_EXE}")
ENDIF()
static simgrid::mc::RemoteSimulation* process;
-static uintptr_t eval_binary_operation(simgrid::dwarf::ExpressionContext const& state, int op, uintptr_t a, uintptr_t b)
+static uintptr_t eval_binary_operation(simgrid::dwarf::ExpressionContext const& state, uint8_t op, uintptr_t a,
+ uintptr_t b)
{
Dwarf_Op ops[15];
ops[0].atom = DW_OP_const8u;
* under the terms of the license (GNU LGPL) which comes with this package. */
// This is the MWE of https://framagit.org/simgrid/simgrid/-/issues/50
-// The problem was occuring when suspending an actor that will be executed later in the same scheduling round
+// The problem was occurring when suspending an actor that will be executed later in the same scheduling round
#include <iostream>
#include <simgrid/s4u.hpp>
class Receiver {
public:
- void operator()()
+ void operator()() const
{
XBT_INFO("Starting.");
auto mailbox = simgrid::s4u::Mailbox::by_name("receiver");
class Suspender {
public:
- void operator()()
+ void operator()() const
{
XBT_INFO("Suspend the receiver...");
receiver->suspend();
int main(int argc, char** argv)
{
- simgrid::s4u::Engine* engine = new simgrid::s4u::Engine(&argc, argv);
+ const simgrid::s4u::Engine* engine = new simgrid::s4u::Engine(&argc, argv);
engine->load_platform(argv[1]);
simgrid::s4u::Host* host = simgrid::s4u::Host::by_name("Tremblay");
XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
vm0 = new simgrid::s4u::VirtualMachine("VM0", pm2, 2);
run_test_process("( [ooo]2 X )2", pm2, flop_amount * 2 / 3);
- run_test_process("( [Xoo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- run_test_process("( [oXo]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
- run_test_process("( [ooX]2 o )2", vm0, flop_amount * (2. / 3 * 2) / 3); // VM_share/3
+ run_test_process("( [Xoo]2 o )2", vm0, (flop_amount * 4 / 3) / 3); // VM_share/3
+ run_test_process("( [oXo]2 o )2", vm0, (flop_amount * 4 / 3) / 3); // VM_share/3
+ run_test_process("( [ooX]2 o )2", vm0, (flop_amount * 4 / 3) / 3); // VM_share/3
simgrid::s4u::this_actor::sleep_for(2);
test_energy_consumption(chooser,2);
vm0->destroy();
} else if (chooser == "( [ooo]2 ooo )4") {
XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM");
vm0 = new simgrid::s4u::VirtualMachine("VM0", pm4, 2);
- run_test_process("( [Xoo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3); // The VM has 8/5 of the PM
- run_test_process("( [oXo]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3);
- run_test_process("( [ooX]2 ooo )4", vm0, flop_amount * (8. / 5) * 1 / 3);
+ run_test_process("( [Xoo]2 ooo )4", vm0, (flop_amount * 8 / 5) / 3); // The VM has 8/5 of the PM
+ run_test_process("( [oXo]2 ooo )4", vm0, (flop_amount * 8 / 5) / 3);
+ run_test_process("( [ooX]2 ooo )4", vm0, (flop_amount * 8 / 5) / 3);
run_test_process("( [ooo]2 Xoo )4", pm4, flop_amount * 4 / 5);
run_test_process("( [ooo]2 oXo )4", pm4, flop_amount * 4 / 5);
static void host()
{
simgrid::s4u::Disk* disk = simgrid::s4u::this_actor::get_host()->get_disks().front(); // Disk1
- int id = simgrid::s4u::this_actor::get_pid();
+ int id = static_cast<int>(simgrid::s4u::this_actor::get_pid());
XBT_INFO("process %d is writing!", id);
disk->write(4000000);
XBT_INFO("process %d goes to sleep for %d seconds", id, id);
e.load_platform(argv[1]);
std::vector<simgrid::s4u::Host*> hosts = e.get_all_hosts();
- int host_count = e.get_host_count();
+ int host_count = static_cast<int>(e.get_host_count());
/* Random number initialization */
simgrid::xbt::random::set_mersenne_seed(static_cast<int>(xbt_os_time()));
int tasks_done = 0;
-static void actor_daemon()
+XBT_ATTRIB_NORETURN static void actor_daemon()
{
const simgrid::s4u::Host* host = simgrid::s4u::Host::current();
XBT_INFO(" Start daemon on %s (%f)", host->get_cname(), host->get_speed());
const std::string* payload;
simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name("jupi");
- while (1) {
+ while (true) {
try {
payload = static_cast<std::string*>(mailbox->get());
} catch (const simgrid::HostFailureException&) {
static void sendpid()
{
- simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name("mailbox");
- int pid = simgrid::s4u::this_actor::get_pid();
- double comm_size = 100000;
+ simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name("mailbox");
+ int pid = static_cast<int>(simgrid::s4u::this_actor::get_pid());
+ long comm_size = 100000;
simgrid::s4u::this_actor::on_exit([pid](bool /*failed*/) { XBT_INFO("Process \"%d\" killed.", pid); });
XBT_INFO("Sending pid of \"%d\".", pid);
simgrid::s4u::Mailbox* mailbox = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_host()->get_cname());
XBT_INFO("Server waiting for transfers ...");
- while (1) {
+ while (true) {
const std::string* msg = static_cast<std::string*>(mailbox->get());
if (*msg == "finalize") { // Shutdown ...
delete msg;
SD_task_t task;
sg_host_t *hosts = sg_host_list();
- int totalHosts = sg_host_count();
+ size_t totalHosts = sg_host_count();
qsort((void *) hosts, totalHosts, sizeof(sg_host_t), name_compare_hosts);
xbt_dynar_foreach(dax, cursor, task) {
xbt_dynar_free_container(&route);
}
if (!strcmp(argv[2], "FULL_LINK")) {
- int list_size = sg_host_count();
- for (int i = 0; i < list_size; i++) {
+ size_t list_size = sg_host_count();
+ for (size_t i = 0; i < list_size; i++) {
const_sg_host_t h1 = hosts[i];
const char *name1 = sg_host_get_name(h1);
- for (int j = 0; j < list_size; j++) {
+ for (size_t j = 0; j < list_size; j++) {
const_sg_host_t h2 = hosts[j];
const char *name2 = sg_host_get_name(h2);
fprintf(stderr, "Route between %s and %s\n", name1, name2);
static void dump_hosts()
{
- unsigned int totalHosts = sg_host_count();
+ unsigned int totalHosts = static_cast<unsigned int>(sg_host_count());
sg_host_t* hosts = sg_host_list();
std::sort(hosts, hosts + totalHosts,
[](const_sg_host_t a, const_sg_host_t b) { return strcmp(sg_host_get_name(a), sg_host_get_name(b)) < 0; });
static void dump_routes()
{
- unsigned int totalHosts = sg_host_count();
+ unsigned int totalHosts = static_cast<unsigned int>(sg_host_count());
sg_host_t* hosts = sg_host_list();
std::sort(hosts, hosts + totalHosts,
[](const_sg_host_t a, const_sg_host_t b) { return strcmp(sg_host_get_name(a), sg_host_get_name(b)) < 0; });
int main(int argc, char ** argv)
{
- size_t M = 8*1024;
- size_t N = 32*1024;
+ const int M = 8 * 1024;
+ const int N = 32 * 1024;
MPI_Init(&argc, &argv);
-! Check that getarg does somethig sensible.
+! Check that getarg does something sensible.
program getarg_1
use mpi
CHARACTER*10 ARGS, ARGS2
foreach (test ${umpire_tests_passing})
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! timeout 30")
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! output display" APPEND)
- write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll-selector:ompi \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
+ write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1Gf --cfg=smpi/coll-selector:ompi \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
endforeach()
foreach (test ${umpire_tests_deadlock} ${umpire_tests_problematic} )
MPI_Type_extent (newtype[0], &basic_extent);
if (basic_extent != sizeof (test_basic_struct_t)) {
- fprintf (stderr, "(%d): Unexpect extent for struct\n");
+ fprintf (stderr, "(%d): Unexpected extent for struct\n");
MPI_Abort (MPI_COMM_WORLD, 666);
}
// Set the elements between buf[start] and buf[stop-1] to (i+value)%256
static void set(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
for(size_t i = start; i < stop; i++) {
- buf[i] = (i+value)%256;
+ buf[i] = (uint8_t)((i + value) % 256);
}
}
// Return the number of times that an element is equal to (i+value)%256 between buf[start] and buf[stop-1].
-static int count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value)
+static size_t count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value)
{
size_t occ = 0;
for(size_t i = start ; i < stop ; i++) {
for(int i = 0; i < nb_blocks-1; i++) {
size_t start = shared_blocks[2*i+1];
size_t stop = shared_blocks[2*i+2];
- set(buf, start, stop, rank);
+ set(buf, start, stop, (uint8_t)rank);
}
}
// Then, even processes send their buffer to their successor
if(rank%2 == 0) {
- MPI_Send(buf, mem_size, MPI_UINT8_T, rank+1, 0, MPI_COMM_WORLD);
+ MPI_Send(buf, (int)mem_size, MPI_UINT8_T, rank + 1, 0, MPI_COMM_WORLD);
}
else {
- MPI_Recv(buf, mem_size, MPI_UINT8_T, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ MPI_Recv(buf, (int)mem_size, MPI_UINT8_T, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for(int i = 0; i < nb_blocks-1; i++) {
size_t start = shared_blocks[2*i+1];
size_t stop = shared_blocks[2*i+2];
- int comm = check_all(buf, start, stop, rank-1);
+ int comm = check_all(buf, start, stop, (uint8_t)(rank - 1));
printf("[%d] The result of the (normal) communication check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, comm);
}
memset(buf, rank, mem_size);
// Then, even processes send a sub-part of their buffer their successor
// Note that the last block should not be copied entirely
if(rank%2 == 0) {
- MPI_Send(buf+0x10000, mem_size-0xa00000, MPI_UINT8_T, rank+1, 0, MPI_COMM_WORLD);
+ MPI_Send(buf + 0x10000, (int)(mem_size - 0xa00000), MPI_UINT8_T, rank + 1, 0, MPI_COMM_WORLD);
}
else {
- MPI_Recv(buf+0x10000, mem_size-0xa00000, MPI_UINT8_T, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ MPI_Recv(buf + 0x10000, (int)(mem_size - 0xa00000), MPI_UINT8_T, rank - 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
for(int i = 0; i < nb_blocks-1; i++) {
size_t start = shared_blocks[2*i+1];
size_t stop = shared_blocks[2*i+2];
- int comm = check_all(buf, start, stop, rank-1);
+ int comm = check_all(buf, start, stop, (uint8_t)(rank - 1));
printf("[%d] The result of the (shifted) communication check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, comm);
}
}
// Set the elements between buf[start] and buf[stop-1] to (i+value)%256
static void set(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
for(size_t i = start; i < stop; i++) {
- buf[i] = (i+value)%256;
+ buf[i] = (uint8_t)((i + value) % 256);
}
}
// Return the number of times that an element is equal to (i+value)%256 between buf[start] and buf[stop-1].
-static int count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value)
+static size_t count_all(const uint8_t* buf, size_t start, size_t stop, uint8_t value)
{
size_t occ = 0;
for(size_t i = start ; i < stop ; i++) {
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
MPI_Op_create(matmult, 0, &op);
- /* A single rotation matrix (3x3, stored as 9 consequetive elements) */
+ /* A single rotation matrix (3x3, stored as 9 consecutive elements) */
MPI_Type_contiguous(9, MPI_INT, &mattype);
MPI_Type_commit(&mattype);
/* Create the local matrices.
* Initialize the input matrix so that the entries are
- * consequtive integers, by row, starting at 0.
+ * consecutive integers, by row, starting at 0.
*/
if (rank == size - 1) {
localA = (float *) malloc(gN * lmlast * sizeof(float));
Transpose(localA, localB, gM, gN, comm);
/* check the transposed matrix
- * In the global matrix, the transpose has consequtive integers,
+ * In the global matrix, the transpose has consecutive integers,
* organized by columns.
*/
if (rank == size - 1) {
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
- order, independant of the root.
+ order, independent of the root.
*/
int assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
- order, independant of the root.
+ order, independent of the root.
*/
void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
for (i = 0; i < size; i++) {
for (j = 0; j < COUNT; j++) {
if (recvbuf[i * COUNT + j] != i * VERIFY_CONST + j) {
- printf("PE 0: mis-match error");
+ printf("PE 0: mismatch error");
printf(" recbuf[%d * %d + %d] = ", i, COUNT, j);
printf(" %ld,", recvbuf[i * COUNT + j]);
printf(" should be %ld\n", i * VERIFY_CONST + j);
}
/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
- * in (*req) a request handle corresonding to that operation. This call should
+ * in (*req) a request handle corresponding to that operation. This call should
* be considered collective over comm (with a consistent value for "rndnum"),
* even though the operation may only be a point-to-point request. */
static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request * req,
else
maxsize = size;
- /* General forumula: If we multiple the values from 1 to n, the
+ /* General formula: If we multiply the values from 1 to n, the
* product is n!. This grows very fast, so we'll only use the first
* five (1! = 1, 2! = 2, 3! = 6, 4! = 24, 5! = 120), with n!
* stored in the array result[n] */
}
/* Just performs a simple sum but can be marked as non-commutative to
- potentially tigger different logic in the implementation. */
+ potentially trigger different logic in the implementation. */
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type);
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type)
{
}
/* Just performs a simple sum but can be marked as non-commutative to
- potentially tigger different logic in the implementation. */
+ potentially trigger different logic in the implementation. */
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type);
void nc_sum(void *a, void *b, int *count, MPI_Datatype * type)
{
(see 4.9.4). The order is important.
Note that the computation is in process rank (in the communicator)
- order, independant of the root.
+ order, independent of the root.
*/
void assoc(int *invec, int *inoutvec, int *len, MPI_Datatype * dtype)
{
#define ITERS 10
/* This test uses several scenarios to overlap iallreduce and comm_idup
- * 1.) Use comm_idup dublicate the COMM_WORLD and do iallreduce
+ * 1.) Use comm_idup to duplicate the COMM_WORLD and do iallreduce
* on the COMM_WORLD
* 2.) Do the above test in a loop
- * 3.) Dublicate COMM_WORLD, overalp iallreduce on one
+ * 3.) Duplicate COMM_WORLD, overalp iallreduce on one
* communicator with comm_idup on the nother communicator
* 4.) Split MPI_COMM_WORLD, communicate on the split communicator
while dublicating COMM_WORLD
* 5.) Duplicate the split communicators with comm_idup
* while communicating onCOMM_WORLD
- * 6.) Ceate an inter-communicator and duplicate it with comm_idup while
+ * 6.) Create an inter-communicator and duplicate it with comm_idup while
* communicating on the inter-communicator
- * 7.) Dublicate the inter-communicator whil communicate on COMM_WORLD
+ * 7.) Duplicate the inter-communicator whil communicate on COMM_WORLD
* 8.) Merge the inter-communicator to an intra-communicator and idup it,
* overlapping with communication on MPI_COMM_WORLD
* 9.) Communicate on the merge communicator, while duplicating COMM_WORLD
char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME];
MTest_Init(&argc, &argv);
- /* Check world and self firt */
+ /* Check world and self first */
nameout[0] = 0;
MPI_Comm_get_name(MPI_COMM_WORLD, nameout, &rlen);
if (strcmp(nameout, "MPI_COMM_WORLD")) {
MPI_Comm comm, dupcomm, dupcomm2;
MPI_Request rreq[2];
int count;
- int indicies[2];
+ int indices[2];
int r1buf, r2buf, s1buf, s2buf;
int rank, isLeft;
MPI_Irecv(&r1buf, 1, MPI_INT, 0, 0, dupcomm, &rreq[0]);
MPI_Irecv(&r2buf, 1, MPI_INT, 0, 0, comm, &rreq[1]);
MPI_Send(&s2buf, 1, MPI_INT, 0, 0, comm);
- MPI_Waitsome(2, rreq, &count, indicies, MPI_STATUSES_IGNORE);
- if (count != 1 || indicies[0] != 1) {
+ MPI_Waitsome(2, rreq, &count, indices, MPI_STATUSES_IGNORE);
+ if (count != 1 || indices[0] != 1) {
/* The only valid return is that exactly one message
* has been received */
errs++;
- if (count == 1 && indicies[0] != 1) {
+ if (count == 1 && indices[0] != 1) {
printf("Error in context values for intercomm\n");
}
else if (count == 2) {
int i;
printf("Error: count = %d", count);
for (i = 0; i < count; i++) {
- printf(" indicies[%d] = %d", i, indicies[i]);
+ printf(" indices[%d] = %d", i, indices[i]);
}
printf("\n");
}
/* blockindexed_vector_test()
*
* Tests behavior with a blockindexed of some vector types;
- * this shouldn't be easily convertable into anything else.
+ * this shouldn't be easily convertible into anything else.
*
* Returns the number of errors encountered.
*/
/* hindexed_block_vector_test()
*
* Tests behavior with a hindexed_block of some vector types;
- * this shouldn't be easily convertable into anything else.
+ * this shouldn't be easily convertible into anything else.
*
* Returns the number of errors encountered.
*/
int remainder = 0;
MPI_Datatype memtype, chunktype;
- /* need to cook up a new datatype to accomodate large datatypes */
+ /* need to cook up a new datatype to accommodate large datatypes */
/* first pass: chunks of 1 MiB plus an additional remainder. Does require
* 8 byte MPI_Aint, which should have been checked for earlier */
MPI_Aint *disp;
MPI_Datatype memtype;
- /* need to cook up a new datatype to accomodate large datatypes */
+ /* need to cook up a new datatype to accommodate large datatypes */
/* Does require 8 byte MPI_Aint, which should have been checked for earlier
*/
fprintf(stderr, " MPI_Type_struct of %s failed.\n", typemapstring);
if (verbose)
MTestPrintError(err);
- /* No point in contiuing */
+ /* No point in continuing */
return errs;
}
/* added in MPI 3 */
{ MPI_COUNT, "MPI_COUNT" },
#endif
- { 0, (char *)0 }, /* Sentinal used to indicate the last element */
+ { 0, (char *)0 }, /* Sentinel used to indicate the last element */
};
char name[MPI_MAX_OBJECT_NAME];
/* We should not get here, because the default error handler
* is ERRORS_ARE_FATAL. This makes sure that the correct error
- * handler is called and that no failure occured (such as
+ * handler is called and that no failure occurred (such as
* a SEGV) in Comm_call_errhandler on the default
* error handler. */
printf("After the Error Handler Has Been Called\n");
printf( "Unable to cancel MPI_Irecv request\n" );
}
/* Using MPI_Request_free should be ok, but some MPI implementations
- object to it imediately after the cancel and that isn't essential to
+ object to it immediately after the cancel and that isn't essential to
this test */
MTest_Finalize( errs );
{ MPI_LONG_LONG, "MPI_LONG_LONG" },
{ MPI_UNSIGNED_LONG_LONG, "MPI_UNSIGNED_LONG_LONG" },
{ MPI_LONG_DOUBLE_INT, "MPI_LONG_DOUBLE_INT" },
- { 0, (char *)0 }, /* Sentinal used to indicate the last element */
+ { 0, (char *)0 }, /* Sentinel used to indicate the last element */
};
/*
call mtest_init( ierr )
C
-C For upto 6 dimensions, test with periodicity in 0 through all
+C For up to 6 dimensions, test with periodicity in 0 through all
C dimensions. The test is computed by both:
C get info about the created communicator
C apply cart shift
C now create one with MPI_WEIGHTS_EMPTY
C NOTE that MPI_WEIGHTS_EMPTY was added in MPI-3 and does not
-C appear before then. Incluing this test means that this test cannot
+C appear before then. Including this test means that this test cannot
C be compiled if the MPI version is less than 3 (see the testlist file)
degs(1) = 0;
errs = 0
call mtest_init( ierr )
-! integers with upto 9 are 4 bytes integers; r of 4 are 2 byte,
+! integers with up to 9 are 4 bytes integers; r of 4 are 2 byte,
! and r of 2 is 1 byte
call mpi_type_create_f90_integer( 9, ntype1, ierr )
!
endif
call MPI_RECV( aint, 1, MPI_AINT, 0, 0, MPI_COMM_WORLD, s, ierr )
if (taint .ne. aint) then
- print *, "Address-sized int not correctly transfered"
+ print *, "Address-sized int not correctly transferred"
print *, "Value should be ", taint, " but is ", aint
errs = errs + 1
endif
call MPI_RECV( oint, 1, MPI_OFFSET, 0, 1, MPI_COMM_WORLD, s, ierr )
if (toint .ne. oint) then
- print *, "Offset-sized int not correctly transfered"
+ print *, "Offset-sized int not correctly transferred"
print *, "Value should be ", toint, " but is ", oint
errs = errs + 1
endif
call MPI_RECV( iint, 1, MPI_INTEGER, 0, 2, MPI_COMM_WORLD, s, ierr )
if (tiint .ne. iint) then
- print *, "Integer (by kind) not correctly transfered"
+ print *, "Integer (by kind) not correctly transferred"
print *, "Value should be ", tiint, " but is ", iint
errs = errs + 1
endif
call mpi_type_free(newtype,ierr)
! write(*,*) "Sent ",name(1:5),x
else
-! Everyone calls barrier incase size > 2
+! Everyone calls barrier in case size > 2
call mpi_barrier( MPI_COMM_WORLD, ierr )
if (me.eq.dest) then
position=0
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
if (size < 8) {
- fprintf(stderr, "Test requires 8 processes (16 prefered) only %d provided\n", size);
+ fprintf(stderr, "Test requires 8 processes (16 preferred) only %d provided\n", size);
errs++;
}
/*
Test the group routines
- (some tested elsewere)
+ (some tested elsewhere)
MPI_Group_compare
MPI_Group_excl
MTest_Init(&argc, &argv);
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = 0; i < NKEYS; i++) {
MPI_Info_set(info, keys[i], values[i]);
MTest_Init(&argc, &argv);
MPI_Info_create(&info1);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info1, (char *) "host", (char *) "myhost.myorg.org");
MPI_Info_set(info1, (char *) "file", (char *) "runfile.txt");
/* 1,2,3 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = 0; i < NKEYS; i++) {
MPI_Info_set(info, keys1[i], values1[i]);
/* 3,2,1 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = NKEYS - 1; i >= 0; i--) {
MPI_Info_set(info, keys1[i], values1[i]);
/* 1,3,2 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[0], values1[0]);
MPI_Info_set(info, keys1[2], values1[2]);
/* 2,1,3 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[1], values1[1]);
MPI_Info_set(info, keys1[0], values1[0]);
/* 2,3,1 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[1], values1[1]);
MPI_Info_set(info, keys1[2], values1[2]);
/* 3,1,2 */
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
MPI_Info_set(info, keys1[2], values1[2]);
MPI_Info_set(info, keys1[0], values1[0]);
MTest_Init(&argc, &argv);
MPI_Info_create(&info);
- /* Use only named keys incase the info implementation only supports
+ /* Use only named keys in case the info implementation only supports
* the predefined keys (e.g., IBM) */
for (i = 0; i < NKEYS; i++) {
MPI_Info_set(info, keys[i], values[i]);
}
else if (val_p != &attrval[i]) {
lerrs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
*
* . generalized file writing/reading to handle arbitrary number of processors
* . provides the "cb_config_list" hint with several permutations of the
- * avaliable processors.
+ * available processors.
* [ makes use of code copied from ROMIO's ADIO code to collect the names of
* the processors ]
*/
/* this deceptively simple test uncovered a bug in the way certain file systems
- * dealt with tuning parmeters. See
+ * dealt with tuning parameters. See
* https://github.com/open-mpi/ompi/issues/158 and
* http://trac.mpich.org/projects/mpich/ticket/2261
*
sprintf(file, "%s", opt_file);
MPI_Info_create(&info);
nr_errors += test_write(file, nprocs, rank, info);
- /* acutal value does not matter. test only writes a small amount of data */
+ /* actual value does not matter. test only writes a small amount of data */
MPI_Info_set(info, "striping_factor", "50");
nr_errors += test_write(file, nprocs, rank, info);
MPI_Info_free(&info);
#define MAX_MSGS 30
/*
-static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI recieve uses both ANY_SOURCE and ANY_TAG";
+static char MTEST_Descrip[] = "One implementation delivered incorrect data when an MPI receive uses both ANY_SOURCE and ANY_TAG";
*/
int main(int argc, char *argv[])
#include "mpi.h"
#include "mpitest.h"
-/* Test bsend with a buffer with arbitray alignment */
+/* Test bsend with a buffer with arbitrary alignment */
#define BUFSIZE 2000*4
int main(int argc, char *argv[])
{
if (bptr != buf + align) {
errs++;
printf
- ("Did not recieve the same buffer on detach that was provided on init (%p vs %p)\n",
+ ("Did not receive the same buffer on detach that was provided on init (%p vs %p)\n",
bptr, buf);
}
}
else if (rank == dest) {
MPI_Win_fence(0, win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, &recvtype);
if (err) {
errs += err;
MPI_Group_free(&neighbors);
MPI_Win_wait(win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, &recvtype);
if (err) {
errs += errs;
/* This test is going to test the atomicity for "read-modify-write" in GACC
* operations */
-/* This test is similiar with atomic_rmw_fop.c.
+/* This test is similar to atomic_rmw_fop.c.
* There are three processes involved in this test: P0 (origin_shm), P1 (origin_am),
* and P2 (dest). P0 and P1 issues multiple GACC with MPI_SUM and OP_COUNT integers
* (value 1) to P2 via SHM and AM respectively. The correct results should be that the
- * results on P0 and P1 never be the same for intergers on the corresponding index
+ * results on P0 and P1 never be the same for integers on the corresponding index
* in [0...OP_COUNT-1].
*/
}
else if (val_p != &attrval[i]) {
errs++;
- fprintf(stderr, "Atribute value for key %d not correct\n", i);
+ fprintf(stderr, "Attribute value for key %d not correct\n", i);
}
}
/** Contended RMA put test -- James Dinan <dinan@mcs.anl.gov>
*
* Each process issues COUNT put operations to non-overlapping locations on
- * every other processs.
+ * every other process.
*/
#include <stdio.h>
/** Contended RMA put/get test -- James Dinan <dinan@mcs.anl.gov>
*
* Each process issues COUNT put and get operations to non-overlapping
- * locations on every other processs.
+ * locations on every other process.
*/
#include <stdio.h>
MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MPI_Get(recvtype->buf, recvtype->count,
recvtype->datatype, source, 0, sendtype->count, sendtype->datatype, win);
if (err) {
/* Perform several communication operations, mixing synchronization
* types. Use multiple communication to avoid the single-operation
* optimization that may be present. */
- MTestPrintfMsg(3, "Begining loop %d of mixed sync put/acc operations\n", loop);
+ MTestPrintfMsg(3, "Beginning loop %d of mixed sync put/acc operations\n", loop);
memset(winbuf, 0, count * sizeof(int));
MPI_Barrier(comm);
if (crank == source) {
/* Perform several communication operations, mixing synchronization
* types. Use multiple communication to avoid the single-operation
* optimization that may be present. */
- MTestPrintfMsg(3, "Begining loop %d of mixed sync put/get/acc operations\n", loop);
+ MTestPrintfMsg(3, "Beginning loop %d of mixed sync put/get/acc operations\n", loop);
MPI_Barrier(comm);
if (crank == source) {
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, dest, 0, win);
else if (rank == dest) {
MPI_Win_fence(0, win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, recvtype);
if (err) {
if (errs < 10) {
MPI_Group_free(&neighbors);
MPI_Win_wait(win);
/* This should have the same effect, in terms of
- * transfering data, as a send/recv pair */
+ * transferring data, as a send/recv pair */
err = MTestCheckRecv(0, &recvtype);
if (err) {
errs += errs;
if ($ResultTest ne "") {
# This test really needs to be run manually, with this test
- # Eventually, we can update this to include handleing in checktests.
+ # Eventually, we can update this to include handling in checktests.
print STDERR "Run $curdir/$programname with $np processes and use $ResultTest to check the results\n";
return;
}
free(myname); \
counts[cnt] = 1; bytesize[cnt] = sizeof(_ctype) * (_count); cnt++; }
-/* This defines a structure of two basic members; by chosing things like
+/* This defines a structure of two basic members; by choosing things like
(char, double), various packing and alignment tests can be made */
#define SETUPSTRUCT2TYPE(_mpitype1,_ctype1,_mpitype2,_ctype2,_count,_tname) { \
int i; char *myname; \
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 1;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
rleader = 0;
}
else {
- /* Remote leader is signficant only for the processes
+ /* Remote leader is significant only for the processes
* designated local leaders */
rleader = -1;
}
p Test timers
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_config.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100000 --log=xbt_cfg.thres:warning --cfg=smpi/wtime:0
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_config.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100Gf --log=xbt_cfg.thres:warning --cfg=smpi/wtime:0
> [rank 0] -> Tremblay
MPI_Barrier(MPI_COMM_WORLD);
- /* Set dims[] values to descibe a grid of nbNodes and DIM dimensions*/
+ /* Set dims[] values to describe a grid of nbNodes and DIM dimensions*/
MPI_Cart_create(MPI_COMM_WORLD, DIM, dims, periods, reorder, &gridComm);
if (gridComm == MPI_COMM_NULL)
set(tesh_files ${tesh_files} PARENT_SCOPE)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/maxmin_bench/maxmin_bench.cpp PARENT_SCOPE)
-foreach(x small medium large)
- ADD_TESH(tesh-surf-maxmin-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_${x}.tesh)
-endforeach()
+ADD_TESH(tesh-surf-maxmin-large --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_large.tesh)
+
+if(enable_debug)
+ foreach(x small medium)
+ ADD_TESH(tesh-surf-maxmin-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_${x}.tesh)
+ endforeach()
+endif()
\ No newline at end of file
/* Link constraints and variables */
for (int i = 0; i < 15; i++)
for (int j = 0; j < 16; j++)
- if (A[i][j])
+ if (A[i][j] != 0.0)
Sys->expand(tmp_cnst[i], tmp_var[j], 1.0);
Sys->solve();
double date;
static void test(int nb_cnst, int nb_var, int nb_elem, unsigned int pw_base_limit, unsigned int pw_max_limit,
- float rate_no_limit, int max_share, int mode)
+ double rate_no_limit, int max_share, int mode)
{
simgrid::kernel::lmm::Constraint** cnst = new simgrid::kernel::lmm::Constraint*[nb_cnst];
simgrid::kernel::lmm::Variable** var = new simgrid::kernel::lmm::Variable*[nb_var];
for (int i = 0; i < nb_var; i++) {
var[i] = Sys->variable_new(NULL, 1.0, -1.0, nb_elem);
//Have a few variables with a concurrency share of two (e.g. cross-traffic in some cases)
- int concurrency_share = 1 + simgrid::xbt::random::uniform_int(0, max_share - 1);
+ short concurrency_share = static_cast<short>(1 + simgrid::xbt::random::uniform_int(0, max_share - 1));
var[i]->set_concurrency_share(concurrency_share);
for (int j = 0; j < nb_cnst; j++)
{
simgrid::s4u::Engine e(&argc, argv);
- float rate_no_limit=0.2;
- float acc_date=0;
- float acc_date2=0;
+ double rate_no_limit = 0.2;
+ double acc_date = 0.0;
+ double acc_date2 = 0.0;
int testclass;
if(argc<3) {
acc_date2+=date*date;
}
- float mean_date= acc_date/(float)testcount;
- float stdev_date= sqrt(acc_date2/(float)testcount-mean_date*mean_date);
+ double mean_date = acc_date / static_cast<double>(testcount);
+ double stdev_date = sqrt(acc_date2 / static_cast<double>(testcount) - mean_date * mean_date);
fprintf(stderr, "%ix One shot execution time for a total of %u constraints, "
"%u variables with %u active constraint each, concurrency in [%i,%i] and max concurrency share %u\n",
#include "xbt/config.hpp"
#include "xbt/log.h"
-#include "src/surf/network_wifi.hpp"
+#include "src/surf/network_interface.hpp"
XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[usage] wifi_usage <platform-file>");
simgrid::s4u::this_actor::get_host()->get_cname(), dest, end_time - start_time);
});
simgrid::s4u::Actor::create("receiver", simgrid::s4u::Host::by_name(dest), [mailbox]() { mailbox->get(); });
- auto* l = (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl();
- l->set_host_rate(simgrid::s4u::Host::by_name(src), 0);
- l->set_host_rate(simgrid::s4u::Host::by_name(dest), 0);
+ const auto* ap1 = simgrid::s4u::Link::by_name("AP1");
+ ap1->set_host_wifi_rate(simgrid::s4u::Host::by_name(src), 0);
+ ap1->set_host_wifi_rate(simgrid::s4u::Host::by_name(dest), 0);
simgrid::s4u::this_actor::sleep_for(10);
XBT_INFO("\n");
}
constexpr unsigned ARRAY_SIZE = 10007;
constexpr unsigned FIBO_MAX = 25;
-void (*fun_to_apply)(unsigned*);
-
static std::string parmap_mode_name(e_xbt_parmap_mode_t mode)
{
std::string name;
*arg = fibonacci(*arg % FIBO_MAX);
}
-static void bench_parmap(int nthreads, double timeout, e_xbt_parmap_mode_t mode, bool full_bench)
+template <class F>
+void bench_parmap(int nthreads, double timeout, e_xbt_parmap_mode_t mode, bool full_bench, F func_to_apply)
{
std::string mode_name = parmap_mode_name(mode);
XBT_INFO("** mode = %s", mode_name.c_str());
delete parmap;
parmap = new simgrid::xbt::Parmap<unsigned*>(nthreads, mode);
}
- parmap->apply(fun_to_apply, data);
+ parmap->apply(func_to_apply, data);
elapsed_time = xbt_os_time() - start_time;
i++;
} while (elapsed_time < timeout);
XBT_INFO(" ran %d times in %g seconds (%g/s)", i, elapsed_time, i / elapsed_time);
}
-static void bench_all_modes(int nthreads, double timeout, unsigned modes, bool full_bench)
+template <class F> void bench_all_modes(int nthreads, double timeout, unsigned modes, bool full_bench, F func_to_apply)
{
std::vector<e_xbt_parmap_mode_t> all_modes = {XBT_PARMAP_POSIX, XBT_PARMAP_FUTEX, XBT_PARMAP_BUSY_WAIT,
XBT_PARMAP_DEFAULT};
for (unsigned i = 0; i < all_modes.size(); i++) {
if (1U << i & modes)
- bench_parmap(nthreads, timeout, all_modes[i], full_bench);
+ bench_parmap(nthreads, timeout, all_modes[i], full_bench, func_to_apply);
}
}
}
timeout = atof(argv[2]);
if (argc == 4)
- modes = strtol(argv[2], NULL, 0);
+ modes = static_cast<unsigned>(strtoul(argv[2], NULL, 0));
XBT_INFO("Parmap benchmark with %d workers (modes = %#x)...", nthreads, modes);
XBT_INFO("%s", "");
SIMIX_context_set_nthreads(nthreads);
- fun_to_apply = &fun_small_comp;
XBT_INFO("Benchmark for parmap create+apply+destroy (small comp):");
- bench_all_modes(nthreads, timeout, modes, true);
+ bench_all_modes(nthreads, timeout, modes, true, &fun_small_comp);
XBT_INFO("%s", "");
XBT_INFO("Benchmark for parmap apply only (small comp):");
- bench_all_modes(nthreads, timeout, modes, false);
+ bench_all_modes(nthreads, timeout, modes, false, &fun_small_comp);
XBT_INFO("%s", "");
- fun_to_apply = &fun_big_comp;
-
XBT_INFO("Benchmark for parmap create+apply+destroy (big comp):");
- bench_all_modes(nthreads, timeout, modes, true);
+ bench_all_modes(nthreads, timeout, modes, true, &fun_big_comp);
XBT_INFO("%s", "");
XBT_INFO("Benchmark for parmap apply only (big comp):");
- bench_all_modes(nthreads, timeout, modes, false);
+ bench_all_modes(nthreads, timeout, modes, false, &fun_big_comp);
XBT_INFO("%s", "");
return EXIT_SUCCESS;
parmap.apply(fun_get_id, data);
std::sort(begin(a), end(a));
- unsigned count = std::distance(begin(a), std::unique(begin(a), end(a)));
+ unsigned count = static_cast<unsigned>(std::distance(begin(a), std::unique(begin(a), end(a))));
if (count != num_workers) {
XBT_CRITICAL("only %u/%u threads did some work", count, num_workers);
ret = 1;
src/xbt/xbt_main.cpp
src/xbt/xbt_os_file.cpp
src/xbt/xbt_os_time.c
+ src/xbt/xbt_parse_units.cpp
src/xbt/xbt_replay.cpp
src/xbt/xbt_str.cpp
src/xbt/xbt_virtu.cpp
include/xbt/module.h
include/xbt/PropertyHolder.hpp
include/xbt/parmap.h
+ include/xbt/parse_units.hpp
include/xbt/range.hpp
include/xbt/random.hpp
include/xbt/replay.hpp
src/internal_config.h.in
include/smpi/mpif.h.in)
-### depend of some variables setted upper
+### depend of some variables set upper
if(${HAVE_UCONTEXT_CONTEXTS}) #ucontext
set(SIMIX_SRC ${SIMIX_SRC} src/kernel/context/ContextUnix.hpp
src/kernel/context/ContextUnix.cpp)
docs/source/_ext/showfile.css
docs/source/_ext/showfile.js
docs/source/_ext/showfile.py
+ docs/source/_ext/javasphinx/LICENSE
+ docs/source/_ext/javasphinx/MANIFEST.in
+ docs/source/_ext/javasphinx/README.md
+ docs/source/_ext/javasphinx/doc/conf.py
+ docs/source/_ext/javasphinx/doc/index.rst
+ docs/source/_ext/javasphinx/javasphinx/__init__.py
+ docs/source/_ext/javasphinx/javasphinx/apidoc.py
+ docs/source/_ext/javasphinx/javasphinx/compiler.py
+ docs/source/_ext/javasphinx/javasphinx/domain.py
+ docs/source/_ext/javasphinx/javasphinx/extdoc.py
+ docs/source/_ext/javasphinx/javasphinx/formatter.py
+ docs/source/_ext/javasphinx/javasphinx/htmlrst.py
+ docs/source/_ext/javasphinx/javasphinx/util.py
+ docs/source/_ext/javasphinx/setup.py
docs/source/_static/css/custom.css
docs/source/_templates/breadcrumbs.html
examples/s4u/CMakeLists.txt
examples/smpi/CMakeLists.txt
examples/smpi/NAS/CMakeLists.txt
- examples/smpi/smpi_s4u_masterslave/CMakeLists.txt
+ examples/smpi/smpi_s4u_masterworker/CMakeLists.txt
examples/smpi/replay_multiple/CMakeLists.txt
examples/smpi/replay_multiple_manual_deploy/CMakeLists.txt
examples/python/CMakeLists.txt
examples/platforms/two_hosts_platform_with_availability_included.xml
examples/platforms/two_peers.xml
examples/platforms/vivaldi.xml
- examples/platforms/wifi_decay_2STA.xml
examples/platforms/wifi.xml
)
set(CMAKE_C_FLAGS "${warnCFLAGS} ${CMAKE_C_FLAGS} ${optCFLAGS}")
set(CMAKE_CXX_FLAGS "${warnCXXFLAGS} ${CMAKE_CXX_FLAGS} ${optCFLAGS}")
-# Try to make Mac a bit more complient to open source standards
+# Try to make Mac a bit more compliant to open source standards
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_XOPEN_SOURCE=700 -D_DARWIN_C_SOURCE")
endif()
doxygen fig2dev \
chrpath \
libdw-dev libevent-dev libunwind8-dev \
- linkchecker \
python3-sphinx python3-breathe python3-sphinx-rtd-theme
-
\ No newline at end of file
+
+# linkchecker \
printf "and the clang-format rules:\n\n"
cat "${patch}"
-printf "\nYou can apply these changes and readd the files with:\n"
+printf "\nYou can apply these changes and read the files with:\n"
printf " git apply ${patch} && git apply --cached ${patch}\n"
printf "(call this command from the root directory of your repository)\n"
printf "\n\n\n"
+ contrib/.*
++ \.github/.*
+
+ tools/appveyor-irc-notify\.py
+ tools/docker/.*
+ tools/git-hooks/.*
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- This file is an eclipse configuration file, to setup the code formater.
+<!-- This file is an eclipse configuration file, to setup the code formatter.
Open the properties of your SimGrid project, "C/C++ General" -> "Formatter"
Then, enable project-specific settings, and import this file. -->
pkg_check() {
for pkg
do
- if command -v $pkg
+ if command -v "$pkg"
then
echo "$pkg is installed. Good."
else
### Cleanup previous runs
-! [ -z "$WORKSPACE" ] || die "No WORKSPACE"
+[ -n "$WORKSPACE" ] || die "No WORKSPACE"
[ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist"
do_cleanup() {
NUMPROC="$(nproc)" || NUMPROC=1
-cd $BUILDFOLDER
+cd "$BUILDFOLDER"
rm -rf java_cov*
rm -rf jacoco_cov*
rm -rf python_cov*
-Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=ON -Denable_model-checking=ON \
-Denable_smpi_papi=ON \
-Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=ON \
- -Denable_coverage=ON -DLTO_EXTRA_FLAG="auto" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON $WORKSPACE
+ -Denable_coverage=ON -DLTO_EXTRA_FLAG="auto" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON "$WORKSPACE"
#build with sonarqube scanner wrapper
/home/ci/build-wrapper-linux-x86/build-wrapper-linux-x86-64 --out-dir bw-outputs make -j$NUMPROC tests
i=0
for file in $files
do
- sourcepath=$( dirname $file )
+ sourcepath=$( dirname "$file" )
#convert jacoco reports in xml ones
- ant -f $WORKSPACE/tools/jenkins/jacoco.xml -Dexamplesrcdir=$WORKSPACE -Dbuilddir=$BUILDFOLDER/${sourcepath} -Djarfile=$BUILDFOLDER/simgrid.jar -Djacocodir=${JACOCO_PATH}/lib
+ ant -f "$WORKSPACE"/tools/jenkins/jacoco.xml -Dexamplesrcdir="$WORKSPACE" -Dbuilddir="$BUILDFOLDER"/"${sourcepath}" -Djarfile="$BUILDFOLDER"/simgrid.jar -Djacocodir=${JACOCO_PATH}/lib
#convert jacoco xml reports in cobertura xml reports
- cover2cover.py $BUILDFOLDER/${sourcepath}/report.xml .. ../src/bindings/java src/bindings/java > $BUILDFOLDER/java_coverage_${i}.xml
+ cover2cover.py "$BUILDFOLDER"/"${sourcepath}"/report.xml .. ../src/bindings/java src/bindings/java > "$BUILDFOLDER"/java_coverage_${i}.xml
#save jacoco xml report as sonar only allows it
- mv $BUILDFOLDER/${sourcepath}/report.xml $BUILDFOLDER/jacoco_cov_${i}.xml
+ mv "$BUILDFOLDER"/"${sourcepath}"/report.xml "$BUILDFOLDER"/jacoco_cov_${i}.xml
i=$((i + 1))
done
#convert python coverage reports in xml ones
- cd $BUILDFOLDER
+ cd "$BUILDFOLDER"
find .. -size +1c -name ".coverage*" -exec mv {} . \;
/usr/bin/python3-coverage combine
/usr/bin/python3-coverage xml -i -o ./python_coverage.xml
- cd $WORKSPACE
+ cd "$WORKSPACE"
#convert all gcov reports to xml cobertura reports
- gcovr -r . --xml-pretty -e teshsuite -u -o $BUILDFOLDER/xml_coverage.xml
- xsltproc $WORKSPACE/tools/jenkins/ctest2junit.xsl build/Testing/$( head -n 1 < build/Testing/TAG )/Test.xml > CTestResults_memcheck.xml
+ gcovr -r . --xml-pretty -e teshsuite -u -o "$BUILDFOLDER"/xml_coverage.xml
+ xsltproc "$WORKSPACE"/tools/jenkins/ctest2junit.xsl build/Testing/"$( head -n 1 < build/Testing/TAG )"/Test.xml > CTestResults_memcheck.xml
#generate sloccount report
- sloccount --duplicates --wide --details $WORKSPACE | grep -v -e '.git' -e 'mpich3-test' -e 'sloccount.sc' -e 'isp/umpire' -e 'build/' -e 'xml_coverage.xml' -e 'CTestResults_memcheck.xml' -e 'DynamicAnalysis.xml' > $WORKSPACE/sloccount.sc
+ sloccount --duplicates --wide --details "$WORKSPACE" | grep -v -e '.git' -e 'mpich3-test' -e 'sloccount.sc' -e 'isp/umpire' -e 'build/' -e 'xml_coverage.xml' -e 'CTestResults_memcheck.xml' -e 'DynamicAnalysis.xml' > "$WORKSPACE"/sloccount.sc
#generate PVS-studio report
EXCLUDEDPATH="-e $WORKSPACE/src/include/catch.hpp -e $WORKSPACE/teshsuite/smpi/mpich3-test/ -e $WORKSPACE/teshsuite/smpi/isp/ -e *_dtd.c -e *_dtd.h -e *yy.c -e $WORKSPACE/src/xbt/automaton/ -e $WORKSPACE/src/smpi/colls/ -e $WORKSPACE/examples/smpi/NAS/ -e $WORKSPACE/examples/smpi/gemm/gemm.c -e $WORKSPACE/src/msg/ -e $WORKSPACE/include/msg/ -e $WORKSPACE/examples/deprecated/ -e $WORKSPACE/teshsuite/msg/"
- pvs-studio-analyzer analyze -f $BUILDFOLDER/compile_commands.json -o $WORKSPACE/pvs.log $EXCLUDEDPATH -j$NUMPROC
+ pvs-studio-analyzer analyze -f "$BUILDFOLDER"/compile_commands.json -o "$WORKSPACE"/pvs.log $EXCLUDEDPATH -j$NUMPROC
#disable V1042 (copyleft), V521 (commas in catch.hpp)
- plog-converter -t xml -o $WORKSPACE/pvs.plog -d V1042,V521 $WORKSPACE/pvs.log
+ plog-converter -t xml -o "$WORKSPACE"/pvs.plog -d V1042,V521 "$WORKSPACE"/pvs.log
fi || exit 42
pkg_check() {
for pkg
do
- if command -v $pkg
+ if command -v "$pkg"
then
echo "$pkg is installed. Good."
else
### Cleanup previous runs
-! [ -z "$WORKSPACE" ] || die "No WORKSPACE"
+[ -n "$WORKSPACE" ] || die "No WORKSPACE"
[ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist"
do_cleanup() {
fi
mkdir "$d" || die "Could not create $d"
done
- find $WORKSPACE -name "memcheck_test_*.memcheck" -exec rm {} \;
+ find "$WORKSPACE" -name "memcheck_test_*.memcheck" -exec rm {} \;
}
do_cleanup "$WORKSPACE/build" "$WORKSPACE/memcheck"
NUMPROC="$(nproc)" || NUMPROC=1
-cd $WORKSPACE/build
+cd "$WORKSPACE"/build
### Proceed with the tests
ctest -D ExperimentalStart || true
-Denable_compile_optimizations=OFF -Denable_compile_warnings=ON \
-Denable_jedule=OFF -Denable_mallocators=OFF \
-Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_model-checking=OFF \
- -Denable_memcheck_xml=ON -DLTO_EXTRA_FLAG="auto" $WORKSPACE
+ -Denable_memcheck_xml=ON -DLTO_EXTRA_FLAG="auto" "$WORKSPACE"
make -j$NUMPROC tests
ctest --no-compress-output -D ExperimentalTest -j$NUMPROC || true
-cd $WORKSPACE/build
+cd "$WORKSPACE"/build
if [ -f Testing/TAG ] ; then
- find $WORKSPACE -iname "*.memcheck" -exec mv {} $WORKSPACE/memcheck \;
+ find "$WORKSPACE" -iname "*.memcheck" -exec mv {} "$WORKSPACE"/memcheck \;
#remove all "empty" files
- grep -r -L "error>" $WORKSPACE/memcheck | xargs rm -f
- mv Testing/$(head -n 1 < Testing/TAG)/Test.xml $WORKSPACE/DynamicAnalysis.xml
+ grep -r -L "error>" "$WORKSPACE"/memcheck | xargs rm -f
+ mv Testing/"$(head -n 1 < Testing/TAG)"/Test.xml "$WORKSPACE"/DynamicAnalysis.xml
fi
-
### Cleanup previous runs
-! [ -z "$WORKSPACE" ] || die "No WORKSPACE"
+[ -n "$WORKSPACE" ] || die "No WORKSPACE"
[ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist"
do_cleanup() {
NUMPROC="$(nproc)" || NUMPROC=1
-cd $WORKSPACE/build
+cd "$WORKSPACE"/build
#we can't just receive ON or OFF as values as display is bad in the resulting jenkins matrix
-if [ $1 = "JAVA" ]
+if [ "$1" = "JAVA" ]
then
buildjava="ON"
else
buildjava="OFF"
fi
-if [ $2 = "MC" ]
+if [ "$2" = "MC" ]
then
buildmc="ON"
else
buildmc="OFF"
fi
-if [ $3 = "SMPI" ]
+if [ "$3" = "SMPI" ]
then
buildsmpi="ON"
else
buildsmpi="OFF"
fi
-if [ $4 = "DEBUG" ]
+if [ "$4" = "DEBUG" ]
then
builddebug="ON"
else
builddebug="OFF"
fi
-if [ $5 = "MSG" ]
+if [ "$5" = "MSG" ]
then
buildmsg="ON"
else
-Denable_jedule=ON -Denable_mallocators=ON -Denable_debug=${builddebug} \
-Denable_smpi=${buildsmpi} -Denable_smpi_MPICH3_testsuite=${buildsmpi} -Denable_model-checking=${buildmc} \
-Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=OFF \
- -Denable_ns3=$(onoff test "$buildmc" != "ON") -Denable_coverage=OFF -DLTO_EXTRA_FLAG="auto" $WORKSPACE
+ -Denable_ns3=$(onoff test "$buildmc" != "ON") -Denable_coverage=OFF -DLTO_EXTRA_FLAG="auto" "$WORKSPACE"
make -j$NUMPROC tests
make clean
-
-
pkg_check() {
for pkg
do
- if command -v $pkg
+ if command -v "$pkg"
then
echo "$pkg is installed. Good."
else
### Cleanup previous runs
-! [ -z "$WORKSPACE" ] || die "No WORKSPACE"
+[ -n "$WORKSPACE" ] || die "No WORKSPACE"
[ -d "$WORKSPACE" ] || die "WORKSPACE ($WORKSPACE) does not exist"
do_cleanup() {
NUMPROC="$(nproc)" || NUMPROC=1
-cd $WORKSPACE/build
+cd "$WORKSPACE"/build
ctest -D ExperimentalStart || true
-Denable_jedule=ON -Denable_mallocators=OFF \
-Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=ON -Denable_model-checking=OFF \
-Denable_memcheck=OFF -Denable_memcheck_xml=OFF -Denable_smpi_ISP_testsuite=ON -Denable_coverage=OFF\
- -Denable_fortran=OFF -Denable_python=OFF -DLTO_EXTRA_FLAG="auto" ${SANITIZER_OPTIONS} $WORKSPACE
+ -Denable_fortran=OFF -Denable_python=OFF -DLTO_EXTRA_FLAG="auto" ${SANITIZER_OPTIONS} "$WORKSPACE"
make -j$NUMPROC tests
ctest --no-compress-output -D ExperimentalTest || true
if [ -f Testing/TAG ] ; then
- xsltproc $WORKSPACE/tools/jenkins/ctest2junit.xsl Testing/$(head -n 1 < Testing/TAG)/Test.xml > CTestResults_${SANITIZER}.xml
- mv CTestResults_${SANITIZER}.xml $WORKSPACE
+ xsltproc "$WORKSPACE"/tools/jenkins/ctest2junit.xsl Testing/"$(head -n 1 < Testing/TAG)"/Test.xml > CTestResults_"${SANITIZER}".xml
+ mv CTestResults_"${SANITIZER}".xml "$WORKSPACE"
fi
make clean
shift
[ $# -gt 0 ] || set -- "Error - Halting"
echo "$@" >&2
- exit $status
+ exit "$status"
}
# Get an ON/OFF string from a command:
echo "XX"
echo "XX Get out of the tree"
echo "XX"
-if [ -d $WORKSPACE/build ]
+if [ -d "$WORKSPACE"/build ]
then
# Windows cannot remove the directory if it's still used by the previous build
- rm -rf $WORKSPACE/build || sleep 10 && rm -rf $WORKSPACE/build || sleep 10 && rm -rf $WORKSPACE/build
+ rm -rf "$WORKSPACE"/build || sleep 10 && rm -rf "$WORKSPACE"/build || sleep 10 && rm -rf "$WORKSPACE"/build
fi
-mkdir $WORKSPACE/build
-cd $WORKSPACE/build
+mkdir "$WORKSPACE"/build
+cd "$WORKSPACE"/build
have_NS3="no"
if dpkg -l libns3-dev 2>&1|grep -q "ii libns3-dev" ; then
echo "XX"
echo "XX Build the archive out of the tree"
-echo "XX pwd: "$(pwd)
+echo "XX pwd: $(pwd)"
echo "XX"
-cmake -G"$GENERATOR" -Denable_documentation=OFF $WORKSPACE
+cmake -G"$GENERATOR" -Denable_documentation=OFF "$WORKSPACE"
make dist -j $NUMBER_OF_PROCESSORS
SIMGRID_VERSION=$(cat VERSION)
echo "XX"
echo "XX Open the resulting archive"
echo "XX"
-gunzip ${SIMGRID_VERSION}.tar.gz
-tar xf ${SIMGRID_VERSION}.tar
-mkdir ${WORKSPACE}/build/${SIMGRID_VERSION}/build
-cd ${WORKSPACE}/build/${SIMGRID_VERSION}/build
+gunzip "${SIMGRID_VERSION}".tar.gz
+tar xf "${SIMGRID_VERSION}".tar
+mkdir "${WORKSPACE}"/build/"${SIMGRID_VERSION}"/build
+cd "${WORKSPACE}"/build/"${SIMGRID_VERSION}"/build
SRCFOLDER="${WORKSPACE}/build/${SIMGRID_VERSION}"
echo "XX"
echo "XX Configure and build SimGrid"
-echo "XX pwd: "$(pwd)
+echo "XX pwd: $(pwd)"
echo "XX"
set -x
-Denable_java=$(onoff test "$build_mode" = "ModelChecker") \
-Denable_msg=$(onoff test "$build_mode" = "ModelChecker") \
-DLTO_EXTRA_FLAG="auto" \
- $SRCFOLDER
+ "$SRCFOLDER"
# -Denable_lua=$(onoff test "$build_mode" != "DynamicAnalysis") \
set +x
ctest -T test --output-on-failure --no-compress-output || true
-if test -n "$INSTALL" && [ ${branch_name} = "origin/master" ] ; then
+if test -n "$INSTALL" && [ "${branch_name}" = "origin/master" ] ; then
echo "XX"
echo "XX Test done. Install everything since it's a regular build, not on a Windows."
echo "XX"
then
BOOST=$(grep -m 1 "Found Boost:" ./consoleText | sed "s/.*-- Found Boost:.*found suitable version \"\([a-zA-Z0-9\.]*\)\",.*/\1/g")
fi
- echo $BOOST
+ echo "$BOOST"
}
get_compiler(){
}
get_ns3(){
- found=$(grep -c "ns-3 found" ./consoleText)
- if [ $found != 0 ]; then
- echo "✔"
- else
- echo ""
- fi
+ grep -m 1 "ns-3 found (v3.[0-9]*; incl:" ./consoleText | sed "s/.*-- ns-3 found .v\(3.[0-9]*\); incl:.*/\1/g"
+# found=$(grep -c "ns-3 found" ./consoleText)
+# if [ "$found" != 0 ]; then
+# echo "✔"
+# else
+# echo ""
+# fi
}
get_python(){
found=$(grep -c "Compile Python bindings .....: ON" ./consoleText)
- if [ $found != 0 ]; then
+ if [ "$found" != 0 ]; then
grep -m 1 "Found PythonInterp" ./consoleText| sed "s/.*-- Found PythonInterp.*found suitable version \"\([a-zA-Z0-9\.]*\)\",.*/\1/g"
else
echo ""
fi
-if [ -z $BUILD_URL ]; then
+if [ -z "$BUILD_URL" ]; then
BUILD_URL="https://ci.inria.fr/simgrid/job/SimGrid/lastBuild"
fi
v1[i] = parseInt(v1[i], 10);
v2[i] = parseInt(v2[i], 10);
if (v1[i] > v2[i]) return 1;
- if (v1[i] < v2[i]) return -1;
+ if (v1[i] < v2[i]) return -1;
}
return v1.length == v2.length ? 0: (v1.length < v2.length ? -1 : 1);
}</script>
color1=""
color2=""
#in case of success, replace blue by green in status balls
- wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DDebug%2Cnode%3D${node} -O status >/dev/null 2>&1
+ wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DDebug%2Cnode%3D"${node}" -O status >/dev/null 2>&1
status=$(cat status)
- if [ $status == "Success" ]; then
+ if [ "$status" == "Success" ]; then
color1="&color=green"
fi
rm status
statusmc="<img src=https://ci.inria.fr/simgrid/images/24x24/grey.png>"
- wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DModelChecker%2Cnode%3D${node} -O status >/dev/null 2>&1
+ wget --quiet https://ci.inria.fr/simgrid/buildStatus/text?job=SimGrid%2Fbuild_mode%3DModelChecker%2Cnode%3D"${node}" -O status >/dev/null 2>&1
status=$(cat status)
- if [ $status ]; then
- if [ $status == "Success" ]; then
+ if [ "$status" ]; then
+ if [ "$status" == "Success" ]; then
color2="&color=green"
fi
statusmc="<a href=\"build_mode=ModelChecker,node=${node}/\"><img src=\"https://ci.inria.fr/simgrid/job/SimGrid/build_mode=ModelChecker,node=${node}/badge/icon?style=ball-24x24${color2}\"/>"
fi
rm status
- echo "<tr> <td class=\"matrix-leftcolumn\">$node</td><td class=\"matrix-cell\" style=\"text-align:left\">$os</td><td class=\"matrix-cell\" style=\"text-align:left\">$compiler</td><td class=\"matrix-cell\" style=\"text-align:left\">$boost</td><td class=\"matrix-cell\" style=\"text-align:left\">$java</td><td class=\"matrix-cell\" style=\"text-align:left\">$cmake</td><td class=\"matrix-cell\" style=\"text-align:center\">$ns3</td><td class=\"matrix-cell\" style=\"text-align:center\">$py</td><td class="matrix-cell" style="text-align:center"><a href="build_mode=Debug,node=${node}/"><img src="https://ci.inria.fr/simgrid/job/SimGrid/build_mode=Debug,node=${node}/badge/icon?style=ball-24x24${color1}"/></td><td class="matrix-cell" style="text-align:center">${statusmc}</td></tr>"
+ echo "<tr> <td class=\"matrix-leftcolumn\">$node</td><td class=\"matrix-cell\" style=\"text-align:left\">$os</td><td class=\"matrix-cell\" style=\"text-align:left\">$compiler</td><td class=\"matrix-cell\" style=\"text-align:left\">$boost</td><td class=\"matrix-cell\" style=\"text-align:left\">$java</td><td class=\"matrix-cell\" style=\"text-align:left\">$cmake</td><td class=\"matrix-cell\" style=\"text-align:center\">$ns3</td><td class=\"matrix-cell\" style=\"text-align:center\">$py</td><td class=\"matrix-cell\" style=\"text-align:center\"><a href=\"build_mode=Debug,node=${node}/\"><img src=\"https://ci.inria.fr/simgrid/job/SimGrid/build_mode=Debug,node=${node}/badge/icon?style=ball-24x24${color1}\"/></td><td class=\"matrix-cell\" style=\"text-align:center\">${statusmc}</td></tr>"
rm consoleText
done
#Travis - get ID of the last jobs with the API
BUILD_NUM=$(curl -s 'https://api.travis-ci.org/repos/simgrid/simgrid/builds?limit=1' | grep -o '^\[{"id":[0-9]*,' | grep -o '[0-9]' | tr -d '\n')
-BUILDS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/${BUILD_NUM} | grep -o '{"id":[0-9]*,' | grep -o '[0-9]*'| tail -n 3))
-OS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/${BUILD_NUM} | grep -o '"os":"[a-z]*",' | sed 's/"os":"\([a-z]*\)",/\1/g'| tail -n 3))
+BUILDS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/"${BUILD_NUM}" | grep -o '{"id":[0-9]*,' | grep -o '[0-9]*'| tail -n 3))
+OS=($(curl -s https://api.travis-ci.org/repos/simgrid/simgrid/builds/"${BUILD_NUM}" | grep -o '"os":"[a-z]*",' | sed 's/"os":"\([a-z]*\)",/\1/g'| tail -n 3))
for id in "${!BUILDS[@]}"
do
- wget --quiet https://api.travis-ci.org/v3/job/${BUILDS[$id]}/log.txt -O ./consoleText >/dev/null 2>&1
+ wget --quiet https://api.travis-ci.org/v3/job/"${BUILDS[$id]}"/log.txt -O ./consoleText >/dev/null 2>&1
sed -i -e "s/\r//g" ./consoleText
- if [ ${OS[$id]} == "linux" ]; then
+ if [ "${OS[$id]}" == "linux" ]; then
node="travis-linux (<a href=\"https://travis-ci.org/simgrid/simgrid/jobs/${BUILDS[$id]}\">log</a>)"
os="Ubuntu <a href=\"https://docs.travis-ci.com/user/reference/bionic/\">18.04 bionic</a>"
- elif [ ${OS[$id]} == "osx" ]; then
+ elif [ "${OS[$id]}" == "osx" ]; then
node="travis-mac (<a href=\"https://travis-ci.org/simgrid/simgrid/jobs/${BUILDS[$id]}\">log</a>)"
os="Mac OS X <a href=\"https://docs.travis-ci.com/user/reference/osx/\">Catalina (10.15)</a> "
- elif [ ${OS[$id]} == "windows" ]; then
+ elif [ "${OS[$id]}" == "windows" ]; then
node="travis-windows (<a href=\"https://travis-ci.org/simgrid/simgrid/jobs/${BUILDS[$id]}\">log</a>)"
os="Windows <a href=\"https://docs.travis-ci.com/user/reference/windows/\">Server 1809</a>"
fi
#Appveyor - get ID of the last job with the API
BUILD_ID=$(curl -s "https://ci.appveyor.com/api/projects/mquinson/simgrid" | grep -o '\[{"jobId":"[a-zA-Z0-9]*",' | sed "s/\[{\"jobId\":\"//" | sed "s/\",//")
-wget --quiet https://ci.appveyor.com/api/buildjobs/$BUILD_ID/log -O ./consoleText >/dev/null 2>&1
+wget --quiet https://ci.appveyor.com/api/buildjobs/"$BUILD_ID"/log -O ./consoleText >/dev/null 2>&1
sed -i -e "s/\r//g" ./consoleText
-node="<a href="https://ci.appveyor.com/project/mquinson/simgrid">appveyor</a>"
+node="<a href=\"https://ci.appveyor.com/project/mquinson/simgrid\">appveyor</a>"
os="Windows Server 2012 - VS2015 + mingw64 5.3.0"
boost=$(get_boost)
compiler=$(get_compiler)
obj:/usr/lib/x86_64-linux-gnu/libunwind.so.*
...
}
+{
+ ignore unwind cruft
+ Memcheck:Param
+ write(buf)
+ ...
+ fun:_ULx86_64_step
+ obj:/usr/lib/x86_64-linux-gnu/libunwind.so.*
+}
{
ignore unwind invalid reads
This new that includes tags field that links MPI_wait calls to the
MPI_ISend or MPI_IRecv associated to this wait.
-This script reproduce the old behavior of simgrid because informations are
+This script reproduce the old behavior of simgrid because information are
missing to add the tags properly. It also lower case all the mpi calls.
It takes in input (as argument or in stdin) the trace list file that is only a
#!/usr/bin/env tesh
# This suite contains two tests:
-# The first one uses a very big input (150k) to check whether trucated input do work.
+# The first one uses a very big input (150k) to check whether truncated input do work.
# The second one uses both a big input and a big output (150k each).
#
-# This checks whether the non-blocking I/O mess is functionnal.
+# This checks whether the non-blocking I/O mess is functional.
#
p First, a write test
< > c
< > d
$ ${bindir:=.}/tesh --ignore-jenkins
-> Ignore all cruft seen on SimGrid's continous integration servers
+> Ignore all cruft seen on SimGrid's continuous integration servers
> Test suite from stdin
> [(stdin):1] Test sorting and filtering of output
> [(stdin):3] true
group1.add_argument(
'--ignore-jenkins',
action='store_true',
- help='ignore all cruft generated on SimGrid continous integration servers')
+ help='ignore all cruft generated on SimGrid continuous integration servers')
group1.add_argument('--wrapper', metavar='arg', help='Run each command in the provided wrapper (eg valgrind)')
group1.add_argument(
'--keep',
os.chdir(options.cd)
if options.ignore_jenkins:
- print("Ignore all cruft seen on SimGrid's continous integration servers")
+ print("Ignore all cruft seen on SimGrid's continuous integration servers")
# Note: regexps should match at the beginning of lines
TeshState().ignore_regexps_common = [
re.compile(r"profiling:"),