message(FATAL_ERROR "Cannot activate both model-checking and ns-3 bindings: ns-3 pull too much dependencies for the MC to work")
endif()
+get_property(known_features GLOBAL PROPERTY CMAKE_CXX_KNOWN_FEATURES)
+
if(enable_smpi)
SET(HAVE_SMPI 1)
if(NOT WIN32)
include(${CMAKE_HOME_DIRECTORY}/tools/cmake/Java.cmake)
endif()
+if (enable_model-checking AND (NOT ("cxx_std_14" IN_LIST known_features)))
+ message(WARNING "C++14 not found. The model-checker will use a slow hash function. You should upgrade your compiler")
+ set(SG_HAVE_CPP14 0)
+else()
+ set(SG_HAVE_CPP14 1)
+ set_property(TARGET simgrid PROPERTY CXX_STANDARD 14)
+endif()
+
# Python binding (with pybind11)
################
# Our usage of pybind11::overload_cast mandates C++14
if((NOT DEFINED enable_python) OR enable_python)
- get_property(known_features GLOBAL PROPERTY CMAKE_CXX_KNOWN_FEATURES)
-
if("cxx_std_14" IN_LIST known_features)
if(EXISTS ${CMAKE_HOME_DIRECTORY}/pybind11) # Try to use a local copy of pybind11, if any
- sg_{actor,host,link}_{data,data_set}() now all exist.
Use them to attach user data to the object and retrieve it.
+Models:
+ - Improved the usability of ns-3. Several bugs were ironed out.
+ - Introduce an experimental Wifi model. It sounds reasonable
+ according to the state of the art, but it still has to be properly
+ validated, at least against ns-3.
+
MSG:
- convert a new set of functions to the S4U C interface and move the old MSG
versions to legacy (MSG_process_self*, MSG_process_{un}ref, ...)
SMPI:
- Fortran bindings for DVFS have been removed.
- - Add support for MPI_Irsend, MPI_Rsend, MPI_Rsend_init
+ - Add support for MPI_Irsend, MPI_Rsend, MPI_Rsend_init, MPI_Bsend,
+ MPI_Ibsend, MPI_Bsend_init, MPI_Buffer_attach, MPI_Buffer_detach
- SMPI can now be selected by cmake's find_module(MPI) with
MPI_C_COMPILER, MPI_CXX_COMPILER, MPI_Fortran_COMPILER variables.
- Add support for MPI Errhandlers in Comm, File or Win. Default errhandler is now
MPI_ERRORS_ARE_FATAL, so codes which were sending warnings may start failing.
Model-Checker:
+  - Use the included xxHash as a hash implementation when C++14 is usable.
- Option model-checker/hash was removed. This is always activated now.
- New option smpi/buffering controls the MPI buffering in MC mode.
- MPI calls now MC_assert() that no MPI_ERR_* code is returned.
- xbt_mutex_t and xbt_cond_t are now marked as deprecated, a new C interface
on S4U is already available to replace them by sg_mutex_t and sg_cond_t.
-Bugs:
+Fixed bugs (FG#.. -> framagit bugs; FG!.. -> framagit merge requests):
- FG#28: add sg_actor_self (and other wrappers on this_actor methods)
- FG#29 and FG#33: provide a new C API to mutexes and condition variables
- FG#30: convert MSG_process_{un}ref to sg_actor_{un}ref
- FG#31: per-actor data
- FG#34: SG_BARRIER_SERIAL_THREAD?
- FG#35: model-checker does not like buster-produced binaries
-
+ - FG!13: MC: complete workaround in the error msg seen on modern systems
+ - FG!15: execute_flops now logs compute
+ - FG!16: Fix the ns-3 bindings when several flows are simultaneously finishing
+ - FG!17: ns-3: unblock the right number of communications + others issues
+ - GH#207: Error in the throughput of TCP transfer
----------------------------------------------------------------------------
SimGrid (3.23.2) July 8. 2019
include teshsuite/s4u/concurrent_rw/concurrent_rw.tesh
include teshsuite/s4u/listen_async/listen_async.cpp
include teshsuite/s4u/listen_async/listen_async.tesh
+include teshsuite/s4u/ns3-simultaneous-send-rcv/ns3-simultaneous-send-rcv.cpp
+include teshsuite/s4u/ns3-simultaneous-send-rcv/ns3-simultaneous-send-rcv.tesh
include teshsuite/s4u/pid/pid.cpp
include teshsuite/s4u/pid/pid.tesh
include teshsuite/s4u/storage_client_server/storage_client_server.cpp
include teshsuite/surf/surf_usage/surf_usage.tesh
include teshsuite/surf/surf_usage2/surf_usage2.cpp
include teshsuite/surf/surf_usage2/surf_usage2.tesh
+include teshsuite/surf/wifi_usage/wifi_usage.cpp
+include teshsuite/surf/wifi_usage/wifi_usage.tesh
include teshsuite/xbt/cmdline/cmdline.c
include teshsuite/xbt/cmdline/cmdline.tesh
include teshsuite/xbt/log_large/log_large.c
include examples/platforms/hosts_with_disks.xml
include examples/platforms/meta_cluster.xml
include examples/platforms/multicore_machine.xml
+include examples/platforms/ns3-big-cluster.xml
include examples/platforms/onelink.xml
include examples/platforms/optorsim/gridpp_grid_2004.conf
include examples/platforms/optorsim/lcg_sept2004_grid.conf
include examples/platforms/two_hosts_profiles.xml
include examples/platforms/two_peers.xml
include examples/platforms/vivaldi.xml
+include examples/platforms/wifi.xml
include examples/python/CMakeLists.txt
include examples/python/actor-create/actor-create_d.xml
include examples/python/actor-lifetime/actor-lifetime.py
include src/include/xbt/coverage.h
include src/include/xbt/mmalloc.h
include src/include/xbt/parmap.hpp
+include src/include/xxhash.hpp
include src/instr/instr_config.cpp
include src/instr/instr_interface.cpp
include src/instr/instr_paje_containers.cpp
\_/ \___|_| |___/_|\___/|_| |_| |____(_)_____| |_|
(not released yet)
+ * Introduce an experimental Wifi network model.
+ * TODO: Disk?
+ * TODO: New Energy?
+ * (+ many bug fixes and internal refactorings)
_ _____ ____ _____
__ _____ _ __ ___(_) ___ _ __ |___ / |___ \|___ /
\ \ / / _ \ '__/ __| |/ _ \| '_ \ |_ \ __) | |_ \
[![Travis Status](https://img.shields.io/travis/simgrid/simgrid/master.svg?logo=travis)](https://travis-ci.org/simgrid/simgrid)
[![AppVeyor Status](https://ci.appveyor.com/api/projects/status/gvcssh340fwtoc35?svg=true)](https://ci.appveyor.com/project/mquinson/simgrid)
[![SonarCloud Status](https://sonarcloud.io/api/project_badges/measure?project=simgrid_simgrid&metric=alert_status)](https://sonarcloud.io/dashboard/?id=simgrid_simgrid)
-[![Codacy Badge](https://api.codacy.com/project/badge/Grade/bf1bdba50440485fbda2ac19f462ccc7)](https://www.codacy.com/app/mquinson/simgrid?utm_source=github.com&)
[![Doc](https://readthedocs.org/projects/pip/badge/?version=stable)](https://simgrid.org/doc/latest/)
[![License: LGPL v2.1][license-badge]](COPYING)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1845/badge)](https://bestpractices.coreinfrastructure.org/projects/1845)
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
+<platform version="4.1">
+ <zone id="AS0" routing="Floyd">
+
+ <host id="c-01.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-02.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-03.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-04.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-05.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-06.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-07.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-08.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-09.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-10.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-11.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-12.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-13.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-14.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-15.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-16.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-17.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-18.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-19.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-20.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-21.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-22.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-23.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-24.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-25.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-26.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-27.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-28.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-29.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-30.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-31.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-32.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-33.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-34.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-35.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-36.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-37.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-38.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-39.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-40.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-41.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-42.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-43.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-44.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-45.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-46.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-47.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-48.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-49.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-50.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-51.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-52.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-53.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-54.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-55.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-56.rennes" core="6" speed="21.496E9f"/>
+ <host id="c-57.rennes" core="6" speed="21.496E9f"/>
+
+ <router id="router1"/>
+
+ <link id="link01" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link02" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link03" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link04" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link05" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link06" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link07" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link08" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link09" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link10" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link11" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link12" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link13" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link14" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link15" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link16" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link17" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link18" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link19" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link20" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link21" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link22" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link23" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link24" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link25" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link26" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link27" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link28" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link29" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link30" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link31" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link32" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link33" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link34" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link35" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link36" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link37" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link38" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link39" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link40" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link41" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link42" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link43" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link44" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link45" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link46" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link47" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link48" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link49" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link50" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link51" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link52" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link53" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link54" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link55" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link56" bandwidth="1.25GBps" latency="2.5ms"/>
+ <link id="link57" bandwidth="1.25GBps" latency="2.5ms"/>
+
+ <route src="router1" dst="c-01.rennes"><link_ctn id="link01"/></route>
+ <route src="router1" dst="c-02.rennes"><link_ctn id="link02"/></route>
+ <route src="router1" dst="c-03.rennes"><link_ctn id="link03"/></route>
+ <route src="router1" dst="c-04.rennes"><link_ctn id="link04"/></route>
+ <route src="router1" dst="c-05.rennes"><link_ctn id="link05"/></route>
+ <route src="router1" dst="c-06.rennes"><link_ctn id="link06"/></route>
+ <route src="router1" dst="c-07.rennes"><link_ctn id="link07"/></route>
+ <route src="router1" dst="c-08.rennes"><link_ctn id="link08"/></route>
+ <route src="router1" dst="c-09.rennes"><link_ctn id="link09"/></route>
+ <route src="router1" dst="c-10.rennes"><link_ctn id="link10"/></route>
+ <route src="router1" dst="c-11.rennes"><link_ctn id="link11"/></route>
+ <route src="router1" dst="c-12.rennes"><link_ctn id="link12"/></route>
+ <route src="router1" dst="c-13.rennes"><link_ctn id="link13"/></route>
+ <route src="router1" dst="c-14.rennes"><link_ctn id="link14"/></route>
+ <route src="router1" dst="c-15.rennes"><link_ctn id="link15"/></route>
+ <route src="router1" dst="c-16.rennes"><link_ctn id="link16"/></route>
+ <route src="router1" dst="c-17.rennes"><link_ctn id="link17"/></route>
+ <route src="router1" dst="c-18.rennes"><link_ctn id="link18"/></route>
+ <route src="router1" dst="c-19.rennes"><link_ctn id="link19"/></route>
+ <route src="router1" dst="c-20.rennes"><link_ctn id="link20"/></route>
+ <route src="router1" dst="c-21.rennes"><link_ctn id="link21"/></route>
+ <route src="router1" dst="c-22.rennes"><link_ctn id="link22"/></route>
+ <route src="router1" dst="c-23.rennes"><link_ctn id="link23"/></route>
+ <route src="router1" dst="c-24.rennes"><link_ctn id="link24"/></route>
+ <route src="router1" dst="c-25.rennes"><link_ctn id="link25"/></route>
+ <route src="router1" dst="c-26.rennes"><link_ctn id="link26"/></route>
+ <route src="router1" dst="c-27.rennes"><link_ctn id="link27"/></route>
+ <route src="router1" dst="c-28.rennes"><link_ctn id="link28"/></route>
+ <route src="router1" dst="c-29.rennes"><link_ctn id="link29"/></route>
+ <route src="router1" dst="c-30.rennes"><link_ctn id="link30"/></route>
+ <route src="router1" dst="c-31.rennes"><link_ctn id="link31"/></route>
+ <route src="router1" dst="c-32.rennes"><link_ctn id="link32"/></route>
+ <route src="router1" dst="c-33.rennes"><link_ctn id="link33"/></route>
+ <route src="router1" dst="c-34.rennes"><link_ctn id="link34"/></route>
+ <route src="router1" dst="c-35.rennes"><link_ctn id="link35"/></route>
+ <route src="router1" dst="c-36.rennes"><link_ctn id="link36"/></route>
+ <route src="router1" dst="c-37.rennes"><link_ctn id="link37"/></route>
+ <route src="router1" dst="c-38.rennes"><link_ctn id="link38"/></route>
+ <route src="router1" dst="c-39.rennes"><link_ctn id="link39"/></route>
+ <route src="router1" dst="c-40.rennes"><link_ctn id="link40"/></route>
+ <route src="router1" dst="c-41.rennes"><link_ctn id="link41"/></route>
+ <route src="router1" dst="c-42.rennes"><link_ctn id="link42"/></route>
+ <route src="router1" dst="c-43.rennes"><link_ctn id="link43"/></route>
+ <route src="router1" dst="c-44.rennes"><link_ctn id="link44"/></route>
+ <route src="router1" dst="c-45.rennes"><link_ctn id="link45"/></route>
+ <route src="router1" dst="c-46.rennes"><link_ctn id="link46"/></route>
+ <route src="router1" dst="c-47.rennes"><link_ctn id="link47"/></route>
+ <route src="router1" dst="c-48.rennes"><link_ctn id="link48"/></route>
+ <route src="router1" dst="c-49.rennes"><link_ctn id="link49"/></route>
+ <route src="router1" dst="c-50.rennes"><link_ctn id="link50"/></route>
+ <route src="router1" dst="c-51.rennes"><link_ctn id="link51"/></route>
+ <route src="router1" dst="c-52.rennes"><link_ctn id="link52"/></route>
+ <route src="router1" dst="c-53.rennes"><link_ctn id="link53"/></route>
+ <route src="router1" dst="c-54.rennes"><link_ctn id="link54"/></route>
+ <route src="router1" dst="c-55.rennes"><link_ctn id="link55"/></route>
+ <route src="router1" dst="c-56.rennes"><link_ctn id="link56"/></route>
+ <route src="router1" dst="c-57.rennes"><link_ctn id="link57"/></route>
+ </zone>
+</platform>
--- /dev/null
+<?xml version='1.0'?>
+
+<!DOCTYPE platform SYSTEM "https://simgrid.org/simgrid.dtd">
+<platform version="4.1">
+ <zone id="world" routing="Full">
+
+ <zone id="WIFI zone" routing="Cluster">
+ <!-- First declare the Access Point (ie, the wifi media) -->
+ <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps" latency="0ms" />
+
+ <!-- Two stations in the wifi zone -->
+ <host id="Station 1" speed="100.0Mf,50.0Mf,20.0Mf" />
+ <host id="Station 2" speed="100.0Mf,50.0Mf,20.0Mf" />
+
+ <!-- Specify that stations use the WIFI link for every communication (incoming or outgoing) -->
+ <host_link id="Station 1" up="AP1" down="AP1"/>
+ <host_link id="Station 2" up="AP1" down="AP1"/>
+
+ <router id="WIFI router"/>
+ </zone>
+
+
+ <!-- NODE1 AS -->
+ <zone id="Wired zone" routing="Full">
+ <host id="NODE1" speed="100.0Mf,50.0Mf,20.0Mf" />
+ </zone>
+
+
+ <!-- AS Routing -->
+ <link id="Collector" sharing_policy="SHARED" bandwidth="100Mbps" latency="0ms" />
+ <zoneRoute src="WIFI zone" dst="Wired zone" gw_src="WIFI router" gw_dst="NODE1">
+ <link_ctn id="Collector" />
+ </zoneRoute>
+
+ </zone>
+</platform>
+++ /dev/null
-<?xml version='1.0'?>
-
-<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
-<platform version="4.1">
- <AS id="AS0" routing="Full">
-
- <!-- WIFI cell -->
- <AS id="AS_AP1" routing="Cluster">
- <host id="STA1" speed="100.0Mf,50.0Mf,20.0Mf">
- </host>
- <router id="AP1_ROUTER"/>
-
- <!-- Access Point -->
- <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps" latency="0ms">
- </link>
-
- <host_link id="STA1" up="AP1" down="AP1"/>
- </AS>
-
-
- <!-- NODE1 AS -->
- <AS id="AS1" routing="Full">
- <host id="NODE1" speed="100.0Mf,50.0Mf,20.0Mf">
- </host>
- </AS>
-
-
- <!-- AS Routing -->
- <link id="LINK1" sharing_policy="SHARED" bandwidth="100Mbps" latency="0ms">
- </link>
- <ASroute src="AS_AP1" dst="AS1" gw_src="AP1_ROUTER" gw_dst="NODE1">
- <link_ctn id="LINK1" />
- </ASroute>
-
- </AS>
-</platform>
+++ /dev/null
-<?xml version='1.0'?>
-
-<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
-<platform version="4.1">
- <AS id="AS0" routing="Cluster">
- <host id="STA1" speed="100.0Mf,50.0Mf,20.0Mf">
- </host>
- <host id="STA2" speed="100.0Mf,50.0Mf,20.0Mf">
- </host>
- <router id="AP1_ROUTER"/>
-
-
- <!-- Access Point -->
- <link id="AP1" sharing_policy="WIFI" bandwidth="54Mbps" latency="0ms">
- </link>
-
- <host_link id="STA1" up="AP1" down="AP1"/>
- <host_link id="STA2" up="AP1" down="AP1"/>
-
- </AS>
-</platform>
typedef int (MPI_Datarep_extent_function)(MPI_Datatype, MPI_Aint *, void *);
typedef int (MPI_Datarep_conversion_function)(void *, MPI_Datatype, int, void *, MPI_Offset, void *);
+typedef void MPI_Handler_function(MPI_Comm*, int*, ...);
+typedef void MPI_Comm_errhandler_function(MPI_Comm *, int *, ...);
+typedef void MPI_File_errhandler_function(MPI_File *, int *, ...);
+typedef void MPI_Win_errhandler_function(MPI_Win *, int *, ...);
+typedef MPI_Comm_errhandler_function MPI_Comm_errhandler_fn;
+typedef MPI_File_errhandler_function MPI_File_errhandler_fn;
+typedef MPI_Win_errhandler_function MPI_Win_errhandler_fn;
+
MPI_CALL(XBT_PUBLIC int, MPI_Init, (int* argc, char*** argv));
MPI_CALL(XBT_PUBLIC int, MPI_Finalize, (void));
MPI_CALL(XBT_PUBLIC int, MPI_Finalized, (int* flag));
MPI_CALL(XBT_PUBLIC int, MPI_Free_mem, (void* base));
MPI_CALL(XBT_PUBLIC double, MPI_Wtime, (void));
MPI_CALL(XBT_PUBLIC double, MPI_Wtick, (void));
-
+MPI_CALL(XBT_PUBLIC int, MPI_Buffer_attach, (void* buffer, int size));
+MPI_CALL(XBT_PUBLIC int, MPI_Buffer_detach, (void* buffer, int* size));
MPI_CALL(XBT_PUBLIC int, MPI_Address, (const void* location, MPI_Aint* address));
MPI_CALL(XBT_PUBLIC int, MPI_Get_address, (const void* location, MPI_Aint* address));
MPI_CALL(XBT_PUBLIC int, MPI_Error_class, (int errorcode, int* errorclass));
+MPI_CALL(XBT_PUBLIC int, MPI_Error_string, (int errorcode, char* string, int* resultlen));
MPI_CALL(XBT_PUBLIC int, MPI_Attr_delete, (MPI_Comm comm, int keyval));
MPI_CALL(XBT_PUBLIC int, MPI_Attr_get, (MPI_Comm comm, int keyval, void* attr_value, int* flag));
MPI_CALL(XBT_PUBLIC int, MPI_Ssend, (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
MPI_CALL(XBT_PUBLIC int, MPI_Ssend_init,
(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC int, MPI_Bsend, (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
+MPI_CALL(XBT_PUBLIC int, MPI_Bsend_init,
+ (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC int, MPI_Ibsend,
+ (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
MPI_CALL(XBT_PUBLIC int, MPI_Issend,
(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
MPI_CALL(XBT_PUBLIC int, MPI_Sendrecv,
MPI_CALL(XBT_PUBLIC int, MPI_File_get_position_shared, (MPI_File fh, MPI_Offset* offset));
MPI_CALL(XBT_PUBLIC int, MPI_File_sync, (MPI_File fh));
-
+MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_set, (MPI_Comm comm, MPI_Errhandler errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_create, (MPI_Handler_function * function, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_free, (MPI_Errhandler * errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_get, (MPI_Comm comm, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Comm_set_errhandler, (MPI_Comm comm, MPI_Errhandler errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Comm_get_errhandler, (MPI_Comm comm, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Comm_create_errhandler, (MPI_Comm_errhandler_fn * function, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Comm_call_errhandler, (MPI_Comm comm, int errorcode));
+MPI_CALL(XBT_PUBLIC int, MPI_Win_set_errhandler, (MPI_Win win, MPI_Errhandler errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Win_get_errhandler, (MPI_Win win, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Win_create_errhandler, (MPI_Win_errhandler_fn * function, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_Win_call_errhandler, (MPI_Win win, int errorcode));MPI_CALL(XBT_PUBLIC int, MPI_Type_get_envelope,
+ (MPI_Datatype datatype, int* num_integers, int* num_addresses, int* num_datatypes, int* combiner));
+MPI_CALL(XBT_PUBLIC int, MPI_File_call_errhandler, (MPI_File fh, int errorcode));
+MPI_CALL(XBT_PUBLIC int, MPI_File_create_errhandler,
+ (MPI_File_errhandler_function * function, MPI_Errhandler* errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_File_set_errhandler, (MPI_File file, MPI_Errhandler errhandler));
+MPI_CALL(XBT_PUBLIC int, MPI_File_get_errhandler, (MPI_File file, MPI_Errhandler* errhandler));
//FIXME: these are not yet implemented
typedef enum MPIR_Combiner_enum{
MPI_COMBINER_HINDEXED_BLOCK
}MPIR_Combiner_enum;
-typedef void MPI_Handler_function(MPI_Comm*, int*, ...);
typedef void* MPI_Message;
-typedef void MPI_Comm_errhandler_function(MPI_Comm *, int *, ...);
-typedef void MPI_File_errhandler_function(MPI_File *, int *, ...);
-typedef void MPI_Win_errhandler_function(MPI_Win *, int *, ...);
#define MPI_DUP_FN 1
#define MPI_WIN_DUP_FN ((MPI_Win_copy_attr_function*)MPI_DUP_FN)
#define MPI_TYPE_DUP_FN ((MPI_Type_copy_attr_function*)MPI_DUP_FN)
#define MPI_COMM_DUP_FN ((MPI_Comm_copy_attr_function *)MPI_DUP_FN)
-typedef MPI_Comm_errhandler_function MPI_Comm_errhandler_fn;
-typedef MPI_File_errhandler_function MPI_File_errhandler_fn;
-typedef MPI_Win_errhandler_function MPI_Win_errhandler_fn;
#define MPI_INFO_ENV smpi_process_info_env()
XBT_PUBLIC_DATA const MPI_Datatype MPI_PACKED;
XBT_PUBLIC_DATA MPI_Errhandler MPI_ERRORS_RETURN;
MPI_CALL(XBT_PUBLIC int, MPI_Graph_neighbors_count, (MPI_Comm comm, int rank, int* nneighbors));
MPI_CALL(XBT_PUBLIC int, MPI_Graphdims_get, (MPI_Comm comm, int* nnodes, int* nedges));
MPI_CALL(XBT_PUBLIC int, MPI_Topo_test, (MPI_Comm comm, int* top_type));
-MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_create, (MPI_Handler_function * function, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_free, (MPI_Errhandler * errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_get, (MPI_Comm comm, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Error_string, (int errorcode, char* string, int* resultlen));
-MPI_CALL(XBT_PUBLIC int, MPI_Errhandler_set, (MPI_Comm comm, MPI_Errhandler errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Comm_set_errhandler, (MPI_Comm comm, MPI_Errhandler errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Comm_get_errhandler, (MPI_Comm comm, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Comm_create_errhandler, (MPI_Comm_errhandler_fn * function, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Comm_call_errhandler, (MPI_Comm comm, int errorcode));
MPI_CALL(XBT_PUBLIC int, MPI_Add_error_class, (int* errorclass));
MPI_CALL(XBT_PUBLIC int, MPI_Add_error_code, (int errorclass, int* errorcode));
MPI_CALL(XBT_PUBLIC int, MPI_Add_error_string, (int errorcode, char* string));
MPI_CALL(XBT_PUBLIC int, MPI_Cancel, (MPI_Request * request));
-MPI_CALL(XBT_PUBLIC int, MPI_Buffer_attach, (void* buffer, int size));
-MPI_CALL(XBT_PUBLIC int, MPI_Buffer_detach, (void* buffer, int* size));
MPI_CALL(XBT_PUBLIC int, MPI_Comm_test_inter, (MPI_Comm comm, int* flag));
MPI_CALL(XBT_PUBLIC int, MPI_Intercomm_create,
(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm* comm_out));
MPI_CALL(XBT_PUBLIC int, MPI_Intercomm_merge, (MPI_Comm comm, int high, MPI_Comm* comm_out));
-MPI_CALL(XBT_PUBLIC int, MPI_Bsend, (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
-MPI_CALL(XBT_PUBLIC int, MPI_Bsend_init,
- (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
-MPI_CALL(XBT_PUBLIC int, MPI_Ibsend,
- (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
MPI_CALL(XBT_PUBLIC int, MPI_Comm_remote_group, (MPI_Comm comm, MPI_Group* group));
MPI_CALL(XBT_PUBLIC int, MPI_Comm_remote_size, (MPI_Comm comm, int* size));
MPI_CALL(XBT_PUBLIC int, MPI_Rsend, (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
MPI_CALL(XBT_PUBLIC int, MPI_Get_elements, (MPI_Status * status, MPI_Datatype datatype, int* elements));
MPI_CALL(XBT_PUBLIC int, MPI_Pcontrol, (const int level, ...));
-
-MPI_CALL(XBT_PUBLIC int, MPI_Win_set_errhandler, (MPI_Win win, MPI_Errhandler errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Win_get_errhandler, (MPI_Win win, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Win_create_errhandler, (MPI_Win_errhandler_fn * function, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Win_call_errhandler, (MPI_Win win, int errorcode));MPI_CALL(XBT_PUBLIC int, MPI_Type_get_envelope,
- (MPI_Datatype datatype, int* num_integers, int* num_addresses, int* num_datatypes, int* combiner));
MPI_CALL(XBT_PUBLIC int, MPI_Type_get_contents,
(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes));
MPI_CALL(XBT_PUBLIC int, MPI_Register_datarep, (char* datarep, MPI_Datarep_conversion_function* read_conversion_fn,
MPI_Datarep_conversion_function* write_conversion_fn,
MPI_Datarep_extent_function* dtype_file_extent_fn, void* extra_state));
-MPI_CALL(XBT_PUBLIC int, MPI_File_call_errhandler, (MPI_File fh, int errorcode));
-MPI_CALL(XBT_PUBLIC int, MPI_File_create_errhandler,
- (MPI_File_errhandler_function * function, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_File_set_errhandler, (MPI_File file, MPI_Errhandler errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_File_get_errhandler, (MPI_File file, MPI_Errhandler* errhandler));
MPI_CALL(XBT_PUBLIC int, MPI_File_set_size, (MPI_File fh, MPI_Offset size));
MPI_CALL(XBT_PUBLIC int, MPI_File_preallocate, (MPI_File fh, MPI_Offset size));
MPI_CALL(XBT_PUBLIC int, MPI_File_set_view,
--- /dev/null
+#pragma once
+// Standard headers used by this self-contained xxHash C++ port.
+#include <array>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include <iostream> // NOTE(review): nothing in this header uses <iostream>; candidate for removal
+
+/*
+xxHash - Extremely Fast Hash algorithm
+Header File
+Copyright (C) 2012-2018, Yann Collet.
+Copyright (C) 2017-2018, Piotr Pliszka.
+All rights reserved.
+
+BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+You can contact the author at :
+- xxHash source repository : https://github.com/Cyan4973/xxHash
+- xxHash C++ port repository : https://github.com/RedSpah/xxhash_cpp
+*/
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+* The below switch allow to select different access method for improved performance.
+* Method 0 (default) : use `memcpy()`. Safe and portable.
+* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+* Method 2 : direct access. This method doesn't depend on compiler but violate C standard.
+* It can generate buggy code on targets which do not support unaligned memory accesses.
+* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+* See http://stackoverflow.com/a/32095106/646947 for details.
+* Prefer these methods in priority order (0 > 1 > 2)
+*/
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define XXH_FORCE_MEMORY_ACCESS 2
+# elif defined(__INTEL_COMPILER) || (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+* By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
+* Results are therefore identical for little-endian and big-endian CPU.
+* This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
+* Should endian-independence be of no importance for your application, you may set the #define below to 1,
+* to improve speed for Big-endian CPU.
+* This option has no impact on Little_Endian CPU.
+*/
+#if !defined(XXH_FORCE_NATIVE_FORMAT) || (XXH_FORCE_NATIVE_FORMAT == 0) /* can be defined externally */
+# define XXH_FORCE_NATIVE_FORMAT 0
+# define XXH_CPU_LITTLE_ENDIAN 1
+#endif
+
+
+/*!XXH_FORCE_ALIGN_CHECK :
+* This is a minor performance trick, only useful with lots of very small keys.
+* It means : check for aligned/unaligned input.
+* The check costs one initial branch per hash;
+* set it to 0 when the input is guaranteed to be aligned,
+* or when alignment doesn't matter for performance.
+*/
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+/*!XXH_CPU_LITTLE_ENDIAN :
+* This is a CPU endian detection macro, will be
+* automatically set to 1 (little endian) if XXH_FORCE_NATIVE_FORMAT
+* is left undefined, XXH_FORCE_NATIVE_FORMAT is defined to 0, or if an x86/x86_64 compiler macro is defined.
+* If left undefined, endianness will be determined at runtime, at the cost of a slight one-time overhead
+* and a larger overhead due to get_endian() not being constexpr.
+*/
+#ifndef XXH_CPU_LITTLE_ENDIAN
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# endif
+#endif
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+namespace xxh
+{
+ /* *************************************
+ * Version
+ ***************************************/
+ constexpr int cpp_version_major = 0;
+ constexpr int cpp_version_minor = 6;
+ constexpr int cpp_version_release = 5;
+ constexpr uint32_t version_number() { return cpp_version_major * 10000 + cpp_version_minor * 100 + cpp_version_release; }
+
+ namespace hash_t_impl
+ {
+ /* *************************************
+ * Basic Types - Detail
+ ***************************************/
+
+ using _hash32_underlying = uint32_t;
+ using _hash64_underlying = uint64_t;
+
+ template <size_t N>
+ struct hash_type { using type = void; };
+ template <>
+ struct hash_type<32> { using type = _hash32_underlying; };
+ template <>
+ struct hash_type<64> { using type = _hash64_underlying; };
+ }
+
+ /* *************************************
+ * Basic Types - Public
+ ***************************************/
+
+ template <size_t N>
+ using hash_t = typename hash_t_impl::hash_type<N>::type;
+ using hash32_t = hash_t<32>;
+ using hash64_t = hash_t<64>;
+
+ /* *************************************
+ * Bit Functions - Public
+ ***************************************/
+
+ namespace bit_ops
+ {
+ /* ****************************************
+ * Intrinsics and Bit Operations
+ ******************************************/
+
+#if defined(_MSC_VER)
+ inline uint32_t rotl32(uint32_t x, int32_t r) { return _rotl(x, r); }
+ inline uint64_t rotl64(uint64_t x, int32_t r) { return _rotl64(x, r); }
+#else
+ inline uint32_t rotl32(uint32_t x, int32_t r) { return ((x << r) | (x >> (32 - r))); }
+ inline uint64_t rotl64(uint64_t x, int32_t r) { return ((x << r) | (x >> (64 - r))); }
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+ inline uint32_t swap32(uint32_t x) { return _byteswap_ulong(x); }
+ inline uint64_t swap64(uint64_t x) { return _byteswap_uint64(x); }
+#elif XXH_GCC_VERSION >= 403
+ inline uint32_t swap32(uint32_t x) { return __builtin_bswap32(x); }
+ inline uint64_t swap64(uint64_t x) { return __builtin_bswap64(x); }
+#else
+ inline uint32_t swap32(uint32_t x) { return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff); }
+ inline uint64_t swap64(uint64_t x) { return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) | ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL); }
+#endif
+		// Declaration only: valid specializations exist for N == 32 / 64 below.
+		// The previous empty '{}' body fell off the end of a value-returning
+		// function (undefined behavior if ever instantiated); a missing
+		// definition turns that into a link-time error instead.
+		template <size_t N>
+		inline hash_t<N> rotl(hash_t<N> n, int32_t r);
+
+ template <>
+ inline hash_t<32> rotl<32>(hash_t<32> n, int32_t r)
+ {
+ return rotl32(n, r);
+ };
+
+ template <>
+ inline hash_t<64> rotl<64>(hash_t<64> n, int32_t r)
+ {
+ return rotl64(n, r);
+ };
+
+		// Declaration only (specialized for 32/64 below); the old empty body
+		// was UB if instantiated for any other width.
+		template <size_t N>
+		inline hash_t<N> swap(hash_t<N> n);
+
+ template <>
+ inline hash_t<32> swap<32>(hash_t<32> n)
+ {
+ return swap32(n);
+ };
+
+ template <>
+ inline hash_t<64> swap<64>(hash_t<64> n)
+ {
+ return swap64(n);
+ };
+ }
+
+ /* *************************************
+ * Memory Functions - Public
+ ***************************************/
+
+ enum class alignment : uint8_t { aligned, unaligned };
+ enum class endianness : uint8_t { big_endian = 0, little_endian = 1, unspecified = 2 };
+
+ namespace mem_ops
+ {
+ /* *************************************
+ * Memory Access
+ ***************************************/
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+ /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+ template <size_t N>
+ inline hash_t<N> read_unaligned(const void* memPtr) { return *(const hash_t<N>*)memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+ /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+ /* currently only defined for gcc and icc */
+	template <size_t N>
+	using unalign = union { hash_t<N> uval; } __attribute__((packed));
+
+	// Fix: the cast must name the instantiated alias 'unalign<N>'; the bare
+	// template-name 'unalign' does not compile when this branch
+	// (XXH_FORCE_MEMORY_ACCESS == 1) is selected.
+	template <size_t N>
+	inline hash_t<N> read_unaligned(const void* memPtr) { return ((const unalign<N>*)memPtr)->uval; }
+#else
+
+ /* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+ template <size_t N>
+ inline hash_t<N> read_unaligned(const void* memPtr)
+ {
+ hash_t<N> val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+ }
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+ inline hash_t<32> read32(const void* memPtr) { return read_unaligned<32>(memPtr); }
+ inline hash_t<64> read64(const void* memPtr) { return read_unaligned<64>(memPtr); }
+
+ /* *************************************
+ * Architecture Macros
+ ***************************************/
+
+ /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+
+#ifndef XXH_CPU_LITTLE_ENDIAN
+
+ inline endianness get_endian(endianness endian)
+ {
+ static struct _dummy_t
+ {
+ std::array<endianness, 3> endian_lookup = { endianness::big_endian, endianness::little_endian, endianness::unspecified };
+ const int g_one = 1;
+ _dummy_t()
+ {
+ endian_lookup[2] = static_cast<endianness>(*(const char*)(&g_one));
+ }
+ } _dummy;
+
+ return _dummy.endian_lookup[(uint8_t)endian];
+ }
+
+ inline bool is_little_endian()
+ {
+ return get_endian(endianness::unspecified) == endianness::little_endian;
+ }
+
+#else
+ constexpr endianness get_endian(endianness endian)
+ {
+ constexpr std::array<endianness, 3> endian_lookup = { endianness::big_endian, endianness::little_endian, (XXH_CPU_LITTLE_ENDIAN) ? endianness::little_endian : endianness::big_endian };
+ return endian_lookup[static_cast<uint8_t>(endian)];
+ }
+
+ constexpr bool is_little_endian()
+ {
+ return get_endian(endianness::unspecified) == endianness::little_endian;
+ }
+
+#endif
+
+
+
+ /* ***************************
+ * Memory reads
+ *****************************/
+
+
+ template <size_t N>
+ inline hash_t<N> readLE_align(const void* ptr, endianness endian, alignment align)
+ {
+ if (align == alignment::unaligned)
+ {
+ return endian == endianness::little_endian ? read_unaligned<N>(ptr) : bit_ops::swap<N>(read_unaligned<N>(ptr));
+ }
+ else
+ {
+ return endian == endianness::little_endian ? *reinterpret_cast<const hash_t<N>*>(ptr) : bit_ops::swap<N>(*reinterpret_cast<const hash_t<N>*>(ptr));
+ }
+ }
+
+ template <size_t N>
+ inline hash_t<N> readLE(const void* ptr, endianness endian)
+ {
+ return readLE_align<N>(ptr, endian, alignment::unaligned);
+ }
+
+ template <size_t N>
+ inline hash_t<N> readBE(const void* ptr)
+ {
+ return is_little_endian() ? bit_ops::swap<N>(read_unaligned<N>(ptr)) : read_unaligned<N>(ptr);
+ }
+
+ template <size_t N>
+ inline alignment get_alignment(const void* input)
+ {
+ return ((XXH_FORCE_ALIGN_CHECK) && ((reinterpret_cast<uintptr_t>(input) & ((N / 8) - 1)) == 0)) ? xxh::alignment::aligned : xxh::alignment::unaligned;
+ }
+ }
+
+ /* *******************************************************************
+ * Hash functions
+ *********************************************************************/
+
+ namespace detail
+ {
+ /* *******************************************************************
+ * Hash functions - Implementation
+ *********************************************************************/
+
+ constexpr static std::array<hash32_t, 5> primes32 = { 2654435761U, 2246822519U, 3266489917U, 668265263U, 374761393U };
+ constexpr static std::array<hash64_t, 5> primes64 = { 11400714785074694791ULL, 14029467366897019727ULL, 1609587929392839161ULL, 9650029242287828579ULL, 2870177450012600261ULL };
+
+		// Declaration only: specialized for N == 32 / 64 right below. The old
+		// '{}' body returned nothing from a constexpr value function (UB if
+		// ever instantiated for another width).
+		template <size_t N>
+		constexpr hash_t<N> PRIME(int32_t n);
+
+ template <>
+ constexpr hash32_t PRIME<32>(int32_t n)
+ {
+ return primes32[n - 1];
+ }
+
+ template <>
+ constexpr hash64_t PRIME<64>(int32_t n)
+ {
+ return primes64[n - 1];
+ }
+
+ template <size_t N>
+ inline hash_t<N> round(hash_t<N> seed, hash_t<N> input)
+ {
+ seed += input * PRIME<N>(2);
+ seed = bit_ops::rotl<N>(seed, ((N == 32) ? 13 : 31));
+ seed *= PRIME<N>(1);
+ return seed;
+ }
+
+ inline hash64_t mergeRound64(hash64_t acc, hash64_t val)
+ {
+ val = round<64>(0, val);
+ acc ^= val;
+ acc = acc * PRIME<64>(1) + PRIME<64>(4);
+ return acc;
+ }
+
+		// Generic case (reached for N == 32): the merge round is a no-op.
+		// Parameters are unnamed to avoid unused-parameter warnings without the
+		// C++17 [[maybe_unused]] attribute — this header must build as C++14
+		// (cf. the SG_HAVE_CPP14 gate).
+		template <size_t N>
+		inline void endian_align_sub_mergeround(hash_t<N>&, hash_t<N>, hash_t<N>, hash_t<N>, hash_t<N>) {}
+
+ template <>
+ inline void endian_align_sub_mergeround<64>(hash_t<64>& hash_ret, hash_t<64> v1, hash_t<64> v2, hash_t<64> v3, hash_t<64> v4)
+ {
+ hash_ret = mergeRound64(hash_ret, v1);
+ hash_ret = mergeRound64(hash_ret, v2);
+ hash_ret = mergeRound64(hash_ret, v3);
+ hash_ret = mergeRound64(hash_ret, v4);
+ }
+
+		// Declaration only: specialized below for N == 32 and N == 64, the only
+		// widths endian_align() permits (static_assert). The old empty body was
+		// UB if reached.
+		template <size_t N>
+		inline hash_t<N> endian_align_sub_ending(hash_t<N> hash_ret, const uint8_t* p, const uint8_t* bEnd, xxh::endianness endian, xxh::alignment align);
+
+ template <>
+ inline hash_t<32> endian_align_sub_ending<32>(hash_t<32> hash_ret, const uint8_t* p, const uint8_t* bEnd, xxh::endianness endian, xxh::alignment align)
+ {
+ while ((p + 4) <= bEnd)
+ {
+ hash_ret += mem_ops::readLE_align<32>(p, endian, align) * PRIME<32>(3);
+ hash_ret = bit_ops::rotl<32>(hash_ret, 17) * PRIME<32>(4);
+ p += 4;
+ }
+
+ while (p < bEnd)
+ {
+ hash_ret += (*p) * PRIME<32>(5);
+ hash_ret = bit_ops::rotl<32>(hash_ret, 11) * PRIME<32>(1);
+ p++;
+ }
+
+ hash_ret ^= hash_ret >> 15;
+ hash_ret *= PRIME<32>(2);
+ hash_ret ^= hash_ret >> 13;
+ hash_ret *= PRIME<32>(3);
+ hash_ret ^= hash_ret >> 16;
+
+ return hash_ret;
+ }
+
+ template <>
+ inline hash_t<64> endian_align_sub_ending<64>(hash_t<64> hash_ret, const uint8_t* p, const uint8_t* bEnd, xxh::endianness endian, xxh::alignment align)
+ {
+ while (p + 8 <= bEnd)
+ {
+ const hash64_t k1 = round<64>(0, mem_ops::readLE_align<64>(p, endian, align));
+ hash_ret ^= k1;
+ hash_ret = bit_ops::rotl<64>(hash_ret, 27) * PRIME<64>(1) + PRIME<64>(4);
+ p += 8;
+ }
+
+ if (p + 4 <= bEnd)
+ {
+ hash_ret ^= static_cast<hash64_t>(mem_ops::readLE_align<32>(p, endian, align)) * PRIME<64>(1);
+ hash_ret = bit_ops::rotl<64>(hash_ret, 23) * PRIME<64>(2) + PRIME<64>(3);
+ p += 4;
+ }
+
+ while (p < bEnd)
+ {
+ hash_ret ^= (*p) * PRIME<64>(5);
+ hash_ret = bit_ops::rotl<64>(hash_ret, 11) * PRIME<64>(1);
+ p++;
+ }
+
+ hash_ret ^= hash_ret >> 33;
+ hash_ret *= PRIME<64>(2);
+ hash_ret ^= hash_ret >> 29;
+ hash_ret *= PRIME<64>(3);
+ hash_ret ^= hash_ret >> 32;
+
+ return hash_ret;
+ }
+
+ template <size_t N>
+ inline hash_t<N> endian_align(const void* input, size_t len, hash_t<N> seed, xxh::endianness endian, xxh::alignment align)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call endian_align in 32 or 64 bit mode.");
+
+ const uint8_t* p = static_cast<const uint8_t*>(input);
+ const uint8_t* bEnd = p + len;
+ hash_t<N> hash_ret;
+
+ if (len >= (N / 2))
+ {
+ const uint8_t* const limit = bEnd - (N / 2);
+ hash_t<N> v1 = seed + PRIME<N>(1) + PRIME<N>(2);
+ hash_t<N> v2 = seed + PRIME<N>(2);
+ hash_t<N> v3 = seed + 0;
+ hash_t<N> v4 = seed - PRIME<N>(1);
+
+ do
+ {
+ v1 = round<N>(v1, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
+ v2 = round<N>(v2, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
+ v3 = round<N>(v3, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
+ v4 = round<N>(v4, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
+ } while (p <= limit);
+
+ hash_ret = bit_ops::rotl<N>(v1, 1) + bit_ops::rotl<N>(v2, 7) + bit_ops::rotl<N>(v3, 12) + bit_ops::rotl<N>(v4, 18);
+
+ endian_align_sub_mergeround<N>(hash_ret, v1, v2, v3, v4);
+ }
+ else { hash_ret = seed + PRIME<N>(5); }
+
+ hash_ret += static_cast<hash_t<N>>(len);
+
+ return endian_align_sub_ending<N>(hash_ret, p, bEnd, endian, align);
+ }
+ }
+
+ template <size_t N>
+ hash_t<N> xxhash(const void* input, size_t len, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
+ return detail::endian_align<N>(input, len, seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(input));
+ }
+
+ template <size_t N, typename T>
+ hash_t<N> xxhash(const std::basic_string<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
+ return detail::endian_align<N>(static_cast<const void*>(input.data()), input.length() * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
+ }
+
+ template <size_t N, typename ContiguousIterator>
+ hash_t<N> xxhash(ContiguousIterator begin, ContiguousIterator end, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
+ using T = typename std::decay_t<decltype(*end)>;
+ return detail::endian_align<N>(static_cast<const void*>(&*begin), (end - begin) * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(&*begin)));
+ }
+
+ template <size_t N, typename T>
+ hash_t<N> xxhash(const std::vector<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
+ return detail::endian_align<N>(static_cast<const void*>(input.data()), input.size() * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
+ }
+
+ template <size_t N, typename T, size_t AN>
+ hash_t<N> xxhash(const std::array<T, AN>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
+ return detail::endian_align<N>(static_cast<const void*>(input.data()), AN * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
+ }
+
+ template <size_t N, typename T>
+ hash_t<N> xxhash(const std::initializer_list<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
+ return detail::endian_align<N>(static_cast<const void*>(input.begin()), input.size() * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.begin())));
+ }
+
+
+ /* *******************************************************************
+ * Hash streaming
+ *********************************************************************/
+ enum class error_code : uint8_t { ok = 0, error };
+
+ template <size_t N>
+ class hash_state_t {
+
+ uint64_t total_len = 0;
+ hash_t<N> v1 = 0, v2 = 0, v3 = 0, v4 = 0;
+ std::array<hash_t<N>, 4> mem = {{ 0,0,0,0 }};
+ uint32_t memsize = 0;
+
+ inline error_code _update_impl(const void* input, size_t length, endianness endian)
+ {
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(input);
+ const uint8_t* const bEnd = p + length;
+
+ if (!input) { return xxh::error_code::error; }
+
+ total_len += length;
+
+ if (memsize + length < (N / 2))
+ { /* fill in tmp buffer */
+ memcpy(reinterpret_cast<uint8_t*>(mem.data()) + memsize, input, length);
+ memsize += static_cast<uint32_t>(length);
+ return error_code::ok;
+ }
+
+ if (memsize)
+ { /* some data left from previous update */
+ memcpy(reinterpret_cast<uint8_t*>(mem.data()) + memsize, input, (N / 2) - memsize);
+
+ const hash_t<N>* ptr = mem.data();
+ v1 = detail::round<N>(v1, mem_ops::readLE<N>(ptr, endian)); ptr++;
+ v2 = detail::round<N>(v2, mem_ops::readLE<N>(ptr, endian)); ptr++;
+ v3 = detail::round<N>(v3, mem_ops::readLE<N>(ptr, endian)); ptr++;
+ v4 = detail::round<N>(v4, mem_ops::readLE<N>(ptr, endian));
+
+ p += (N / 2) - memsize;
+ memsize = 0;
+ }
+
+ if (p <= bEnd - (N / 2))
+ {
+ const uint8_t* const limit = bEnd - (N / 2);
+
+ do
+ {
+ v1 = detail::round<N>(v1, mem_ops::readLE<N>(p, endian)); p += (N / 8);
+ v2 = detail::round<N>(v2, mem_ops::readLE<N>(p, endian)); p += (N / 8);
+ v3 = detail::round<N>(v3, mem_ops::readLE<N>(p, endian)); p += (N / 8);
+ v4 = detail::round<N>(v4, mem_ops::readLE<N>(p, endian)); p += (N / 8);
+ } while (p <= limit);
+ }
+
+ if (p < bEnd)
+ {
+ memcpy(mem.data(), p, static_cast<size_t>(bEnd - p));
+ memsize = static_cast<uint32_t>(bEnd - p);
+ }
+
+ return error_code::ok;
+ }
+
+ inline hash_t<N> _digest_impl(endianness endian) const
+ {
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(mem.data());
+ const uint8_t* const bEnd = reinterpret_cast<const uint8_t*>(mem.data()) + memsize;
+ hash_t<N> hash_ret;
+
+ if (total_len > (N / 2))
+ {
+ hash_ret = bit_ops::rotl<N>(v1, 1) + bit_ops::rotl<N>(v2, 7) + bit_ops::rotl<N>(v3, 12) + bit_ops::rotl<N>(v4, 18);
+
+ detail::endian_align_sub_mergeround<N>(hash_ret, v1, v2, v3, v4);
+ }
+ else { hash_ret = v3 + detail::PRIME<N>(5); }
+
+ hash_ret += static_cast<hash_t<N>>(total_len);
+
+ return detail::endian_align_sub_ending<N>(hash_ret, p, bEnd, endian, alignment::unaligned);
+ }
+
+ public:
+ hash_state_t(hash_t<N> seed = 0)
+ {
+ static_assert(!(N != 32 && N != 64), "You can only stream hashing in 32 or 64 bit mode.");
+ v1 = seed + detail::PRIME<N>(1) + detail::PRIME<N>(2);
+ v2 = seed + detail::PRIME<N>(2);
+ v3 = seed + 0;
+ v4 = seed - detail::PRIME<N>(1);
+ };
+
+		// Copy-assignment. The original passed the reference itself to memcpy
+		// (it needs '&other', so it could not even compile when instantiated)
+		// and fell off the end of a value-returning function.
+		hash_state_t& operator=(const hash_state_t<N>& other)
+		{
+			if (this != &other)
+				memcpy(this, &other, sizeof(hash_state_t<N>)); // state is trivially copyable
+			return *this;
+		}
+
+ error_code reset(hash_t<N> seed = 0)
+ {
+ memset(this, 0, sizeof(hash_state_t<N>));
+ v1 = seed + detail::PRIME<N>(1) + detail::PRIME<N>(2);
+ v2 = seed + detail::PRIME<N>(2);
+ v3 = seed + 0;
+ v4 = seed - detail::PRIME<N>(1);
+ return error_code::ok;
+ }
+
+ error_code update(const void* input, size_t length, endianness endian = endianness::unspecified)
+ {
+ return _update_impl(input, length, mem_ops::get_endian(endian));
+ }
+
+ template <typename T>
+ error_code update(const std::basic_string<T>& input, endianness endian = endianness::unspecified)
+ {
+ return _update_impl(static_cast<const void*>(input.data()), input.length() * sizeof(T), mem_ops::get_endian(endian));
+ }
+
+ template <typename ContiguousIterator>
+ error_code update(ContiguousIterator begin, ContiguousIterator end, endianness endian = endianness::unspecified)
+ {
+ using T = typename std::decay_t<decltype(*end)>;
+ return _update_impl(static_cast<const void*>(&*begin), (end - begin) * sizeof(T), mem_ops::get_endian(endian));
+ }
+
+ template <typename T>
+ error_code update(const std::vector<T>& input, endianness endian = endianness::unspecified)
+ {
+ return _update_impl(static_cast<const void*>(input.data()), input.size() * sizeof(T), mem_ops::get_endian(endian));
+ }
+
+ template <typename T, size_t AN>
+ error_code update(const std::array<T, AN>& input, endianness endian = endianness::unspecified)
+ {
+ return _update_impl(static_cast<const void*>(input.data()), AN * sizeof(T), mem_ops::get_endian(endian));
+ }
+
+ template <typename T>
+ error_code update(const std::initializer_list<T>& input, endianness endian = endianness::unspecified)
+ {
+ return _update_impl(static_cast<const void*>(input.begin()), input.size() * sizeof(T), mem_ops::get_endian(endian));
+ }
+
+ hash_t<N> digest(endianness endian = endianness::unspecified)
+ {
+ return _digest_impl(mem_ops::get_endian(endian));
+ }
+ };
+
+ using hash_state32_t = hash_state_t<32>;
+ using hash_state64_t = hash_state_t<64>;
+
+
+ /* *******************************************************************
+ * Canonical
+ *********************************************************************/
+
+	template <size_t N>
+	struct canonical_t
+	{
+		std::array<uint8_t, N / 8> digest;
+
+		// Store 'hash' in canonical (big-endian) byte order.
+		// (The stray '\' line-splice after the member above was removed.)
+		canonical_t(hash_t<N> hash)
+		{
+			if (mem_ops::is_little_endian()) { hash = bit_ops::swap<N>(hash); }
+			memcpy(digest.data(), &hash, sizeof(hash)); // sizeof(hash) == N / 8 == sizeof(digest)
+		}
+
+		hash_t<N> get_hash() const
+		{
+			return mem_ops::readBE<N>(&digest);
+		}
+	};
+
+ using canonical32_t = canonical_t<32>;
+ using canonical64_t = canonical_t<64>;
+}
#include "xbt/log.h"
#include "xbt/sysdep.h"
-#include "src/mc/sosp/PageStore.hpp"
+#ifdef SG_HAVE_CPP14
+#include "src/include/xxhash.hpp"
+#endif
#include "src/mc/mc_mmu.hpp"
+#include "src/mc/sosp/PageStore.hpp"
#include <cstring> // memcpy, memcmp
#include <unistd.h>
*/
static XBT_ALWAYS_INLINE PageStore::hash_type mc_hash_page(const void* data)
{
+#ifdef SG_HAVE_CPP14
+ return xxh::xxhash<64>(data, xbt_pagesize);
+#else
const std::uint64_t* values = (const uint64_t*)data;
std::size_t n = xbt_pagesize / sizeof(uint64_t);
for (std::size_t i = 0; i != n; ++i)
hash = ((hash << 5) + hash) + values[i];
return hash;
+#endif
}
// ***** snapshot_page_manager
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Attr_put,(MPI_Comm comm, int keyval, void* attr_value) ,(comm, keyval, attr_value))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Barrier,(MPI_Comm comm),(comm))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Bcast,(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm),(buf, count, datatype, root, comm))
+WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Bsend_init,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request),(buf, count, datatype, dest, tag, comm, request))
+WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Bsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) ,(buf, count, datatype, dest, tag, comm))
+WRAPPED_PMPI_CALL(int,MPI_Buffer_attach,(void* buffer, int size) ,(buffer, size))
+WRAPPED_PMPI_CALL(int,MPI_Buffer_detach,(void* buffer, int* size) ,(buffer, size))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Cart_coords,(MPI_Comm comm, int rank, int maxdims, int* coords) ,(comm, rank, maxdims, coords))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Cart_create,(MPI_Comm comm, int ndims, const int* dims, const int* periods, int reorder, MPI_Comm* comm_cart) ,(comm, ndims, dims, periods, reorder, comm_cart))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Cartdim_get,(MPI_Comm comm, int* ndims) ,(comm, ndims))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Iprobe,(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status) ,(source, tag, comm, flag, status))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Irecv,(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request),(buf, count, datatype, src, tag, comm, request))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Isend,(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request),(buf, count, datatype, dst, tag, comm, request))
+WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Ibsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) ,(buf, count, datatype, dest, tag, comm, request))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Issend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) ,(buf, count, datatype, dest, tag, comm, request))
WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Irsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request),(buf, count, datatype, dest, tag, comm, request))
WRAPPED_PMPI_CALL(int,MPI_Is_thread_main,(int *flag),(flag))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Add_error_class,( int *errorclass),( errorclass))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Add_error_code,(int errorclass, int *errorcode),(errorclass, errorcode))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Add_error_string,( int errorcode, char *string),(errorcode, string))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Bsend_init,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request),(buf, count, datatype, dest, tag, comm, request))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Bsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) ,(buf, count, datatype, dest, tag, comm))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Buffer_attach,(void* buffer, int size) ,(buffer, size))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Buffer_detach,(void* buffer, int* size) ,(buffer, size))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Cart_map,(MPI_Comm comm_old, int ndims, const int* dims, const int* periods, int* newrank) ,(comm_old, ndims, dims, periods, newrank))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Close_port,(const char *port_name),( port_name))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Comm_accept,(const char *port_name, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *newcomm),( port_name, info, root, comm, newcomm))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_map,(MPI_Comm comm_old, int nnodes, const int* index, const int* edges, int* newrank) ,(comm_old, nnodes, index, edges, newrank))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_neighbors_count,(MPI_Comm comm, int rank, int* nneighbors) ,(comm, rank, nneighbors))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_neighbors,(MPI_Comm comm, int rank, int maxneighbors, int* neighbors) ,(comm, rank, maxneighbors, neighbors))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Ibsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) ,(buf, count, datatype, dest, tag, comm, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Intercomm_create,(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag,MPI_Comm* comm_out) ,(local_comm, local_leader, peer_comm, remote_leader, tag, comm_out))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Intercomm_merge,(MPI_Comm comm, int high, MPI_Comm* comm_out) ,(comm, high, comm_out))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Lookup_name,( char *service_name, MPI_Info info, char *port_name),( service_name, info, port_name))
return -1;
return errhan->c2f();
}
+
+/* MPI_Buffer_attach: register a user-provided buffer for later MPI_Bsend/MPI_Ibsend calls.
+ * Returns MPI_ERR_BUFFER for a null buffer and MPI_ERR_ARG for a negative size. */
+int PMPI_Buffer_attach(void *buf, int size){
+  if(buf==nullptr)
+    return MPI_ERR_BUFFER;
+  if(size<0)
+    return MPI_ERR_ARG;
+  smpi_process()->set_bsend_buffer(buf, size);
+  return MPI_SUCCESS;
+}
+
+/* MPI_Buffer_detach: unregister the current bsend buffer.
+ * Per the MPI standard, `buffer` is really a void** passed as void*: the address of the
+ * attached buffer is written through it and its size into *size (hence the cast below).
+ * NOTE(review): MPI requires detach to block until buffered messages are delivered —
+ * confirm that is guaranteed elsewhere, as this implementation returns immediately. */
+int PMPI_Buffer_detach(void* buffer, int* size){
+  smpi_process()->bsend_buffer((void**)buffer, size);
+  smpi_process()->set_bsend_buffer(nullptr, 0);
+  return MPI_SUCCESS;
+}
smpi_bench_end();
if (*request != MPI_REQUEST_NULL) {
simgrid::smpi::Request::unref(request);
+ *request = MPI_REQUEST_NULL;
retval = MPI_SUCCESS;
}
smpi_bench_begin();
return PMPI_Send(buf, count, datatype, dst, tag, comm);
}
+/* MPI_Bsend: blocking buffered send.
+ * The message (plus MPI_BSEND_OVERHEAD) must fit in the buffer previously attached
+ * with MPI_Buffer_attach, otherwise MPI_ERR_BUFFER is returned. */
+int PMPI_Bsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (dst == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (dst >= comm->group()->size() || dst < 0) {
+    retval = MPI_ERR_RANK;
+  } else if ((count < 0) || (buf == nullptr && count > 0)) {
+    retval = MPI_ERR_COUNT;
+  } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (tag < 0 && tag != MPI_ANY_TAG) {
+    retval = MPI_ERR_TAG;
+  } else {
+    int my_proc_id = simgrid::s4u::this_actor::get_pid();
+    int dst_traced = getPid(comm, dst);
+    int bsend_buf_size = 0;
+    void* bsend_buf = nullptr;
+    smpi_process()->bsend_buffer(&bsend_buf, &bsend_buf_size);
+    int size = datatype->get_extent() * count;
+    if (bsend_buf == nullptr || bsend_buf_size < size + MPI_BSEND_OVERHEAD) {
+      // Fail through the common exit path (no early return), so that
+      // smpi_bench_begin() is executed on every way out of this function.
+      retval = MPI_ERR_BUFFER;
+    } else {
+      TRACE_smpi_comm_in(my_proc_id, __func__,
+                         new simgrid::instr::Pt2PtTIData("bsend", dst,
+                                                         datatype->is_replayable() ? count : count * datatype->size(),
+                                                         tag, simgrid::smpi::Datatype::encode(datatype)));
+      if (not TRACE_smpi_view_internals()) {
+        TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, tag, count * datatype->size());
+      }
+
+      simgrid::smpi::Request::bsend(buf, count, datatype, dst, tag, comm);
+      retval = MPI_SUCCESS;
+
+      TRACE_smpi_comm_out(my_proc_id);
+    }
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+/* MPI_Ibsend: non-blocking buffered send. Validates arguments, checks that the
+ * attached buffer can hold the message, then creates and starts the request.
+ * On error the cleanup at the bottom resets *request to MPI_REQUEST_NULL. */
+int PMPI_Ibsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+  if (request == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (dst == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (dst >= comm->group()->size() || dst < 0) {
+    retval = MPI_ERR_RANK;
+  } else if ((count < 0) || (buf == nullptr && count > 0)) {
+    retval = MPI_ERR_COUNT;
+  } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (tag < 0 && tag != MPI_ANY_TAG) {
+    retval = MPI_ERR_TAG;
+  } else {
+    int my_proc_id = simgrid::s4u::this_actor::get_pid();
+    int trace_dst = getPid(comm, dst);
+    int bsend_buf_size = 0;
+    void* bsend_buf = nullptr;
+    smpi_process()->bsend_buffer(&bsend_buf, &bsend_buf_size);
+    int size = datatype->get_extent() * count;
+    if (bsend_buf == nullptr || bsend_buf_size < size + MPI_BSEND_OVERHEAD) {
+      // Fail through the common exit path (no early return): smpi_bench_begin()
+      // must run, and the cleanup below must reset *request on error.
+      retval = MPI_ERR_BUFFER;
+    } else {
+      TRACE_smpi_comm_in(my_proc_id, __func__,
+                         new simgrid::instr::Pt2PtTIData("ibsend", dst,
+                                                         datatype->is_replayable() ? count : count * datatype->size(),
+                                                         tag, simgrid::smpi::Datatype::encode(datatype)));
+
+      TRACE_smpi_send(my_proc_id, my_proc_id, trace_dst, tag, count * datatype->size());
+
+      *request = simgrid::smpi::Request::ibsend(buf, count, datatype, dst, tag, comm);
+      retval = MPI_SUCCESS;
+
+      TRACE_smpi_comm_out(my_proc_id);
+    }
+  }
+
+  smpi_bench_begin();
+  if (retval != MPI_SUCCESS && request != nullptr)
+    *request = MPI_REQUEST_NULL;
+  return retval;
+}
+
+/* MPI_Bsend_init: create a persistent buffered-send request (inactive until MPI_Start).
+ * The attached buffer must already be large enough for the message plus overhead.
+ * NOTE(review): unlike PMPI_Ibsend, count/tag/rank are not validated here — confirm intended. */
+int PMPI_Bsend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+  if (request == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (dst == MPI_PROC_NULL) {
+    // A send to MPI_PROC_NULL succeeds but yields no actual request;
+    // *request must still be set so the caller does not read garbage.
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else {
+    int bsend_buf_size = 0;
+    void* bsend_buf = nullptr;
+    smpi_process()->bsend_buffer(&bsend_buf, &bsend_buf_size);
+    if (bsend_buf == nullptr || bsend_buf_size < datatype->get_extent() * count + MPI_BSEND_OVERHEAD) {
+      retval = MPI_ERR_BUFFER;
+    } else {
+      *request = simgrid::smpi::Request::bsend_init(buf, count, datatype, dst, tag, comm);
+      retval = MPI_SUCCESS;
+    }
+  }
+  smpi_bench_begin();
+  if (retval != MPI_SUCCESS && request != nullptr)
+    *request = MPI_REQUEST_NULL;
+  return retval;
+}
+
int PMPI_Ssend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) {
int retval = 0;
constexpr unsigned MPI_REQ_ACCUMULATE = 0x400;
constexpr unsigned MPI_REQ_GENERALIZED = 0x800;
constexpr unsigned MPI_REQ_COMPLETE = 0x1000;
+constexpr unsigned MPI_REQ_BSEND = 0x2000;
enum class SmpiProcessState { UNINITIALIZED, INITIALIZING, INITIALIZED /*(=MPI_Init called)*/, FINALIZED };
#endif
std::string tracing_category_ = "";
MPI_Info info_env_;
-
+ void* bsend_buffer_ = nullptr;
+ int bsend_buffer_size_ = 0;
+
#if HAVE_PAPI
/** Contains hardware data as read by PAPI **/
int papi_event_set_;
int get_optind();
void set_optind(int optind);
MPI_Info info_env();
+ void bsend_buffer(void** buf, int* size);
+ void set_bsend_buffer(void* buf, int size);
};
} // namespace smpi
static void finish_wait(MPI_Request* request, MPI_Status* status);
static void unref(MPI_Request* request);
static int wait(MPI_Request* req, MPI_Status* status);
+ static MPI_Request bsend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request send_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request isend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request ssend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request rma_recv_init(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag,
MPI_Comm comm, MPI_Op op);
static MPI_Request irecv_init(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm);
+ static MPI_Request ibsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request isend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request issend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static MPI_Request irecv(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm);
static void recv(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status* status);
+ static void bsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static void send(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
static void ssend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
optind_ = new_optind;
}
+/* Return (through the out-parameters) the currently attached bsend buffer and its size. */
+void ActorExt::bsend_buffer(void** buf, int* size)
+{
+  *buf = bsend_buffer_;
+  *size = bsend_buffer_size_;
+}
+
+/* Record the buffer attached by MPI_Buffer_attach (or clear it with nullptr/0 on detach). */
+void ActorExt::set_bsend_buffer(void* buf, int size)
+{
+  bsend_buffer_ = buf;
+  bsend_buffer_size_= size;
+}
+
} // namespace smpi
} // namespace simgrid
/* factories, to hide the internal flags from the caller */
+/* Build a persistent buffered-send request. MPI_REQ_PREPARED means it is not
+ * started here; MPI_REQ_BSEND marks it for the buffered (detached-copy) path. */
+MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+
+  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+                     comm->group()->actor(dst)->get_pid(), tag, comm,
+                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
+}
+
MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
}
+/* Non-blocking buffered send: build the request and start it immediately.
+ * MPI_REQ_BSEND forces the detached-send path (the payload is copied out),
+ * as added to the detach condition in finish-time handling. */
+MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
+  request->start();
+  return request;
+}
+
MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
request = nullptr;
}
+/* Blocking buffered send: create the MPI_REQ_BSEND request, start it, and wait
+ * for local completion. The wait returns once the data has been copied out,
+ * since MPI_REQ_BSEND selects the detached-send path. */
+void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+                        comm->group()->actor(dst)->get_pid(), tag, comm, MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
+
+  request->start();
+  wait(&request, MPI_STATUS_IGNORE);
+  request = nullptr;
+}
+
void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
{
MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
void* buf = buf_;
if ((flags_ & MPI_REQ_SSEND) == 0 &&
- ((flags_ & MPI_REQ_RMA) != 0 ||
+ ((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
static_cast<int>(size_) < simgrid::config::get_value<int>("smpi/send-is-detached-thresh"))) {
void *oldbuf = nullptr;
detached_ = true;
XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
}
+  // We need this temporary buffer even for bsend, as it will be released in the copy callback,
+  // and we have no way to tell it apart there. So the manually attached buffer space is actually not used.
buf = xbt_malloc(size_);
memcpy(buf,oldbuf,size_);
XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
#include <ns3/ipv4-address-helper.h>
#include <ns3/packet-sink-helper.h>
#include <ns3/point-to-point-helper.h>
+#include <ns3/application-container.h>
+#include <ns3/event-id.h>
#include "network_ns3.hpp"
#include "ns3/ns3_simulator.hpp"
*****************/
extern std::map<std::string, SgFlow*> flow_from_sock;
+extern std::map<std::string, ns3::ApplicationContainer> sink_from_sock;
static ns3::InternetStackHelper stack;
static ns3::NodeContainer nodes;
double NetworkNS3Model::next_occuring_event(double now)
{
- double time_to_next_flow_completion;
+ double time_to_next_flow_completion = 0.0;
XBT_DEBUG("ns3_next_occuring_event");
//get the first relevant value from the running_actions list
+
+  // If there are no communications in NS-3, then we do not move it forward.
+  // We will synchronize NS-3 with SimGrid when starting a new communication.
+  // (see NetworkNS3Action::NetworkNS3Action() for more details on this point)
if (get_started_action_set()->empty() || now == 0.0)
return -1.0;
- bool ns3_processed_all_finished_flows;
- do {
- double delta = surf_get_clock() + now - ns3::Simulator::Now().GetSeconds();
- ns3_simulator(delta);
- time_to_next_flow_completion = ns3::Simulator::Now().GetSeconds() - surf_get_clock();
-
- // NS3 stops as soon as it detects that a flow is finished.
- // However, to stop NS3 in a consistant state for the current simulated time,
- // we need to make sure that NS3 detects all the flows finishing at the current time.
- ns3_processed_all_finished_flows = true;
- // A flow that has 0 remaining_ is finishing at the current simulated time.
- // However, NS3 hadn't notice it yet if finished_ == false.
- for (const auto& elm : flow_from_sock) {
- SgFlow* sgFlow = elm.second;
- if(!sgFlow->finished_ && sgFlow->remaining_ == 0){
- ns3_processed_all_finished_flows = false;
- break;
- }
- }
- } while (!ns3_processed_all_finished_flows || double_equals(time_to_next_flow_completion, 0, sg_surf_precision));
-
+ XBT_DEBUG("doing a ns3 simulation for a duration of %f", now);
+ ns3_simulator(now);
+ time_to_next_flow_completion = ns3::Simulator::Now().GetSeconds() - surf_get_clock();
+ // NS-3 stops as soon as a flow ends,
+ // but it does not process the other flows that may finish at the same (simulated) time.
+ // If another flow ends at the same time, time_to_next_flow_completion = 0
+ if(double_equals(time_to_next_flow_completion, 0, sg_surf_precision))
+ time_to_next_flow_completion = 0.0;
+
XBT_DEBUG("min : %f", now);
XBT_DEBUG("ns3 time : %f", ns3::Simulator::Now().GetSeconds());
XBT_DEBUG("surf time : %f", surf_get_clock());
{
static std::vector<std::string> socket_to_destroy;
- /* If there are no running flows, advance the ns-3 simulator and return */
- if (get_started_action_set()->empty()) {
-
- while(double_positive(now - ns3::Simulator::Now().GetSeconds(), sg_surf_precision))
- ns3_simulator(now-ns3::Simulator::Now().GetSeconds());
-
- return;
- }
-
std::string ns3_socket;
for (const auto& elm : flow_from_sock) {
ns3_socket = elm.first;
SgFlow* sgFlow = elm.second;
NetworkNS3Action * action = sgFlow->action_;
XBT_DEBUG("Processing socket %p (action %p)",sgFlow,action);
- action->set_remains(action->get_cost() - sgFlow->sent_bytes_);
+    // Because NS3 stops as soon as a flow is finished, the other flows that end at the same time may remain in an
+    // inconsistent state (i.e. remains_ == 0 but finished_ == false).
+    // However, SimGrid sometimes considers that an action with remains_ == 0 is finished.
+    // Thus, to avoid inconsistencies between SimGrid and NS3, set remains to 0 only when the flow is finished in NS3
+ int remains = action->get_cost() - sgFlow->sent_bytes_;
+ if(remains > 0)
+ action->set_remains(remains);
if (TRACE_is_enabled() && action->get_state() == kernel::resource::Action::State::STARTED) {
double data_delta_sent = sgFlow->sent_bytes_ - action->last_sent_;
if(sgFlow->finished_){
socket_to_destroy.push_back(ns3_socket);
XBT_DEBUG("Destroy socket %p of action %p", ns3_socket.c_str(), action);
+ action->set_remains(0);
action->finish(kernel::resource::Action::State::FINISHED);
} else {
XBT_DEBUG("Socket %p sent %u bytes out of %u (%u remaining)", ns3_socket.c_str(), sgFlow->sent_bytes_,
}
delete flow;
flow_from_sock.erase(ns3_socket);
+ sink_from_sock.erase(ns3_socket);
}
}
NetworkNS3Action::NetworkNS3Action(Model* model, double totalBytes, s4u::Host* src, s4u::Host* dst)
: NetworkAction(model, totalBytes, false), src_(src), dst_(dst)
{
+
+ // If there is no other started actions, we need to move NS-3 forward to be sync with SimGrid
+ if (model->get_started_action_set()->size()==1){
+ while(double_positive(surf_get_clock() - ns3::Simulator::Now().GetSeconds(), sg_surf_precision)){
+ XBT_DEBUG("Synchronizing NS-3 (time %f) with SimGrid (time %f)", ns3::Simulator::Now().GetSeconds(), surf_get_clock());
+ ns3_simulator(surf_get_clock() - ns3::Simulator::Now().GetSeconds());
+ }
+ }
+
XBT_DEBUG("Communicate from %s to %s", src->get_cname(), dst->get_cname());
static int port_number = 1025; // Port number is limited from 1025 to 65 000
XBT_DEBUG("ns3: Create flow of %.0f Bytes from %u to %u with Interface %s", totalBytes, node1, node2, addr.c_str());
ns3::PacketSinkHelper sink("ns3::TcpSocketFactory", ns3::InetSocketAddress(ns3::Ipv4Address::GetAny(), port_number));
- sink.Install(dst_node);
+ ns3::ApplicationContainer apps = sink.Install(dst_node);
ns3::Ptr<ns3::Socket> sock = ns3::Socket::CreateSocket(src_node, ns3::TcpSocketFactory::GetTypeId());
flow_from_sock.insert({transform_socket_ptr(sock), new SgFlow(totalBytes, this)});
+ sink_from_sock.insert({transform_socket_ptr(sock), apps});
sock->Bind(ns3::InetSocketAddress(port_number));
ns3::Simulator::ScheduleNow(&start_flow, sock, addr.c_str(), port_number);
port_number++;
+ if(port_number > 65000){
+ port_number = 1025;
+ XBT_WARN("Too many connections! Port number is saturated. Trying to use the oldest ports.");
+ }
xbt_assert(port_number <= 65000, "Too many connections! Port number is saturated.");
s4u::Link::on_communicate(*this, src, dst);
void ns3_simulator(double maxSeconds)
{
+  ns3::EventId id;
  if (maxSeconds > 0.0) // If there is a maximum amount of time to run
-    ns3::Simulator::Stop(ns3::Seconds(maxSeconds));
+    // Schedule a cancellable Stop event rather than calling Simulator::Stop(delay):
+    // keeping the EventId lets us cancel it below once Run() has returned,
+    // presumably so a leftover stop event cannot fire during a later call — see Cancel() at the end.
+    id = ns3::Simulator::Schedule(ns3::Seconds(maxSeconds), &ns3::Simulator::Stop);
+
  XBT_DEBUG("Start simulator for at most %fs (current time: %f)", maxSeconds, surf_get_clock());
  ns3::Simulator::Run ();
+  XBT_DEBUG("Simulator stopped at %fs", ns3::Simulator::Now().GetSeconds());
+
+  if(maxSeconds > 0.0)
+    id.Cancel();
}
// initialize the ns-3 interface and environment
#include <ns3/ipv4-address-helper.h>
#include <ns3/point-to-point-helper.h>
+#include <ns3/application-container.h>
+#include <ns3/ptr.h>
+#include <ns3/callback.h>
+#include <ns3/packet-sink.h>
#include <algorithm>
std::map<std::string, SgFlow*> flow_from_sock; // ns3::sock -> SgFlow
+std::map<std::string, ns3::ApplicationContainer> sink_from_sock; // ns3::sock -> ns3::PacketSink
static void receive_callback(ns3::Ptr<ns3::Socket> socket);
static void datasent_cb(ns3::Ptr<ns3::Socket> socket, uint32_t dataSent);
return (it == flow_from_sock.end()) ? nullptr : it->second;
}
+/* Look up the ns-3 ApplicationContainer (packet sink) registered for this socket,
+ * or nullptr if the socket is unknown. */
+static ns3::ApplicationContainer* getSinkFromSocket(ns3::Ptr<ns3::Socket> socket)
+{
+  auto it = sink_from_sock.find(transform_socket_ptr(socket));
+  return (it == sink_from_sock.end()) ? nullptr : &(it->second);
+}
+
static void receive_callback(ns3::Ptr<ns3::Socket> socket)
{
SgFlow* flow = getFlowFromSocket(socket);
flow->finished_ = true;
XBT_DEBUG("recv_cb of F[%p, %p, %u]", flow, flow->action_, flow->total_bytes_);
XBT_DEBUG("Stop simulator at %f seconds", ns3::Simulator::Now().GetSeconds());
- ns3::Simulator::Stop(ns3::Seconds(0.0));
- ns3::Simulator::Run();
+ ns3::Simulator::Stop();
}
}
static void send_cb(ns3::Ptr<ns3::Socket> sock, uint32_t txSpace)
{
SgFlow* flow = getFlowFromSocket(sock);
+ ns3::ApplicationContainer* sink = getSinkFromSocket(sock);
XBT_DEBUG("Asked to write on F[%p, total: %u, remain: %u]", flow, flow->total_bytes_, flow->remaining_);
if (flow->remaining_ == 0) // all data was already buffered (and socket was already closed)
flow->remaining_);
}
- if (flow->buffered_bytes_ >= flow->total_bytes_)
+ if (flow->buffered_bytes_ >= flow->total_bytes_){
+ XBT_DEBUG("Closing Sockets of flow %p", flow);
+ // Closing the sockets of the receiving application
+ ns3::Ptr<ns3::PacketSink> app = ns3::DynamicCast<ns3::PacketSink, ns3::Application>(sink->Get(0));
+ ns3::Ptr<ns3::Socket> listening_sock = app->GetListeningSocket();
+ listening_sock->Close();
+ listening_sock->SetRecvCallback(ns3::MakeNullCallback<void, ns3::Ptr<ns3::Socket>>());
+ for(ns3::Ptr<ns3::Socket> accepted_sock : app->GetAcceptedSockets())
+ accepted_sock->Close();
+ // Closing the socket of the sender
sock->Close();
+ }
}
static void datasent_cb(ns3::Ptr<ns3::Socket> socket, uint32_t dataSent)
// tell the tcp implementation to call send_cb again
// if we blocked and new tx buffer space becomes available
sock->SetSendCallback(MakeCallback(&send_cb));
- // Notice when the send is over
- sock->SetRecvCallback(MakeCallback(&receive_callback));
// Notice when we actually sent some data (mostly for the TRACING module)
sock->SetDataSentCallback(MakeCallback(&datasent_cb));
# The output is not relevant
ADD_TEST(tesh-s4u-comm-pt2pt ${CMAKE_BINARY_DIR}/teshsuite/s4u/comm-pt2pt/comm-pt2pt ${CMAKE_HOME_DIRECTORY}/examples/platforms/cluster_backbone.xml)
+# NS-3 specific tests
+if(SIMGRID_HAVE_NS3)
+ foreach(x ns3-simultaneous-send-rcv)
+ add_executable (${x} EXCLUDE_FROM_ALL ${x}/${x}.cpp)
+ target_link_libraries(${x} simgrid)
+ set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
+ add_dependencies(tests ${x})
+ ADD_TESH(tesh-s4u-${x} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_BINARY_DIR}/teshsuite/s4u/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x}/${x}.tesh)
+ endforeach()
+endif()
+foreach(x ns3-simultaneous-send-rcv)
+ set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.cpp)
+ set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
+endforeach()
set(teshsuite_src ${teshsuite_src} PARENT_SCOPE)
set(tesh_files ${tesh_files} PARENT_SCOPE)
--- /dev/null
+/* Copyright (c) 2019. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* This test checks that ns-3 behaves correctly when multiple flows finish  */
+/* at the exact same time. Given the amount of simultaneous senders, it */
+/* also serves as a (small) crash test for ns-3. */
+
+#include "simgrid/s4u.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u tests");
+
+const int payload = 1000;
+const int nb_message_to_send = 5;
+const double sleep_time = 5;
+const int nb_sender = 100;
+
+int nb_messages_sent = 0;
+
+simgrid::s4u::Mailbox* box;
+
+/* Each sender pushes nb_message_to_send messages into the shared mailbox,
+ * then sleeps until the next multiple of sleep_time between two sends so that
+ * many flows end at the exact same simulated time. */
+static void test_send(){
+  for (int nb_message = 0; nb_message < nb_message_to_send; nb_message++) {
+    nb_messages_sent++; // global counter shared by all senders
+    XBT_VERB("start sending test #%i", nb_messages_sent);
+    box->put(new int(nb_messages_sent), payload);
+    XBT_VERB("done sending test #%i", nb_messages_sent);
+    simgrid::s4u::this_actor::sleep_until(sleep_time * (nb_message + 1));
+  }
+}
+
+/* Drain every message sent by every sender, then report completion.
+ * The payload is an int allocated with `new` on the sender side: free it here
+ * to avoid leaking one allocation per message. */
+static void test_receive(){
+  for (int nb_message = 0; nb_message < nb_message_to_send * nb_sender; nb_message++) {
+    XBT_VERB("waiting for messages");
+    int* payload_ptr = static_cast<int*>(box->get());
+    int id = *payload_ptr;
+    delete payload_ptr; // the sender allocated this with `new int(...)`
+    XBT_VERB("received messages #%i", id);
+  }
+  XBT_INFO("Done receiving from %d senders, each of them sending %d messages", nb_sender, nb_message_to_send);
+}
+
+
+/* Start one receiver on the first host and nb_sender senders spread round-robin
+ * over the remaining hosts, all funneling into a single mailbox. */
+int main(int argc, char *argv[])
+{
+  simgrid::s4u::Engine e(&argc, argv);
+
+  // Guard against a missing platform argument before dereferencing argv[1]
+  xbt_assert(argc > 1, "Usage: %s platform_file\n", argv[0]);
+  e.load_platform(argv[1]);
+
+  simgrid::s4u::ActorPtr receiver = simgrid::s4u::Actor::create("receiver", e.get_all_hosts()[0], test_receive);
+  for (int i = 0; i < nb_sender; i++)
+    simgrid::s4u::Actor::create("sender_" + std::to_string(i), e.get_all_hosts()[i % (e.get_host_count() - 1) + 1],
+                                test_send);
+
+  box = simgrid::s4u::Mailbox::by_name("test");
+  box->set_receiver(receiver);
+
+  e.run();
+
+  return 0;
+}
--- /dev/null
+p In the ns-3 tests, the timings are not shown because the exact values may vary with your ns-3 version.
+p We just want to check that the ns-3 bindings of SimGrid are working correctly, we don't want to thoroughly test ns-3.
+
+$ ./ns3-simultaneous-send-rcv ${platfdir}/ns3-big-cluster.xml --cfg=network/model:ns-3 "--log=root.fmt:[%h:%P(%i)]%e[%c/%p]%e%m%n"
+> [:maestro(0)] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'ns-3'
+> [c-01.rennes:receiver(1)] [s4u_test/INFO] Done receiving from 100 senders, each of them sending 5 messages
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
foreach(file anyall bottom eagerdt huge_anysrc huge_underflow inactivereq isendself isendirecv isendselfprobe issendselfcancel cancelanysrc pingping probenull
- dtype_send greq1 probe-unexp rqstatus sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull many_isend manylmt recv_any sendself scancel scancel2 rcancel)
- # not compiled files: big_count_status bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending mprobe
- # cancelrecv icsend large_message pscancel rqfreeb scancel_unmatch
+ dtype_send greq1 probe-unexp rqstatus sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull many_isend manylmt recv_any sendself scancel scancel2 rcancel bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending rqfreeb)
+ # not compiled files: big_count_status mprobe
+ # cancelrecv icsend large_message pscancel scancel_unmatch
add_executable(${file} EXCLUDE_FROM_ALL ${file}.c)
add_dependencies(tests ${file})
target_link_libraries(${file} simgrid mtest_c)
free(buf);
}
else if (rank == dest) {
- double tstart;
+/* double tstart;*/
/* Clear the message buffers */
for (i = 0; i < msgsize; i++) {
NULL, 0, MPI_UNSIGNED_CHAR, source, 10, comm, MPI_STATUS_IGNORE);
/* Wait 2 seconds */
- tstart = MPI_Wtime();
- while (MPI_Wtime() - tstart < 2.0);
-
+/* tstart = MPI_Wtime();*/
+/* while (MPI_Wtime() - tstart < 2.0);*/
+ sleep(2);
/* Now receive the messages */
MPI_Recv(msg1, msgsize, MPI_UNSIGNED_CHAR, source, 0, comm, &status1);
MPI_Recv(msg2, msgsize, MPI_UNSIGNED_CHAR, source, 0, comm, &status2);
eagerdt 2
pingping 2
bottom 2
-#needs MPI_Bsend
-#bsend1 1
-#bsend2 1
-#bsend3 1
-#bsend4 1
+bsend1 1
+bsend2 1
+bsend3 1
+bsend4 1
+#needs MPI_Intercomm_create
#bsend5 4
-#bsendalign 2
-#bsendpending 2
+bsendalign 2
+bsendpending 2
isendself 1
#issendselfcancel 1
isendirecv 10
-#needs MPI_Buffer_attach, MPI_Bsend, MPI_Buffer_detach
-#bsendfrag 2
+bsendfrag 2
#needs MPI_Intercomm_create
#icsend 4
rqstatus 2
-#needs MPI_Pack, MPI_Buffer_attach, MPI_Buffer_detach, MPI_Ibsend
-#rqfreeb 4
+rqfreeb 4
#needs MPI_Grequest_start MPI_Grequest_complete
greq1 1
probe-unexp 4
-foreach(x lmm_usage surf_usage surf_usage2)
+foreach(x lmm_usage surf_usage surf_usage2 wifi_usage)
add_executable (${x} EXCLUDE_FROM_ALL ${x}/${x}.cpp)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.cpp)
+
+ ADD_TESH(tesh-surf-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/${x} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/${x} ${x}.tesh)
endforeach()
add_executable (maxmin_bench EXCLUDE_FROM_ALL maxmin_bench/maxmin_bench.cpp)
set(tesh_files ${tesh_files} PARENT_SCOPE)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/maxmin_bench/maxmin_bench.cpp PARENT_SCOPE)
-foreach(x lmm_usage surf_usage surf_usage2)
- ADD_TESH(tesh-surf-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/${x} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/${x} ${x}.tesh)
-endforeach()
-
foreach(x small medium large)
ADD_TESH(tesh-surf-maxmin-${x} --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/surf/maxmin_bench --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/surf/maxmin_bench maxmin_bench_${x}.tesh)
endforeach()
+++ /dev/null
-/* Copyright (c) 2017-2018. The SimGrid Team. All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/s4u.hpp"
-#include "xbt/log.h"
-
-#include "simgrid/msg.h"
-#include "src/surf/network_cm02.hpp"
-#include <exception>
-#include <iostream>
-#include <random>
-#include <string>
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[wifi_usage] 1STA-1LINK-1NODE-CT");
-
-void setup_simulation();
-static void flowActor(std::vector<std::string> args);
-
-/**
- * Theory says:
- * - AP1 is the most constraint constraint
- * - When two STA communicates on the same AP we have the following AP constraint:
- * 1.05/r_STA1 * rho_STA1 <= 1
- * - Thus:
- * mu = 1 / [ 1/1 * 1.05/54Mbps ] = 51428571
- * simulation_time = 1000*8 / mu = 0.0001555556s
- * BTW: SimGrid should give you: 0.000156s due to computation side effects
- *
- */
-int main(int argc, char** argv)
-{
-
- // Build engine
- simgrid::s4u::Engine engine(&argc, argv);
- engine.load_platform(argv[1]);
- setup_simulation();
- engine.run();
- XBT_INFO("Simulation took %fs", simgrid::s4u::Engine::get_clock());
- return (0);
-}
-
-void setup_simulation()
-{
-
- std::vector<std::string> args, noArgs;
- args.push_back("NODE1");
- args.push_back("1000");
- simgrid::s4u::Actor::create("STA1", simgrid::s4u::Host::by_name("STA1"), flowActor, args);
- simgrid::s4u::Actor::create("NODE1", simgrid::s4u::Host::by_name("NODE1"), flowActor, noArgs);
- simgrid::kernel::resource::NetworkWifiLink* l =
- (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl();
- l->set_host_rate(simgrid::s4u::Host::by_name("STA1"), 0);
-}
-
-static void flowActor(std::vector<std::string> args)
-{
- std::string selfName = simgrid::s4u::this_actor::get_host()->get_name();
- simgrid::s4u::Mailbox* selfMailbox = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_host()->get_name());
-
- if (args.size() > 0) { // We should send
- simgrid::s4u::Mailbox* dstMailbox = simgrid::s4u::Mailbox::by_name(args.at(0));
-
- int dataSize = std::atoi(args.at(1).c_str());
- double comStartTime = simgrid::s4u::Engine::get_clock();
- dstMailbox->put(const_cast<char*>("message"), dataSize);
- double comEndTime = simgrid::s4u::Engine::get_clock();
- XBT_INFO("%s sent %d bytes to %s in %f seconds from %f to %f", selfName.c_str(), dataSize, args.at(0).c_str(),
- comEndTime - comStartTime, comStartTime, comEndTime);
- } else { // We should receive
- selfMailbox->get();
- }
-}
+++ /dev/null
-#!/usr/bin/env tesh
-
-$ ${bindir:=.}/1STA-1LINK-1NODE-CT ${platfdir}/wifi/1STA-1LINK-1NODE.xml
-> [STA1:STA1:(1) 0.000156] [simulator/INFO] STA1 sent 1000 bytes to NODE1 in 0.000156 seconds from 0.000000 to 0.000156
-> [0.000156] [simulator/INFO] Simulation took 0.000156s
+++ /dev/null
-/* Copyright (c) 2017-2018. The SimGrid Team. All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/s4u.hpp"
-#include "xbt/log.h"
-
-#include "simgrid/msg.h"
-#include "src/surf/network_cm02.hpp"
-#include <exception>
-#include <iostream>
-#include <random>
-#include <string>
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[wifi_usage] 1STA-1LINK-1NODE");
-
-void setup_simulation();
-static void flowActor(std::vector<std::string> args);
-
-/**
- * Theory says:
- * - AP1 is the most constraint constraint
- * - When two STA communicates on the same AP we have the following AP constraint:
- * 1/r_STA1 * rho_STA1 <= 1
- * - Thus:
- * mu = 1 / [ 1/1 * 1/54Mbps ] = 5.4e+07
- * simulation_time = 1000*8 / mu = 0.0001481481s
- *
- */
-int main(int argc, char** argv)
-{
-
- // Build engine
- simgrid::s4u::Engine engine(&argc, argv);
- engine.load_platform(argv[1]);
- setup_simulation();
- engine.run();
- XBT_INFO("Simulation took %fs", simgrid::s4u::Engine::get_clock());
-
- return (0);
-}
-
-void setup_simulation()
-{
-
- std::vector<std::string> args, noArgs;
- args.push_back("NODE1");
- args.push_back("1000");
- simgrid::s4u::Actor::create("STA1", simgrid::s4u::Host::by_name("STA1"), flowActor, args);
- simgrid::s4u::Actor::create("NODE1", simgrid::s4u::Host::by_name("NODE1"), flowActor, noArgs);
- simgrid::kernel::resource::NetworkWifiLink* l =
- (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl();
- l->set_host_rate(simgrid::s4u::Host::by_name("STA1"), 0);
-}
-
-static void flowActor(std::vector<std::string> args)
-{
- std::string selfName = simgrid::s4u::this_actor::get_host()->get_name();
- simgrid::s4u::Mailbox* selfMailbox = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_host()->get_name());
-
- if (args.size() > 0) { // We should send
- simgrid::s4u::Mailbox* dstMailbox = simgrid::s4u::Mailbox::by_name(args.at(0));
-
- int dataSize = std::atoi(args.at(1).c_str());
- double comStartTime = simgrid::s4u::Engine::get_clock();
- dstMailbox->put(const_cast<char*>("message"), dataSize);
- double comEndTime = simgrid::s4u::Engine::get_clock();
- XBT_INFO("%s sent %d bytes to %s in %f seconds from %f to %f", selfName.c_str(), dataSize, args.at(0).c_str(),
- comEndTime - comStartTime, comStartTime, comEndTime);
- } else { // We should receive
- selfMailbox->get();
- }
-}
+++ /dev/null
-#!/usr/bin/env tesh
-
-$ ${bindir:=.}/1STA-1LINK-1NODE-NOCT ${platfdir}/wifi/1STA-1LINK-1NODE.xml --cfg=network/crosstraffic:0
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/crosstraffic' to '0'
-> [STA1:STA1:(1) 0.000148] [simulator/INFO] STA1 sent 1000 bytes to NODE1 in 0.000148 seconds from 0.000000 to 0.000148
-> [0.000148] [simulator/INFO] Simulation took 0.000148s
+++ /dev/null
-/* Copyright (c) 2017-2018. The SimGrid Team. All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-#include "simgrid/s4u.hpp"
-#include "src/surf/network_cm02.hpp"
-#include "xbt/log.h"
-#include <exception>
-#include <iostream>
-#include <random>
-#include <string>
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[wifi_usage] 2STA-1NODE");
-
-void setup_simulation();
-static void flowActor(std::vector<std::string> args);
-
-/**
- * Theory says:
- * - When two STA communicates on the same AP we have the following AP constraint:
- * 1.05/r_STA1 * rho_STA1 + 1.05/r_STA2 * rho_2 <= 1
- * - Thus:
- * mu = 1 / [ 1/2 * 1.05/54Mbps + 1.05/54Mbps ] = 51428571
- * simulation_time = 1000*8 / [ mu / 2 ] = 0.0003111111s
- *
- */
-int main(int argc, char** argv)
-{
-
- // Build engine
- simgrid::s4u::Engine engine(&argc, argv);
- engine.load_platform(argv[1]);
- setup_simulation();
- engine.run();
- XBT_INFO("Simulation took %fs", simgrid::s4u::Engine::get_clock());
-
- return (0);
-}
-
-void setup_simulation()
-{
-
- std::vector<std::string> args, noArgs;
- args.push_back("STA2");
- args.push_back("1000");
- simgrid::s4u::Actor::create("STA1", simgrid::s4u::Host::by_name("STA1"), flowActor, args);
- simgrid::s4u::Actor::create("STA2", simgrid::s4u::Host::by_name("STA2"), flowActor, noArgs);
- simgrid::kernel::resource::NetworkWifiLink* l =
- (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl();
- l->set_host_rate(simgrid::s4u::Host::by_name("STA1"), 0);
- l->set_host_rate(simgrid::s4u::Host::by_name("STA2"), 0);
-}
-
-static void flowActor(std::vector<std::string> args)
-{
- std::string selfName = simgrid::s4u::this_actor::get_host()->get_name();
- simgrid::s4u::Mailbox* selfMailbox = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_host()->get_name());
-
- if (args.size() > 0) { // We should send
- simgrid::s4u::Mailbox* dstMailbox = simgrid::s4u::Mailbox::by_name(args.at(0));
-
- int dataSize = std::atoi(args.at(1).c_str());
- double comStartTime = simgrid::s4u::Engine::get_clock();
- dstMailbox->put(const_cast<char*>("message"), dataSize);
- double comEndTime = simgrid::s4u::Engine::get_clock();
- XBT_INFO("%s sent %d bytes to %s in %f seconds from %f to %f", selfName.c_str(), dataSize, args.at(0).c_str(),
- comEndTime - comStartTime, comStartTime, comEndTime);
- } else { // We should receive
- selfMailbox->get();
- }
-}
+++ /dev/null
-#!/usr/bin/env tesh
-
-$ ${bindir:=.}/2STA-CT ${platfdir}/wifi/2STA.xml
-> [STA1:STA1:(1) 0.000311] [simulator/INFO] STA1 sent 1000 bytes to STA2 in 0.000311 seconds from 0.000000 to 0.000311
-> [0.000311] [simulator/INFO] Simulation took 0.000311s
+++ /dev/null
-/* Copyright (c) 2017-2018. The SimGrid Team. All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-#include "simgrid/s4u.hpp"
-#include "src/surf/network_cm02.hpp"
-#include "xbt/log.h"
-#include <exception>
-#include <iostream>
-#include <random>
-#include <string>
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[wifi_usage] 2STA-1NODE");
-
-void setup_simulation();
-static void flowActor(std::vector<std::string> args);
-
-/**
- * Theory says:
- * - When two STA communicates on the same AP we have the following AP constraint:
- * 1/r_STA1 * rho_STA1 + 1/r_STA2 * rho_2 <= 1
- * - Thus:
- * mu = 1 / [ 1/2 * 1/54Mbps + 1/54Mbps ] = 5.4e+07
- * simulation_time = 1000*8 / [ mu / 2 ] = 0.0002962963s
- *
- */
-int main(int argc, char** argv)
-{
-
- // Build engine
- simgrid::s4u::Engine engine(&argc, argv);
- engine.load_platform(argv[1]);
- setup_simulation();
- engine.run();
- XBT_INFO("Simulation took %fs", simgrid::s4u::Engine::get_clock());
-
- return (0);
-}
-
-void setup_simulation()
-{
-
- std::vector<std::string> args, noArgs;
- args.push_back("STA2");
- args.push_back("1000");
- simgrid::s4u::Actor::create("STA1", simgrid::s4u::Host::by_name("STA1"), flowActor, args);
- simgrid::s4u::Actor::create("STA2", simgrid::s4u::Host::by_name("STA2"), flowActor, noArgs);
- simgrid::kernel::resource::NetworkWifiLink* l =
- (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl();
- l->set_host_rate(simgrid::s4u::Host::by_name("STA1"), 0);
- l->set_host_rate(simgrid::s4u::Host::by_name("STA2"), 0);
-}
-
-static void flowActor(std::vector<std::string> args)
-{
- std::string selfName = simgrid::s4u::this_actor::get_host()->get_name();
- simgrid::s4u::Mailbox* selfMailbox = simgrid::s4u::Mailbox::by_name(simgrid::s4u::this_actor::get_host()->get_name());
-
- if (args.size() > 0) { // We should send
- simgrid::s4u::Mailbox* dstMailbox = simgrid::s4u::Mailbox::by_name(args.at(0));
-
- int dataSize = std::atoi(args.at(1).c_str());
- double comStartTime = simgrid::s4u::Engine::get_clock();
- dstMailbox->put(const_cast<char*>("message"), dataSize);
- double comEndTime = simgrid::s4u::Engine::get_clock();
- XBT_INFO("%s sent %d bytes to %s in %f seconds from %f to %f", selfName.c_str(), dataSize, args.at(0).c_str(),
- comEndTime - comStartTime, comStartTime, comEndTime);
- } else { // We should receive
- selfMailbox->get();
- }
-}
+++ /dev/null
-#!/usr/bin/env tesh
-
-$ ${bindir:=.}/2STA-NOCT ${platfdir}/wifi/2STA.xml --cfg=network/crosstraffic:0
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/crosstraffic' to '0'
-> [STA1:STA1:(1) 0.000296] [simulator/INFO] STA1 sent 1000 bytes to STA2 in 0.000296 seconds from 0.000000 to 0.000296
-> [0.000296] [simulator/INFO] Simulation took 0.000296s
--- /dev/null
+/* Copyright (c) 2019. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include "xbt/log.h"
+
+#include "simgrid/msg.h"
+#include "src/surf/network_cm02.hpp"
+#include <exception>
+#include <iostream>
+#include <random>
+#include <string>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(simulator, "[usage] wifi_usage <platform-file>");
+
+void run_ping_test(const char* src, const char* dest, int data_size);
+
+/* We need a separate actor so that it can sleep after each test */
+static void main_dispatcher()
+{
+ bool crosstraffic = simgrid::kernel::resource::NetworkModel::cfg_crosstraffic;
+
+ XBT_INFO("TEST: Send from a station to a node on the wired network after the AP.");
+ XBT_INFO("----------------------------------------------------------------------");
+ XBT_INFO("Since AP1 is the limiting link, we have the following constraint for AP1:");
+ if (crosstraffic) {
+ XBT_INFO("1.05/r_STA1 * rho_STA1 <= 1 (1.05 instead of 1 because of cross-traffic)");
+ XBT_INFO("We should thus have:");
+ XBT_INFO(" mu = 1 / [ 1/1 * 1.05/54Mbps ] = 51428571");
+ XBT_INFO(" simulation_time = 1000*8 / mu = 0.0001555556s (rounded to 0.000156s in SimGrid)");
+ } else {
+ XBT_INFO("1/r_STA1 * rho_STA1 <= 1 (there is no cross-traffic)");
+ XBT_INFO("We should thus have:");
+ XBT_INFO(" mu = 1 / [ 1/1 * 1/54Mbps ] = 5.4e+07");
+ XBT_INFO(" simulation_time = 1000*8 / mu = 0.0001481481s");
+ }
+ run_ping_test("Station 1", "NODE1", 1000);
+
+ XBT_INFO("TEST: Send from a station to another station on the same AP.");
+ XBT_INFO("------------------------------------------------------------");
+ XBT_INFO("We have the following constraint for AP1:");
+ if (crosstraffic) {
+ XBT_INFO("1.05/r_STA1 * rho_STA1 + 1.05/r_STA2 * rho_2 <= 1 (1.05 instead of 1 because of cross-traffic)");
+ XBT_INFO("We should thus have:");
+ XBT_INFO(" mu = 1 / [ 1/2 * 1.05/54Mbps + 1.05/54Mbps ] = 51428571");
+ XBT_INFO(" simulation_time = 1000*8 / [ mu / 2 ] = 0.0003111111s");
+ } else {
+ XBT_INFO("1/r_STA1 * rho_STA1 + 1/r_STA2 * rho_2 <= 1 (there is no cross-traffic)");
+ XBT_INFO(" mu = 1 / [ 1/2 * 1/54Mbps + 1/54Mbps ] = 5.4e+07");
+ XBT_INFO(" simulation_time = 1000*8 / [ mu / 2 ] = 0.0002962963s");
+ }
+ run_ping_test("Station 1", "Station 2", 1000);
+}
+int main(int argc, char** argv)
+{
+ simgrid::s4u::Engine engine(&argc, argv);
+ engine.load_platform(argv[1]);
+ simgrid::s4u::Actor::create("dispatcher", simgrid::s4u::Host::by_name("NODE1"), main_dispatcher);
+ engine.run();
+
+ return 0;
+}
+
+void run_ping_test(const char* src, const char* dest, int data_size)
+{
+ auto* mailbox = simgrid::s4u::Mailbox::by_name("Test");
+
+ simgrid::s4u::Actor::create("sender", simgrid::s4u::Host::by_name(src), [mailbox, dest, data_size]() {
+ double start_time = simgrid::s4u::Engine::get_clock();
+ mailbox->put(const_cast<char*>("message"), data_size);
+ double end_time = simgrid::s4u::Engine::get_clock();
+ XBT_INFO("Actual result: Sending %d bytes from '%s' to '%s' takes %f seconds.", data_size,
+ simgrid::s4u::this_actor::get_host()->get_cname(), dest, end_time - start_time);
+ });
+ simgrid::s4u::Actor::create("receiver", simgrid::s4u::Host::by_name(dest), [mailbox]() { mailbox->get(); });
+ auto* l = (simgrid::kernel::resource::NetworkWifiLink*)simgrid::s4u::Link::by_name("AP1")->get_impl();
+ l->set_host_rate(simgrid::s4u::Host::by_name(src), 0);
+ simgrid::s4u::this_actor::sleep_for(10);
+ XBT_INFO("\n");
+}
--- /dev/null
+#!/usr/bin/env tesh
+
+p Test WITH crosstraffic
+$ ${bindir:=.}/wifi_usage ${platfdir}/wifi.xml --log=root.fmt=%m%n
+> TEST: Send from a station to a node on the wired network after the AP.
+> ----------------------------------------------------------------------
+> Since AP1 is the limiting link, we have the following constraint for AP1:
+> 1.05/r_STA1 * rho_STA1 <= 1 (1.05 instead of 1 because of cross-traffic)
+> We should thus have:
+> mu = 1 / [ 1/1 * 1.05/54Mbps ] = 51428571
+> simulation_time = 1000*8 / mu = 0.0001555556s (rounded to 0.000156s in SimGrid)
+> Actual result: Sending 1000 bytes from 'Station 1' to 'NODE1' takes 0.000156 seconds.
+>
+>
+> TEST: Send from a station to another station on the same AP.
+> ------------------------------------------------------------
+> We have the following constraint for AP1:
+> 1.05/r_STA1 * rho_STA1 + 1.05/r_STA2 * rho_2 <= 1 (1.05 instead of 1 because of cross-traffic)
+> We should thus have:
+> mu = 1 / [ 1/2 * 1.05/54Mbps + 1.05/54Mbps ] = 51428571
+> simulation_time = 1000*8 / [ mu / 2 ] = 0.0003111111s
+> Actual result: Sending 1000 bytes from 'Station 1' to 'Station 2' takes 0.000311 seconds.
+
+p Test WITHOUT crosstraffic
+$ ${bindir:=.}/wifi_usage ${platfdir}/wifi.xml --log=root.fmt=%m%n --cfg=network/crosstraffic:0
+> Configuration change: Set 'network/crosstraffic' to '0'
+> TEST: Send from a station to a node on the wired network after the AP.
+> ----------------------------------------------------------------------
+> Since AP1 is the limiting link, we have the following constraint for AP1:
+> 1/r_STA1 * rho_STA1 <= 1 (there is no cross-traffic)
+> We should thus have:
+> mu = 1 / [ 1/1 * 1/54Mbps ] = 5.4e+07
+> simulation_time = 1000*8 / mu = 0.0001481481s
+> Actual result: Sending 1000 bytes from 'Station 1' to 'NODE1' takes 0.000148 seconds.
+>
+>
+> TEST: Send from a station to another station on the same AP.
+> ------------------------------------------------------------
+> We have the following constraint for AP1:
+> 1/r_STA1 * rho_STA1 + 1/r_STA2 * rho_2 <= 1 (there is no cross-traffic)
+> mu = 1 / [ 1/2 * 1/54Mbps + 1/54Mbps ] = 5.4e+07
+> simulation_time = 1000*8 / [ mu / 2 ] = 0.0002962963s
+> Actual result: Sending 1000 bytes from 'Station 1' to 'Station 2' takes 0.000296 seconds.
src/include/xbt/parmap.hpp
src/include/xbt/mmalloc.h
src/include/catch.hpp
+ src/include/xxhash.hpp
src/mc/mc_mmu.hpp
src/mc/mc_record.hpp
src/msg/msg_private.hpp
examples/platforms/hosts_with_disks.xml
examples/platforms/meta_cluster.xml
examples/platforms/multicore_machine.xml
+ examples/platforms/ns3-big-cluster.xml
examples/platforms/onelink.xml
examples/platforms/prop.xml
examples/platforms/routing_cluster.xml
examples/platforms/two_hosts_platform_with_availability_included.xml
examples/platforms/two_peers.xml
examples/platforms/vivaldi.xml
+ examples/platforms/wifi.xml
)
set(generated_src_files