examples/msg/io-remote/io-remote
examples/msg/io-storage/io-storage
examples/msg/platform-failures/platform-failures
+examples/msg/plugin-hostload/plugin-hostload
examples/msg/process-create/process-create
+examples/msg/process-daemon/process-daemon
examples/msg/process-kill/process-kill
examples/msg/process-join/process-join
examples/msg/process-migration/process-migration
examples/s4u/app-masterworker/s4u_app-masterworker
examples/s4u/app-token-ring/s4u_app-token-ring
examples/s4u/actions-comm/s4u_actions-comm
+examples/s4u/actions-storage/s4u_actions-storage
+examples/s4u/actor-create/s4u_actor-create
+examples/s4u/actor-kill/s4u_actor-kill
+examples/s4u/actor-migration/s4u_actor-migration
+examples/s4u/actor-suspend/s4u_actor-suspend
examples/s4u/basic/s4u_basic
examples/s4u/basic/s4u_basic_deployment
examples/s4u/basic/s4u_basic_function
teshsuite/smpi/mpich3-test/pt2pt/dtype_send
teshsuite/smpi/mpich3-test/pt2pt/eagerdt
teshsuite/smpi/mpich3-test/pt2pt/greq1
+teshsuite/smpi/mpich3-test/pt2pt/huge_anysrc
+teshsuite/smpi/mpich3-test/pt2pt/huge_underflow
teshsuite/smpi/mpich3-test/pt2pt/icsend
teshsuite/smpi/mpich3-test/pt2pt/inactivereq
teshsuite/smpi/mpich3-test/pt2pt/isendirecv
teshsuite/smpi/mpich3-test/pt2pt/sendself
teshsuite/smpi/mpich3-test/pt2pt/waitany-null
teshsuite/smpi/mpich3-test/pt2pt/waittestnull
+teshsuite/smpi/mpich3-test/rma/acc-loc
+teshsuite/smpi/mpich3-test/rma/adlb_mimic1
+teshsuite/smpi/mpich3-test/rma/attrorderwin
+teshsuite/smpi/mpich3-test/rma/badrma
+teshsuite/smpi/mpich3-test/rma/baseattrwin
+teshsuite/smpi/mpich3-test/rma/compare_and_swap
+teshsuite/smpi/mpich3-test/rma/contention_put
+teshsuite/smpi/mpich3-test/rma/contention_putget
+teshsuite/smpi/mpich3-test/rma/contig_displ
+teshsuite/smpi/mpich3-test/rma/fetch_and_op
+teshsuite/smpi/mpich3-test/rma/fkeyvalwin
+teshsuite/smpi/mpich3-test/rma/flush
+teshsuite/smpi/mpich3-test/rma/get-struct
+teshsuite/smpi/mpich3-test/rma/get_acc_local
+teshsuite/smpi/mpich3-test/rma/get_accumulate
+teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_all
+teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_excl
+teshsuite/smpi/mpich3-test/rma/lock_contention_dt
+teshsuite/smpi/mpich3-test/rma/lock_dt
+teshsuite/smpi/mpich3-test/rma/lock_dt_flush
+teshsuite/smpi/mpich3-test/rma/lock_dt_flushlocal
+teshsuite/smpi/mpich3-test/rma/lockall_dt
+teshsuite/smpi/mpich3-test/rma/lockall_dt_flush
+teshsuite/smpi/mpich3-test/rma/lockall_dt_flushall
+teshsuite/smpi/mpich3-test/rma/lockall_dt_flushlocal
+teshsuite/smpi/mpich3-test/rma/lockall_dt_flushlocalall
+teshsuite/smpi/mpich3-test/rma/lockcontention
+teshsuite/smpi/mpich3-test/rma/lockcontention2
+teshsuite/smpi/mpich3-test/rma/locknull
+teshsuite/smpi/mpich3-test/rma/lockopts
+teshsuite/smpi/mpich3-test/rma/manyrma2
+teshsuite/smpi/mpich3-test/rma/pscw_ordering
+teshsuite/smpi/mpich3-test/rma/put_base
+teshsuite/smpi/mpich3-test/rma/put_bottom
+teshsuite/smpi/mpich3-test/rma/racc_local_comp
+teshsuite/smpi/mpich3-test/rma/req_example
+teshsuite/smpi/mpich3-test/rma/rma-contig
+teshsuite/smpi/mpich3-test/rma/rmanull
+teshsuite/smpi/mpich3-test/rma/rmazero
+teshsuite/smpi/mpich3-test/rma/rput_local_comp
+teshsuite/smpi/mpich3-test/rma/selfrma
+teshsuite/smpi/mpich3-test/rma/strided_acc_indexed
+teshsuite/smpi/mpich3-test/rma/strided_acc_onelock
+teshsuite/smpi/mpich3-test/rma/strided_get_indexed
+teshsuite/smpi/mpich3-test/rma/strided_getacc_indexed
+teshsuite/smpi/mpich3-test/rma/strided_putget_indexed
+teshsuite/smpi/mpich3-test/rma/test3_am
+teshsuite/smpi/mpich3-test/rma/test4
+teshsuite/smpi/mpich3-test/rma/test4_am
+teshsuite/smpi/mpich3-test/rma/transpose3_shm
+teshsuite/smpi/mpich3-test/rma/transpose4
+teshsuite/smpi/mpich3-test/rma/transpose5
+teshsuite/smpi/mpich3-test/rma/win_dynamic_acc
+teshsuite/smpi/mpich3-test/rma/win_info
+teshsuite/smpi/mpich3-test/rma/winname
teshsuite/smpi/mpich3-test/rma/accfence1
teshsuite/smpi/mpich3-test/rma/accfence2
teshsuite/smpi/mpich3-test/rma/accfence2_am
teshsuite/smpi/coll-reduce-scatter/coll-reduce-scatter
teshsuite/smpi/coll-scatter/coll-scatter
teshsuite/smpi/macro-shared/macro-shared
+teshsuite/smpi/macro-partial-shared/macro-partial-shared
+teshsuite/smpi/macro-partial-shared-communication/macro-partial-shared-communication
teshsuite/smpi/type-struct/type-struct
teshsuite/smpi/type-vector/type-vector
+teshsuite/s4u/actor/actor
+teshsuite/s4u/concurrent_rw/concurrent_rw
+teshsuite/s4u/host_on_off_wait/host_on_off_wait
+teshsuite/s4u/listen_async/listen_async
+teshsuite/s4u/pid/pid
+teshsuite/s4u/storage_client_server/storage_client_server
teshsuite/surf/lmm_usage/lmm_usage
teshsuite/surf/maxmin_bench/maxmin_bench
teshsuite/surf/surf_usage/surf_usage
name: "simgrid/simgrid"
description: "Build submitted via Travis CI"
notification_email: martin.quinson@ens-rennes.fr
- build_command_prepend: "cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=OFF -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=OFF -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=OFF ."
+ build_command_prepend: "cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=OFF -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=ON -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=OFF ."
build_command: "make VERBOSE=1"
branch_pattern: coverity
script:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update ; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install python3; fi
- - cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=ON -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=OFF -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=ON .
+ - cmake -Denable_documentation=OFF -Denable_coverage=ON -Denable_java=ON -Denable_model-checking=OFF -Denable_lua=OFF -Denable_compile_optimizations=ON -Denable_smpi=ON -Denable_smpi_MPICH3_testsuite=OFF -Denable_compile_warnings=ON .
# run make in the sonar wrapper && run the tests before sonar to get coverage info
- ./tools/internal/travis-sonarqube.sh make VERBOSE=1
# if sonar was not run (and if the build succeeded), run the tests manually
--- /dev/null
+#! /bin/sh
+#
+# This little script rebuilds and runs the SimGrid archive in parallel, extracting a log
+# This is almost an internal script, but others may find this useful
+#
+# Copyright (C) 2017 The SimGrid Team. Licence: LGPL or WTFPL, as you want.
+
+(
+ (nice make -j4 || make) && nice ctest -j4 --output-on-failure ; date
+) 2>&1 | tee BuildSimGrid.sh.log
+exit 0
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
set(SIMGRID_VERSION_MAJOR "3")
-set(SIMGRID_VERSION_MINOR "15")
+set(SIMGRID_VERSION_MINOR "16")
set(SIMGRID_VERSION_PATCH "0")
set(SIMGRID_VERSION_EXTRA "-DEVEL") # Extra words to add to version string (e.g. -rc1)
-set(SIMGRID_VERSION_DATE "2016") # Year for copyright information
+set(SIMGRID_VERSION_DATE "2017") # Year for copyright information
if(${SIMGRID_VERSION_PATCH} EQUAL "0")
set(release_version "${SIMGRID_VERSION_MAJOR}.${SIMGRID_VERSION_MINOR}")
set(HAVE_THREAD_LOCAL_STORAGE 0)
endif()
+CHECK_INCLUDE_FILE("sys/sendfile.h" HAVE_SENDFILE_H)
+CHECK_FUNCTION_EXISTS(sendfile HAVE_SENDFILE)
+if(HAVE_SENDFILE_H AND HAVE_SENDFILE)
+ set(HAVE_SENDFILE 1)
+else()
+ set(HAVE_SENDFILE 0)
+endif()
+
if(enable_model-checking AND NOT "${CMAKE_SYSTEM}" MATCHES "Linux|FreeBSD")
message(WARNING "Support for model-checking has not been enabled on ${CMAKE_SYSTEM}: disabling it")
set(enable_model-checking FALSE)
if(enable_model-checking)
find_package(Libdw REQUIRED)
find_package(Libevent REQUIRED)
- include_directories(${LIBDW_INCLUDE_DIRS} ${LIBEVENT_INCLUDE_DIRS})
+ include_directories(${LIBDW_INCLUDE_DIR} ${LIBEVENT_INCLUDE_DIR})
set(SIMGRID_DEP "${SIMGRID_DEP} ${LIBEVENT_LIBRARIES} ${LIBDW_LIBRARIES}")
set(HAVE_MC 1)
if("${CMAKE_SYSTEM}" MATCHES "FreeBSD" AND enable_java)
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}")
endif()
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}\"")
+set(SMPIMAIN smpimain)
configure_file(${CMAKE_HOME_DIRECTORY}/include/smpi/mpif.h.in ${CMAKE_BINARY_DIR}/include/smpi/mpif.h @ONLY)
foreach(script cc cxx ff f90 run)
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:${NS3_LIBRARY_PATH}")
endif()
set(CMAKE_SMPI_COMMAND "${CMAKE_SMPI_COMMAND}:\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}\"")
+set(SMPIMAIN ${CMAKE_BINARY_DIR}/bin/smpimain)
foreach(script cc cxx ff f90 run)
configure_file(${CMAKE_HOME_DIRECTORY}/src/smpi/smpi${script}.in ${CMAKE_BINARY_DIR}/smpi_script/bin/smpi${script} @ONLY)
-SimGrid (3.15) UNRELEASED; urgency=low
+SimGrid (3.16) UNRELEASED
- The Ever Green Release.
+SimDag
+ - New and Backwards Compatibility break:
+ SD_simulate_with_update (double how_long, xbt_dynar_t changed_tasks_dynar)
+ When one wants to get the list of tasks whose states have changed during a
+ simulation round, s/he has to allocate and free a dynar and use it as argument
+ to this function. The former SD_simulate (double how_long) now returns void.
+
+ SMPI
+ - New algorithm to privatize globals: dlopen, with dynamic loading tricks
+ - New option: smpi/keep-temps to not cleanup temp files
+
+ XBT/Replay:
+ - New function xbt_replay_action_get():
+ Retrieve the function previously associated to an event type.
+
+ -- Release target: June 21 2017 -- Da SimGrid team <simgrid-devel@lists.gforge.inria.fr>
+
+SimGrid (3.15) stable; urgency=low
+
+ The Spring Release: continuous integration servers become green
We fixed even the transient bugs on all target architectures:
Linux (CentOS, Debian, Ubuntu, Fedora), Mac OSX (Mavericks, El Capitan)
Windows, FreeBSD, NetBSD.
- New: MSG_process_ref/unref(). Fiddle with the process refcounting.
- Renamed MSG_energy_plugin_init() -> MSG_host_energy_plugin_init()
to make room for the upcoming network energy plugin.
- - Drop MSG_host_get_current_power_peak: dupplicates MSG_host_get_speed
+ - Drop MSG_host_get_current_power_peak: duplicates MSG_host_get_speed
Java
- Ensure that an actor can kill itself with Process::exit()
- Kill the obscure NativeException. Nobody wants to survive the issues
it denotes, so use JniException that is a RuntimeException (not to
- be catched explicitely).
+ be caught explicitly).
+ - Partial bug fix in initialization. SimGrid flags on command line were
+ consumed at C level but stayed in the original Java String[] args.
+ This could mess users' args[i] if SG flags were not put at the end of
+ the command line.
+ The SimGrid flags are now removed from the Java arguments. However,
+ the number of arguments REMAINS UNCHANGED. It is then UNSAFE to test
+ if args.length is greater than the number of YOUR OWN ARGUMENTS.
+ It might be if you have --log or --cfg flags in the command line.
+ - Fix numerous memleaks all around the place. In particular, around VMs.
S4U
- New callbacks:
event from the availability_file changes the avail speed.
- Links are now usable from s4u
- New: Engine::hostList() and Engine::hostCount(). Still clumsy.
- - Drop Host::getPstateSpeedCurrent() which dupplicates Host::speed()
+ - New: Actor::suspend(), Actor::resume(), and Actor::migrate(new_host)
+ - New examples: The conversion of MSG examples to S4U has begun
+ - Actors: create, kill, migration, and suspend
+ - Applications: master-worker and token-ring
+ - Action replay: communications and storage
+ - Drop Host::getPstateSpeedCurrent() which duplicates Host::speed()
SimDag
- Backwards Compatibility breaks
Macros ensure the backwards compatibility, but you should fix your code
SMPI
- - Major C++ rewrite ongoing (SMPI used to be C compiled in C++). This can break codes that were using internals of SMPI (from private.h instead of the public smpi.h).
- - Bump our claim of support from MPI 1.1 to MPI 2.2. We don't support 100% of
- it, but it should be enough. Contact us if not.
+ - Major C++ rewrite ongoing (SMPI used to be C compiled in C++).
+ This can break codes using SMPI internals (from private.h instead of the public smpi.h).
+ - Bump our claim of support from MPI 1.1 to MPI 2.2.
+ We don't support 100% of it, but it should be enough. Contact us if not.
+ - MPI_Win_lock/unlock, MPI_Get_accumulate support added (as for all RMA, implementation is
+ naive and probably inaccurate)
- New algorithm for SMPI_SHARED_MALLOC: global, where all blocks are
- mapped onto a unique file.
- - Bugfix : smpirun was sometimes misusing hostfile when wrongly terminated
- - Fortran : cleanups, fixes, support of user-added operations
- - RMA : MPI_Accumulate are applied in correct order, and
- MPI_Win_complete or MPI_Win_post shouldn't miss messages anymore.
+ mapped onto a unique small file using some system magic.
+ - Bugfix: smpirun was sometimes misusing hostfile when wrongly terminated
+ - Fortran: cleanups, fixes, support of user-added operations
+ - RMA: MPI_Accumulate are applied in correct order.
+ - RMA: MPI_Win_{complete/post} shouldn't miss messages anymore.
- Better support for MPI_IN_PLACE
- Support for MPI_Win attrs and keyvals.
- Support MPI_Comm_group_create, MPI_Type_size_x
- A <cluster> can now be created with different speed values to represent pstates.
see examples/platforms/energy_cluster.xml for an example.
- -- target_date=March 20 2017 -- Da SimGrid team <simgrid-devel@lists.gforge.inria.fr>
+ -- Wed Mar 22 17:50:21 CET 2017 -- Da SimGrid team <simgrid-devel@lists.gforge.inria.fr>
SimGrid (3.14.159) stable; urgency=low
+ _ _____ _ __
+__ _____ _ __ ___(_) ___ _ __ |___ / / |/ /_
+\ \ / / _ \ '__/ __| |/ _ \| '_ \ |_ \ | | '_ \
+ \ V / __/ | \__ \ | (_) | | | | ___) || | (_) |
+ \_/ \___|_| |___/_|\___/|_| |_| |____(_)_|\___/
+ (not released yet)
+
_ _____ _ ____
__ _____ _ __ ___(_) ___ _ __ |___ / / | ___|
\ \ / / _ \ '__/ __| |/ _ \| '_ \ |_ \ | |___ \
\ V / __/ | \__ \ | (_) | | | | ___) || |___) |
\_/ \___|_| |___/_|\___/|_| |_| |____(_)_|____/
- (not released yet)
+ Mar 22 2017
-The Ever Green Release.
+The Spring Release: continuous integration servers become green.
- * Continuous integration tasks are green (no more transient errors)
- * S4U pushed further, integrating more parts of SimDag.
- * Start converting the SMPI internals to C++ too. TBC.
+ * S4U: progress, integrating more parts of SimDag; New examples.
+ * SMPI: Support MPI 2.2; Convert internals to C++ (TBC).
+ * Java: Massive memleaks and performance issues fixed.
* Plus the usual bug fixes, cleanups and documentation improvements
_ _____ _ _ _ _ ____ ___
__ _____ _ __ ___(_) ___ _ __ |___ / / | || | / | ___|/ _ \
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE =
+EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
-EXCLUDE_SYMLINKS = NO
+EXCLUDE_SYMLINKS = YES
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C and C++ comments will always remain visible.
-STRIP_CODE_COMMENTS = YES
+STRIP_CODE_COMMENTS = NO
# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
# link to the source code.
# Otherwise they will link to the documentation.
-REFERENCES_LINK_SOURCE = YES
+REFERENCES_LINK_SOURCE = NO
# If the USE_HTAGS tag is set to YES then the references to source code
# will point to the HTML generated by the htags(1) tool instead of doxygen
/*! @page deployment Deploy the simulation
-When you want to simulate the behavior of your code with SimGrid, you need
-to tell SimGrid exactly what code (that you wrote) is supposed to be run by which host - so you need to assign
-processes/functions to hosts. The hosts in question here are the hosts of your platform model; see Section @ref platform for details on how to set one up.
-
-This assignment of the form @c code -> @c host is what the deployment file is all about, which will
-be discussed briefly here.
-
-@note
- You can bypass the deployment file by hardcoding it in your user code, at least when you're using
- MSG.
-
-The deployment file looks just like a @ref platform "platform" file, except that in
-this case, only two different tags are used: @c process and @c argument, whereas
-the latter is just used to supply additional configuration options to the process; the
-order in which the @c argument tags are given is important and depends on the application.
-
-### The process tag ###
-
-#### Attribute list ####
-
-As already written above, the @c process tag is the tag that defines which host
-executes which function (from your application). Hence, the @c host and @c function
-attributes are mandatory; however, there are some optional attributes to the process tag. Here is a list of all attributes of this tag:
-
-| Attribute name | Mandatory | Values | Description |
-| --------------- | --------- | ---------------------- | ----------- |
-| host | yes | String | Describes which host will be used to run this process. The host must be defined in the platform file! |
-| function | yes | String | Name of a function that will be executed on this host; this function is written in userland code, for instance, C code. Valid values are functions that were registered by MSG_function_register() |
-| start_time | no | int (Default: -1.0) | The simulated time when this function will start to be computed. |
-| kill_time | no | int (Default: -1.0) | The simulated time when this function will end to be computed. By default, it stops only when it's done. |
-| on_failure | no | DIE\|RESTART (Default: "DIE") | What should be done when the process fails. |
-
-#### Examples ####
-
-Almost any @ref msg_examples include a deployment file.
-
-### The argument tag ###
-
-This tag must always be contained by a @c process tag - it doesn't make sense
-without it.
-
-The way this works is that the order of arguments must be pre-defined <i>by the user</i>:
-It is totally up to you what <i>your</i> code expects as arguments and in which
-order. The arguments will be passed to your code (that is: to the function
-executed by this process) in the order you declare them.
-
-#### Attribute list ####
-
-| Attribute name | Mandatory | Values | Description |
-| --------------- | --------- | ---------------------- | ----------- |
-| value | yes | String | Contains the value for this parameter |
+@tableofcontents
+
+Once you've specified your @ref platform "virtual platform" and the
+@ref application "application" you want to study, you must describe
+the mapping of the application onto the platform. This page says how
+to do that if you go for online simulation (that is, the study of a
+program), you must say which code starts on which host, with which
+parameters. You can also go for offline simulation, i.e. the study of
+a trace captured from a past applicative run, as briefly explained
+@ref XBT_replay "here".
+
+There are two ways to specify the mapping of your program onto virtual
+hosts: either directly from your program (with @ref MSG_process_create
+or as in @ref s4u_ex_basics "this S4U example"), or using an external
+XML file. You should really logically separate your application from
+the deployment, as it will ease your experimental campaign afterward.
+How exactly you organize your work remains up to you.
+
+@section deploy_s4u Deployment with S4U
+
+The following example shows the several ways of doing so in the S4U
+interface: @ref examples/s4u/actor-create/s4u_actor-create.cpp.
+Associated XML file: @ref examples/s4u/actor-create/s4u_actor-create_d.xml
+
+@section deploy_msg Deployment with MSG
+
+If you're stuck with the MSG interface, then you should simply use one
+of the following functions to start new actors onto your virtual
+hosts: @ref MSG_process_create, @ref MSG_process_create_with_arguments
+or @ref MSG_process_create_with_environment. These functions are used
+in many of the provided examples, just grep for them.
+
+@section deploy_xml Deployment with XML
+
+Deploying processes from XML is easy. This section presents a complete
+example and the reference guide of the involved tags.
+
+The deployment file looks just like a @ref platform "platform" file,
+with only 3 tags used:
+
+ - @c <process> starts a new actor on a given host;
+ - @c <argument> passes a given argument in the argv of an actor
+ (the list of arguments is ordered);
+ - @c <prop> adds a property to the actor.
+
+@subsection deploy_xml_ex Examples
+
+To make them easy to find, almost all deployment files in the archive
+are named @c ***_d.xml.
+
+@verbatim
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
+<platform version="4">
+ <!-- Alice, which runs on the machine named 'host1', does not take any parameter -->
+ <process host="host1" function="alice" />
+
+ <!-- Bob, which runs on 'host2', has 2 parameters "3" and "3000" in its argv -->
+ <process host="host2" function="bob">
+ <argument value="3"/>
+ <argument value="3000"/>
+ </process>
+
+ <!-- Carole runs on 'host3', has 1 parameter "42" in its argv and one property -->
+ <!-- See MSG_process_get_property_value() to retrieve this property -->
+ <process host="host3" function="carole">
+ <argument value="42"/>
+ <prop id="SomeProp" value="SomeValue"/>
+ </process>
+</platform>
+@endverbatim
+
+@subsection deploy_xml_process The process tag
+
+<process> starts a new actor on a given host. It specifies which
+function (from your application) gets executed on the host. Hence, the
+@c host and @c function attributes are mandatory, but this tag accepts
+some optional attributes too.
+
+| Attribute name | Mandatory | Values | Description |
+| --------------- | --------- | ------------ | ----------- |
+| host            | yes       | String       | This must match the name of a host defined in the platform file. |
+| function | yes | String | Name of the function (from your own code) that will be executed. See @ref deploy_xml_functions. |
+| start_time | no | int | The simulated time when this actor will be started (Default: ASAP). |
+| kill_time | no | int | The simulated time when this actor will be forcefully stopped (Default: never). |
+| on_failure | no | DIE\|RESTART | What should be done when the process fails (Default: die). |
+
+@subsection deploy_xml_argument The argument tag
+
+This tag (which must be enclosed in a @c <process> tag) adds a
+new string to the parameter list received by your actor (either its @c
+argv array in MSG or its @c args vector in S4U). Naturally, the
+semantics of these parameters completely depend on your program.
+
+| Attribute name | Mandatory | Values | Description |
+| --------------- | --------- | ---------------------- | ----------- |
+| value | yes | String | Value of this parameter |
+
+@subsection deploy_xml_prop The prop tag
+
+This tag (which must be enclosed in a @c <process> tag) adds a
+new property to your actor.
+
+Properties are (name, value) pairs that your program can retrieve at
+runtime, for example with MSG_process_get_property_value() (as shown
+in the example above).
+
+| Attribute name | Mandatory | Values | Description |
+| --------------- | --------- | ---------------------- | ----------- |
+| id | yes | String | Name of the defined property |
+| value | yes | String | Value of this property |
+
+@subsection deploy_xml_functions Declaring startable functions
+
+You need to connect your code to the names that you use in the XML
+deployment file. Depending on the interface you use, this is done with
+MSG_process_create() or simgrid::s4u::Engine::registerFunction().
+There is nothing to do in your **Java code** since SimGrid uses
+the Java introspection abilities to retrieve the classes from their
+names. In your XML file, you must then use the full class name
+(including the package name).
*/
- The studied **Application**. This can be either a distributed
algorithm described in our simple APIs, or a full featured real
- parallel application using the MPI interface (or other).
+ parallel application using for example the MPI interface
+ @ref application "(more info)".
- The **Virtual Platform**. This is a description of a given
distributed system (machines, links, disks, clusters, etc). Most of
Platform with a Dynamic Scenario where for example the links are
slowed down (because of external usage), the machines fail. You
have even support to specify the applicative workload that you want
- to feed to your application.
+ to feed to your application @ref platform "(more info)".
- - The application's **Deployment Description**. In SimGrid terminology,
- the application is an inert set of source files and binaries. To
- make it run, you have to describe how your application should be
- deployed on the virtual platform. Specify which process is located
- on which host, along with its parameters.
+ - The application's **Deployment Description**. In SimGrid
+ terminology, the application is an inert set of source files and
+ binaries. To make it run, you have to describe how your application
+ should be deployed on the virtual platform. You need to specify
+ which process is mapped on which host, along with their parameters
+ @ref deployment "(more info)".
- The **Platform Models**. They describe how the virtual platform
reacts to the actions of the application. For example, they compute
the time taken by a given communication on the virtual platform.
These models are already included in SimGrid, and you only need to
- pick one and maybe tweak its configuration to get your results.
+ pick one and maybe tweak its configuration to get your results
+ @ref models "(more info)".
These components are put together to run a **simulation**, that is an
experiment or a probe. The result of one or many simulation provides
probe will not change the simulated state). It also makes it easy
to mock some parts of the real system that are not under study.
+Depending on the context, you may see some parts of this process as
+less important, but you should pay close attention if you want to be
+confident in the results coming out of your simulations. In
+particular, you should not trust blindly your results but always
+strive to double-check them. Likewise, you should question the realism
+of your input configuration, and we even encourage you to doubt (and
+check) the provided performance models.
+
+To ease such questioning, you really should logically separate these
+parts in your experimental setup. It is seen as a very bad practice to
+merge the application, the platform and the deployment all together.
+SimGrid is versatile and your mileage may vary, but you should start
+with your Application specified as a C++ or Java program, using one of
+the provided XML platform file, and with your deployment in a separate
+XML file.
+
@section starting_gears SimGrid Execution Gears
Depending on the intended study, SimGrid can be run in several gears,
In this gear, SimGrid can provide information about the time taken by
your application, the amount of energy dissipated by the platform to
run your application and the detailed usage of each resource.
-
+
** **Model-Checking Gear**. This can be seen as a sort of exhaustive
testing gear, where every possible outcome of your application is
explored. In some sense, this gear tests your application for all
You just provide the application and its deployment (amount of
processes and parameters), and the model-checker will literally
explore all possible outcomes by testing all possible message
-interleaving: if at some point a given process can either receive the
+interleavings: if at some point a given process can either receive the
message A first or the message B depending on the platform
characteristics, the model-checker will explore the scenario where A
arrives first, and then rewind to the same point to explore the
-scenarion where B arrives first.
+scenario where B arrives first.
This is a very powerful gear, where you can evaluate the correctness of
-your application. It can verify either *safety properties* (asserts)
-or *liveless properties* stating for example that if a given event
+your application. It can verify either **safety properties** (asserts)
+or **liveness properties** stating for example that if a given event
occurs, then another given event will occur in a finite number of
steps. This gear is not only usable with the abstract algorithms
developed on top of the SimGrid APIs, but also with real MPI
The main limit of Model Checking lies in the huge number of scenarios
to explore. SimGrid tries to explore only non-redundant scenarios
-thanks to classical reduction techniques (such as DPOR and statefull
+thanks to classical reduction techniques (such as DPOR and stateful
exploration) but the exploration may well never finish if you don't
carefully adapt your application to this gear.
+A classical trap is that the Model Checker can only verify whether
+your application fits the provided properties, which is useless if you
+have a bug in your property. Remember also that one way for your
+application to never violate a given assert is to not start at all
+because of a stupid bug.
+
Another limit of this gear is that it does not use the performance
models of the simulation gear. Time becomes discrete: You can say for
example that the application took 42 steps to run, but there is no way
\subsection inside_release_c_source Building the source archive
-First, clean up your git repository. Some files are included through
-globbing, you must ensure that your tree contains no cruft. You can
-either checkout a new tree or remove anything from your current tree:
+This should be done from a clean git repository because some files are
+included through globbing. The best is to use a clean checkout:
\verbatim
-$ cd simgrid
-$ git reset --hard master # warning, it will kill your uncommited changes
-$ git clean -dfx # warning, it will kill your uncommited changes
+cd /tmp
+git clone ~/Code/simgrid
+cd simgrid
+cmake . && make dist
\endverbatim
-You can then build the archive. This gives you your archive in the
-build dir, named 'SimGrid-${inside_release_version}.tar.gz'.
+If you prefer, you can clean your repository the hard way:
+\verbatim
git reset --hard master # warning, it will kill your uncommitted changes
git clean -dfx # warning, it will kill your uncommitted changes
+cmake . && make dist
+\endverbatim
+
+\subsection inside_release_c_jarfile Building the binary jarfile
+Get the jarfiles for several OSes on the CI slaves. Use Save under to
+give a separate name to each of them.
+
+- On Jenkins: Mac OSX, Linux 64 and Linux 32 (without boost-context), FreeBSD, NetBSD
+- On AppVeyor: Windows
+
+Once all jarfiles are in a separate directory, run the following to
+merge them:
\verbatim
-$ mkdir build
-$ cd build
-$ cmake ..
-$ make dist
+mkdir content ; cd content
+
+for j in ../simgrid-linux64.jar ../*.jar ; do unzip -n $j ; done
+# The content of all jar should be the same, but I prefer using the Linux64 version by default
+# => unpack it first, and unpack the others with -n (never overwrite)
+
+test -e doc/javadoc || echo "YOU ARE MISSING THE DOC"
+
+du -sh . # 273M here. Let's strip (Darwin is already good)
+strip NATIVE/*/*/*.so # Gets BSD and Linux versions, down to 116M
+x86_64-linux-gnu-strip NATIVE/*/*/lib*dll # Gets Windows, down to 22M
+
+rm ../simgrid-3_*.jar
+zip -r ../simgrid-3_XX.jar * # Produced ../simgrid-3_XX.jar is 7.7M
\endverbatim
+To upload the file on gforge, you need to go to Files/Admin then click
+on the Settings icon next to the "Add a version" button, and then on
+the settings icon of the release you want to change.
+
\subsection inside_release_c_postchecks Check list after releasing
- Tag the git repository (don't forget to push the tags to the main repo)
-- Push the archive files on gforge
-- Rebuild and resynchronize the website so that the file gets visible
- from our download page (see @ref inside_doxygen_website).
+- Push the archive files (tar.gz and jar) on gforge
+- Post a news on gforge (before updating the website)
- Update the link scm.gforge.inria.fr:/home/groups/simgrid/htdocs/simgrid/latest
+- Rebuild and resynchronize the website so that the file gets visible
+ from our download page.\n
+ - Edit org/org-templates/level-0.org to change the release version, the tgz link and the jar link.
+ - emacs org/site/index.org and C-c C-c the first source block to refresh the news.
+ - emacs org/site/download.org and C-c C-c the first source block to refresh the download.
+ - emacs org/site/documentation.org and edit the version links.
+ - make -C org all sync
+ - git commit && git push
- Announce the release
- Mail the simgrid-user mailing list
- the NEWS chunk in the mail;
- the ChangeLog chunk as attachment
- Also mail some other lists (G5K users), with only the NEWS chunk
and the link to the download section
- - Post a news on gforge
+- Release the debian package
+- Create the template for the next release in ChangeLog and NEWS files
+- Change the release number in CMakeLists.txt
*/
- mvapich2: use mvapich2 selector for the alltoall operations
- impi: use intel mpi selector for the alltoall operations
- automatic (experimental): use an automatic self-benchmarking algorithm
- - bruck: Described by Bruck et.al. in <a href="http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=642949">
+ - bruck: Described by Bruck et.al. in <a href="http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=642949">this paper</a>
- 2dmesh: organizes the nodes as a two dimensional mesh, and perform allgather
along the dimensions
- 3dmesh: adds a third dimension to the previous algorithm
Simulation time: 1e3 seconds.
\endverbatim
+\subsection options_smpi_temps smpi/keep-temps: not cleaning up after simulation
+
+\b Default: 0 (false)
+
+Under some conditions, SMPI generates a lot of temporary files. They
+usually get cleaned, but you may use this option to not erase these
+files. This is for example useful when debugging or profiling
+executions using the dlopen privatization scheme, as missing binary
+files tend to fool the debuggers.
+
\subsection options_model_smpi_lat_factor smpi/lat-factor: Latency factors
The motivation and syntax for this option is identical to the motivation/syntax
--cfg=smpi/papi-events:"default:PAPI_L3_LDM:PAPI_L2_LDM"
\endverbatim
-\subsection options_smpi_global smpi/privatize-global-variables: Automatic privatization of global variables
+\subsection options_smpi_privatization smpi/privatization: Automatic privatization of global variables
-MPI executables are meant to be executed in separated processes, but SMPI is
+MPI executables are usually meant to be executed in separated processes, but SMPI is
executed in only one process. Global variables from executables will be placed
-in the same memory zone and shared between processes, causing hard to find bugs.
-To avoid this, several options are possible :
- - Manual edition of the code, for example to add __thread keyword before data
- declaration, which allows the resulting code to work with SMPI, but only
- if the thread factory (see \ref options_virt_factory) is used, as global
- variables are then placed in the TLS (thread local storage) segment.
- - Source-to-source transformation, to add a level of indirection
- to the global variables. SMPI does this for F77 codes compiled with smpiff,
- and used to provide coccinelle scripts for C codes, which are not functional anymore.
- - Compilation pass, to have the compiler automatically put the data in
- an adapted zone.
- - Runtime automatic switching of the data segments. SMPI stores a copy of
- each global data segment for each process, and at each context switch replaces
- the actual data with its copy from the right process. This mechanism uses mmap,
- and is for now limited to systems supporting this functionnality (all Linux
- and some BSD should be compatible).
- Another limitation is that SMPI only accounts for global variables defined in
- the executable. If the processes use external global variables from dynamic
- libraries, they won't be switched correctly. To avoid this, using static
- linking is advised (but not with the simgrid library, to avoid replicating
- its own global variables).
-
- To use this runtime automatic switching, the variable \b smpi/privatize-global-variables
- should be set to yes
+in the same memory zone and shared between processes, causing intricate bugs.
+Several options are possible to avoid this, as described in the main
+<a href="https://hal.inria.fr/hal-01415484">SMPI publication</a>.
+SimGrid provides two ways of automatically privatizing the globals,
+and this option allows you to choose between them.
+
+ - <b>no</b> (default): Do not automatically privatize variables.
+ - <b>mmap</b> or <b>yes</b>: Runtime automatic switching of the data segments.\n
+ SMPI stores a copy of each global data segment for each process,
+ and at each context switch replaces the actual data with its copy
+    from the right process. No copy actually occurs as this mechanism
+ uses mmap for efficiency. As such, it is for now limited to
+    systems supporting this functionality (all Linux and most BSD).\n
+ Another limitation is that SMPI only accounts for global variables
+ defined in the executable. If the processes use external global
+ variables from dynamic libraries, they won't be switched
+ correctly. The easiest way to solve this is to statically link
+ against the library with these globals (but you should never
+ statically link against the simgrid library itself).
+ - <b>dlopen</b>: Link multiple times against the binary.\n
+    SMPI loads several copies of the same binary in memory, resulting in
+    the natural duplication of global variables. Since the dynamic linker
+ refuses to link the same file several times, the binary is copied
+ in a temporary file before being dl-loaded (it is erased right
+ after loading).\n
+ Note that this feature is somewhat experimental at time of writing
+ (v3.16) but seems to work.\n
+ This approach greatly speeds up the context switching, down to
+    about 40 CPU cycles with our raw contexts, instead of requesting
+ several syscalls with the \c mmap approach. Another advantage is
+ that it permits to run the SMPI contexts in parallel, which is
+ obviously not possible with the \c mmap approach.\n
+ Further work may be possible to alleviate the memory and disk
+ overconsumption. It seems that we could
+ <a href="https://lwn.net/Articles/415889/">punch holes</a>
+ in the files before dl-loading them to remove the code and
+    constants, and mmap these areas onto a unique copy. This requires
+    understanding the ELF layout of the file, but would
+ reduce the disk- and memory- usage to the bare minimum. In
+ addition, this would reduce the pressure on the CPU caches (in
+    particular on the instruction one).
\warning
This configuration option cannot be set in your platform file. You can only
pass it as an argument to smpirun.
-
\subsection options_model_smpi_detached Simulating MPI detached send
This threshold specifies the size in bytes under which the send will return
- \c smpi/iprobe: \ref options_model_smpi_iprobe
- \c smpi/iprobe-cpu-usage: \ref options_model_smpi_iprobe_cpu_usage
- \c smpi/init: \ref options_model_smpi_init
+- \c smpi/keep-temps: \ref options_smpi_temps
- \c smpi/lat-factor: \ref options_model_smpi_lat_factor
- \c smpi/ois: \ref options_model_smpi_ois
- \c smpi/or: \ref options_model_smpi_or
- \c smpi/os: \ref options_model_smpi_os
- \c smpi/papi-events: \ref options_smpi_papi_events
-- \c smpi/privatize-global-variables: \ref options_smpi_global
+- \c smpi/privatization: \ref options_smpi_privatization
- \c smpi/send-is-detached-thresh: \ref options_model_smpi_detached
- \c smpi/shared-malloc: \ref options_model_smpi_shared_malloc
- \c smpi/simulate-computation: \ref options_smpi_bench
@tableofcontents
-In order to run any simulation, SimGrid must be provided with three things:
-something to run (i.e., your code), a description of the platform on which you want to simulate your application, and
-information about the deployment of the application: Which process should be executed onto which processor/core?
-
-For the last two items, there are essentially three possible ways you can provide
-this information as an input:
-\li You can program, if you're using MSG, some of the platform and
- deployment functions. If you choose to follow this approach, check the dedicated documentation
- (\ref msg_simulation).
-\li You can use two XML files: one for the platform description and the other for the deployment.
-\li You can program the description of your platform in Lua format.
-
-For more information on SimGrid's deployment features, please refer to the \ref deployment section.
-
-The platform description may be intricate. This documentation is all
-about how to write this file. You should read about the
-@ref routing_basics "routing basic concepts" before proceeding. This page
-first contain a reference guide of the XML. Finally, it gives some hints and tips on how to write a better
-platform description.
-
-\section pf_overview Some words about XML and DTD
-
-We opted for XML not only because it is extensible but also because many tools (and plugins for existing tools) are
-available that facilitate editing and validating XML files. Furthermore, libraries that parse XML are often already
-available and very well tested.
-
-The XML checking is done based on the [simgrid.dtd](http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd) Document Type
-Definition (DTD) file.
-
-If you read the DTD, you should notice the following:
-\li The platform tag has a version attribute. The current version is <b>4</b>. This attribute might be used in the
- provide backward compatibility.
-\li The DTD contains definitions for both the platform description and deployment files used by SimGrid.
-
-\section pf_netzones Defining a netzone
-
-Here is a simplistic example, describing a netzone using the Full
-routing. Other supported values for the routing attribute can be
-found below, Section \ref pf_raf.
-
+As @ref starting_components "explained in the introduction," any
+SimGrid study must entail the description of the platform on which you
+want to simulate your application. You have to describe **each element
+of your platform**, such as computing hosts, clusters, disks,
+links, etc. You must also define the **routing on your platform**, i.e.
+which path is taken between two hosts. Finally, you may also describe
+an **experimental scenario**, with quantitative changes (e.g.,
+bandwidth changes representing an external load) and qualitative
+changes (representing how some elements fail and restart over time).
+
+You should really separate your application from the platform
+description, as it will ease your experimental campaign afterward.
+Mixing them is seen as a really bad experimental practice. The easiest
+way to enforce this split is to put the platform description in an XML
+file. Many example platforms are provided in the archive, and this
+page gives all needed details to write such files, as well as some
+hints and tricks about describing your platform.
+
+On the other side, XML is sometimes not expressive enough for some
+platforms, in particular large platforms exhibiting repetitive
+patterns that are not simply expressed in XML. In practice, many
+users end up generating their XML platform files from some sort of
+scripts. It is probably preferable to rewrite your XML @ref
+platform_lua "platform using the lua scripting language" instead.
+In the future, it should be possible to describe the platform directly
+in C++, but this is not possible yet.
+
+As usual, SimGrid is a versatile framework, and you should find the
+way of describing your platform that best fits your experimental
+practice.
+
+\section pf_overview Describing the platform with XML
+
+Your platform description should follow the specification presented in
+the [simgrid.dtd](http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd)
+DTD file. The same DTD is used for both the platform and deployment
+files.
+
+From time to time, this DTD evolves to introduce possibly
+backward-incompatible changes. That is why each platform description is
+enclosed within a @c platform tag, which has a @c version attribute.
+The current version is <b>4</b>. The @c simgrid_update_xml program can
+upgrade most of the past platform files to the recent formalism.
+
+\section pf_netzones Defining a NetZone
+
+In SimGrid, any resource must be located within a given **NetZone**.
+Each netzone is in charge of the routing between its resources. It
+means that when a host wants to communicate with another host of the
+same NetZone, it is the NetZone's duty to find the list of links that
+are involved in the communication. If the hosts are not in the same
+NetZone, @ref routing_basics "things are slightly more complex" to
+determine the links involved in a time- and space-efficient manner.
+
+But only one NetZone is really sufficient to begin with. The following
+chunk describes a simplistic NetZone using the Full routing (we will
+have to specify each and every route manually).
\verbatim
<AS id="netzone0" routing="Full">
[online on GitLab](https://gitlab.inria.fr/simgrid/simgrid/tree/master/doc/msg-tuto-src).
If you find the right button on the top right of the interface, you can download the whole
directory in one archive file. If you wish, you can find other platform file in
-[this GitLab directory](https://gitlab.inria.fr/simgrid/simgrid/tree/master/doc/examples/platforms).
+[this GitLab directory](https://gitlab.inria.fr/simgrid/simgrid/tree/master/examples/platforms).
As you can see, there is already a little Makefile that compiles
everything for you. If you struggle with the compilation, then you should double check
-/* Copyright (c) 2006-2014, 2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
package app.pingpong;
+import java.io.File;
+
import org.simgrid.msg.HostNotFoundException;
import org.simgrid.msg.Msg;
class Main {
+ protected static final int TASK_COUNT = 3;
+
private Main() {
throw new IllegalAccessError("Utility class");
}
public static void main(String[] args) throws HostNotFoundException {
Msg.init(args);
- if(args.length < 1) {
- Msg.info("Usage : Main platform_file");
- Msg.info("example : Main ../platforms/platform.xml");
- System.exit(1);
+
+ String platfFile = "../../examples/platforms/small_platform.xml";
+ if (args.length == 1)
+ platfFile = args[0];
+
+ File f = new File(platfFile);
+ if (!f.exists()) {
+ System.err.println("File "+platfFile+" does not exist in "+System.getProperty("user.dir"));
+ System.err.println("Usage : Main ../platforms/platform.xml");
}
-
- Msg.createEnvironment(args[0]);
+
+ Msg.createEnvironment(platfFile);
new Sender("Jacquelin", "Sender", new String[] {"Boivin"}).start();
new Receiver ("Boivin", "Receiver", null).start();
-/* Copyright (c) 2006-2014, 2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
this.timeVal = 0;
}
- public PingPongTask(String name, double computeDuration, double messageSize) {
+ public PingPongTask(String name, double computeDuration, double messageSize, double timeVal) {
super(name,computeDuration,messageSize);
- }
-
- public void setTime(double timeVal) {
- this.timeVal = timeVal;
+
+ this.timeVal = timeVal;
}
public double getTime() {
-/* Copyright (c) 2006-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
import org.simgrid.msg.Task;
public class Receiver extends Process {
- private static final double COMM_SIZE_BW = 100000000;
- public Receiver(String hostname, String name, String[]args) throws HostNotFoundException {
- super(hostname, name, args);
- }
-
- public void main(String[] args) throws MsgException {
- Msg.info("hello!");
-
- Msg.info("try to get a task");
-
- PingPongTask task = (PingPongTask)Task.receive(getHost().getName());
- double timeGot = Msg.getClock();
- double timeSent = task.getTime();
-
- Msg.info("Got at time "+ timeGot);
- Msg.info("Was sent at time "+timeSent);
- double time = timeSent;
-
- double communicationTime = timeGot - time;
- Msg.info("Communication time : " + communicationTime);
- Msg.info(" --- bw "+ COMM_SIZE_BW/communicationTime + " ----");
- Msg.info("goodbye!");
- }
+ private static final double COMM_SIZE_BW = 100000000;
+ public Receiver(String hostname, String name, String[]args) throws HostNotFoundException {
+ super(hostname, name, args);
+ }
+
+ public void main(String[] args) throws MsgException {
+ for (int i = 0 ; i<Main.TASK_COUNT; i++) {
+ Msg.info("Wait for a task");
+
+ PingPongTask task = (PingPongTask)Task.receive(getHost().getName());
+ double timeGot = Msg.getClock();
+ double timeSent = task.getTime();
+
+ Msg.info("Got one that was sent at time "+ timeSent);
+
+ double communicationTime = timeGot - timeSent;
+ Msg.info("Communication time : " + communicationTime);
+ Msg.info(" --- bw "+ COMM_SIZE_BW/communicationTime + " ----");
+ }
+ Msg.info("Done.");
+ }
}
\ No newline at end of file
- /* Copyright (c) 2006-2014, 2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
import org.simgrid.msg.Process;
public class Sender extends Process {
- private static final double COMM_SIZE_LAT = 1;
+ private static final double COMM_SIZE_LAT = 1;
- public Sender(String hostname, String name, String[] args) throws HostNotFoundException {
- super(hostname,name,args);
- }
+ public Sender(String hostname, String name, String[] args) throws HostNotFoundException {
+ super(hostname,name,args);
+ }
- public void main(String[] args) throws MsgException {
- Msg.info("hello!");
+ public void main(String[] args) throws MsgException {
+ Msg.info("Host count: " + args.length);
- int hostCount = args.length;
+ for (int i = 0 ; i<Main.TASK_COUNT; i++) {
- Msg.info("host count: " + hostCount);
- String[] mailboxes = new String[hostCount];
- double time;
- double computeDuration = 0;
- PingPongTask task;
+ for(int pos = 0; pos < args.length ; pos++) {
+ String hostname = Host.getByName(args[pos]).getName(); // Make sure that this host exists
- for(int pos = 0; pos < args.length ; pos++) {
- try {
- mailboxes[pos] = Host.getByName(args[pos]).getName();
- } catch (HostNotFoundException e) {
- e.printStackTrace();
- Msg.info("Invalid deployment file: " + e.toString());
- }
- }
+ double time = Msg.getClock();
+ Msg.info("sender time: " + time);
- for (int pos = 0; pos < hostCount; pos++) {
- time = Msg.getClock();
-
- Msg.info("sender time: " + time);
-
- task = new PingPongTask("no name",computeDuration,COMM_SIZE_LAT);
- task.setTime(time);
-
- task.send(mailboxes[pos]);
- }
-
- Msg.info("goodbye!");
- }
+ PingPongTask task = new PingPongTask("no name", /* Duration: 0 flops */ 0, COMM_SIZE_LAT, time);
+ task.send(hostname);
+ }
+ }
+ Msg.info("Done.");
+ }
}
\ No newline at end of file
#! tesh
-! output sort 19
-
$ java -classpath ${classpath:=.} app/pingpong/Main ${srcdir:=.}/../platforms/small_platform.xml
> [0.000000] [java/INFO] Using regular java threads.
-> [1.048882] [java/INFO] MSG_main finished; Cleaning up the simulation...
-> [Boivin:Receiver:(2) 0.000000] [java/INFO] hello!
-> [Boivin:Receiver:(2) 0.000000] [java/INFO] try to get a task
-> [Boivin:Receiver:(2) 1.048882] [java/INFO] Got at time 1.0488818628325232
-> [Boivin:Receiver:(2) 1.048882] [java/INFO] Was sent at time 0.0
+> [Jacquelin:Sender:(1) 0.000000] [java/INFO] Host count: 1
+> [Jacquelin:Sender:(1) 0.000000] [java/INFO] sender time: 0.0
+> [Boivin:Receiver:(2) 0.000000] [java/INFO] Wait for a task
+> [Boivin:Receiver:(2) 1.048882] [java/INFO] Got one that was sent at time 0.0
> [Boivin:Receiver:(2) 1.048882] [java/INFO] Communication time : 1.0488818628325232
> [Boivin:Receiver:(2) 1.048882] [java/INFO] --- bw 9.533962169004269E7 ----
-> [Boivin:Receiver:(2) 1.048882] [java/INFO] goodbye!
-> [Jacquelin:Sender:(1) 0.000000] [java/INFO] hello!
-> [Jacquelin:Sender:(1) 0.000000] [java/INFO] host count: 1
-> [Jacquelin:Sender:(1) 0.000000] [java/INFO] sender time: 0.0
-> [Jacquelin:Sender:(1) 1.048882] [java/INFO] goodbye!
+> [Boivin:Receiver:(2) 1.048882] [java/INFO] Wait for a task
+> [Jacquelin:Sender:(1) 1.048882] [java/INFO] sender time: 1.0488818628325232
+> [Boivin:Receiver:(2) 2.097764] [java/INFO] Got one that was sent at time 1.0488818628325232
+> [Boivin:Receiver:(2) 2.097764] [java/INFO] Communication time : 1.0488818628325232
+> [Boivin:Receiver:(2) 2.097764] [java/INFO] --- bw 9.533962169004269E7 ----
+> [Boivin:Receiver:(2) 2.097764] [java/INFO] Wait for a task
+> [Jacquelin:Sender:(1) 2.097764] [java/INFO] sender time: 2.0977637256650463
+> [Boivin:Receiver:(2) 3.146646] [java/INFO] Got one that was sent at time 2.0977637256650463
+> [Boivin:Receiver:(2) 3.146646] [java/INFO] Communication time : 1.0488818628325234
+> [Boivin:Receiver:(2) 3.146646] [java/INFO] --- bw 9.533962169004266E7 ----
+> [Boivin:Receiver:(2) 3.146646] [java/INFO] Done.
+> [Jacquelin:Sender:(1) 3.146646] [java/INFO] Done.
+> [3.146646] [java/INFO] MSG_main finished; Cleaning up the simulation...
-/* Copyright (c) 2012-2014, 2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2012-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
package cloud.masterworker;
+import java.io.File;
+
import org.simgrid.msg.Msg;
import org.simgrid.msg.Host;
import org.simgrid.msg.MsgException;
class Main {
public static final double TASK_COMP_SIZE = 10;
public static final double TASK_COMM_SIZE = 10;
- public static final int NHOSTS = 2;
+ public static final int NHOSTS = 6;
+ public static final int NSTEPS = 50;
private Main() {
throw new IllegalAccessError("Utility class");
public static void main(String[] args) throws MsgException {
Msg.init(args);
- if (args.length < 1) {
- Msg.info("Usage : Main platform_file");
- Msg.info("Usage : Main ../platforms/platform.xml");
- System.exit(1);
+ String platfFile = "../../examples/platforms/small_platform.xml";
+ if (args.length >= 1)
+ platfFile = args[0];
+
+ File f = new File(platfFile);
+ if (!f.exists()) {
+ System.err.println("File "+platfFile+" does not exist in "+System.getProperty("user.dir"));
+ System.err.println("Usage : Main ../platforms/platform.xml");
}
-
- /* Construct the platform */
- Msg.createEnvironment(args[0]);
+
+ Msg.createEnvironment(platfFile);
Host[] hosts = Host.all();
if (hosts.length < NHOSTS+1) {
Msg.info("I need at least "+ (NHOSTS+1) +" hosts in the platform file, but " + args[0] + " contains only "
+ hosts.length + " hosts");
System.exit(42);
}
- Msg.info("Start "+ NHOSTS +" hosts");
new Master(hosts[0],"Master",hosts).start();
+
/* Execute the simulation */
Msg.run();
}
-/* Copyright (c) 2012-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2012-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
package cloud.masterworker;
-import java.util.ArrayList;
-
import org.simgrid.msg.Host;
import org.simgrid.msg.Msg;
import org.simgrid.msg.MsgException;
import org.simgrid.msg.Task;
import org.simgrid.msg.VM;
-//import eu.plumbr.api.Plumbr;
-
public class Master extends Process {
private Host[] hosts;
public void main(String[] args) throws MsgException {
int workersCount = Main.NHOSTS;
- for (int step = 1; step <= 1/*00000*/ ; step++) {
- //Plumbr.startTransaction("Migration");
- ArrayList<VM> vms = new ArrayList<>();
+ for (int step = 1; step <= Main.NSTEPS ; step++) {
// Create one VM per host and bind a process inside each one.
for (int i = 0; i < workersCount; i++) {
Msg.verb("create VM0-s"+step+"-"+i);
VM vm = new VM(hosts[i+1],"VM0-s"+step+"-"+i);
vm.start();
- vms.add(vm);
Worker worker= new Worker(vm,"WK:"+step+":"+ i);
Msg.verb("Put Worker "+worker.getName()+ " on "+vm.getName());
worker.start();
}
+ VM[] vms = VM.all();
- Msg.info("Launched " + vms.size() + " VMs");
+ Msg.info("Launched " + vms.length + " VMs");
- Msg.info("Send a first batch of work to everyone");
+ Msg.info("Send some work to everyone");
workBatch(workersCount,"WK:"+step+":");
Msg.info("Suspend all VMs, wait a while, resume them, migrate them and shut them down.");
- for (int i = 0; i < vms.size(); i++) {
- Msg.verb("Suspend "+vms.get(i).getName());
- vms.get(i).suspend();
+ for (VM vm : vms) {
+ Msg.verb("Suspend "+vm.getName());
+ vm.suspend();
}
- Msg.verb("Wait a while");
+ Msg.verb("Wait a while, and resume all VMs.");
waitFor(2);
-
- Msg.verb("Resume all VMs.");
- for (int i = 0; i < vms.size(); i++) {
- vms.get(i).resume();
- }
+ for (VM vm : vms)
+ vm.resume();
+
Msg.verb("Sleep long enough for everyone to be done with previous batch of work");
waitFor(1000*step - Msg.getClock());
*/
Msg.verb("Migrate everyone to "+hosts[3].getName());
- for (int i = 0; i < vms.size(); i++) {
- Msg.verb("Migrate "+vms.get(i).getName()+" from "+hosts[i+1].getName()+"to "+hosts[3].getName());
- vms.get(i).migrate(hosts[3]);
+ for (VM vm : vms) {
+ Msg.verb("Migrate "+vm.getName()+" to "+hosts[3].getName());
+ vm.migrate(hosts[3]);
}
- Msg.verb("Let's shut down the simulation and kill everyone.");
+ Msg.verb("Let's kill everyone.");
- for (int i = 0; i < vms.size(); i++) {
- vms.get(i).destroy();
- }
+ for (VM vm : vms)
+ vm.destroy();
Msg.info("XXXXXXXXXXXXXXX Step "+step+" done.");
-// Plumbr.endTransaction();
}
}
-/* Copyright (c) 2012-2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2012-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
public void main(String[] args) throws MsgException {
Msg.verb(this.getName() +" is listening on "+ getName());
while(true) {
- Task task =null;
+ Task task = null;
try {
task = Task.receive(getName());
- if (task == null)
- break;
} catch (MsgException e) {
- Msg.debug("Received failed. I'm done. See you!");
+ Msg.info("Received failed. I'm done. See you!");
+ exit();
}
- Msg.verb("Received \"" + task.getName() + "\". Processing it.");
+ Msg.verb("Received '" + task.getName() + "'. Processing it.");
task.execute();
- Msg.verb(this.getName() +" executed task (" + task.getName()+")");
+ Msg.verb("Done executing task '" + task.getName() +"'");
}
}
}
#! tesh
-! output sort 19
-
-$ java -classpath ${classpath:=.} cloud/masterworker/Main ${srcdir:=.}/../platforms/small_platform.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --log=java.thres:verbose
+$ java -classpath ${classpath:=.} cloud/masterworker/Main ${srcdir:=.}/../platforms/small_platform.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (0:maestro@) Using regular java threads.
-> [ 0.000000] (0:maestro@) Start 2 hosts
-> [ 0.000000] (1:Master@Boivin) create VM0-s1-0
-> [ 0.000000] (1:Master@Boivin) Put Worker WK:1:0 on VM0-s1-0
-> [ 0.000000] (2:WK:1:0@VM0-s1-0) WK:1:0 is listening on WK:1:0
-> [ 0.000000] (1:Master@Boivin) create VM0-s1-1
-> [ 0.000000] (1:Master@Boivin) Put Worker WK:1:1 on VM0-s1-1
-> [ 0.000000] (3:WK:1:1@VM0-s1-1) WK:1:1 is listening on WK:1:1
-> [ 0.000000] (1:Master@Boivin) Launched 2 VMs
-> [ 0.000000] (1:Master@Boivin) Send a first batch of work to everyone
-> [ 0.000000] (1:Master@Boivin) Sending to WK:1:0
-> [ 0.090194] (2:WK:1:0@VM0-s1-0) Received "Task WK:1:0". Processing it.
-> [ 0.090194] (1:Master@Boivin) Sending to WK:1:1
-> [ 0.090194] (2:WK:1:0@VM0-s1-0) WK:1:0 executed task (Task WK:1:0)
-> [ 0.754233] (3:WK:1:1@VM0-s1-1) Received "Task WK:1:1". Processing it.
-> [ 0.754233] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
-> [ 0.754233] (1:Master@Boivin) Suspend VM0-s1-0
-> [ 0.754233] (1:Master@Boivin) Suspend VM0-s1-1
-> [ 0.754233] (1:Master@Boivin) Wait a while
-> [ 2.754233] (1:Master@Boivin) Resume all VMs.
-> [ 2.754233] (1:Master@Boivin) Sleep long enough for everyone to be done with previous batch of work
-> [ 2.754234] (3:WK:1:1@VM0-s1-1) WK:1:1 executed task (Task WK:1:1)
-> [1000.000000] (1:Master@Boivin) Migrate everyone to Ginette
-> [1000.000000] (1:Master@Boivin) Migrate VM0-s1-0 from Bourassato Ginette
-> [1100.157755] (1:Master@Boivin) Migrate VM0-s1-1 from Fafardto Ginette
-> [1242.754111] (1:Master@Boivin) Let's shut down the simulation and kill everyone.
-> [1242.754111] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 1 done.
-> [1242.754111] (0:maestro@) MSG_main finished; Cleaning up the simulation...
+> [ 0.000000] (1:Master@Boivin) Launched 6 VMs
+> [ 0.000000] (1:Master@Boivin) Send some work to everyone
+> [ 2.186532] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [1971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 1 done.
+> [1971.662691] (1:Master@Boivin) Launched 6 VMs
+> [1971.662691] (1:Master@Boivin) Send some work to everyone
+> [1973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [2971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 2 done.
+> [2971.662691] (1:Master@Boivin) Launched 6 VMs
+> [2971.662691] (1:Master@Boivin) Send some work to everyone
+> [2973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [3971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 3 done.
+> [3971.662691] (1:Master@Boivin) Launched 6 VMs
+> [3971.662691] (1:Master@Boivin) Send some work to everyone
+> [3973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [4971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 4 done.
+> [4971.662691] (1:Master@Boivin) Launched 6 VMs
+> [4971.662691] (1:Master@Boivin) Send some work to everyone
+> [4973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [5971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 5 done.
+> [5971.662691] (1:Master@Boivin) Launched 6 VMs
+> [5971.662691] (1:Master@Boivin) Send some work to everyone
+> [5973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [6971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 6 done.
+> [6971.662691] (1:Master@Boivin) Launched 6 VMs
+> [6971.662691] (1:Master@Boivin) Send some work to everyone
+> [6973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [7971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 7 done.
+> [7971.662691] (1:Master@Boivin) Launched 6 VMs
+> [7971.662691] (1:Master@Boivin) Send some work to everyone
+> [7973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [8971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 8 done.
+> [8971.662691] (1:Master@Boivin) Launched 6 VMs
+> [8971.662691] (1:Master@Boivin) Send some work to everyone
+> [8973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [9971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 9 done.
+> [9971.662691] (1:Master@Boivin) Launched 6 VMs
+> [9971.662691] (1:Master@Boivin) Send some work to everyone
+> [9973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [10971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 10 done.
+> [10971.662691] (1:Master@Boivin) Launched 6 VMs
+> [10971.662691] (1:Master@Boivin) Send some work to everyone
+> [10973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [11971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 11 done.
+> [11971.662691] (1:Master@Boivin) Launched 6 VMs
+> [11971.662691] (1:Master@Boivin) Send some work to everyone
+> [11973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [12971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 12 done.
+> [12971.662691] (1:Master@Boivin) Launched 6 VMs
+> [12971.662691] (1:Master@Boivin) Send some work to everyone
+> [12973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [13971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 13 done.
+> [13971.662691] (1:Master@Boivin) Launched 6 VMs
+> [13971.662691] (1:Master@Boivin) Send some work to everyone
+> [13973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [14971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 14 done.
+> [14971.662691] (1:Master@Boivin) Launched 6 VMs
+> [14971.662691] (1:Master@Boivin) Send some work to everyone
+> [14973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [15971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 15 done.
+> [15971.662691] (1:Master@Boivin) Launched 6 VMs
+> [15971.662691] (1:Master@Boivin) Send some work to everyone
+> [15973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [16971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 16 done.
+> [16971.662691] (1:Master@Boivin) Launched 6 VMs
+> [16971.662691] (1:Master@Boivin) Send some work to everyone
+> [16973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [17971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 17 done.
+> [17971.662691] (1:Master@Boivin) Launched 6 VMs
+> [17971.662691] (1:Master@Boivin) Send some work to everyone
+> [17973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [18971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 18 done.
+> [18971.662691] (1:Master@Boivin) Launched 6 VMs
+> [18971.662691] (1:Master@Boivin) Send some work to everyone
+> [18973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [19971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 19 done.
+> [19971.662691] (1:Master@Boivin) Launched 6 VMs
+> [19971.662691] (1:Master@Boivin) Send some work to everyone
+> [19973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [20971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 20 done.
+> [20971.662691] (1:Master@Boivin) Launched 6 VMs
+> [20971.662691] (1:Master@Boivin) Send some work to everyone
+> [20973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [21971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 21 done.
+> [21971.662691] (1:Master@Boivin) Launched 6 VMs
+> [21971.662691] (1:Master@Boivin) Send some work to everyone
+> [21973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [22971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 22 done.
+> [22971.662691] (1:Master@Boivin) Launched 6 VMs
+> [22971.662691] (1:Master@Boivin) Send some work to everyone
+> [22973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [23971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 23 done.
+> [23971.662691] (1:Master@Boivin) Launched 6 VMs
+> [23971.662691] (1:Master@Boivin) Send some work to everyone
+> [23973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [24971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 24 done.
+> [24971.662691] (1:Master@Boivin) Launched 6 VMs
+> [24971.662691] (1:Master@Boivin) Send some work to everyone
+> [24973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [25971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 25 done.
+> [25971.662691] (1:Master@Boivin) Launched 6 VMs
+> [25971.662691] (1:Master@Boivin) Send some work to everyone
+> [25973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [26971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 26 done.
+> [26971.662691] (1:Master@Boivin) Launched 6 VMs
+> [26971.662691] (1:Master@Boivin) Send some work to everyone
+> [26973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [27971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 27 done.
+> [27971.662691] (1:Master@Boivin) Launched 6 VMs
+> [27971.662691] (1:Master@Boivin) Send some work to everyone
+> [27973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [28971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 28 done.
+> [28971.662691] (1:Master@Boivin) Launched 6 VMs
+> [28971.662691] (1:Master@Boivin) Send some work to everyone
+> [28973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [29971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 29 done.
+> [29971.662691] (1:Master@Boivin) Launched 6 VMs
+> [29971.662691] (1:Master@Boivin) Send some work to everyone
+> [29973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [30971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 30 done.
+> [30971.662691] (1:Master@Boivin) Launched 6 VMs
+> [30971.662691] (1:Master@Boivin) Send some work to everyone
+> [30973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [31971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 31 done.
+> [31971.662691] (1:Master@Boivin) Launched 6 VMs
+> [31971.662691] (1:Master@Boivin) Send some work to everyone
+> [31973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [32971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 32 done.
+> [32971.662691] (1:Master@Boivin) Launched 6 VMs
+> [32971.662691] (1:Master@Boivin) Send some work to everyone
+> [32973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [33971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 33 done.
+> [33971.662691] (1:Master@Boivin) Launched 6 VMs
+> [33971.662691] (1:Master@Boivin) Send some work to everyone
+> [33973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [34971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 34 done.
+> [34971.662691] (1:Master@Boivin) Launched 6 VMs
+> [34971.662691] (1:Master@Boivin) Send some work to everyone
+> [34973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [35971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 35 done.
+> [35971.662691] (1:Master@Boivin) Launched 6 VMs
+> [35971.662691] (1:Master@Boivin) Send some work to everyone
+> [35973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [36971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 36 done.
+> [36971.662691] (1:Master@Boivin) Launched 6 VMs
+> [36971.662691] (1:Master@Boivin) Send some work to everyone
+> [36973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [37971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 37 done.
+> [37971.662691] (1:Master@Boivin) Launched 6 VMs
+> [37971.662691] (1:Master@Boivin) Send some work to everyone
+> [37973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [38971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 38 done.
+> [38971.662691] (1:Master@Boivin) Launched 6 VMs
+> [38971.662691] (1:Master@Boivin) Send some work to everyone
+> [38973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [39971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 39 done.
+> [39971.662691] (1:Master@Boivin) Launched 6 VMs
+> [39971.662691] (1:Master@Boivin) Send some work to everyone
+> [39973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [40971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 40 done.
+> [40971.662691] (1:Master@Boivin) Launched 6 VMs
+> [40971.662691] (1:Master@Boivin) Send some work to everyone
+> [40973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [41971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 41 done.
+> [41971.662691] (1:Master@Boivin) Launched 6 VMs
+> [41971.662691] (1:Master@Boivin) Send some work to everyone
+> [41973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [42971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 42 done.
+> [42971.662691] (1:Master@Boivin) Launched 6 VMs
+> [42971.662691] (1:Master@Boivin) Send some work to everyone
+> [42973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [43971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 43 done.
+> [43971.662691] (1:Master@Boivin) Launched 6 VMs
+> [43971.662691] (1:Master@Boivin) Send some work to everyone
+> [43973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [44971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 44 done.
+> [44971.662691] (1:Master@Boivin) Launched 6 VMs
+> [44971.662691] (1:Master@Boivin) Send some work to everyone
+> [44973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [45971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 45 done.
+> [45971.662691] (1:Master@Boivin) Launched 6 VMs
+> [45971.662691] (1:Master@Boivin) Send some work to everyone
+> [45973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [46971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 46 done.
+> [46971.662691] (1:Master@Boivin) Launched 6 VMs
+> [46971.662691] (1:Master@Boivin) Send some work to everyone
+> [46973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [47971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 47 done.
+> [47971.662691] (1:Master@Boivin) Launched 6 VMs
+> [47971.662691] (1:Master@Boivin) Send some work to everyone
+> [47973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [48971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 48 done.
+> [48971.662691] (1:Master@Boivin) Launched 6 VMs
+> [48971.662691] (1:Master@Boivin) Send some work to everyone
+> [48973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [49971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 49 done.
+> [49971.662691] (1:Master@Boivin) Launched 6 VMs
+> [49971.662691] (1:Master@Boivin) Send some work to everyone
+> [49973.849223] (1:Master@Boivin) Suspend all VMs, wait a while, resume them, migrate them and shut them down.
+> [50971.662691] (1:Master@Boivin) XXXXXXXXXXXXXXX Step 50 done.
+> [50971.662691] (0:maestro@) MSG_main finished; Cleaning up the simulation...
-/* Copyright (c) 2014, 2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2014-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
}
public void main(String[] strings) throws MsgException {
- Host host0 = null;
- Host host1 = null;
- try {
- /* get hosts 1 and 2*/
- host0 = Host.getByName("PM0");
- host1 = Host.getByName("PM1");
- }catch (HostNotFoundException e) {
- e.printStackTrace();
- }
+ Host host0 = Host.getByName("PM0");
+ Host host1 = Host.getByName("PM1");
List<VM> vms = new ArrayList<>();
vm1.start();
/* Collocated VMs */
- int collocatedSrc = 6;
int[] vmSrcLoad = {
80,
0,
};
XVM tmp;
- for (int i=1 ; i<= collocatedSrc ; i++){
+ for (int i=1 ; i<= vmSrcLoad.length ; i++){
tmp = new XVM(host0, "vm"+i,
2048, // Ramsize,
125, // Net bandwidth,
tmp.setLoad(vmSrcLoad[i-1]);
}
- int collocatedDst = 6;
int[] vmDstLoad = {
0,
40,
80,
};
- for (int i=1 ; i <= collocatedDst ; i++){
- tmp = new XVM(host1, "vm"+(i+collocatedSrc),
+ for (int i=1 ; i <= vmDstLoad.length ; i++){
+ tmp = new XVM(host1, "vm"+(i+vmSrcLoad.length),
2048, // Ramsize,
125, // Net bandwidth,
dpRate // Memory intensity
$ $SG_TEST_EXENV ${bindir:=.}/cloud-migration ${srcdir:=.}/small_platform.xml --log=no_loc "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [ 0.000000] (1:master_@Fafard) Test: Migrate a VM with 1000 Mbytes RAM
> [ 0.000000] (3:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
-> [132.740093] (0:maestro@) FIXME: may need a proper handling, 1
> [132.765801] (1:master_@Fafard) VM0 migrated: Fafard->Tremblay in 132.766 s
> [132.765801] (1:master_@Fafard) Test: Migrate a VM with 100 Mbytes RAM
> [132.765801] (5:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
-> [146.086085] (0:maestro@) FIXME: may need a proper handling, 1
> [146.111793] (1:master_@Fafard) VM0 migrated: Fafard->Tremblay in 13.346 s
> [146.111793] (1:master_@Fafard) Test: Migrate two VMs at once from PM0 to PM1
> [146.111793] (9:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
> [146.111793] (11:__pr_mig_tx:VM1(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
-> [411.540563] (0:maestro@) FIXME: may need a proper handling, 1
-> [411.540563] (0:maestro@) FIXME: may need a proper handling, 1
> [411.566271] (8:mig_wrk@Fafard) VM1 migrated: Fafard->Tremblay in 265.454 s
> [411.566271] (6:mig_wrk@Fafard) VM0 migrated: Fafard->Tremblay in 265.454 s
> [10146.111793] (1:master_@Fafard) Test: Migrate two VMs at once to different PMs
> [10146.111793] (15:__pr_mig_tx:VM0(Fafard-Tremblay)@Fafard) use the default max_downtime value 30ms
> [10146.111793] (17:__pr_mig_tx:VM1(Fafard-Bourassa)@Fafard) use the default max_downtime value 30ms
-> [10362.613818] (0:maestro@) FIXME: may need a proper handling, 1
> [10362.620589] (14:mig_wrk@Fafard) VM1 migrated: Fafard->Bourassa in 216.509 s
-> [10411.521626] (0:maestro@) FIXME: may need a proper handling, 1
> [10411.547334] (12:mig_wrk@Fafard) VM0 migrated: Fafard->Tremblay in 265.436 s
> [20146.111793] (0:maestro@) Bye (simulation time 20146.1)
> [Fafard:__pr_mig_tx:VM0(Fafard-Tremblay):(38) 49.010000] [msg_vm/WARNING] use the default max_downtime value 30ms
> [Bourassa:comm_rx:(36) 49.204993] [msg_test/INFO] VM0:comm_tx to Bourassa:comm_rx => 0.204993 sec
> [Tremblay:__pr_mig_tx:VM0(Tremblay-Fafard):(40) 191.674258] [msg_vm/WARNING] use the default max_downtime value 30ms
-> [334.199056] [surf_vm/CRITICAL] FIXME: may need a proper handling, 1
> [Fafard:master_:(1) 339.199251] [msg_test/INFO] ## Test 6 (ended)
> [339.199251] [msg_test/INFO] Bye (simulation time 339.199)
// }
// That explained the bug in a snap, with a very cool example and everything.
//
- // This MC_assert is now desactivated as the case is now properly handled in our code and we don't want the
+ // This MC_assert is now deactivated as the case is now properly handled in our code and we don't want the
// MC to fail any further under that condition, but this comment is here to as a memorial for this first
- // brillant victory of the model-checking in the SimGrid community :)
+ // brilliant victory of the model-checking in the SimGrid community :)
if (task_received != task_sent ||
ans_data->type != TASK_FIND_SUCCESSOR_ANSWER) {
p Testing the Chord implementation with MSG
+! timeout 30
! output sort 19
$ $SG_TEST_EXENV ${bindir:=.}/dht-chord$EXEEXT -nb_bits=6 ${srcdir:=.}/cluster.xml ${srcdir:=.}/../msg/dht-chord/dht-chord_d.xml --log=msg_chord.thres:verbose "--log=root.fmt:[%10.5r]%e(%i:%P@%h)%e%m%n"
> [ 0.00000] (1:node@node-0.acme.org) My finger table:
*/
unsigned int answer_destination_found(answer_t answer)
{
- if (xbt_dynar_length(answer->nodes) < 1) {
+ if (xbt_dynar_is_empty(answer->nodes)) {
return 0;
}
node_contact_t contact_tail = xbt_dynar_get_as(answer->nodes, 0, node_contact_t);
char mailbox[MAILBOX_NAME_SIZE];
snprintf(mailbox,MAILBOX_NAME_SIZE, "%d", id_to_ping);
- unsigned int destination_found = 0;
double timeout = MSG_get_clock() + ping_timeout;
msg_task_t ping_task = task_new_ping(node->id, node->mailbox, MSG_host_get_name(MSG_host_self()));
if (data->type == TASK_PING_ANSWER && id_to_ping == data->sender_id) {
XBT_VERB("Ping to %s succeeded", mailbox);
node_routing_table_update(node, data->sender_id);
- destination_found = 1;
task_free(task_received);
+ return 1; // Destination found, ping succeeded!
} else {
//If it's not our answer, we answer the query anyway.
handle_task(node, task_received);
}
}
- } while (destination_found == 0 && MSG_get_clock() < timeout);
+ } while (MSG_get_clock() < timeout);
if (MSG_get_clock() >= timeout) {
XBT_DEBUG("Ping to %s has timeout.", mailbox);
return 0;
}
- if (destination_found == -1) {
- XBT_DEBUG("It seems that %s is offline...", mailbox);
- return 0;
- }
- return 1;
+ XBT_DEBUG("It seems that %s is offline...", mailbox);
+ return -1;
}
/** @brief Does a pseudo-random lookup for someone in the system
int ready;
msg_comm_t comm_receive; // current communication to receive
xbt_dynar_t pending_tasks;
-} s_node_t, *node_t;
+} s_node_t;
+typedef s_node_t* node_t;
typedef struct s_state {
int id;
int namespace_set[NAMESPACE_SIZE];
int neighborhood_set[NEIGHBORHOOD_SIZE];
int routing_table[LEVELS_COUNT][LEVEL_SIZE];
-} s_state_t, *state_t;
+} s_state_t;
+typedef s_state_t* state_t;
/** Types of tasks exchanged between nodes. */
typedef enum {
//const char* issuer_host_name; // used for logging
int steps;
state_t state;
-} s_task_data_t, *task_data_t;
+} s_task_data_t;
+typedef s_task_data_t* task_data_t;
-static void get_mailbox(int node_id, char* mailbox);
static int domain(unsigned int a, unsigned int level);
static int shl(int a, int b);
static int closest_in_namespace_set(node_t node, int dest);
> [ 30.000000] (0:maestro@) Energy consumption of host MyHost1: 2905.000000 Joules
> [ 30.000000] (0:maestro@) Energy consumption of host MyHost2: 2100.000000 Joules
> [ 30.000000] (0:maestro@) Energy consumption of host MyHost3: 3000.000000 Joules
+
+$ ${bindir:=.}/energy-consumption/energy-consumption$EXEEXT ${srcdir:=.}/../platforms/energy_cluster.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+> [ 0.000000] (1:dvfs_test@MyHost1) Energetic profile: 100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0
+> [ 0.000000] (1:dvfs_test@MyHost1) Initial peak speed=1E+08 flop/s; Energy dissipated =0E+00 J
+> [ 0.000000] (1:dvfs_test@MyHost1) Sleep for 10 seconds
+> [ 10.000000] (1:dvfs_test@MyHost1) Done sleeping (duration: 10.00 s). Current peak speed=1E+08; Energy dissipated=1000.00 J
+> [ 10.000000] (1:dvfs_test@MyHost1) Run a task of 1E+08 flops
+> [ 11.000000] (1:dvfs_test@MyHost1) Task done (duration: 1.00 s). Current peak speed=1E+08 flop/s; Current consumption: from 120W to 200W depending on load; Energy dissipated=1120 J
+> [ 11.000000] (1:dvfs_test@MyHost1) ========= Requesting pstate 2 (speed should be of 2E+07 flop/s and is of 2E+07 flop/s)
+> [ 11.000000] (1:dvfs_test@MyHost1) Run a task of 1E+08 flops
+> [ 16.000000] (1:dvfs_test@MyHost1) Task done (duration: 5.00 s). Current peak speed=2E+07 flop/s; Energy dissipated=1645 J
+> [ 16.000000] (1:dvfs_test@MyHost1) Sleep for 4 seconds
+> [ 20.000000] (1:dvfs_test@MyHost1) Done sleeping (duration: 4.00 s). Current peak speed=2E+07 flop/s; Energy dissipated=2005 J
+> [ 20.000000] (1:dvfs_test@MyHost1) Turning MyHost2 off, and sleeping another 10 seconds. MyHost2 dissipated 2000 J so far.
+> [ 30.000000] (1:dvfs_test@MyHost1) Done sleeping (duration: 10.00 s). Current peak speed=2E+07 flop/s; Energy dissipated=2905 J
+> [ 30.000000] (0:maestro@) Total energy consumption: 5005.000000 Joules (used hosts: 5005.000000 Joules; unused/idle hosts: 0.000000)
+> [ 30.000000] (0:maestro@) Total simulation time: 30.00
+> [ 30.000000] (0:maestro@) Energy consumption of host MyHost1: 2905.000000 Joules
+> [ 30.000000] (0:maestro@) Energy consumption of host MyHost2: 2100.000000 Joules
static int dvfs(int argc, char *argv[])
{
double workload = 100E6;
- int new_pstate = 2;
msg_host_t host = MSG_host_self();
int nb = MSG_host_get_nb_pstates(host);
XBT_INFO("Task1 simulation time: %e", task_time);
// Change power peak
- if ((new_pstate >= nb) || (new_pstate < 0)) {
- XBT_INFO("Cannot set pstate %d, host supports only %d pstates", new_pstate, nb);
- return 0;
- }
+ int new_pstate = 2;
+ xbt_assert(new_pstate < nb, "Cannot set the host %s at pstate %d because it only provides %d pstates.",
+ MSG_host_get_name(host), new_pstate, nb);
double peak_at = MSG_host_get_power_peak_at(host, new_pstate);
XBT_INFO("Changing power peak value to %f (at index %d)", peak_at, new_pstate);
> [ 0.006000] (1:@denise) \Windows\winhlp32.exe size: 10752 bytes
> [ 0.006000] (1:@denise) \Windows\setuperr.log size: 0 bytes
> [ 0.006000] (1:@denise) \Windows\system.ini size: 219 bytes
-> [ 0.006000] (1:@denise) \Windows\hapint.exe size: 382056 bytes
> [ 0.006000] (1:@denise) \Windows\Professional.xml size: 31881 bytes
+> [ 0.006000] (1:@denise) \Windows\hapint.exe size: 382056 bytes
> [ 0.006000] (1:@denise) \Windows\regedit.exe size: 159232 bytes
> [ 0.006000] (1:@denise) \Windows\setupact.log size: 101663 bytes
> [ 0.006000] (1:@denise) \Windows\WindowsUpdate.log size: 1518934 bytes
retcode = MSG_task_execute(task);
if (retcode == MSG_OK) {
XBT_INFO("\"%s\" done", MSG_task_get_name(task));
+ free(task->data);
MSG_task_destroy(task);
} else if (retcode == MSG_HOST_FAILURE) {
XBT_INFO("Gloups. The cpu on which I'm running just turned off!. See you!");
+ free(task->data);
MSG_task_destroy(task);
return 0;
} else {
static int bob(int argc, char *argv[])
{
+ /* this host also tests the properties of the AS*/
+ msg_as_t root = MSG_environment_get_routing_root();
+ XBT_INFO("== Print the properties of the AS");
+ XBT_INFO(" Process property: filename -> %s", MSG_environment_as_get_property_value(root, "filename"));
+ XBT_INFO(" Process property: date -> %s", MSG_environment_as_get_property_value(root, "date"));
+ XBT_INFO(" Process property: author -> %s", MSG_environment_as_get_property_value(root, "author"));
+
/* Get the property list of current bob process */
xbt_dict_t props = MSG_process_get_properties(MSG_process_self());
xbt_dict_cursor_t cursor = NULL;
> [ 0.000000] (0:maestro@) Host 'node-2.acme.org' runs at 1000000000 flops/s
> [ 0.000000] (0:maestro@) Host 'node-3.acme.org' runs at 1000000000 flops/s
> [ 0.000000] (0:maestro@) Host 'node-4.acme.org' runs at 1000000000 flops/s
+> [ 0.000000] (2:bob@host1) == Print the properties of the AS
+> [ 0.000000] (2:bob@host1) Process property: filename -> prop.xml
+> [ 0.000000] (2:bob@host1) Process property: date -> 31-08-12
+> [ 0.000000] (2:bob@host1) Process property: author -> pnavarro
> [ 0.000000] (2:bob@host1) == Print the properties of the process
> [ 0.000000] (2:bob@host1) Process property: SomeProp -> SomeValue
> [ 0.000000] (2:bob@host1) == Try to get a process property that does not exist
<platform version="4">
<cluster id="cluster" prefix="MyHost" radical="1-2" suffix="" speed="100.0Mf,50.0Mf,20.0Mf"
bw="125MBps" lat="50us" bb_bw="2.25GBps" bb_lat="500us">
- <!-- List of min_power:max_power pairs (in Watts) corresponding to the speed consumed when the processor is idle
- and when it is fully loaded -->
- <!-- The list must contain one speed pair for each previously defined pstate-->
+ <!-- List of idle_power:min_power:max_power triples (in Watts) -->
+ <!-- The list must contain one such triple for each previously defined pstate-->
<prop id="watt_per_state" value="100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0" />
<prop id="watt_off" value="10" />
</cluster>
-foreach (example app-masterworker app-token-ring io launching mutex actions-comm)
+foreach (example actions-comm actions-storage actor-create actor-kill actor-migration actor-suspend
+ app-masterworker app-token-ring io mutex )
add_executable (s4u_${example} ${example}/s4u_${example}.cpp)
target_link_libraries(s4u_${example} simgrid)
set_target_properties(s4u_${example} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${example})
set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/${example}/s4u_${example}.cpp)
endforeach()
-set(examples_src ${examples_src} PARENT_SCOPE)
-set(tesh_files ${tesh_files} PARENT_SCOPE)
-set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/launching/deployment.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_d.xml
+# CHORD EXAMPLE
+add_executable (s4u_dht-chord dht-chord/s4u_dht-chord.cpp dht-chord/node.cpp)
+target_link_libraries(s4u_dht-chord simgrid)
+set_target_properties(s4u_dht-chord PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dht-chord)
+foreach (file s4u_dht-chord node)
+ set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/${file}.cpp)
+endforeach()
+set(examples_src ${examples_src} ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u_dht-chord.hpp)
+
+set(examples_src ${examples_src} PARENT_SCOPE)
+set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u_dht-chord.tesh PARENT_SCOPE)
+set(xml_files ${xml_files} ${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_d.xml
${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_d.xml
- ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/s4u_app-masterworker_d.xml PARENT_SCOPE)
+ ${CMAKE_CURRENT_SOURCE_DIR}/actions-storage/s4u_actions-storage_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/actor-create/s4u_actor-create_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/app-masterworker/s4u_app-masterworker_d.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/dht-chord/s4u_dht-chord_d.xml PARENT_SCOPE)
set(txt_files ${txt_files} ${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_p0.txt
${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm_split_p1.txt
${CMAKE_CURRENT_SOURCE_DIR}/actions-comm/s4u_actions-comm.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/actions-storage/s4u_actions-storage.txt
${CMAKE_CURRENT_SOURCE_DIR}/README.doc PARENT_SCOPE)
-foreach(example app-masterworker app-token-ring io launching mutex actions-comm)
+foreach(example actions-comm actions-storage actor-create actor-kill actor-migration actor-suspend
+ app-masterworker app-token-ring dht-chord io mutex )
ADD_TESH_FACTORIES(s4u-${example} "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_CURRENT_BINARY_DIR}/${example} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --cd ${CMAKE_HOME_DIRECTORY}/examples/s4u/${example} s4u_${example}.tesh)
endforeach()
@brief Find the S4U example fitting your needs in the archive.
- @ref s4u_ex_basics
+ - @ref s4u_ex_actors
- @ref s4u_ex_synchro
+ - @ref s4u_ex_actions
@section s4u_ex_basics Basics of SimGrid simulation
- - <b>Launching actors:</b> @ref examples/s4u/launching/s4u_launching.cpp and
- @ref examples/s4u/launching/deployment.xml \n
+ - <b>Creating actors:</b> @ref examples/s4u/actor-create/s4u_actor-create.cpp and
+ @ref examples/s4u/actor-create/s4u_actor-create_d.xml \n
Shows how to start your actors to populate your simulation.
- <b>Token ring:</b> @ref examples/s4u/app-token-ring/s4u_app-token-ring.cpp \n
Shows how to implement a classical communication pattern, where a token is exchanged along a ring to reach every
participant.
- - <b>Master Workers:</b> @ref examples/s4u/app-token-ring/s4u_app-token-ring.cpp \n
+ - <b>Master Workers:</b> @ref examples/s4u/app-masterworker/s4u_app-masterworker.cpp \n
Another good old example, where one Master process has a bunch of task to dispatch to a set of several Worker
processes.
+@section s4u_ex_actors Acting on Actors
+
+ - <b>Creating actors</b>.
+ @ref examples/s4u/actor-create/s4u_actor-create.cpp \n
+ Most actors are started from the deployment XML file, but other methods exist.
+
+ - <b>Suspend and Resume actors</b>.
+ @ref examples/s4u/actor-suspend/s4u_actor-suspend.cpp \n
+ Actors can be suspended and resumed during their execution
+ thanks to the @ref suspend and @ref resume methods.
+
+ - <b>Kill actors</b>.
+ @ref examples/s4u/actor-kill/s4u_actor-kill.cpp \n
+ Actors can forcefully stop other actors with the @ref kill method.
+
+ - <b>Migrating Actors</b>.
+ @ref examples/s4u/actor-migration/s4u_actor-migration.cpp \n
+ Actors can move or be moved from a host to another with the @ref migrate method.
+
@section s4u_ex_synchro Inter-Actor Synchronization
- <b>Mutex: </b> @ref examples/s4u/mutex/s4u_mutex.cpp \n
Shows how to use simgrid::s4u::Mutex synchronization objects.
-
+
+@section s4u_ex_actions Following Workload Traces
+
+This section details how to run trace-driven simulations. It is very
+handy when you want to test an algorithm or protocol that only reacts
+to external events. For example, many P2P protocols react to user
+requests, but do nothing if there is no such event.
+
+In such situations, you should write your protocol in C++, and separate
+the workload that you want to play onto your protocol in a separate
+text file. Declare a function handling each type of the events in your
+trace, register them using @ref xbt_replay_action_register in your
+main, and then run the simulation.
+
+Then, you can either have one trace file containing all your events,
+or a file per simulated process: the former may be easier to work
+with, but the second is more efficient on very large traces. Check
+also the tesh files in the example directories for details.
+
+ - <b>Communication replay</b>.
+ @ref examples/s4u/actions-comm/s4u_actions-comm.cpp \n
+ Presents a set of event handlers reproducing classical communication
+ primitives (asynchronous send/receive at the moment).
+
+ - <b>I/O replay</b>.
+ @ref examples/s4u/actions-storage/s4u_actions-storage.cpp \n
+ Presents a set of event handlers reproducing classical I/O
+ primitives (open, read, close).
+
*/
/**
-@example examples/s4u/launching/s4u_launching.cpp
+@example examples/s4u/actions-comm/s4u_actions-comm.cpp
+@example examples/s4u/actions-storage/s4u_actions-storage.cpp
+@example examples/s4u/actor-create/s4u_actor-create.cpp
+@example examples/s4u/actor-create/s4u_actor-create_d.xml
+@example examples/s4u/actor-kill/s4u_actor-kill.cpp
+@example examples/s4u/actor-migration/s4u_actor-migration.cpp
+@example examples/s4u/actor-suspend/s4u_actor-suspend.cpp
@example examples/s4u/app-token-ring/s4u_app-token-ring.cpp
-@example examples/s4u/app-master-worker/s4u_app-master-worker.cpp
-@example examples/s4u/launching/deployment.xml
+@example examples/s4u/app-masterworker/s4u_app-masterworker.cpp
@example examples/s4u/mutex/s4u_mutex.cpp
xbt_replay_action_register("send", Replayer::send);
xbt_replay_action_register("recv", Replayer::recv);
- /* Actually do the simulation using MSG_action_trace_run */
if (argv[3]) {
simgrid::xbt::action_fs = new std::ifstream(argv[3], std::ifstream::in);
}
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <xbt/replay.hpp>
+#include <xbt/str.h>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(storage_actions, "Messages specific for this example");
+
+static std::unordered_map<std::string, simgrid::s4u::File*> opened_files;
+
+#define ACT_DEBUG(...) \
+ if (XBT_LOG_ISENABLED(storage_actions, xbt_log_priority_verbose)) { \
+ char* NAME = xbt_str_join_array(action, " "); \
+ XBT_DEBUG(__VA_ARGS__); \
+ xbt_free(NAME); \
+ } else \
+ ((void)0)
+
+static void log_action(const char* const* action, double date)
+{
+ if (XBT_LOG_ISENABLED(storage_actions, xbt_log_priority_verbose)) {
+ char* name = xbt_str_join_array(action, " ");
+ XBT_VERB("%s %f", name, date);
+ xbt_free(name);
+ }
+}
+
+static simgrid::s4u::File* get_file_descriptor(const char* file_name)
+{
+ std::string full_name = simgrid::s4u::this_actor::name() + ":" + file_name;
+
+ return opened_files.at(full_name);
+}
+
+class Replayer {
+public:
+ explicit Replayer(std::vector<std::string> args)
+ {
+ int argc;
+ char* argv[2];
+ argv[0] = &args.at(0)[0];
+ if (args.size() == 1) {
+ argc = 1;
+ } else {
+ argc = 2;
+ argv[1] = &args.at(1)[0];
+ }
+ simgrid::xbt::replay_runner(argc, argv);
+ }
+
+ void operator()()
+ {
+ // Nothing to do here
+ }
+
+ /* My actions */
+ static void open(const char* const* action)
+ {
+ const char* file_name = action[2];
+ double clock = simgrid::s4u::Engine::getClock();
+ std::string full_name = simgrid::s4u::this_actor::name() + ":" + file_name;
+
+ ACT_DEBUG("Entering Open: %s (filename: %s)", NAME, file_name);
+ simgrid::s4u::File* file = new simgrid::s4u::File(file_name, NULL);
+
+ opened_files.insert({full_name, file});
+
+ log_action(action, simgrid::s4u::Engine::getClock() - clock);
+ }
+
+ static void read(const char* const* action)
+ {
+ const char* file_name = action[2];
+ sg_size_t size = std::stoul(action[3]);
+ double clock = simgrid::s4u::Engine::getClock();
+
+ simgrid::s4u::File* file = get_file_descriptor(file_name);
+
+ ACT_DEBUG("Entering Read: %s (size: %llu)", NAME, size);
+ file->read(size);
+
+ log_action(action, simgrid::s4u::Engine::getClock() - clock);
+ }
+
+ static void close(const char* const* action)
+ {
+ const char* file_name = action[2];
+ double clock = simgrid::s4u::Engine::getClock();
+
+ simgrid::s4u::File* file = get_file_descriptor(file_name);
+
+ ACT_DEBUG("Entering Close: %s (filename: %s)", NAME, file_name);
+ delete file;
+
+ log_action(action, simgrid::s4u::Engine::getClock() - clock);
+ }
+};
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+
+ xbt_assert(argc > 3, "Usage: %s platform_file deployment_file [action_files]\n"
+ "\texample: %s platform.xml deployment.xml actions # if all actions are in the same file\n"
+ "\t# if actions are in separate files, specified in deployment\n"
+ "\texample: %s platform.xml deployment.xml",
+ argv[0], argv[0], argv[0]);
+
+ e->loadPlatform(argv[1]);
+ e->registerDefault(&simgrid::xbt::replay_runner);
+ e->registerFunction<Replayer>("p0");
+ e->loadDeployment(argv[2]);
+
+ /* Action registration */
+ xbt_replay_action_register("open", Replayer::open);
+ xbt_replay_action_register("read", Replayer::read);
+ xbt_replay_action_register("close", Replayer::close);
+
+ if (argv[3]) {
+ simgrid::xbt::action_fs = new std::ifstream(argv[3], std::ifstream::in);
+ }
+
+ e->run();
+
+ if (argv[3]) {
+ delete simgrid::xbt::action_fs;
+ simgrid::xbt::action_fs = nullptr;
+ }
+
+ XBT_INFO("Simulation time %g", e->getClock());
+
+ return 0;
+}
--- /dev/null
+! output sort 19
+$ ${bindir:=.}/s4u_actions-storage --log=storage_actions.thres=verbose ${srcdir:=.}/storage/storage.xml s4u_actions-storage_d.xml s4u_actions-storage.txt "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (p0@denise) p0 open /home/lib/libsimgrid.so.3.6.2 0.000000
+> [ 0.063552] (p0@denise) p0 read /home/lib/libsimgrid.so.3.6.2 12710497 0.063552
+> [ 0.063552] (p0@denise) p0 close /home/lib/libsimgrid.so.3.6.2 0.000000
+> [ 0.063552] (maestro@) Simulation time 0.0635525
--- /dev/null
+p0 open /home/lib/libsimgrid.so.3.6.2
+p0 read /home/lib/libsimgrid.so.3.6.2 12710497
+p0 close /home/lib/libsimgrid.so.3.6.2
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
+<platform version="4">
+ <process host="denise" function="p0"/>
+</platform>
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-
/* This example shows how to declare and start your actors.
*
- * The first step is to declare the code of your actors (what they do exactly
- * does not matter to this example) and then you ask SimGrid to start your
- * actors. There is three ways of doing so:
- * - Directly, by instantiating your actor as paramter to Actor::create();
+ * The first step is to declare the code of your actors (what they do exactly does not matter to this example) and then
+ * you ask SimGrid to start your actors. There are three ways of doing so:
+ * - Directly, by instantiating your actor as parameter to Actor::create()
* - By first registering your actors before instantiating it;
* - Through the deployment file.
*
- * This example shows all these solutions, even if you obviously should use
- * only one of these solutions to start your actors. The most advised solution
- * is to use a deployment file, as it creates a clear separation between your
- * application and the settings to test it. This is a better scientific
- * methodology. Actually, starting an actor with Actor::create() is mostly
- * useful to start an actor from another actor.
- *
+ * This example shows all these solutions, even if you obviously should use only one of these solutions to start your
+ * actors. The most advised solution is to use a deployment file, as it creates a clear separation between your
+ * application and the settings to test it. This is a better scientific methodology. Actually, starting an actor with
+ * Actor::create() is mostly useful to start an actor from another actor.
*/
#include <simgrid/s4u.hpp>
// This declares a logging channel so that XBT_INFO can be used later
-XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_launching_test, "The logging channel used in this example");
-
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor_create, "The logging channel used in this example");
/* Declares a first class of actors which sends a message to the mailbox 'mb42'.
* The sent message is what was passed as parameter on creation (or 'GaBuZoMeu' by default)
class Sender {
public:
std::string msg = "GaBuZoMeu";
- explicit Sender() {
- /* Constructor used when no parameter is passed to the actor */
- };
- explicit Sender(std::vector<std::string> args) {
+ explicit Sender() = default;
+ explicit Sender(std::vector<std::string> args)
+ {
/* This constructor is used when we pass parameters to the actor */
if (args.size() > 0)
msg = args[0];
}
- void operator()() {
+ void operator()()
+ {
XBT_INFO("Hello s4u, I have something to send");
simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName("mb42");
}
};
-
/* Declares a second class of actor which receive two messages on the mailbox which
* name is passed as parameter ('thingy' by default, ie the wrong one).
*
simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName("thingy");
explicit Receiver() = default;
- explicit Receiver(std::vector<std::string> args) {
+ explicit Receiver(std::vector<std::string> args)
+ {
/* This constructor is used when we pass parameters to the actor */
/* as with argc/argv, args[0] is the actor's name, so the first parameter is args[1] */
if (args.size() > 1)
mailbox = simgrid::s4u::Mailbox::byName(args[1]);
}
- void operator()() {
+ void operator()()
+ {
XBT_INFO("Hello s4u, I'm ready to get any message you'd want on %s", mailbox->name());
- char *msg1 = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox));
- char *msg2 = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox));
- XBT_INFO("I received '%s' and '%s'",msg1,msg2);
+ char* msg1 = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox));
+ char* msg2 = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox));
+ XBT_INFO("I received '%s' and '%s'", msg1, msg2);
+ xbt_free(msg1);
+ xbt_free(msg2);
XBT_INFO("I'm done. See you.");
}
};
-
/* Here comes the main function of your program */
-int main(int argc, char **argv) {
+int main(int argc, char** argv)
+{
/* When your program starts, you have to first start a new simulation engine, as follows */
- simgrid::s4u::Engine *e = new simgrid::s4u::Engine(&argc,argv);
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
/* Then you should load a platform file, describing your simulated platform */
e->loadPlatform("../../platforms/small_platform.xml");
/* The second way is to first register your function, and then retrieve it */
e->registerFunction<Sender>("sender"); // The sender is passed as a template parameter here
- std::vector<std::string> args; // Here we declare the parameter that the actor will get
- args.push_back("GloubiBoulga"); // Add a parameter to the set (we could have done it in the first approach too)
+ std::vector<std::string> args; // Here we declare the parameter that the actor will get
+ args.push_back("GloubiBoulga"); // Add a parameter to the set (we could have done it in the first approach too)
simgrid::s4u::Actor::createActor("sender2", simgrid::s4u::Host::by_name("Jupiter"), "sender", args);
/* The third way to start your actors is to use a deployment file. */
- e->registerFunction<Receiver>("receiver"); // You first have to register the actor as with the second approach
- e->loadDeployment("deployment.xml"); // And then, you load the deployment file
+ e->registerFunction<Receiver>("receiver"); // You first have to register the actor as with the second approach
+ e->loadDeployment("s4u_actor-create_d.xml"); // And then, you load the deployment file
/* Once every actors are started in the engine, the simulation can start */
e->run();
--- /dev/null
+#! ./tesh
+
+$ $SG_TEST_EXENV ${bindir:=.}/s4u_actor-create
+> [Tremblay:sender1:(0) 0.000000] [s4u_actor_create/INFO] Hello s4u, I have something to send
+> [Jupiter:sender2:(0) 0.000000] [s4u_actor_create/INFO] Hello s4u, I have something to send
+> [Fafard:receiver:(0) 0.000000] [s4u_actor_create/INFO] Hello s4u, I'm ready to get any message you'd want on mb42
+> [Tremblay:sender1:(0) 0.025709] [s4u_actor_create/INFO] I'm done. See you.
+> [Jupiter:sender2:(0) 0.070434] [s4u_actor_create/INFO] I'm done. See you.
+> [Fafard:receiver:(0) 0.070434] [s4u_actor_create/INFO] I received 'GaBuZoMeu' and 'GloubiBoulga'
+> [Fafard:receiver:(0) 0.070434] [s4u_actor_create/INFO] I'm done. See you.
--- /dev/null
+/* Copyright (c) 2017 The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <simgrid/s4u.hpp>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor_kill, "Messages specific for this s4u example");
+
+static void victim()
+{
+ XBT_INFO("Hello!");
+ XBT_INFO("Suspending myself");
+ simgrid::s4u::this_actor::suspend(); /* - Start by suspending itself */
+ XBT_INFO("OK, OK. Let's work"); /* - Then is resumed and start to execute a task */
+ simgrid::s4u::this_actor::execute(1e9);
+ XBT_INFO("Bye!"); /* - But will never reach the end of it */
+}
+
+static void killer()
+{
+ XBT_INFO("Hello!"); /* - First start a victim process */
+ simgrid::s4u::ActorPtr poor_victim =
+ simgrid::s4u::Actor::createActor("victim", simgrid::s4u::Host::by_name("Fafard"), victim);
+ simgrid::s4u::this_actor::sleep_for(10); /* - Wait for 10 seconds */
+
+ XBT_INFO("Resume process"); /* - Resume it from its suspended state */
+ poor_victim->resume();
+
+ XBT_INFO("Kill process"); /* - and then kill it */
+ poor_victim->kill();
+
+ XBT_INFO("OK, goodbye now. I commit a suicide.");
+ simgrid::s4u::this_actor::kill();
+
+ XBT_INFO("This line will never get displayed: I'm already dead since the previous line.");
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s msg_platform.xml\n", argv[0], argv[0]);
+
+ e->loadPlatform(argv[1]); /* - Load the platform description */
+ /* - Create and deploy killer process, that will create the victim process */
+ simgrid::s4u::Actor::createActor("killer", simgrid::s4u::Host::by_name("Tremblay"), killer);
+
+ e->run(); /* - Run the simulation */
+
+ XBT_INFO("Simulation time %g", e->getClock());
+ return 0;
+}
--- /dev/null
+#! ./tesh
+
+$ $SG_TEST_EXENV ${bindir:=.}/s4u_actor-kill ${srcdir:=.}/small_platform.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (killer@Tremblay) Hello!
+> [ 0.000000] (victim@Fafard) Hello!
+> [ 0.000000] (victim@Fafard) Suspending myself
+> [ 10.000000] (killer@Tremblay) Resume process
+> [ 10.000000] (killer@Tremblay) Kill process
+> [ 10.000000] (killer@Tremblay) OK, goodbye now. I commit a suicide.
+> [ 10.000000] (maestro@) Simulation time 10
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <simgrid/s4u.hpp>
+#include <simgrid/s4u/Mutex.hpp>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor_migration, "Messages specific for this s4u example");
+
+simgrid::s4u::MutexPtr checkpoint = nullptr;
+simgrid::s4u::ConditionVariablePtr identification = nullptr;
+static simgrid::s4u::ActorPtr controlled_process = nullptr;
+
+/* The Emigrant process will be moved from host to host. */
+static void emigrant()
+{
+ XBT_INFO("I'll look for a new job on another machine ('Boivin') where the grass is greener.");
+ simgrid::s4u::this_actor::migrate(
+ simgrid::s4u::Host::by_name("Boivin")); /* - First, move to another host by myself */
+
+ XBT_INFO("Yeah, found something to do");
+ simgrid::s4u::this_actor::execute(98095000);
+ simgrid::s4u::this_actor::sleep_for(2);
+
+ XBT_INFO("Moving back home after work");
+ simgrid::s4u::this_actor::migrate(simgrid::s4u::Host::by_name("Jacquelin")); /* - Move back to original location */
+
+ simgrid::s4u::this_actor::migrate(simgrid::s4u::Host::by_name("Boivin")); /* - Go back to the other host to sleep*/
+ simgrid::s4u::this_actor::sleep_for(4);
+
+ checkpoint->lock(); /* - Get controlled at checkpoint */
+ controlled_process = simgrid::s4u::Actor::self(); /* - and get moved back by the policeman process */
+ identification->notify_all();
+ checkpoint->unlock();
+
+ simgrid::s4u::this_actor::suspend();
+
+ XBT_INFO("I've been moved on this new host: %s", simgrid::s4u::this_actor::host()->cname());
+ XBT_INFO("Uh, nothing to do here. Stopping now");
+}
+
+/* The policeman check for emigrants and move them back to 'Jacquelin' */
+static void policeman()
+{
+ checkpoint->lock();
+
+ XBT_INFO("Wait at the checkpoint."); /* - block on the mutex+condition */
+ while (controlled_process == nullptr)
+ identification->wait(checkpoint);
+
+ controlled_process->migrate(simgrid::s4u::Host::by_name("Jacquelin")); /* - Move an emigrant to Jacquelin */
+ XBT_INFO("I moved the emigrant");
+ controlled_process->resume();
+
+ checkpoint->unlock();
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s msg_platform.xml\n", argv[0], argv[0]);
+ e->loadPlatform(argv[1]); /* - Load the platform description */
+
+ /* - Create and deploy the emigrant and policeman processes */
+ simgrid::s4u::Actor::createActor("emigrant", simgrid::s4u::Host::by_name("Jacquelin"), emigrant);
+ simgrid::s4u::Actor::createActor("policeman", simgrid::s4u::Host::by_name("Boivin"), policeman);
+
+ checkpoint = simgrid::s4u::Mutex::createMutex(); /* - Initiate the mutex and conditions */
+ identification = simgrid::s4u::ConditionVariable::createConditionVariable();
+ e->run();
+
+ XBT_INFO("Simulation time %g", e->getClock());
+
+ return 0;
+}
--- /dev/null
+#! ./tesh
+
+p Testing the migration feature of MSG
+
+! output sort 19
+$ $SG_TEST_EXENV ${bindir:=.}/s4u_actor-migration ${srcdir:=.}/small_platform.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (emigrant@Jacquelin) I'll look for a new job on another machine ('Boivin') where the grass is greener.
+> [ 0.000000] (emigrant@Boivin) Yeah, found something to do
+> [ 0.000000] (policeman@Boivin) Wait at the checkpoint.
+> [ 3.000000] (emigrant@Boivin) Moving back home after work
+> [ 7.000000] (maestro@) Simulation time 7
+> [ 7.000000] (emigrant@Jacquelin) I've been moved on this new host: Jacquelin
+> [ 7.000000] (emigrant@Jacquelin) Uh, nothing to do here. Stopping now
+> [ 7.000000] (policeman@Boivin) I moved the emigrant
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <simgrid/s4u.hpp>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor_suspend, "Messages specific for this s4u example");
+
+/* The Lazy guy only wants to sleep, but can be awakened by the dream_master process. */
+static void lazy_guy()
+{
+ XBT_INFO("Nobody's watching me ? Let's go to sleep.");
+ simgrid::s4u::this_actor::suspend(); /* - Start by suspending itself */
+ XBT_INFO("Uuuh ? Did somebody call me ?");
+
+  XBT_INFO("Going to sleep..."); /* - Then repetitively go to sleep, but gets awakened */
+ simgrid::s4u::this_actor::sleep_for(10);
+ XBT_INFO("Mmm... waking up.");
+
+ XBT_INFO("Going to sleep one more time (for 10 sec)...");
+ simgrid::s4u::this_actor::sleep_for(10);
+ XBT_INFO("Waking up once for all!");
+
+ XBT_INFO("Ok, let's do some work, then (for 10 sec on Boivin).");
+ simgrid::s4u::this_actor::execute(980.95e6);
+
+ XBT_INFO("Mmmh, I'm done now. Goodbye.");
+}
+
+/* The Dream master: */
+static void dream_master()
+{
+ XBT_INFO("Let's create a lazy guy."); /* - Create a lazy_guy process */
+ simgrid::s4u::ActorPtr lazy = simgrid::s4u::Actor::createActor("Lazy", simgrid::s4u::this_actor::host(), lazy_guy);
+ XBT_INFO("Let's wait a little bit...");
+ simgrid::s4u::this_actor::sleep_for(10); /* - Wait for 10 seconds */
+ XBT_INFO("Let's wake the lazy guy up! >:) BOOOOOUUUHHH!!!!");
+ lazy->resume(); /* - Then wake up the lazy_guy */
+
+ simgrid::s4u::this_actor::sleep_for(5); /* Repeat two times: */
+ XBT_INFO("Suspend the lazy guy while he's sleeping...");
+ lazy->suspend(); /* - Suspend the lazy_guy while he's asleep */
+ XBT_INFO("Let him finish his siesta.");
+ simgrid::s4u::this_actor::sleep_for(10); /* - Wait for 10 seconds */
+ XBT_INFO("Wake up, lazy guy!");
+ lazy->resume(); /* - Then wake up the lazy_guy again */
+
+ simgrid::s4u::this_actor::sleep_for(5);
+ XBT_INFO("Suspend again the lazy guy while he's sleeping...");
+ lazy->suspend();
+ XBT_INFO("This time, don't let him finish his siesta.");
+ simgrid::s4u::this_actor::sleep_for(2);
+ XBT_INFO("Wake up, lazy guy!");
+ lazy->resume();
+
+ simgrid::s4u::this_actor::sleep_for(5);
+ XBT_INFO("Give a 2 seconds break to the lazy guy while he's working...");
+ lazy->suspend();
+ simgrid::s4u::this_actor::sleep_for(2);
+ XBT_INFO("Back to work, lazy guy!");
+ lazy->resume();
+
+ XBT_INFO("OK, I'm done here.");
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s msg_platform.xml\n", argv[0], argv[0]);
+
+ e->loadPlatform(argv[1]); /* - Load the platform description */
+ std::vector<simgrid::s4u::Host*> list;
+ e->hostList(&list);
+ simgrid::s4u::Actor::createActor("dream_master", list.front(), dream_master);
+
+ e->run(); /* - Run the simulation */
+
+ return 0;
+}
--- /dev/null
+#! ./tesh
+
+p Testing the suspend/resume feature of MSG
+
+! output sort 19
+$ $SG_TEST_EXENV ${bindir:=.}/s4u_actor-suspend ${srcdir:=.}/small_platform.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (dream_master@Boivin) Let's create a lazy guy.
+> [ 0.000000] (Lazy@Boivin) Nobody's watching me ? Let's go to sleep.
+> [ 0.000000] (dream_master@Boivin) Let's wait a little bit...
+> [ 10.000000] (dream_master@Boivin) Let's wake the lazy guy up! >:) BOOOOOUUUHHH!!!!
+> [ 10.000000] (Lazy@Boivin) Uuuh ? Did somebody call me ?
+> [ 10.000000] (Lazy@Boivin) Going to sleep...
+> [ 15.000000] (dream_master@Boivin) Suspend the lazy guy while he's sleeping...
+> [ 15.000000] (dream_master@Boivin) Let him finish his siesta.
+> [ 25.000000] (dream_master@Boivin) Wake up, lazy guy!
+> [ 25.000000] (Lazy@Boivin) Mmm... waking up.
+> [ 25.000000] (Lazy@Boivin) Going to sleep one more time (for 10 sec)...
+> [ 30.000000] (dream_master@Boivin) Suspend again the lazy guy while he's sleeping...
+> [ 30.000000] (dream_master@Boivin) This time, don't let him finish his siesta.
+> [ 32.000000] (dream_master@Boivin) Wake up, lazy guy!
+> [ 35.000000] (Lazy@Boivin) Waking up once for all!
+> [ 35.000000] (Lazy@Boivin) Ok, let's do some work, then (for 10 sec on Boivin).
+> [ 37.000000] (dream_master@Boivin) Give a 2 seconds break to the lazy guy while he's working...
+> [ 39.000000] (dream_master@Boivin) Back to work, lazy guy!
+> [ 39.000000] (dream_master@Boivin) OK, I'm done here.
+> [ 47.000000] (Lazy@Boivin) Mmmh, I'm done now. Goodbye.
break;
}
/* - Otherwise, process the task */
- double comp_size = xbt_str_parse_double(res, nullptr);
+ double comp_size = std::stod(res);
xbt_free(res);
simgrid::s4u::this_actor::execute(comp_size);
}
--- /dev/null
+/* Copyright (c) 2010-2016. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "s4u_dht-chord.hpp"
+
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(s4u_chord);
+
+/* Returns whether an id belongs to the interval [start, end].
+ *
+ * The parameters are normalized to make sure they are between 0 and nb_keys - 1.
+ * 1 belongs to [62, 3]
+ * 1 does not belong to [3, 62]
+ * 63 belongs to [62, 3]
+ * 63 does not belong to [3, 62]
+ * 24 belongs to [21, 29]
+ * 24 does not belong to [29, 21]
+ *
+ * \param id id to check
+ * \param start lower bound
+ * \param end upper bound
+ * \return a non-zero value if id is in [start, end]
+ */
+static int is_in_interval(int id, int start, int end)
+{
+ int i = id % nb_keys;
+ int s = start % nb_keys;
+ int e = end % nb_keys;
+
+ // make sure end >= start and id >= start
+ if (e < s) {
+ e += nb_keys;
+ }
+
+ if (i < s) {
+ i += nb_keys;
+ }
+
+ return i <= e;
+}
+
+/* Initializes the current node as the first one of the system */
+Node::Node(std::vector<std::string> args)
+{
+ xbt_assert(args.size() == 3 || args.size() == 5, "Wrong number of arguments for this node");
+
+ // initialize my node
+ id_ = std::stoi(args[1]);
+ stream = simgrid::s4u::this_actor::host()->extension<HostChord>()->getStream();
+ mailbox_ = simgrid::s4u::Mailbox::byName(std::to_string(id_));
+ next_finger_to_fix = 0;
+ fingers_ = new int[nb_bits];
+
+ for (int i = 0; i < nb_bits; i++) {
+ fingers_[i] = id_;
+ }
+
+ if (args.size() == 3) { // first ring
+ deadline_ = std::stod(args[2]);
+ start_time_ = simgrid::s4u::Engine::getClock();
+ XBT_DEBUG("Create a new Chord ring...");
+ } else {
+ known_id_ = std::stoi(args[2]);
+ start_time_ = std::stod(args[3]);
+ deadline_ = std::stod(args[4]);
+ XBT_DEBUG("Hey! Let's join the system in %f seconds (shall leave at time %f)", start_time_,
+ start_time_ + deadline_);
+ }
+}
+
+Node::~Node()
+{
+ delete[] fingers_;
+}
+/* Makes the current node join the ring, knowing the id of a node already in the ring
+ *
+ * \param known_id id of a node already in the ring
+ * \return true if the join operation succeeded
+ * */
+
+void Node::join(int known_id)
+{
+ XBT_INFO("Joining the ring with id %d, knowing node %d", id_, known_id);
+ setPredecessor(-1); // no predecessor (yet)
+
+ int successor_id = remoteFindSuccessor(known_id, id_);
+ if (successor_id == -1) {
+ XBT_INFO("Cannot join the ring.");
+ } else {
+ setFinger(0, successor_id);
+ printFingerTable();
+ joined = true;
+ }
+}
+
+/* Makes the current node quit the system */
+void Node::leave()
+{
+ XBT_INFO("Well Guys! I Think it's time for me to leave ;)");
+ notifyAndQuit();
+ joined = false;
+}
+
+/* Notifies the successor and the predecessor of the current node before leaving */
+void Node::notifyAndQuit()
+{
+ // send the PREDECESSOR_LEAVING to our successor
+ ChordMessage* pred_msg = new ChordMessage(PREDECESSOR_LEAVING);
+ pred_msg->request_id = pred_id_;
+ pred_msg->answer_to = mailbox_;
+
+ XBT_DEBUG("Sending a 'PREDECESSOR_LEAVING' to my successor %d", fingers_[0]);
+ try {
+ simgrid::s4u::this_actor::send(simgrid::s4u::Mailbox::byName(std::to_string(fingers_[0])), pred_msg, 10, timeout);
+ } catch (xbt_ex& e) {
+ if (e.category == timeout_error) {
+ XBT_DEBUG("Timeout expired when sending a 'PREDECESSOR_LEAVING' to my successor %d", fingers_[0]);
+ delete pred_msg;
+ }
+ }
+
+ // send the SUCCESSOR_LEAVING to our predecessor
+ ChordMessage* succ_msg = new ChordMessage(SUCCESSOR_LEAVING);
+ succ_msg->request_id = fingers_[0];
+ succ_msg->answer_to = mailbox_;
+ XBT_DEBUG("Sending a 'SUCCESSOR_LEAVING' to my predecessor %d", pred_id_);
+
+ try {
+ simgrid::s4u::this_actor::send(simgrid::s4u::Mailbox::byName(std::to_string(pred_id_)), succ_msg, 10, timeout);
+ } catch (xbt_ex& e) {
+ if (e.category == timeout_error) {
+ XBT_DEBUG("Timeout expired when sending a 'SUCCESSOR_LEAVING' to my predecessor %d", pred_id_);
+ delete succ_msg;
+ }
+ }
+}
+
+/* Performs a find successor request to a random id */
+void Node::randomLookup()
+{
+ int res = id_;
+ int random_index = RngStream_RandInt(stream, 0, nb_bits - 1);
+ int random_id = fingers_[random_index];
+ XBT_DEBUG("Making a lookup request for id %d", random_id);
+ if (random_id != id_)
+ res = findSuccessor(random_id);
+ XBT_DEBUG("The successor of node %d is %d", random_id, res);
+}
+
+/* Sets a finger of the current node.
+ *
+ * \param node the current node
+ * \param finger_index index of the finger to set (0 to nb_bits - 1)
+ * \param id the id to set for this finger
+ */
+void Node::setFinger(int finger_index, int id)
+{
+ if (id != fingers_[finger_index]) {
+ fingers_[finger_index] = id;
+ XBT_VERB("My new finger #%d is %d", finger_index, id);
+ }
+}
+
+/* Sets the predecessor of the current node.
+ * \param predecessor_id the id of the new predecessor, or -1 to unset the predecessor
+ */
+void Node::setPredecessor(int predecessor_id)
+{
+ if (predecessor_id != pred_id_) {
+ pred_id_ = predecessor_id;
+ XBT_VERB("My new predecessor is %d", predecessor_id);
+ }
+}
+
+/** refreshes the finger table of the current node (called periodically) */
+void Node::fixFingers()
+{
+ XBT_DEBUG("Fixing fingers");
+ int id = findSuccessor(id_ + powers2[next_finger_to_fix]);
+ if (id != -1) {
+ if (id != fingers_[next_finger_to_fix]) {
+ setFinger(next_finger_to_fix, id);
+ printFingerTable();
+ }
+ next_finger_to_fix = (next_finger_to_fix + 1) % nb_bits;
+ }
+}
+
+/** Displays the finger table of a node (verbose log level only). */
+void Node::printFingerTable()
+{
+  // Skip the formatting work entirely when verbose logging is disabled.
+  if (XBT_LOG_ISENABLED(s4u_chord, xbt_log_priority_verbose)) {
+    XBT_VERB("My finger table:");
+    XBT_VERB("Start | Succ");
+    for (int i = 0; i < nb_bits; i++) {
+      XBT_VERB(" %3d | %3d", (id_ + powers2[i]) % nb_keys, fingers_[i]);
+    }
+
+    XBT_VERB("Predecessor: %d", pred_id_);
+  }
+}
+
+/* Checks whether the predecessor has failed (called periodically).
+ *
+ * Sends a PREDECESSOR_ALIVE probe to pred_id_ and waits (with timeout) for
+ * the answer on a dedicated "<id>_is_alive" mailbox. On timeout, the
+ * predecessor is assumed dead and pred_id_ is reset to -1.
+ */
+void Node::checkPredecessor()
+{
+  XBT_DEBUG("Checking whether my predecessor is alive");
+  void* data = nullptr;
+  if (pred_id_ == -1) // no predecessor: nothing to check
+    return;
+
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(pred_id_));
+  // dedicated answer mailbox, so regular traffic on my main mailbox does not interfere
+  simgrid::s4u::MailboxPtr return_mailbox = simgrid::s4u::Mailbox::byName(std::to_string(id_) + "_is_alive");
+
+  ChordMessage* message = new ChordMessage(PREDECESSOR_ALIVE);
+  message->request_id = pred_id_;
+  message->answer_to = return_mailbox;
+
+  XBT_DEBUG("Sending a 'Predecessor Alive' request to my predecessor %d", pred_id_);
+  try {
+    simgrid::s4u::this_actor::send(mailbox, message, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to send the 'Predecessor Alive' request to %d", pred_id_);
+      delete message;
+      return;
+    }
+  }
+  // receive the answer
+  // NOTE(review): the peer echoes back the very same ChordMessage object
+  // (see handleMessage, case PREDECESSOR_ALIVE), which is why deleting
+  // 'message' below also frees the received answer.
+  XBT_DEBUG("Sent 'Predecessor Alive' request to %d, waiting for the answer on my mailbox '%s'", pred_id_,
+            message->answer_to->name());
+  simgrid::s4u::Comm& comm = simgrid::s4u::this_actor::irecv(return_mailbox, &data);
+
+  try {
+    comm.wait(timeout);
+    XBT_DEBUG("Received the answer to my 'Predecessor Alive': my predecessor %d is alive", pred_id_);
+    delete message;
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to receive the answer to my 'Predecessor Alive' request");
+      pred_id_ = -1; // assume the predecessor is dead
+      // NOTE(review): a late in-flight answer is not freed here -- TODO confirm
+    }
+  }
+}
+
+/* Asks a remote node for its predecessor.
+ *
+ * \param ask_to the node to ask to
+ * \return the id of its predecessor node, or -1 if the request failed (or if the node does not know its predecessor)
+ */
+int Node::remoteGetPredecessor(int ask_to)
+{
+  int predecessor_id = -1;
+  void* data = nullptr;
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(ask_to));
+  // dedicated answer mailbox, so regular traffic on my main mailbox does not interfere
+  simgrid::s4u::MailboxPtr return_mailbox = simgrid::s4u::Mailbox::byName(std::to_string(id_) + "_pred");
+
+  ChordMessage* message = new ChordMessage(GET_PREDECESSOR);
+  message->request_id = id_;
+  message->answer_to = return_mailbox;
+
+  // send a "Get Predecessor" request to ask_to_id
+  XBT_DEBUG("Sending a 'Get Predecessor' request to %d", ask_to);
+  try {
+    simgrid::s4u::this_actor::send(mailbox, message, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to send the 'Get Predecessor' request to %d", ask_to);
+      delete message;
+      return predecessor_id;
+    }
+  }
+
+  // receive the answer
+  XBT_DEBUG("Sent 'Get Predecessor' request to %d, waiting for the answer on my mailbox '%s'", ask_to,
+            message->answer_to->name());
+  simgrid::s4u::Comm& comm = simgrid::s4u::this_actor::irecv(return_mailbox, &data);
+
+  try {
+    comm.wait(timeout);
+    ChordMessage* answer = static_cast<ChordMessage*>(data);
+    XBT_DEBUG("Received the answer to my 'Get Predecessor' request: the predecessor of node %d is %d", ask_to,
+              answer->answer_id);
+    predecessor_id = answer->answer_id;
+    delete answer;
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to receive the answer to my 'Get Predecessor' request");
+      // on timeout, data is normally still nullptr; deleting nullptr is a no-op
+      delete static_cast<ChordMessage*>(data);
+    }
+  }
+
+  return predecessor_id;
+}
+
+/* Returns the closest preceding finger of an id with respect to the finger table of the current node.
+ *
+ * \param id the id to find
+ * \return the closest preceding finger of that id, or my own id when no finger precedes it
+ */
+int Node::closestPrecedingFinger(int id)
+{
+  // scan the fingers from the farthest to the closest
+  for (int i = nb_bits - 1; i >= 0; i--) {
+    if (is_in_interval(fingers_[i], id_ + 1, id - 1)) {
+      return fingers_[i];
+    }
+  }
+  return id_;
+}
+
+/* Makes the current node find the successor node of an id.
+ *
+ * \param id the id to find
+ * \return the id of the successor node, or -1 if the request failed
+ */
+int Node::findSuccessor(int id)
+{
+  // is my successor the successor?
+  if (is_in_interval(id, id_ + 1, fingers_[0])) {
+    return fingers_[0];
+  }
+
+  // otherwise, ask the closest preceding finger in my table
+  return remoteFindSuccessor(closestPrecedingFinger(id), id);
+}
+
+/* Asks a remote node to find the successor of an id.
+ *
+ * \param ask_to the node to ask to
+ * \param id the id whose successor is looked up
+ * \return the id of the successor node, or -1 if the request failed
+ */
+int Node::remoteFindSuccessor(int ask_to, int id)
+{
+  int successor = -1;
+  void* data = nullptr;
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(ask_to));
+  // dedicated answer mailbox, so regular traffic on my main mailbox does not interfere
+  simgrid::s4u::MailboxPtr return_mailbox = simgrid::s4u::Mailbox::byName(std::to_string(id_) + "_succ");
+
+  ChordMessage* message = new ChordMessage(FIND_SUCCESSOR);
+  // The handler (handleMessage, case FIND_SUCCESSOR) reads request_id as the key
+  // whose successor must be found, so it must carry 'id' -- not this node's id.
+  message->request_id = id;
+  message->answer_to = return_mailbox;
+
+  // send a "Find Successor" request to ask_to_id
+  XBT_DEBUG("Sending a 'Find Successor' request to %d for id %d", ask_to, id);
+  try {
+    simgrid::s4u::this_actor::send(mailbox, message, 10, timeout);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to send the 'Find Successor' request to %d for id %d", ask_to, id);
+      delete message;
+      return successor;
+    }
+  }
+  // receive the answer
+  XBT_DEBUG("Sent a 'Find Successor' request to %d for key %d, waiting for the answer", ask_to, id);
+  simgrid::s4u::Comm& comm = simgrid::s4u::this_actor::irecv(return_mailbox, &data);
+
+  try {
+    comm.wait(timeout);
+    ChordMessage* answer = static_cast<ChordMessage*>(data);
+    XBT_DEBUG("Received the answer to my 'Find Successor' request for id %d: the successor of key %d is %d",
+              answer->request_id, id, answer->answer_id);
+    successor = answer->answer_id;
+    delete answer;
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Failed to receive the answer to my 'Find Successor' request");
+      // on timeout, data is normally still nullptr; deleting nullptr is a no-op
+      delete static_cast<ChordMessage*>(data);
+    }
+  }
+  return successor;
+}
+
+/* Notifies the current node that its predecessor may have changed.
+ *
+ * The candidate is accepted when I currently have no predecessor, or when it
+ * lies strictly between my current predecessor and me on the ring.
+ */
+void Node::notify(int predecessor_candidate_id)
+{
+  if (pred_id_ == -1 || is_in_interval(predecessor_candidate_id, pred_id_ + 1, id_ - 1)) {
+    setPredecessor(predecessor_candidate_id);
+    printFingerTable();
+  } else {
+    XBT_DEBUG("I don't have to change my predecessor to %d", predecessor_candidate_id);
+  }
+}
+
+/* Notifies a remote node that its predecessor may have changed. */
+void Node::remoteNotify(int notify_id, int predecessor_candidate_id)
+{
+  ChordMessage* message = new ChordMessage(NOTIFY);
+  message->request_id = predecessor_candidate_id;
+  message->answer_to = nullptr; // fire-and-forget: no answer expected
+
+  // send a "Notify" request to notify_id
+  XBT_DEBUG("Sending a 'Notify' request to %d", notify_id);
+  simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(notify_id));
+  try {
+    // TODO make it a dsend
+    // NOTE(review): isend() returns immediately, so it looks unlikely that a
+    // timeout_error can be raised here -- confirm before relying on this catch.
+    simgrid::s4u::this_actor::isend(mailbox, message, 10);
+  } catch (xbt_ex& e) {
+    if (e.category == timeout_error) {
+      XBT_DEBUG("Send of 'Notify' failed due to an expired timeout on receiver side");
+      delete message;
+    }
+  }
+}
+
+/* This function is called periodically. It checks the immediate successor of the current node. */
+void Node::stabilize()
+{
+  XBT_DEBUG("Stabilizing node");
+
+  // get the predecessor of my immediate successor
+  int candidate_id;
+  int successor_id = fingers_[0];
+  if (successor_id != id_) {
+    candidate_id = remoteGetPredecessor(successor_id);
+  } else {
+    // I am my own successor: my predecessor is the candidate
+    candidate_id = pred_id_;
+  }
+
+  // this node is a candidate to become my new successor
+  if (candidate_id != -1 && is_in_interval(candidate_id, id_ + 1, successor_id - 1)) {
+    setFinger(0, candidate_id);
+  }
+  if (successor_id != id_) {
+    // tell my (possibly new) successor that I may be its predecessor
+    remoteNotify(successor_id, id_);
+  }
+}
+
+/* This function is called when a node receives a message.
+ *
+ * \param message the message to handle (don't touch it afterward: it will be destroyed, reused or forwarded)
+ */
+void Node::handleMessage(ChordMessage* message)
+{
+  switch (message->type) {
+  case FIND_SUCCESSOR:
+    XBT_DEBUG("Received a 'Find Successor' request from %s for id %d", message->issuer_host_name.c_str(),
+              message->request_id);
+    // is my successor the successor?
+    if (is_in_interval(message->request_id, id_ + 1, fingers_[0])) {
+      // reuse the request object as the answer, sent back to the requester
+      message->type = FIND_SUCCESSOR_ANSWER;
+      message->answer_id = fingers_[0];
+      XBT_DEBUG("Sending back a 'Find Successor Answer' to %s (mailbox %s): the successor of %d is %d",
+                message->issuer_host_name.c_str(), message->answer_to->name(), message->request_id, message->answer_id);
+      // TODO Replace by dsend
+      try {
+        simgrid::s4u::this_actor::isend(message->answer_to, message, 10);
+      } catch (xbt_ex& e) {
+        if (e.category == timeout_error) {
+          XBT_DEBUG("Send of 'Find Successor Answer' failed due to an expired timeout on receiver side");
+        }
+      }
+    } else {
+      // otherwise, forward the request to the closest preceding finger in my table
+      int closest = closestPrecedingFinger(message->request_id);
+      XBT_DEBUG("Forwarding the 'Find Successor' request for id %d to my closest preceding finger %d",
+                message->request_id, closest);
+      simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(std::to_string(closest));
+      // TODO make it a dsend
+      try {
+        simgrid::s4u::this_actor::isend(mailbox, message, 10);
+      } catch (xbt_ex& e) {
+        if (e.category == timeout_error) {
+          XBT_DEBUG("Forward of 'Find Successor' failed due to an expired timeout on receiver side");
+        }
+      }
+    }
+    break;
+
+  case GET_PREDECESSOR:
+    XBT_DEBUG("Receiving a 'Get Predecessor' request from %s", message->issuer_host_name.c_str());
+    // reuse the request object as the answer, sent back to the requester
+    message->type = GET_PREDECESSOR_ANSWER;
+    message->answer_id = pred_id_;
+    XBT_DEBUG("Sending back a 'Get Predecessor Answer' to %s via mailbox '%s': my predecessor is %d",
+              message->issuer_host_name.c_str(), message->answer_to->name(), message->answer_id);
+    // TODO make it a dsend
+    try {
+      simgrid::s4u::this_actor::isend(message->answer_to, message, 10);
+    } catch (xbt_ex& e) {
+      if (e.category == timeout_error) {
+        XBT_DEBUG("Send of 'Get Predecessor Answer' failed due to an expired timeout on receiver side");
+      }
+    }
+    break;
+
+  case NOTIFY:
+    // someone is telling me that he may be my new predecessor
+    XBT_DEBUG("Receiving a 'Notify' request from %s", message->issuer_host_name.c_str());
+    notify(message->request_id);
+    delete message;
+    break;
+
+  case PREDECESSOR_LEAVING:
+    // my predecessor is about to quit
+    XBT_DEBUG("Receiving a 'Predecessor Leaving' message from %s", message->issuer_host_name.c_str());
+    // modify my predecessor
+    setPredecessor(message->request_id);
+    delete message;
+    /*TODO :
+      >> notify my new predecessor
+      >> send a notify_predecessors !!
+     */
+    break;
+
+  case SUCCESSOR_LEAVING:
+    // my successor is about to quit
+    XBT_DEBUG("Receiving a 'Successor Leaving' message from %s", message->issuer_host_name.c_str());
+    // modify my successor FIXME : this should be implicit ?
+    setFinger(0, message->request_id);
+    delete message;
+    /* TODO
+      >> notify my new successor
+      >> update my table & predecessors table */
+    break;
+
+  case PREDECESSOR_ALIVE:
+    XBT_DEBUG("Receiving a 'Predecessor Alive' request from %s", message->issuer_host_name.c_str());
+    // echo the request object back to the requester as the liveness answer
+    message->type = PREDECESSOR_ALIVE_ANSWER;
+    XBT_DEBUG("Sending back a 'Predecessor Alive Answer' to %s (mailbox %s)",
+              message->issuer_host_name.c_str(), message->answer_to->name());
+    // TODO Make it a dsend
+    try {
+      simgrid::s4u::this_actor::isend(message->answer_to, message, 10);
+    } catch (xbt_ex& e) {
+      if (e.category == timeout_error) {
+        XBT_DEBUG("Send of 'Predecessor Alive' failed due to an expired timeout on receiver side");
+      }
+    }
+    break;
+
+  default:
+    XBT_DEBUG("Ignoring unexpected message: %d from %s", message->type, message->issuer_host_name.c_str());
+    delete message;
+  }
+}
--- /dev/null
+/* Copyright (c) 2010-2016. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "s4u_dht-chord.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_chord, "Messages specific for this s4u example");
+simgrid::xbt::Extension<simgrid::s4u::Host, HostChord> HostChord::EXTENSION_ID;
+
+int nb_bits = 24;       // number of bits in the node ids (overridable with -nb_bits)
+int nb_keys = 0;        // size of the id space: 2^nb_bits (computed in chord_init)
+int timeout = 50;       // communication timeout, in simulated time (overridable with -timeout)
+int* powers2 = nullptr; // powers2[i] == 2^i, precomputed in chord_init
+
+/* Global initialization of the Chord simulation: precomputes the powers of 2
+ * and attaches a per-host RNG stream extension to every host of the platform. */
+static void chord_init()
+{
+  // compute the powers of 2 once for all
+  powers2 = new int[nb_bits];
+  int pow = 1;
+  for (int i = 0; i < nb_bits; i++) {
+    powers2[i] = pow;
+    pow = pow << 1;
+  }
+  nb_keys = pow; // == 2^nb_bits
+  XBT_DEBUG("Sets nb_keys to %d", nb_keys);
+
+  HostChord::EXTENSION_ID = simgrid::s4u::Host::extension_create<HostChord>();
+
+  std::vector<simgrid::s4u::Host*> list;
+  simgrid::s4u::Engine::instance()->hostList(&list);
+  for (auto host : list)
+    host->extension_set(new HostChord(host));
+}
+
+/* Frees the global data allocated by chord_init(). */
+static void chord_exit()
+{
+  delete[] powers2;
+}
+
+/* Entry point: parses the -nb_bits/-timeout options, loads the platform and
+ * the deployment file, then runs the Chord simulation. */
+int main(int argc, char* argv[])
+{
+  // NOTE(review): 'e' is never deleted; harmless at the end of main, but
+  // worth confirming against the conventions of the other examples.
+  simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+  xbt_assert(argc > 2, "Usage: %s [-nb_bits=n] [-timeout=t] platform_file deployment_file\n"
+                       "\tExample: %s ../msg_platform.xml chord.xml\n",
+             argv[0], argv[0]);
+  // consume the leading "-xxx=yyy" options; 'options' ends up pointing at the platform file
+  char** options = &argv[1];
+  while (!strncmp(options[0], "-", 1)) {
+    unsigned int length = strlen("-nb_bits=");
+    if (!strncmp(options[0], "-nb_bits=", length) && strlen(options[0]) > length) {
+      nb_bits = xbt_str_parse_int(options[0] + length, "Invalid nb_bits parameter: %s");
+      XBT_DEBUG("Set nb_bits to %d", nb_bits);
+    } else {
+      length = strlen("-timeout=");
+      if (!strncmp(options[0], "-timeout=", length) && strlen(options[0]) > length) {
+        timeout = xbt_str_parse_int(options[0] + length, "Invalid timeout parameter: %s");
+        XBT_DEBUG("Set timeout to %d", timeout);
+      } else {
+        xbt_die("Invalid chord option '%s'", options[0]);
+      }
+    }
+    options++;
+  }
+
+  e->loadPlatform(options[0]);
+
+  chord_init();
+
+  // each <process function="node"> of the deployment file spawns a Node actor
+  e->registerFunction<Node>("node");
+  e->loadDeployment(options[1]);
+
+  e->run();
+
+  XBT_INFO("Simulated time: %g", e->getClock());
+
+  chord_exit();
+
+  return 0;
+}
--- /dev/null
+/* Copyright (c) 2016-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef S4U_CHORD_HPP
+#define S4U_CHORD_HPP
+#include "simgrid/s4u.hpp"
+#include <string>
+#include <xbt/RngStream.h>
+#include <xbt/ex.hpp>
+#include <xbt/str.h>
+
+#define MAX_SIMULATION_TIME 1000
+#define PERIODIC_STABILIZE_DELAY 20
+#define PERIODIC_FIX_FINGERS_DELAY 120
+#define PERIODIC_CHECK_PREDECESSOR_DELAY 120
+#define PERIODIC_LOOKUP_DELAY 10
+#define SLEEP_DELAY 4.9999
+
+extern int nb_bits;
+extern int nb_keys;
+extern int timeout;
+extern int* powers2;
+
+/* Host extension carrying one RngStream per host, so that random draws are
+ * reproducible on a per-host basis. */
+class HostChord {
+  RngStream stream_;
+  simgrid::s4u::Host* host = nullptr;
+
+public:
+  static simgrid::xbt::Extension<simgrid::s4u::Host, HostChord> EXTENSION_ID;
+
+  explicit HostChord(simgrid::s4u::Host* ptr) : host(ptr)
+  {
+    // Name the stream after the host (fixed typo: was "RngSream<...>").
+    std::string descr = std::string("RngStream<") + host->cname() + ">";
+    stream_ = RngStream_CreateStream(descr.c_str());
+  }
+
+  ~HostChord() { RngStream_DeleteStream(&stream_); };
+
+  RngStream getStream() { return stream_; };
+};
+
+/* Types of tasks exchanged between nodes. */
+typedef enum {
+  FIND_SUCCESSOR,          // ask for the successor of a given id (answer expected)
+  FIND_SUCCESSOR_ANSWER,   // answer to FIND_SUCCESSOR
+  GET_PREDECESSOR,         // ask a node for its predecessor (answer expected)
+  GET_PREDECESSOR_ANSWER,  // answer to GET_PREDECESSOR
+  NOTIFY,                  // tell a node it may have a new predecessor
+  SUCCESSOR_LEAVING,       // sent to the predecessor by a node leaving the ring
+  PREDECESSOR_LEAVING,     // sent to the successor by a node leaving the ring
+  PREDECESSOR_ALIVE,       // liveness probe (answer expected)
+  PREDECESSOR_ALIVE_ANSWER // answer to PREDECESSOR_ALIVE
+} e_message_type_t;
+
+/* Payload exchanged between nodes. A request object may be reused as its own
+ * answer and sent back through 'answer_to' (see handleMessage); ownership
+ * ultimately belongs to whoever deletes it last. */
+class ChordMessage {
+public:
+  e_message_type_t type;              // type of message
+  std::string issuer_host_name;       // used for logging
+  int request_id = -1;                // id (used by some types of messages)
+  int request_finger = 1;             // finger parameter (used by some types of messages)
+  int answer_id = -1;                 // answer (used by some types of messages)
+  simgrid::s4u::MailboxPtr answer_to; // mailbox to send an answer to (if any)
+
+  explicit ChordMessage(e_message_type_t type) : type(type)
+  {
+    // remember the sender's host name for debugging output
+    issuer_host_name = simgrid::s4u::this_actor::host()->name();
+  }
+
+  ~ChordMessage() = default;
+};
+
+/* A Chord node: joins the ring, serves lookups, and periodically runs the
+ * stabilize / fix-fingers / check-predecessor maintenance tasks until its
+ * deadline (or MAX_SIMULATION_TIME) is reached, then leaves the ring. */
+class Node {
+  int known_id_ = -1;                // id of a node already in the ring (-1: I create the ring)
+  double start_time_ = -1;           // when to wake up and join
+  double deadline_ = -1;             // how long to stay in the ring after start_time_
+  bool joined = false;               // whether the join succeeded
+  int id_;                           // my id
+  int pred_id_ = -1;                 // predecessor id (-1: unknown)
+  simgrid::s4u::MailboxPtr mailbox_; // my mailbox
+  int* fingers_;                     // finger table,(fingers[0] is my successor)
+  int next_finger_to_fix;            // index of the next finger to fix in fix_fingers()
+  RngStream stream;                  // per-host RNG stream (see HostChord)
+
+public:
+  explicit Node(std::vector<std::string> args);
+  ~Node();
+  void join(int known_id);
+  void leave();
+  void notifyAndQuit();
+
+  void randomLookup();
+  void setFinger(int finger_index, int id);
+  void fixFingers();
+  void printFingerTable();
+
+  void setPredecessor(int predecessor_id);
+  void checkPredecessor();
+  int remoteGetPredecessor(int ask_to);
+  int closestPrecedingFinger(int id);
+  int findSuccessor(int id);
+  int remoteFindSuccessor(int ask_to, int id);
+
+  void notify(int predecessor_candidate_id);
+  void remoteNotify(int notify_id, int predecessor_candidate_id);
+  void stabilize();
+  void handleMessage(ChordMessage* message);
+
+  // Actor body: main event loop of the node.
+  void operator()()
+  {
+    simgrid::s4u::this_actor::sleep_for(start_time_);
+    if (known_id_ == -1) {
+      // first node: I create the ring alone
+      setPredecessor(-1); // -1 means that I have no predecessor
+      printFingerTable();
+      joined = true;
+    } else {
+      join(known_id_);
+    }
+
+    if (!joined)
+      return;
+    ChordMessage* message = nullptr;
+    void* data = nullptr;
+    double now = simgrid::s4u::Engine::getClock();
+    double next_stabilize_date = start_time_ + PERIODIC_STABILIZE_DELAY;
+    double next_fix_fingers_date = start_time_ + PERIODIC_FIX_FINGERS_DELAY;
+    double next_check_predecessor_date = start_time_ + PERIODIC_CHECK_PREDECESSOR_DELAY;
+    double next_lookup_date = start_time_ + PERIODIC_LOOKUP_DELAY;
+
+    while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME) {
+      // post an async receive, then poll it while running the periodic tasks
+      data = nullptr;
+      simgrid::s4u::Comm& comm_receive = simgrid::s4u::this_actor::irecv(mailbox_, &data);
+      while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME && !comm_receive.test()) {
+        // no task was received: make some periodic calls
+        if (now >= next_stabilize_date) {
+          stabilize();
+          next_stabilize_date = simgrid::s4u::Engine::getClock() + PERIODIC_STABILIZE_DELAY;
+        } else if (now >= next_fix_fingers_date) {
+          fixFingers();
+          next_fix_fingers_date = simgrid::s4u::Engine::getClock() + PERIODIC_FIX_FINGERS_DELAY;
+        } else if (now >= next_check_predecessor_date) {
+          checkPredecessor();
+          next_check_predecessor_date = simgrid::s4u::Engine::getClock() + PERIODIC_CHECK_PREDECESSOR_DELAY;
+        } else if (now >= next_lookup_date) {
+          randomLookup();
+          next_lookup_date = simgrid::s4u::Engine::getClock() + PERIODIC_LOOKUP_DELAY;
+        } else {
+          // nothing to do: sleep for a while
+          simgrid::s4u::this_actor::sleep_for(SLEEP_DELAY);
+        }
+        now = simgrid::s4u::Engine::getClock();
+      }
+
+      if (data != nullptr) {
+        message = static_cast<ChordMessage*>(data);
+        handleMessage(message); // takes ownership of the message
+      }
+      now = simgrid::s4u::Engine::getClock();
+    }
+    // NOTE(review): if the deadline expires while comm_receive is still
+    // pending, that comm is not cancelled here -- TODO confirm it is harmless.
+    if (data != nullptr) {
+      delete static_cast<ChordMessage*>(data);
+    }
+    // leave the ring
+    leave();
+  }
+};
+
+#endif
--- /dev/null
+#! ./tesh
+
+p Testing the Chord implementation with S4U
+
+! output sort 19
+$ $SG_TEST_EXENV ${bindir:=.}/s4u_dht-chord$EXEEXT -nb_bits=3 ${srcdir:=.}/cluster.xml ${srcdir:=.}/../s4u/dht-chord/s4u_dht-chord_d.xml --log=s4u_chord.thres:verbose "--log=root.fmt:[%10.5r]%e(%P@%h)%e%m%n"
+> [ 0.00000] (node@node-0.acme.org) My finger table:
+> [ 0.00000] (node@node-0.acme.org) Start | Succ
+> [ 0.00000] (node@node-0.acme.org) 3 | 42
+> [ 0.00000] (node@node-0.acme.org) 4 | 42
+> [ 0.00000] (node@node-0.acme.org) 6 | 42
+> [ 0.00000] (node@node-0.acme.org) Predecessor: -1
+> [ 10.00000] (node@node-1.acme.org) Joining the ring with id 366680, knowing node 42
+> [ 15.00751] (node@node-1.acme.org) My new finger #0 is 42
+> [ 15.00751] (node@node-1.acme.org) My finger table:
+> [ 15.00751] (node@node-1.acme.org) Start | Succ
+> [ 15.00751] (node@node-1.acme.org) 1 | 42
+> [ 15.00751] (node@node-1.acme.org) 2 | 366680
+> [ 15.00751] (node@node-1.acme.org) 4 | 366680
+> [ 15.00751] (node@node-1.acme.org) Predecessor: -1
+> [ 20.00000] (node@node-2.acme.org) Joining the ring with id 533744, knowing node 366680
+> [ 30.00000] (node@node-3.acme.org) Joining the ring with id 1319738, knowing node 42
+> [ 30.00721] (node@node-2.acme.org) My new finger #0 is 42
+> [ 30.00721] (node@node-2.acme.org) My finger table:
+> [ 30.00721] (node@node-2.acme.org) Start | Succ
+> [ 30.00721] (node@node-2.acme.org) 1 | 42
+> [ 30.00721] (node@node-2.acme.org) 2 | 533744
+> [ 30.00721] (node@node-2.acme.org) 4 | 533744
+> [ 30.00721] (node@node-2.acme.org) Predecessor: -1
+> [ 35.00711] (node@node-3.acme.org) My new finger #0 is 42
+> [ 35.00711] (node@node-3.acme.org) My finger table:
+> [ 35.00711] (node@node-3.acme.org) Start | Succ
+> [ 35.00711] (node@node-3.acme.org) 3 | 42
+> [ 35.00711] (node@node-3.acme.org) 4 | 1319738
+> [ 35.00711] (node@node-3.acme.org) 6 | 1319738
+> [ 35.00711] (node@node-3.acme.org) Predecessor: -1
+> [ 40.00000] (node@node-4.acme.org) Joining the ring with id 16509405, knowing node 366680
+> [ 49.99900] (node@node-0.acme.org) My new predecessor is 366680
+> [ 49.99900] (node@node-0.acme.org) My finger table:
+> [ 49.99900] (node@node-0.acme.org) Start | Succ
+> [ 49.99900] (node@node-0.acme.org) 3 | 42
+> [ 49.99900] (node@node-0.acme.org) 4 | 42
+> [ 49.99900] (node@node-0.acme.org) 6 | 42
+> [ 49.99900] (node@node-0.acme.org) Predecessor: 366680
+> [ 49.99900] (node@node-0.acme.org) My new finger #0 is 366680
+> [ 55.00671] (node@node-4.acme.org) My new finger #0 is 366680
+> [ 55.00671] (node@node-4.acme.org) My finger table:
+> [ 55.00671] (node@node-4.acme.org) Start | Succ
+> [ 55.00671] (node@node-4.acme.org) 6 | 366680
+> [ 55.00671] (node@node-4.acme.org) 7 | 16509405
+> [ 55.00671] (node@node-4.acme.org) 1 | 16509405
+> [ 55.00671] (node@node-4.acme.org) Predecessor: -1
+> [ 60.00000] (node@node-6.acme.org) Joining the ring with id 16728096, knowing node 1319738
+> [ 65.00651] (node@node-3.acme.org) My new finger #0 is 366680
+> [ 65.01431] (node@node-6.acme.org) My new finger #0 is 366680
+> [ 65.01431] (node@node-6.acme.org) My finger table:
+> [ 65.01431] (node@node-6.acme.org) Start | Succ
+> [ 65.01431] (node@node-6.acme.org) 1 | 366680
+> [ 65.01431] (node@node-6.acme.org) 2 | 16728096
+> [ 65.01431] (node@node-6.acme.org) 4 | 16728096
+> [ 65.01431] (node@node-6.acme.org) Predecessor: -1
+> [ 70.00641] (node@node-1.acme.org) My new predecessor is 16509405
+> [ 70.00641] (node@node-1.acme.org) My finger table:
+> [ 70.00641] (node@node-1.acme.org) Start | Succ
+> [ 70.00641] (node@node-1.acme.org) 1 | 42
+> [ 70.00641] (node@node-1.acme.org) 2 | 366680
+> [ 70.00641] (node@node-1.acme.org) 4 | 366680
+> [ 70.00641] (node@node-1.acme.org) Predecessor: 16509405
+> [ 80.01401] (node@node-0.acme.org) My new finger #0 is 16509405
+> [ 85.01391] (node@node-6.acme.org) My new finger #0 is 16509405
+> [ 100.02922] (node@node-3.acme.org) My new finger #0 is 16509405
+> [ 110.02902] (node@node-4.acme.org) My new predecessor is 42
+> [ 110.02902] (node@node-4.acme.org) My finger table:
+> [ 110.02902] (node@node-4.acme.org) Start | Succ
+> [ 110.02902] (node@node-4.acme.org) 6 | 366680
+> [ 110.02902] (node@node-4.acme.org) 7 | 16509405
+> [ 110.02902] (node@node-4.acme.org) 1 | 16509405
+> [ 110.02902] (node@node-4.acme.org) Predecessor: 42
+> [ 115.03673] (node@node-6.acme.org) My new finger #0 is 42
+> [ 200.05164] (node@node-3.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 210.04364] (node@node-1.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 210.05925] (node@node-4.acme.org) My new predecessor is -1
+> [ 220.05905] (node@node-4.acme.org) My new predecessor is 42
+> [ 220.05905] (node@node-4.acme.org) My finger table:
+> [ 220.05905] (node@node-4.acme.org) Start | Succ
+> [ 220.05905] (node@node-4.acme.org) 6 | 366680
+> [ 220.05905] (node@node-4.acme.org) 7 | 16509405
+> [ 220.05905] (node@node-4.acme.org) 1 | 16509405
+> [ 220.05905] (node@node-4.acme.org) Predecessor: 42
+> [ 220.07466] (node@node-0.acme.org) My new predecessor is 16509405
+> [ 225.05895] (node@node-4.acme.org) My new finger #0 is 42
+> [ 230.07446] (node@node-0.acme.org) My new predecessor is 533744
+> [ 230.07446] (node@node-0.acme.org) My finger table:
+> [ 230.07446] (node@node-0.acme.org) Start | Succ
+> [ 230.07446] (node@node-0.acme.org) 3 | 16509405
+> [ 230.07446] (node@node-0.acme.org) 4 | 42
+> [ 230.07446] (node@node-0.acme.org) 6 | 42
+> [ 230.07446] (node@node-0.acme.org) Predecessor: 533744
+> [ 235.08217] (node@node-4.acme.org) My new finger #0 is 533744
+> [ 240.08987] (node@node-0.acme.org) My new finger #1 is 16509405
+> [ 240.08987] (node@node-0.acme.org) My finger table:
+> [ 240.08987] (node@node-0.acme.org) Start | Succ
+> [ 240.08987] (node@node-0.acme.org) 3 | 16509405
+> [ 240.08987] (node@node-0.acme.org) 4 | 16509405
+> [ 240.08987] (node@node-0.acme.org) 6 | 42
+> [ 240.08987] (node@node-0.acme.org) Predecessor: 533744
+> [ 250.00000] (node@node-5.acme.org) Joining the ring with id 10874876, knowing node 533744
+> [ 255.11299] (node@node-5.acme.org) My new finger #0 is 16509405
+> [ 255.11299] (node@node-5.acme.org) My finger table:
+> [ 255.11299] (node@node-5.acme.org) Start | Succ
+> [ 255.11299] (node@node-5.acme.org) 5 | 16509405
+> [ 255.11299] (node@node-5.acme.org) 6 | 10874876
+> [ 255.11299] (node@node-5.acme.org) 0 | 10874876
+> [ 255.11299] (node@node-5.acme.org) Predecessor: -1
+> [ 265.09718] (node@node-2.acme.org) My new predecessor is 16509405
+> [ 265.09718] (node@node-2.acme.org) My finger table:
+> [ 265.09718] (node@node-2.acme.org) Start | Succ
+> [ 265.09718] (node@node-2.acme.org) 1 | 42
+> [ 265.09718] (node@node-2.acme.org) 2 | 533744
+> [ 265.09718] (node@node-2.acme.org) 4 | 533744
+> [ 265.09718] (node@node-2.acme.org) Predecessor: 16509405
+> [ 275.11259] (node@node-5.acme.org) My new finger #0 is 42
+> [ 280.10468] (node@node-4.acme.org) My new predecessor is 10874876
+> [ 280.10468] (node@node-4.acme.org) My finger table:
+> [ 280.10468] (node@node-4.acme.org) Start | Succ
+> [ 280.10468] (node@node-4.acme.org) 6 | 533744
+> [ 280.10468] (node@node-4.acme.org) 7 | 16509405
+> [ 280.10468] (node@node-4.acme.org) 1 | 16509405
+> [ 280.10468] (node@node-4.acme.org) Predecessor: 10874876
+> [ 285.13581] (node@node-4.acme.org) My new predecessor is 42
+> [ 285.13581] (node@node-4.acme.org) My finger table:
+> [ 285.13581] (node@node-4.acme.org) Start | Succ
+> [ 285.13581] (node@node-4.acme.org) 6 | 533744
+> [ 285.13581] (node@node-4.acme.org) 7 | 16509405
+> [ 285.13581] (node@node-4.acme.org) 1 | 16509405
+> [ 285.13581] (node@node-4.acme.org) Predecessor: 42
+> [ 300.13551] (node@node-4.acme.org) My new finger #1 is 533744
+> [ 300.13551] (node@node-4.acme.org) My finger table:
+> [ 300.13551] (node@node-4.acme.org) Start | Succ
+> [ 300.13551] (node@node-4.acme.org) 6 | 533744
+> [ 300.13551] (node@node-4.acme.org) 7 | 533744
+> [ 300.13551] (node@node-4.acme.org) 1 | 16509405
+> [ 300.13551] (node@node-4.acme.org) Predecessor: 42
+> [ 300.14332] (node@node-2.acme.org) My new finger #1 is 42
+> [ 300.14332] (node@node-2.acme.org) My finger table:
+> [ 300.14332] (node@node-2.acme.org) Start | Succ
+> [ 300.14332] (node@node-2.acme.org) 1 | 42
+> [ 300.14332] (node@node-2.acme.org) 2 | 42
+> [ 300.14332] (node@node-2.acme.org) 4 | 533744
+> [ 300.14332] (node@node-2.acme.org) Predecessor: 16509405
+> [ 305.14322] (node@node-5.acme.org) My new finger #0 is 533744
+> [ 305.15102] (node@node-0.acme.org) My new finger #0 is 10874876
+> [ 310.15873] (node@node-6.acme.org) My new finger #1 is 42
+> [ 310.15873] (node@node-6.acme.org) My finger table:
+> [ 310.15873] (node@node-6.acme.org) Start | Succ
+> [ 310.15873] (node@node-6.acme.org) 1 | 42
+> [ 310.15873] (node@node-6.acme.org) 2 | 42
+> [ 310.15873] (node@node-6.acme.org) 4 | 16728096
+> [ 310.15873] (node@node-6.acme.org) Predecessor: -1
+> [ 330.16613] (node@node-5.acme.org) My new finger #0 is 16509405
+> [ 335.16603] (node@node-5.acme.org) My new predecessor is 42
+> [ 335.16603] (node@node-5.acme.org) My finger table:
+> [ 335.16603] (node@node-5.acme.org) Start | Succ
+> [ 335.16603] (node@node-5.acme.org) 5 | 16509405
+> [ 335.16603] (node@node-5.acme.org) 6 | 10874876
+> [ 335.16603] (node@node-5.acme.org) 0 | 10874876
+> [ 335.16603] (node@node-5.acme.org) Predecessor: 42
+> [ 340.16593] (node@node-4.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 350.15793] (node@node-2.acme.org) My new predecessor is 42
+> [ 350.16573] (node@node-0.acme.org) My new finger #0 is 533744
+> [ 360.18115] (node@node-0.acme.org) My new finger #2 is 533744
+> [ 360.18115] (node@node-0.acme.org) My finger table:
+> [ 360.18115] (node@node-0.acme.org) Start | Succ
+> [ 360.18115] (node@node-0.acme.org) 3 | 533744
+> [ 360.18115] (node@node-0.acme.org) 4 | 16509405
+> [ 360.18115] (node@node-0.acme.org) 6 | 533744
+> [ 360.18115] (node@node-0.acme.org) Predecessor: 533744
+> [ 420.23459] (node@node-2.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 425.22668] (node@node-0.acme.org) My new predecessor is 42
+> [ 475.23449] (node@node-0.acme.org) My new finger #0 is 42
+> [ 480.23439] (node@node-0.acme.org) My new predecessor is 16728096
+> [ 480.23439] (node@node-0.acme.org) My finger table:
+> [ 480.23439] (node@node-0.acme.org) Start | Succ
+> [ 480.23439] (node@node-0.acme.org) 3 | 42
+> [ 480.23439] (node@node-0.acme.org) 4 | 16509405
+> [ 480.23439] (node@node-0.acme.org) 6 | 533744
+> [ 480.23439] (node@node-0.acme.org) Predecessor: 16728096
+> [ 485.24209] (node@node-6.acme.org) My new finger #2 is 42
+> [ 485.24209] (node@node-6.acme.org) My finger table:
+> [ 485.24209] (node@node-6.acme.org) Start | Succ
+> [ 485.24209] (node@node-6.acme.org) 1 | 42
+> [ 485.24209] (node@node-6.acme.org) 2 | 42
+> [ 485.24209] (node@node-6.acme.org) 4 | 42
+> [ 485.24209] (node@node-6.acme.org) Predecessor: -1
+> [ 495.24970] (node@node-0.acme.org) My new finger #0 is 16728096
+> [ 575.26471] (node@node-6.acme.org) My new predecessor is 42
+> [ 575.26471] (node@node-6.acme.org) My finger table:
+> [ 575.26471] (node@node-6.acme.org) Start | Succ
+> [ 575.26471] (node@node-6.acme.org) 1 | 42
+> [ 575.26471] (node@node-6.acme.org) 2 | 42
+> [ 575.26471] (node@node-6.acme.org) 4 | 42
+> [ 575.26471] (node@node-6.acme.org) Predecessor: 42
+> [ 600.27202] (node@node-0.acme.org) My new finger #1 is 16728096
+> [ 600.27202] (node@node-0.acme.org) My finger table:
+> [ 600.27202] (node@node-0.acme.org) Start | Succ
+> [ 600.27202] (node@node-0.acme.org) 3 | 16728096
+> [ 600.27202] (node@node-0.acme.org) 4 | 16728096
+> [ 600.27202] (node@node-0.acme.org) 6 | 533744
+> [ 600.27202] (node@node-0.acme.org) Predecessor: 16728096
+> [ 720.36329] (node@node-0.acme.org) My new finger #2 is 16728096
+> [ 720.36329] (node@node-0.acme.org) My finger table:
+> [ 720.36329] (node@node-0.acme.org) Start | Succ
+> [ 720.36329] (node@node-0.acme.org) 3 | 16728096
+> [ 720.36329] (node@node-0.acme.org) 4 | 16728096
+> [ 720.36329] (node@node-0.acme.org) 6 | 16728096
+> [ 720.36329] (node@node-0.acme.org) Predecessor: 16728096
+> [ 855.46207] (node@node-6.acme.org) My new finger #2 is 16728096
+> [ 855.46207] (node@node-6.acme.org) My finger table:
+> [ 855.46207] (node@node-6.acme.org) Start | Succ
+> [ 855.46207] (node@node-6.acme.org) 1 | 42
+> [ 855.46207] (node@node-6.acme.org) 2 | 42
+> [ 855.46207] (node@node-6.acme.org) 4 | 16728096
+> [ 855.46207] (node@node-6.acme.org) Predecessor: 42
+> [ 860.46197] (node@node-6.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 865.45406] (node@node-0.acme.org) My new predecessor is 42
+> [ 890.43115] (node@node-5.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [ 915.45406] (node@node-0.acme.org) My new finger #0 is 42
+> [ 940.45356] (node@node-0.acme.org) My new finger #0 is 16509405
+> [ 990.45356] (node@node-0.acme.org) My new finger #1 is 16509405
+> [ 990.45356] (node@node-0.acme.org) My finger table:
+> [ 990.45356] (node@node-0.acme.org) Start | Succ
+> [ 990.45356] (node@node-0.acme.org) 3 | 16509405
+> [ 990.45356] (node@node-0.acme.org) 4 | 16509405
+> [ 990.45356] (node@node-0.acme.org) 6 | 16728096
+> [ 990.45356] (node@node-0.acme.org) Predecessor: 42
+> [1040.45356] (node@node-0.acme.org) Well Guys! I Think it's time for me to leave ;)
+> [1090.46137] (maestro@) Simulated time: 1090.46
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
+<platform version="4">
+ <process host="node-0.acme.org" function="node">
+ <argument value="42"/>
+ <argument value="1000"/>
+ </process>
+ <process host="node-1.acme.org" function="node">
+ <argument value="366680" />
+ <argument value="42" />
+ <argument value="10" />
+ <argument value="200" />
+ </process>
+ <process host="node-2.acme.org" function="node">
+ <argument value="533744" />
+ <argument value="366680" />
+ <argument value="20" />
+ <argument value="400" />
+ </process>
+ <process host="node-3.acme.org" function="node">
+ <argument value="1319738" />
+ <argument value="42" />
+ <argument value="30" />
+ <argument value="150" />
+ </process>
+ <process host="node-4.acme.org" function="node">
+ <argument value="16509405" />
+ <argument value="366680" />
+ <argument value="40" />
+ <argument value="300" />
+ </process>
+ <process host="node-5.acme.org" function="node">
+ <argument value="10874876" />
+ <argument value="533744" />
+ <argument value="250" />
+ <argument value="600" />
+ </process>
+ <process host="node-6.acme.org" function="node">
+ <argument value="16728096" />
+ <argument value="1319738" />
+ <argument value="60" />
+ <argument value="800" />
+ </process>
+</platform>
\ No newline at end of file
show_info(mounts);
// Open an non-existing file to create it
- const char *filename = "/home/tmp/data.txt";
- simgrid::s4u::File *file = new simgrid::s4u::File(filename, NULL);
+ const char* filename = "/home/tmp/data.txt";
+ simgrid::s4u::File* file = new simgrid::s4u::File(filename, nullptr);
sg_size_t write = file->write(200000); // Write 200,000 bytes
XBT_INFO("Create a %llu bytes file named '%s' on /sd1", write, filename);
storage.setUserdata(xbt_strdup("Some user data"));
XBT_INFO(" Set and get data: '%s'", (char*)storage.userdata());
+
+ xbt_free(storage.userdata());
}
};
+++ /dev/null
-#! ./tesh
-
-$ $SG_TEST_EXENV ${bindir:=.}/s4u_launching
-> [Tremblay:sender1:(0) 0.000000] [s4u_launching_test/INFO] Hello s4u, I have something to send
-> [Jupiter:sender2:(0) 0.000000] [s4u_launching_test/INFO] Hello s4u, I have something to send
-> [Fafard:receiver:(0) 0.000000] [s4u_launching_test/INFO] Hello s4u, I'm ready to get any message you'd want on mb42
-> [Tremblay:sender1:(0) 0.025709] [s4u_launching_test/INFO] I'm done. See you.
-> [Jupiter:sender2:(0) 0.070434] [s4u_launching_test/INFO] I'm done. See you.
-> [Fafard:receiver:(0) 0.070434] [s4u_launching_test/INFO] I received 'GaBuZoMeu' and 'GloubiBoulga'
-> [Fafard:receiver:(0) 0.070434] [s4u_launching_test/INFO] I'm done. See you.
SD_task_watch(t3, SD_DONE);
SD_task_watch(c3, SD_DONE);
SD_task_watch(t4, SD_DONE);
-
+ xbt_dynar_t changed_tasks = xbt_dynar_new(sizeof(SD_task_t), NULL);
while (1) {
- xbt_dynar_t changed_tasks = SD_simulate(-1.0);
+ SD_simulate_with_update(-1.0, changed_tasks);
if (xbt_dynar_is_empty(changed_tasks))
break;
XBT_INFO("link1: bw=%.0f, lat=%f", sg_host_route_bandwidth(hosts[0], hosts[1]),
if (SD_task_get_state(task)==SD_DONE)
SD_task_destroy(task);
}
- xbt_dynar_free(&changed_tasks);
+ xbt_dynar_reset(changed_tasks);
}
SD_exit();
xbt_free(hosts);
+ xbt_dynar_free(&changed_tasks);
return 0;
}
SD_task_schedulel(task, 1, sg_host_by_name("Faulty Host"));
- xbt_dynar_t tasks = SD_simulate(-1.0);
- xbt_dynar_free(&tasks);
+ SD_simulate(-1.0);
SD_task_dump(task);
SD_task_schedulel(task, 1, sg_host_by_name("Safe Host"));
XBT_INFO("Run the simulation again");
- tasks = SD_simulate(-1.0);
- xbt_dynar_free(&tasks);
+ SD_simulate(-1.0);
SD_task_dump(task);
XBT_INFO("Task '%s' start time: %f, finish time: %f", SD_task_get_name(task), SD_task_get_start_time(task),
hosts[0] = sg_host_by_name("Faulty Host");
SD_task_schedule(task, 1, hosts, computation_amount, communication_amount,-1);
- tasks = SD_simulate(-1.0);
- xbt_dynar_free(&tasks);
+ SD_simulate(-1.0);
SD_task_dump(task);
SD_task_schedule(task, 1, hosts, computation_amount, communication_amount,-1);
XBT_INFO("Run the simulation again");
- tasks = SD_simulate(-1.0);
- xbt_dynar_free(&tasks);
+ SD_simulate(-1.0);
SD_task_dump(task);
XBT_INFO("Task '%s' start time: %f, finish time: %f", SD_task_get_name(task), SD_task_get_start_time(task),
ws_list = sg_host_list();
reclaimed = xbt_dynar_new(sizeof(bcast_task_t),xbt_free_ref);
- xbt_dynar_t done = NULL;
+ xbt_dynar_t done = xbt_dynar_new(sizeof(SD_task_t), NULL);
xbt_os_cputimer_start(timer);
send_one(0,sg_host_count());
do {
- if (done != NULL && !xbt_dynar_is_empty(done)) {
+ if (!xbt_dynar_is_empty(done)) {
unsigned int cursor;
SD_task_t task;
}
SD_task_destroy(task);
}
- xbt_dynar_free(&done);
+ xbt_dynar_free_container(&done);
}
- done=SD_simulate(-1);
+ SD_simulate_with_update(-1, done);
} while(!xbt_dynar_is_empty(done));
xbt_os_cputimer_stop(timer);
printf("exec_time:%f\n", xbt_os_timer_elapsed(timer) );
xbt_dynar_get_cpy(dax, 0, &task);
sg_host_t host = SD_task_get_best_host(task);
SD_task_schedulel(task, 1, host);
+ xbt_dynar_t changed_tasks = xbt_dynar_new(sizeof(SD_task_t), NULL);
+ SD_simulate_with_update(-1.0, changed_tasks);
- while (!xbt_dynar_is_empty(SD_simulate(-1.0))) {
+ while (!xbt_dynar_is_empty(changed_tasks)) {
/* Get the set of ready tasks */
ready_tasks = get_ready_tasks(dax);
+ xbt_dynar_reset(changed_tasks);
+
if (xbt_dynar_is_empty(ready_tasks)) {
xbt_dynar_free_container(&ready_tasks);
/* there is no ready task, let advance the simulation */
+ SD_simulate_with_update(-1.0, changed_tasks);
continue;
}
/* For each ready task:
xbt_dynar_free_container(&ready_tasks);
/* reset the min_finish_time for the next set of ready tasks */
min_finish_time = -1.;
+ xbt_dynar_reset(changed_tasks);
+ SD_simulate_with_update(-1.0, changed_tasks);
}
XBT_INFO("Simulation Time: %f", SD_get_clock());
free(tracefilename);
xbt_dynar_free_container(&ready_tasks);
+ xbt_dynar_free(&changed_tasks);
xbt_dynar_foreach(dax, cursor, task) {
SD_task_destroy(task);
SD_create_environment(argv[1]);
/* test the estimation functions */
- const sg_host_t *hosts = sg_host_list();
+ const sg_host_t* hosts = sg_host_list();
simgrid::s4u::Host* h1 = hosts[4];
simgrid::s4u::Host* h2 = hosts[2];
double comp_amount1 = 2000000;
XBT_DEBUG("Tasks destroyed. Exiting SimDag...");
SD_exit();
+ xbt_free((sg_host_t*)hosts);
return 0;
}
{
unsigned int ctr;
SD_task_t task;
- xbt_dynar_t changed_tasks;
+ xbt_dynar_t changed_tasks = xbt_dynar_new(sizeof(SD_task_t), NULL);
SD_init(&argc, argv);
xbt_assert(argc > 1, "Usage: %s platform_file\n\nExample: %s two_clusters.xml", argv[0], argv[0]);
SD_task_schedulel(taskA, 1, hosts[0]);
SD_task_schedulel(taskC, 1, hosts[1]);
SD_task_schedulel(taskE, 1, hosts[0]);
- while (!xbt_dynar_is_empty((changed_tasks = SD_simulate(-1.0)))) {
+
+ SD_simulate_with_update(-1.0, changed_tasks);
+ while (!xbt_dynar_is_empty(changed_tasks)) {
XBT_INFO("Simulation stopped after %.4f seconds", SD_get_clock());
xbt_dynar_foreach(changed_tasks, ctr, task) {
XBT_INFO("Task '%s' start time: %f, finish time: %f", SD_task_get_name(task), SD_task_get_start_time(task),
SD_task_get_finish_time(task));
}
- xbt_dynar_free(&changed_tasks);
+ xbt_dynar_reset(changed_tasks);
/* let throttle the communication for taskD if its parent is SD_DONE */
/* the bandwidth is 1.25e8, the data size is 1e7, and we want to throttle the bandwidth by a factor 2.
*/
if (SD_task_get_state(taskC) == SD_DONE && SD_task_get_state(taskD) < SD_RUNNING)
SD_task_set_rate(taskD, 6.25);
+ SD_simulate_with_update(-1.0, changed_tasks);
}
+ xbt_dynar_free(&changed_tasks);
+
XBT_DEBUG("Destroying tasks...");
SD_task_destroy(taskA);
SD_task_destroy(taskB);
SD_task_schedule(par_comp3, 4, host_list, computation_amount, communication_amount, -1);
- xbt_dynar_t changed_tasks = SD_simulate(-1.0);
+ xbt_dynar_t changed_tasks = xbt_dynar_new(sizeof(SD_task_t), NULL);
+ SD_simulate_with_update(-1.0, changed_tasks);
xbt_dynar_foreach(changed_tasks, ctr, task) {
XBT_INFO("Task '%s' start time: %f, finish time: %f", SD_task_get_name(task), SD_task_get_start_time(task),
SD_task_get_finish_time(task));
p Test smpi bindings for dvfs functions (C example)
-$ ../../../smpi_script/bin/smpirun -np 2 -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../platforms/energy_platform.xml --cfg=smpi/simulate-computation:no ${bindir:=.}/smpi_energy --cfg=plugin:Energy --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'plugin' to 'Energy'
+$ ../../../smpi_script/bin/smpirun -np 2 -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../platforms/energy_platform.xml --cfg=smpi/simulate-computation:no ${bindir:=.}/smpi_energy --cfg=plugin:Energy --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [0.000000] [rank 0] Pstates: 3; Powers: 100000000, 50000000, 20000000
> [0.000000] [rank 1] Pstates: 3; Powers: 100000000, 50000000, 20000000
> [0.000000] [rank 0] Current pstate: 0; Current power: 100000000
p Test smpi bindings for dvfs functions (Fortran 77 example)
-$ ../../../smpi_script/bin/smpirun -np 2 -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../platforms/energy_platform.xml --cfg=smpi/simulate-computation:no ${bindir:=.}/f77/sef --cfg=plugin:Energy --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'plugin' to 'Energy'
+$ ../../../smpi_script/bin/smpirun -np 2 -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../platforms/energy_platform.xml --cfg=smpi/simulate-computation:no ${bindir:=.}/f77/sef --cfg=plugin:Energy --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [ 0.0000000000000000 ] [rank 0 ] 3 pstates available
> [ 0.0000000000000000 ] [rank 1 ] 3 pstates available
> [ 0.0000000000000000 ] [rank 0 ] Power: 100000000.00000000
p Test smpi bindings for dvfs functions (Fortran 90 example)
! output sort 1
-$ ../../../smpi_script/bin/smpirun -np 2 -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../platforms/energy_platform.xml --cfg=smpi/simulate-computation:no ${bindir:=.}/f90/sef90 --cfg=plugin:Energy --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'plugin' to 'Energy'
+$ ../../../smpi_script/bin/smpirun -np 2 -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../../platforms/energy_platform.xml --cfg=smpi/simulate-computation:no ${bindir:=.}/f90/sef90 --cfg=plugin:Energy --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [ 0.0000000000000000 ] [rank 0 ] 3 pstates available
> [ 0.0000000000000000 ] [rank 1 ] 3 pstates available
> [ 0.0000000000000000 ] [rank 0 ] Power: 100000000.00000000
$ mkfile replay/one_trace
! timeout 60
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 0.000000] [smpi_replay/VERBOSE] 0 bcast 5e4 0.000000
> [Jupiter:1:(2) 0.015036] [smpi_replay/VERBOSE] 1 bcast 5e4 0.015036
> [Fafard:2:(3) 0.015676] [smpi_replay/VERBOSE] 2 bcast 5e4 0.015676
< replay/actions_bcast.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=no_loc --cfg=tracing:yes --cfg=tracing/smpi:yes --cfg=tracing/smpi/computing:yes --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/computing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=no_loc --cfg=tracing:yes --cfg=tracing/smpi:yes --cfg=tracing/smpi/computing:yes --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Fafard:2:(3) 19.691622] [smpi_replay/INFO] Simulation time 19.691622
$ rm -f replay/one_trace
< replay/actions1.txt
$ mkfile ./split_traces_tesh
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 2 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay ./split_traces_tesh --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 2 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay ./split_traces_tesh --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 0.164463] [smpi_replay/VERBOSE] 0 send 1 1e6 0.164463
> [Jupiter:1:(2) 0.164463] [smpi_replay/VERBOSE] 1 recv 0 1e6 0.164463
> [Jupiter:1:(2) 13.271310] [smpi_replay/VERBOSE] 1 compute 1e9 13.106847
< replay/actions_barrier.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 1.491472] [smpi_replay/VERBOSE] 0 send 1 1e7 1.491472
> [Jupiter:1:(2) 1.491472] [smpi_replay/VERBOSE] 1 recv 0 1e7 1.491472
> [Tremblay:0:(1) 1.493448] [smpi_replay/VERBOSE] 0 barrier 0.001976
< replay/actions_with_isend.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Jupiter:1:(2) 0.000000] [smpi_replay/VERBOSE] 1 Irecv 0 1e6 0.000000
> [Jupiter:1:(2) 6.553424] [smpi_replay/VERBOSE] 1 compute 5e8 6.553424
> [Jupiter:1:(2) 6.553524] [smpi_replay/VERBOSE] 1 test 0.000100
< replay/actions_allReduce.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 5.112775] [smpi_replay/VERBOSE] 0 allReduce 5e4 5e8 5.112775
> [Jupiter:1:(2) 6.584135] [smpi_replay/VERBOSE] 1 allReduce 5e4 5e8 6.584135
> [Fafard:2:(3) 6.584775] [smpi_replay/VERBOSE] 2 allReduce 5e4 5e8 6.584775
< replay/actions_alltoall.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 0.004041] [smpi_replay/VERBOSE] 0 allToAll 500 500 0.004041
> [Fafard:2:(3) 0.006920] [smpi_replay/VERBOSE] 2 allToAll 500 500 0.006920
> [Jupiter:1:(2) 0.006920] [smpi_replay/VERBOSE] 1 allToAll 500 500 0.006920
< replay/actions_alltoallv.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 0.003999] [smpi_replay/VERBOSE] 0 allToAllV 100 1 40 30 1000 1 80 100 0.003999
> [Jupiter:1:(2) 0.006934] [smpi_replay/VERBOSE] 1 allToAllV 1000 80 1 40 1000 40 1 30 0.006934
> [Fafard:2:(3) 0.006936] [smpi_replay/VERBOSE] 2 allToAllV 1000 100 30 1 1000 30 40 1 0.006936
< replay/actions_allgatherv.txt
$ mkfile replay/one_trace
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Tremblay:0:(1) 0.882872] [smpi_replay/VERBOSE] 0 allGatherV 275427 275427 275427 275427 204020 0 0 0.882872
> [Fafard:2:(3) 1.300605] [smpi_replay/VERBOSE] 2 allGatherV 275427 275427 275427 275427 204020 0 0 1.300605
> [Jupiter:1:(2) 1.300605] [smpi_replay/VERBOSE] 1 allGatherV 275427 275427 275427 275427 204020 0 0 1.300605
$ mkfile replay/one_trace
! output sort 19
-$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.thresh:critical --log=smpi_replay.thresh:verbose --log=no_loc --cfg=smpi/simulate-computation:no -np 3 -platform ${srcdir:=.}/../platforms/small_platform.xml -hostfile ${srcdir:=.}/hostfile ./replay/smpi_replay replay/one_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Fafard:2:(3) 0.000000] [smpi_replay/VERBOSE] 2 Irecv 1 3000 0.000000
> [Fafard:2:(3) 0.000000] [smpi_replay/VERBOSE] 2 Isend 0 3000 0.000000
> [Jupiter:1:(2) 0.000000] [smpi_replay/VERBOSE] 1 Isend 0 2000 0.000000
xbt_free(line_char);
}
+ xbt_free(line);
fclose(fp);
int main(int argc, char *argv[])
{
- int rank, numprocs, i;
+ int rank;
+ int numprocs;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
/** Test C */
TRACE_smpi_set_category("C");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Wait(&req[i], &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("D");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
int completed;
MPI_Waitany(N, req, &completed, sta);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("E");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("F");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("G");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
}
MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("H");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Send(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("I");
- for (i = 0; i < 2 * N; i++) {
+ for (int i = 0; i < 2 * N; i++) {
if (i < N) {
MPI_Send(r, DATATOSENT, MPI_INT, 2, tag, MPI_COMM_WORLD);
} else {
}
}
MPI_Barrier(MPI_COMM_WORLD);
- for (i = 0; i < 2 * N; i++) {
+ for (int i = 0; i < 2 * N; i++) {
if (i < N) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
} else {
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("J");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Isend(r, DATATOSENT, MPI_INT, 1, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
int flag;
MPI_Test(&req[i], &flag, &sta[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Wait(&req[i], &sta[i]);
}
free(r);
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("C");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("D");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("E");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Wait(&req[i], &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("F");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
int completed;
MPI_Waitany(N, req, &completed, sta);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("G");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Recv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &sta[i]);
}
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("H");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("I");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Isend(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
MPI_Waitall(N, req, sta);
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("J");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
int flag;
MPI_Test(&req[i], &flag, &sta[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Wait(&req[i], &sta[i]);
}
free(r);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
TRACE_smpi_set_category("I");
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Irecv(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD, &req[i]);
}
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
int completed;
MPI_Waitany(N, req, &completed, sta);
}
MPI_Barrier(MPI_COMM_WORLD);
- for (i = 0; i < N; i++) {
+ for (int i = 0; i < N; i++) {
MPI_Send(r, DATATOSENT, MPI_INT, 0, tag, MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
# Go for the first test
p Test instrumentation of SMPI
-$ ../../smpi_script/bin/smpirun -trace -trace-resource -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace/smpi_trace --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_trace.trace'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/categorized' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/uncategorized' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -trace -trace-resource -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace/smpi_trace --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
-$ rm -f smpi_trace.trace
\ No newline at end of file
+$ rm -f smpi_trace.trace
p Test SMPI with call-location tracing. This means that the binary must have
p already been compiled with the -trace-call-location switch.
-$ ../../smpi_script/bin/smpirun -trace -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/trace_call_location/smpi_trace_call_location --cfg=smpi/host-speed:-1 --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_trace.trace'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/trace-call-location' to '1'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/host-speed' to '-1'
+$ ../../smpi_script/bin/smpirun -trace -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=smpi/trace-call-location:1 -np 3 ${bindir:=.}/trace_call_location/smpi_trace_call_location --cfg=smpi/host-speed:-1 --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
$ grep --quiet "12 0.* 2 1 5 .*trace_call_location\.c\" 14$" ./smpi_trace.trace
# Go for the first test
p SMPI test
-$ ../../smpi_script/bin/smpirun -trace -trace-resource -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_trace.trace'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/categorized' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/uncategorized' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -trace -trace-resource -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
p Another SMPI test, with only -trace
-$ ../../smpi_script/bin/smpirun -trace -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_trace.trace'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -trace -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
p Testing without trace parameters
-$ ../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
p Testing grouped tracing
-$ ../../smpi_script/bin/smpirun -trace -trace-grouped -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_trace.trace'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/group' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -trace -trace-grouped -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
p Testing generation of viva configuration files
-$ ../../smpi_script/bin/smpirun -trace -trace-resource -trace-viva -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/simulate-computation:no -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_trace.trace'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/categorized' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/uncategorized' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'viva/categorized' to 'smpi_cat.plist'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'viva/uncategorized' to 'smpi_uncat.plist'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
+$ ../../smpi_script/bin/smpirun -trace -trace-resource -trace-viva -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg --cfg=smpi/simulate-computation:no -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [0.003952] [instr_config/INFO] No categories declared, ignoring generation of viva graph configuration
p Testing with parameters but without activating them with the safe switch (-trace)
-$ ../../smpi_script/bin/smpirun -trace-resource -trace-viva -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ../../smpi_script/bin/smpirun -trace-resource -trace-viva -trace-file smpi_trace.trace -hostfile ${srcdir:=.}/hostfile -platform ${srcdir:=.}/../platforms/small_platform.xml --cfg=path:${srcdir:=.}/../msg -np 3 ./trace_simple/smpi_trace_simple --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
$ rm -f smpi_trace.trace smpi_uncat.plist smpi_cat.plist
class Cpu;
class LinkImpl;
class HostImpl;
+ class Storage;
}
namespace trace_mgr {
class trace;
*/
typedef long long sg_offset_t;
+typedef unsigned long aid_t;
+
#endif
/* ******************************** File ************************************ */
-/** @brief Opaque object describing a File in MSG.
- * @ingroup msg_file */
-typedef xbt_dictelm_t msg_file_t;
+typedef struct simdata_file* simdata_file_t;
-extern int MSG_FILE_LEVEL;
-typedef struct simdata_file *simdata_file_t;
-
-struct msg_file_priv {
+typedef struct msg_file_priv {
char *fullpath;
sg_size_t size;
char* mount_point;
int desc_id;
void *data;
simdata_file_t simdata;
-};
+} s_msg_file_priv_t;
-typedef struct msg_file_priv s_msg_file_priv_t;
-typedef struct msg_file_priv* msg_file_priv_t;
-
-static inline msg_file_priv_t MSG_file_priv(msg_file_t file){
- return (msg_file_priv_t )xbt_lib_get_level(file, MSG_FILE_LEVEL);
-}
+typedef struct msg_file_priv* msg_file_t;
/* ******************************** Storage ************************************ */
/* TODO: PV: to comment */
typedef xbt_dictelm_t msg_storage_t;
struct msg_storage_priv {
- const char *hostname;
- void *data;
+ const char* name;
+ const char* hostname;
+ sg_size_t size;
+ void* data;
};
typedef struct msg_storage_priv s_msg_storage_priv_t;
typedef struct msg_storage_priv* msg_storage_priv_t;
XBT_PUBLIC(msg_error_t) MSG_file_seek(msg_file_t fd, sg_offset_t offset, int origin);
XBT_PUBLIC(sg_size_t) MSG_file_tell (msg_file_t fd);
XBT_PUBLIC(void) __MSG_file_get_info(msg_file_t fd);
-XBT_PUBLIC(void) __MSG_file_priv_free(msg_file_priv_t priv);
XBT_PUBLIC(const char *) MSG_file_get_name(msg_file_t file);
XBT_PUBLIC(msg_error_t) MSG_file_move(msg_file_t fd, const char* fullpath);
XBT_PUBLIC(msg_error_t) MSG_file_rcopy(msg_file_t fd, msg_host_t host, const char* fullpath);
XBT_PUBLIC(void *) MSG_storage_get_data(msg_storage_t storage);
XBT_PUBLIC(xbt_dict_t) MSG_storage_get_content(msg_storage_t storage);
XBT_PUBLIC(sg_size_t) MSG_storage_get_size(msg_storage_t storage);
-XBT_PUBLIC(msg_error_t) MSG_storage_file_move(msg_file_t fd, msg_host_t dest, char* mount, char* fullname);
XBT_PUBLIC(const char *) MSG_storage_get_host(msg_storage_t storage);
/************************** Host handling ***********************************/
SG_END_DECL()
+
+#ifdef __cplusplus
+XBT_PUBLIC(msg_process_t)
+MSG_process_create_from_stdfunc(const char* name, std::function<void()> code, void* data, msg_host_t host,
+ xbt_dict_t properties);
+#endif
+
#endif
-/* Copyright (c) 2004-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <simgrid/s4u/Activity.hpp>
#include <simgrid/s4u/Actor.hpp>
+#include <simgrid/s4u/Engine.hpp>
+#include <simgrid/s4u/Host.hpp>
#include <simgrid/s4u/Mailbox.hpp>
-#include <simgrid/s4u/engine.hpp>
-#include <simgrid/s4u/host.hpp>
+#include <simgrid/s4u/Comm.hpp>
+#include <simgrid/s4u/ConditionVariable.hpp>
#include <simgrid/s4u/Mutex.hpp>
-#include <simgrid/s4u/conditionVariable.hpp>
-#include <simgrid/s4u/comm.hpp>
-#include <simgrid/s4u/storage.hpp>
-#include <simgrid/s4u/file.hpp>
+#include <simgrid/s4u/File.hpp>
+#include <simgrid/s4u/Storage.hpp>
#endif /* SIMGRID_S4U_S4U_H */
/** @brief Simulation Agent */
XBT_PUBLIC_CLASS Actor : public simgrid::xbt::Extendable<Actor>
{
-
friend Mailbox;
friend simgrid::simix::ActorImpl;
friend simgrid::kernel::activity::MailboxImpl;
simgrid::xbt::string name();
/** Retrieves the host on which that actor is running */
s4u::Host* host();
- /** Retrieves the PID of that actor */
- int pid();
- /** Retrieves the PPID of that actor */
- int ppid();
+ /** Retrieves the PID of that actor
+ *
+ * aid_t is an alias for unsigned long */
+ aid_t pid();
+ /** Retrieves the PPID of that actor
+ *
+ * aid_t is an alias for unsigned long */
+ aid_t ppid();
+
+ /** Suspend an actor by suspending the task on which it was waiting for the completion. */
+ void suspend();
+
+ /** Resume a suspended process by resuming the task on which it was waiting for the completion. */
+ void resume();
+
+ /** Returns true if the process is suspended. */
+ int isSuspended();
/** If set to true, the actor will automatically restart when its host reboots */
void setAutoRestart(bool autorestart);
+
+ /** Add a function to the list of "on_exit" functions for the current actor. The on_exit functions are the functions
+ * executed when your actor is killed. You should use them to free the data used by your process.
+ */
+ void onExit(int_f_pvoid_pvoid_t fun, void* data);
+
/** Sets the time at which that actor should be killed */
void setKillTime(double time);
/** Retrieves the time at which that actor will be killed (or -1 if not set) */
double killTime();
+ void migrate(Host * new_host);
+
/** Ask the actor to die.
*
- * It will only notice your request when doing a simcall next time (a communication or similar).
- * SimGrid sometimes have issues when you kill actors that are currently communicating and such.
+ * Any blocking activity will be canceled, and it will be rescheduled to free its memory.
+ * Being killed is not something that actors can defer or avoid.
+ *
+ * SimGrid still sometimes has issues when you kill actors that are currently communicating and such.
* Still. Please report any bug that you may encounter with a minimal working example.
*/
void kill();
- static void kill(int pid);
+ static void kill(aid_t pid);
/** Retrieves the actor that have the given PID (or nullptr if not existing) */
- static ActorPtr byPid(int pid);
+ static ActorPtr byPid(aid_t pid);
/** @brief Wait for the actor to finish.
*
/** Ask kindly to all actors to die. Only the issuer will survive. */
static void killAll();
+ static void killAll(int resetPid);
/** Returns the internal implementation of this actor */
simix::ActorImpl* getImpl();
* See \ref Comm for the full communication API (including non blocking communications).
*/
XBT_PUBLIC(void*) recv(MailboxPtr chan);
+ XBT_PUBLIC(Comm&) irecv(MailboxPtr chan, void** data);
/** Block the actor until it delivers a message of the given simulated size to the given mailbox
*
* See \ref Comm for the full communication API (including non blocking communications).
*/
XBT_PUBLIC(void) send(MailboxPtr chan, void* payload, double simulatedSize);
+ XBT_PUBLIC(void) send(MailboxPtr chan, void* payload, double simulatedSize, double timeout);
+
+ XBT_PUBLIC(Comm&) isend(MailboxPtr chan, void* payload, double simulatedSize);
- /** @brief Returns the PID of the current actor. */
- XBT_PUBLIC(int) pid();
+ /** @brief Returns the actor ID of the current actor (same as pid). */
+ XBT_PUBLIC(aid_t) pid();
- /** @brief Returns the PPID of the current actor. */
- XBT_PUBLIC(int) ppid();
+ /** @brief Returns the ancestor's actor ID of the current actor (same as ppid). */
+ XBT_PUBLIC(aid_t) ppid();
/** @brief Returns the name of the current actor. */
XBT_PUBLIC(std::string) name();
+
+ /** @brief Returns the name of the host on which the process is running. */
+ XBT_PUBLIC(Host*) host();
+
+ /** @brief Suspend the actor. */
+ XBT_PUBLIC(void) suspend();
+
+ /** @brief Resume the actor. */
+ XBT_PUBLIC(void) resume();
+
+ XBT_PUBLIC(int) isSuspended();
+
+ /** @brief kill the actor. */
+ XBT_PUBLIC(void) kill();
+
+ /** @brief Add a function to the list of "on_exit" functions. */
+ XBT_PUBLIC(void) onExit(int_f_pvoid_pvoid_t fun, void* data);
+
+ /** @brief Migrate the actor to a new host. */
+ XBT_PUBLIC(void) migrate(Host* new_host);
};
/** @} */
#include <xbt/base.h>
+#include <simgrid/forward.h>
#include <simgrid/s4u/Activity.hpp>
#include <simgrid/s4u/forward.hpp>
-#include <simgrid/forward.h>
-
namespace simgrid {
namespace s4u {
-
/** @brief Communication async
*
* Represents all asynchronous communications, that you can test or wait onto.
*/
-XBT_PUBLIC_CLASS Comm : public Activity {
+XBT_PUBLIC_CLASS Comm : public Activity
+{
Comm() : Activity() {}
public:
~Comm() override;
- /*! take a range of s4u::Comm* (last excluded) and return when one of them is finished. The return value is an iterator on the finished Comms. */
- template<class I> static
- I wait_any(I first, I last)
+ /*! take a range of s4u::Comm* (last excluded) and return when one of them is finished. The return value is an
+ * iterator on the finished Comms. */
+ template <class I> static I wait_any(I first, I last)
{
// Map to dynar<Synchro*>:
xbt_dynar_t comms = xbt_dynar_new(sizeof(simgrid::kernel::activity::ActivityImpl*), NULL);
- for(I iter = first; iter != last; iter++) {
+ for (I iter = first; iter != last; iter++) {
Comm& comm = **iter;
if (comm.state_ == inited)
comm.start();
if (idx == -1)
return last;
// Lift the index to the corresponding iterator:
- auto res = std::next(first, idx);
+ auto res = std::next(first, idx);
(*res)->state_ = finished;
return res;
}
/*! Same as wait_any, but with a timeout. If wait_any_for return because of the timeout last is returned.*/
- template<class I> static
- I wait_any_for(I first, I last, double timeout)
+ template <class I> static I wait_any_for(I first, I last, double timeout)
{
// Map to dynar<Synchro*>:
xbt_dynar_t comms = xbt_dynar_new(sizeof(simgrid::kernel::activity::ActivityImpl*), NULL);
- for(I iter = first; iter != last; iter++) {
+ for (I iter = first; iter != last; iter++) {
Comm& comm = **iter;
if (comm.state_ == inited)
comm.start();
if (idx == -1)
return last;
// Lift the index to the corresponding iterator:
- auto res = std::next(first, idx);
+ auto res = std::next(first, idx);
(*res)->state_ = finished;
return res;
}
/** Creates (but don't start) an async send to the mailbox @p dest */
- static Comm &send_init(MailboxPtr dest);
+ static Comm& send_init(MailboxPtr dest);
/** Creates and start an async send to the mailbox @p dest */
- static Comm &send_async(MailboxPtr dest, void *data, int simulatedByteAmount);
- /** Creates (but don't start) an async recv onto the mailbox @p from */
- static Comm &recv_init(MailboxPtr from);
+ static Comm& send_async(MailboxPtr dest, void* data, int simulatedByteAmount);
+ /** Creates (but don't start) an async recv onto the mailbox @p from */
+ static Comm& recv_init(MailboxPtr from);
/** Creates and start an async recv to the mailbox @p from */
- static Comm &recv_async(MailboxPtr from, void **data);
+ static Comm& recv_async(MailboxPtr from, void** data);
void start() override;
void wait() override;
void setRate(double rate);
/** Specify the data to send */
- void setSrcData(void * buff);
+ void setSrcData(void* buff);
/** Specify the size of the data to send */
void setSrcDataSize(size_t size);
/** Specify the data to send and its size */
- void setSrcData(void * buff, size_t size);
+ void setSrcData(void* buff, size_t size);
/** Specify where to receive the data */
- void setDstData(void ** buff);
+ void setDstData(void** buff);
/** Specify the buffer in which the data should be received */
- void setDstData(void ** buff, size_t size);
+ void setDstData(void** buff, size_t size);
/** Retrieve the size of the received data */
size_t getDstDataSize();
bool test();
-
private:
- double rate_ = -1;
- void *dstBuff_ = nullptr;
+ double rate_ = -1;
+ void* dstBuff_ = nullptr;
size_t dstBuffSize_ = 0;
- void *srcBuff_ = nullptr;
+ void* srcBuff_ = nullptr;
size_t srcBuffSize_ = sizeof(void*);
/* FIXME: expose these elements in the API */
int detached_ = 0;
- int (*matchFunction_)(void *, void *, smx_activity_t) = nullptr;
- void (*cleanFunction_)(void *) = nullptr;
+ int (*matchFunction_)(void*, void*, smx_activity_t) = nullptr;
+ void (*cleanFunction_)(void*) = nullptr;
void (*copyDataFunction_)(smx_activity_t, void*, size_t) = nullptr;
- smx_actor_t sender_ = nullptr;
+ smx_actor_t sender_ = nullptr;
smx_actor_t receiver_ = nullptr;
- MailboxPtr mailbox_ = nullptr;
+ MailboxPtr mailbox_ = nullptr;
};
-
-}} // namespace simgrid::s4u
+}
+} // namespace simgrid::s4u
#endif /* SIMGRID_S4U_COMM_HPP */
-/* Copyright (c) 2006-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <xbt/base.h>
-#include <simgrid/simix.h>
#include <simgrid/chrono.hpp>
#include <simgrid/s4u/Mutex.hpp>
+#include <simgrid/simix.h>
namespace simgrid {
namespace s4u {
* semantic. But we currently use (only) double for both durations and
* timestamp timeouts.
*/
-XBT_PUBLIC_CLASS ConditionVariable {
+XBT_PUBLIC_CLASS ConditionVariable
+{
private:
friend s_smx_cond;
smx_cond_t cond_;
- ConditionVariable(smx_cond_t cond) : cond_(cond) {}
+ explicit ConditionVariable(smx_cond_t cond) : cond_(cond) {}
public:
-
ConditionVariable(ConditionVariable const&) = delete;
ConditionVariable& operator=(ConditionVariable const&) = delete;
- friend XBT_PUBLIC(void) intrusive_ptr_add_ref(ConditionVariable* cond);
- friend XBT_PUBLIC(void) intrusive_ptr_release(ConditionVariable* cond);
+ friend XBT_PUBLIC(void) intrusive_ptr_add_ref(ConditionVariable * cond);
+ friend XBT_PUBLIC(void) intrusive_ptr_release(ConditionVariable * cond);
using Ptr = boost::intrusive_ptr<ConditionVariable>;
static Ptr createConditionVariable();
// Wait functions without time:
- void wait(std::unique_lock<Mutex>& lock);
- template<class P>
- void wait(std::unique_lock<Mutex>& lock, P pred)
+ void wait(MutexPtr lock);
+ void wait(std::unique_lock<Mutex> & lock);
+ template <class P> void wait(std::unique_lock<Mutex> & lock, P pred)
{
while (!pred())
wait(lock);
// Wait function taking a plain double as time:
- std::cv_status wait_until(std::unique_lock<Mutex>& lock, double timeout_time);
- std::cv_status wait_for(std::unique_lock<Mutex>& lock, double duration);
- template<class P>
- bool wait_until(std::unique_lock<Mutex>& lock, double timeout_time, P pred)
+ std::cv_status wait_until(std::unique_lock<Mutex> & lock, double timeout_time);
+ std::cv_status wait_for(std::unique_lock<Mutex> & lock, double duration);
+ template <class P> bool wait_until(std::unique_lock<Mutex> & lock, double timeout_time, P pred)
{
while (!pred())
if (this->wait_until(lock, timeout_time) == std::cv_status::timeout)
return pred();
return true;
}
- template<class P>
- bool wait_for(std::unique_lock<Mutex>& lock, double duration, P pred)
+ template <class P> bool wait_for(std::unique_lock<Mutex> & lock, double duration, P pred)
{
return this->wait_until(lock, SIMIX_get_clock() + duration, std::move(pred));
}
// Wait function taking a C++ style time:
- template<class Rep, class Period, class P>
- bool wait_for(
- std::unique_lock<Mutex>& lock, std::chrono::duration<Rep, Period> duration,
- P pred)
+ template <class Rep, class Period, class P>
+ bool wait_for(std::unique_lock<Mutex> & lock, std::chrono::duration<Rep, Period> duration, P pred)
{
auto seconds = std::chrono::duration_cast<SimulationClockDuration>(duration);
return this->wait_for(lock, seconds.count(), pred);
}
- template<class Rep, class Period>
- std::cv_status wait_for(
- std::unique_lock<Mutex>& lock, std::chrono::duration<Rep, Period> duration)
+ template <class Rep, class Period>
+ std::cv_status wait_for(std::unique_lock<Mutex> & lock, std::chrono::duration<Rep, Period> duration)
{
auto seconds = std::chrono::duration_cast<SimulationClockDuration>(duration);
return this->wait_for(lock, seconds.count());
}
- template<class Duration>
- std::cv_status wait_until(std::unique_lock<Mutex>& lock,
- const SimulationTimePoint<Duration>& timeout_time)
+ template <class Duration>
+ std::cv_status wait_until(std::unique_lock<Mutex> & lock, const SimulationTimePoint<Duration>& timeout_time)
{
auto timeout_native = std::chrono::time_point_cast<SimulationClockDuration>(timeout_time);
return this->wait_until(lock, timeout_native.time_since_epoch().count());
}
- template<class Duration, class P>
- bool wait_until(std::unique_lock<Mutex>& lock,
- const SimulationTimePoint<Duration>& timeout_time, P pred)
+ template <class Duration, class P>
+ bool wait_until(std::unique_lock<Mutex> & lock, const SimulationTimePoint<Duration>& timeout_time, P pred)
{
auto timeout_native = std::chrono::time_point_cast<SimulationClockDuration>(timeout_time);
- return this->wait_until(lock, timeout_native.time_since_epoch().count(),
- std::move(pred));
+ return this->wait_until(lock, timeout_native.time_since_epoch().count(), std::move(pred));
}
// Notify functions
};
using ConditionVariablePtr = ConditionVariable::Ptr;
+}
+} // namespace simgrid::s4u
-}} // namespace simgrid::s4u
-
-#endif /* SIMGRID_S4U_COND_VARIABLE_HPP */
+#endif
*
* This class is an interface to the simulation engine.
*/
-XBT_PUBLIC_CLASS Engine {
+XBT_PUBLIC_CLASS Engine
+{
private:
~Engine();
public:
/** Constructor, taking the command line parameters of your main function */
- Engine(int *argc, char **argv);
+ Engine(int* argc, char** argv);
/** Finalize the default engine and all its dependencies */
static void shutdown();
* The environment is either a XML file following the simgrid.dtd formalism, or a lua file.
* Some examples can be found in the directory examples/platforms.
*/
- void loadPlatform(const char *platf);
+ void loadPlatform(const char* platf);
/** Registers the main function of an actor that will be launched from the deployment file */
- void registerFunction(const char*name, int (*code)(int,char**));
+ void registerFunction(const char* name, int (*code)(int, char**));
/** Registers a function as the default main function of actors
*
* It will be used as fallback when the function requested from the deployment file was not registered.
* It is used for trace-based simulations (see examples/msg/actions).
*/
- void registerDefault(int (*code)(int,char**));
+ void registerDefault(int (*code)(int, char**));
/** @brief Load a deployment file and launch the actors that it contains */
- void loadDeployment(const char *deploy);
+ void loadDeployment(const char* deploy);
size_t hostCount();
void hostList(std::vector<Host*> * whereTo);
/** @brief Retrieve the simulation time */
static double getClock();
-
+
/** @brief Retrieve the engine singleton */
- static s4u::Engine *instance();
+ static s4u::Engine* instance();
/** @brief Retrieve the root netzone, containing all others */
simgrid::s4u::NetZone* netRoot();
void netpointRegister(simgrid::kernel::routing::NetPoint * card);
void netpointUnregister(simgrid::kernel::routing::NetPoint * card);
- template<class F>
- void registerFunction(const char* name)
+ template <class F> void registerFunction(const char* name)
{
- simgrid::simix::registerFunction(name, [](std::vector<std::string> args){
+ simgrid::simix::registerFunction(name, [](std::vector<std::string> args) {
return simgrid::simix::ActorCode([args] {
F code(std::move(args));
code();
});
}
- template<class F>
- void registerFunction(const char* name, F code)
+ template <class F> void registerFunction(const char* name, F code)
{
- simgrid::simix::registerFunction(name, [code](std::vector<std::string> args){
- return simgrid::simix::ActorCode([code,args] {
- code(std::move(args));
- });
+ simgrid::simix::registerFunction(name, [code](std::vector<std::string> args) {
+ return simgrid::simix::ActorCode([code, args] { code(std::move(args)); });
});
}
+ /** Returns whether SimGrid was initialized yet -- mostly for internal use */
+ static bool isInitialized();
+
simgrid::kernel::EngineImpl* pimpl;
private:
- static s4u::Engine *instance_;
+ static s4u::Engine* instance_;
};
/** Callback fired when the platform is created (ie, the xml file parsed),
/** Callback fired when the time jumps into the future */
extern XBT_PRIVATE xbt::signal<void(double)> onTimeAdvance;
-}} // namespace simgrid::s4u
+}
+} // namespace simgrid::s4u
#endif /* SIMGRID_S4U_ENGINE_HPP */
*
* Used to simulate the time it takes to access to a file, but does not really store any information.
*
- * They are located on @ref simgrid::s4u::Storage that are accessed from a given @ref simgrid::s4u::Host through mountpoints.
+ * They are located on @ref simgrid::s4u::Storage that are accessed from a given @ref simgrid::s4u::Host through
+ * mountpoints.
 * For now, you cannot change the mountpoints programmatically, and must declare them from your platform file.
*/
-XBT_PUBLIC_CLASS File {
+XBT_PUBLIC_CLASS File
+{
public:
- File(const char *fullpath, void* userdata);
+ File(const char* fullpath, void* userdata);
~File();
/** Retrieves the path to the file */
- const char *path() { return path_;}
+ const char* path() { return path_; }
/** Simulates a read action. Returns the size of data actually read
*
sg_size_t write(sg_size_t size);
/** Allows to store user data on that host */
- void setUserdata(void *data) {userdata_ = data;}
+ void setUserdata(void* data) { userdata_ = data; }
/** Retrieves the previously stored data */
- void* userdata() {return userdata_;}
+ void* userdata() { return userdata_; }
/** Retrieve the datasize */
sg_size_t size();
/** Rename a file
*
* WARNING: It is forbidden to move the file to another mount point */
- void move(const char*fullpath);
+ void move(const char* fullpath);
/** Remove a file from disk */
void unlink();
private:
smx_file_t pimpl_ = nullptr;
- const char *path_ = nullptr;
- void *userdata_ = nullptr;
+ const char* path_ = nullptr;
+ void* userdata_ = nullptr;
};
-
-}} // namespace simgrid::s4u
+}
+} // namespace simgrid::s4u
#endif /* SIMGRID_S4U_HOST_HPP */
namespace simgrid {
namespace xbt {
- extern template class XBT_PUBLIC() Extendable<simgrid::s4u::Host>;
+extern template class XBT_PUBLIC() Extendable<simgrid::s4u::Host>;
}
namespace s4u {
/** @ingroup s4u_api
*
- * @tableofcontents
+ * @tableofcontents
*
* An host represents some physical resource with computing and networking capabilities.
*
* You can retrieve a particular host using simgrid::s4u::Host::byName()
* and actors can retrieve the host on which they run using simgrid::s4u::Host::current().
*/
-XBT_PUBLIC_CLASS Host :
- public simgrid::xbt::Extendable<Host> {
+XBT_PUBLIC_CLASS Host : public simgrid::xbt::Extendable<Host>
+{
public:
- explicit Host(const char *name);
+ explicit Host(const char* name);
/** Host destruction logic */
protected:
private:
bool currentlyDestroying_ = false;
+
public:
void destroy();
// No copy/move
/** Retrieves an host from its name, or return nullptr */
static Host* by_name_or_null(std::string name);
/** Retrieves an host from its name, or die */
- static s4u::Host *by_name(std::string name);
+ static s4u::Host* by_name(std::string name);
/** Retrieves the host on which the current actor is running */
- static s4u::Host *current();
+ static s4u::Host* current();
simgrid::xbt::string const& name() const { return name_; }
const char* cname() { return name_.c_str(); }
+ void actorList(std::vector<ActorPtr> * whereto);
+
/** Turns that host on if it was previously off
*
* All actors on that host which were marked autorestart will be restarted automatically.
double speed();
int coreCount();
xbt_dict_t properties();
- const char*property(const char*key);
- void setProperty(const char*key, const char *value);
- void processes(std::vector<ActorPtr>* list);
+ const char* property(const char* key);
+ void setProperty(const char* key, const char* value);
+ void processes(std::vector<ActorPtr> * list);
double getPstateSpeed(int pstate_index);
int pstatesCount() const;
void setPstate(int pstate_index);
*
* This is defined in the platform file, and cannot be modified programatically (yet).
*/
- boost::unordered_map<std::string, Storage*> const &mountedStorages();
+ boost::unordered_map<std::string, Storage*> const& mountedStorages();
void routeTo(Host * dest, std::vector<Link*> * links, double* latency);
void routeTo(Host * dest, std::vector<surf::LinkImpl*> * links, double* latency);
private:
simgrid::xbt::string name_ = "noname";
- boost::unordered_map<std::string, Storage*> *mounts = nullptr; // caching
+ boost::unordered_map<std::string, Storage*>* mounts = nullptr; // caching
public:
// TODO, this could be a unique_ptr
surf::HostImpl* pimpl_ = nullptr;
/** DO NOT USE DIRECTLY (@todo: these should be protected, once our code is clean) */
- surf::Cpu *pimpl_cpu = nullptr;
+ surf::Cpu* pimpl_cpu = nullptr;
/** DO NOT USE DIRECTLY (@todo: these should be protected, once our code is clean) */
kernel::routing::NetPoint* pimpl_netpoint = nullptr;
* (either because of a pstate switch or because of an external load event coming from the profile) */
static simgrid::xbt::signal<void(Host&)> onSpeedChange;
};
-
-}} // namespace simgrid::s4u
+}
+} // namespace simgrid::s4u
extern int USER_HOST_LEVEL;
#endif /* SIMGRID_S4U_HOST_HPP */
#if 0
-/* Bindings to the MSG hosts */
-
-/* Copyright (c) 2006-2014. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-package org.simgrid.msg;
public class Host {
- /**
- * This static method returns all of the hosts of the installed platform.
- *
- * @return An array containing all the hosts installed.
- *
- */
- public native static Host[] all();
-
- /**
- * This static method sets a mailbox to receive in asynchronous mode.
- *
- * All messages sent to this mailbox will be transferred to
- * the receiver without waiting for the receive call.
- * The receive call will still be necessary to use the received data.
- * If there is a need to receive some messages asynchronously, and some not,
- * two different mailboxes should be used.
- *
- * @param mailboxName The name of the mailbox
- */
- public static native void setAsyncMailbox(String mailboxName);
/**
* This method returns the number of tasks currently running on a host.
public native int getLoad();
- /** This methods returns the list of storages attached to an host
- * @return An array containing all storages (name) attached to the host
- */
- public native String[] getAttachedStorage();
-
-
-}
+}
#endif
-/* Copyright (c) 2006-2015. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
/** Returns whether the mailbox contains queued communications */
bool empty();
+ /** Check if there is a communication going on in a mailbox. */
+ bool listen();
+
/** Gets the first element in the queue (without dequeuing it), or nullptr if none is there */
smx_activity_t front();
class ConditionVariable;
/** @brief A classical mutex, but blocking in the simulation world
+ * @ingroup s4u_api
*
- * It is strictly impossible to use a real mutex (such as
+ * It is strictly impossible to use a real mutex, such as
* [std::mutex](http://en.cppreference.com/w/cpp/thread/mutex)
- * or [pthread_mutex_t](http://pubs.opengroup.org/onlinepubs/007908775/xsh/pthread_mutex_lock.html)),
+ * or [pthread_mutex_t](http://pubs.opengroup.org/onlinepubs/007908775/xsh/pthread_mutex_lock.html),
* because it would block the whole simulation.
* Instead, you should use the present class, that is a drop-in replacement of
* [std::mutex](http://en.cppreference.com/w/cpp/thread/mutex).
--- /dev/null
+/* Copyright (c) 2006-2015, 2017. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef INCLUDE_SIMGRID_S4U_STORAGE_HPP_
+#define INCLUDE_SIMGRID_S4U_STORAGE_HPP_
+
+#include <map>
+#include <simgrid/s4u/forward.hpp>
+#include <simgrid/simix.h>
+#include <string>
+#include <unordered_map>
+#include <xbt/base.h>
+
+namespace simgrid {
+namespace s4u {
+
+XBT_PUBLIC_CLASS Storage
+{
+ friend s4u::Engine;
+
+ Storage(std::string name, smx_storage_t inferior);
+
+public:
+ Storage() = default;
+ virtual ~Storage();
+ /** Retrieve a Storage by its name. It must exist in the platform file */
+ static Storage& byName(const char* name);
+ const char* name();
+ const char* host();
+ sg_size_t sizeFree();
+ sg_size_t sizeUsed();
+ /** Retrieve the total amount of space of this storage element */
+ sg_size_t size();
+ xbt_dict_t properties();
+ const char* property(const char* key);
+ void setProperty(const char* key, char* value);
+ std::map<std::string, sg_size_t*>* content();
+ std::unordered_map<std::string, Storage*>* allStorages();
+
+protected:
+ smx_storage_t inferior();
+
+public:
+ void setUserdata(void* data) { userdata_ = data; }
+ void* userdata() { return userdata_; }
+
+private:
+ static std::unordered_map<std::string, Storage*>* storages_;
+
+ std::string hostname_;
+ std::string name_;
+ sg_size_t size_ = 0;
+ smx_storage_t pimpl_ = nullptr;
+ void* userdata_ = nullptr;
+};
+
+} /* namespace s4u */
+} /* namespace simgrid */
+
+#endif /* INCLUDE_SIMGRID_S4U_STORAGE_HPP_ */
-/* Copyright (c) 2015-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2015-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#define SIMGRID_S4U_VM_HPP
#include "simgrid/datatypes.h"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/forward.hpp"
-#include "simgrid/s4u/host.hpp"
typedef enum {
SURF_VM_STATE_CREATED, /**< created, but not yet started */
SURF_VM_STATE_RUNNING,
SURF_VM_STATE_SUSPENDED, /**< Suspend/resume does not involve disk I/O, so we assume there is no transition states. */
+ SURF_VM_STATE_DESTROYED
} e_surf_vm_state_t;
namespace simgrid {
double getRamsize();
simgrid::s4u::Host* pm();
+ e_surf_vm_state_t getState();
+
/* FIXME: protect me */
simgrid::vm::VirtualMachineImpl* pimpl_vm_ = nullptr;
};
+++ /dev/null
-/* Copyright (c) 2006-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#ifndef INCLUDE_SIMGRID_S4U_STORAGE_HPP_
-#define INCLUDE_SIMGRID_S4U_STORAGE_HPP_
-
-#include <string>
-
-#include <boost/unordered_map.hpp>
-
-#include <xbt/base.h>
-
-#include <simgrid/simix.h>
-#include <simgrid/s4u/forward.hpp>
-
-namespace simgrid {
-namespace s4u {
-
-XBT_PUBLIC_CLASS Storage {
- friend s4u::Engine;
-
- Storage(std::string name, smx_storage_t inferior);
- virtual ~Storage();
-
-public:
- /** Retrieve a Storage by its name. It must exist in the platform file */
- static Storage &byName(const char* name);
- const char *name();
- sg_size_t sizeFree();
- sg_size_t sizeUsed();
- /** Retrieve the total amount of space of this storage element */
- sg_size_t size();
-
- /* TODO: missing API:
-XBT_PUBLIC(xbt_dict_t) MSG_storage_get_properties(msg_storage_t storage);
-XBT_PUBLIC(void) MSG_storage_set_property_value(msg_storage_t storage, const char *name, char *value,void_f_pvoid_t free_ctn);
-XBT_PUBLIC(const char *)MSG_storage_get_property_value(msg_storage_t storage, const char *name);
-XBT_PUBLIC(xbt_dynar_t) MSG_storages_as_dynar(void);
-XBT_PUBLIC(xbt_dict_t) MSG_storage_get_content(msg_storage_t storage);
-XBT_PUBLIC(msg_error_t) MSG_storage_file_move(msg_file_t fd, msg_host_t dest, char* mount, char* fullname);
-XBT_PUBLIC(const char *) MSG_storage_get_host(msg_storage_t storage);
- */
-protected:
- smx_storage_t inferior();
-
-public:
- void setUserdata(void *data) {userdata_ = data;}
- void *userdata() {return userdata_;}
-
-private:
- static boost::unordered_map<std::string, Storage *> *storages_;
-
- std::string name_;
- smx_storage_t pimpl_ = nullptr;
- void *userdata_ = nullptr;
-};
-
-} /* namespace s4u */
-} /* namespace simgrid */
-
-#endif /* INCLUDE_SIMGRID_S4U_STORAGE_HPP_ */
XBT_PUBLIC(void) SD_init(int *argc, char **argv);
XBT_PUBLIC(void) SD_config(const char *key, const char *value);
XBT_PUBLIC(void) SD_create_environment(const char *platform_file);
-XBT_PUBLIC(xbt_dynar_t) SD_simulate(double how_long);
+XBT_PUBLIC(void) SD_simulate(double how_long);
+XBT_PUBLIC(void) SD_simulate_with_update(double how_long, xbt_dynar_t changed_tasks_dynar);
XBT_PUBLIC(double) SD_get_clock();
XBT_PUBLIC(void) SD_exit();
XBT_PUBLIC(xbt_dynar_t) SD_daxload(const char *filename);
SG_BEGIN_DECL()
XBT_PUBLIC(xbt_dynar_t) SIMIX_process_get_runnable();
-XBT_PUBLIC(smx_actor_t) SIMIX_process_from_PID(int PID);
+XBT_PUBLIC(smx_actor_t) SIMIX_process_from_PID(aid_t PID);
XBT_PUBLIC(xbt_dynar_t) SIMIX_processes_as_dynar();
/* parallelism */
XBT_PUBLIC(int) simcall_file_seek(smx_file_t fd, sg_offset_t offset, int origin);
XBT_PUBLIC(int) simcall_file_move(smx_file_t fd, const char* fullpath);
/***************************** Storage **********************************/
-XBT_PUBLIC(sg_size_t) simcall_storage_get_free_size (smx_storage_t storage);
-XBT_PUBLIC(sg_size_t) simcall_storage_get_used_size (smx_storage_t storage);
XBT_PUBLIC(xbt_dict_t) simcall_storage_get_properties(smx_storage_t storage);
-XBT_PUBLIC(void*) SIMIX_storage_get_data(smx_storage_t storage);
-XBT_PUBLIC(void) SIMIX_storage_set_data(smx_storage_t storage, void *data);
-XBT_PUBLIC(xbt_dict_t) SIMIX_storage_get_content(smx_storage_t storage);
-XBT_PUBLIC(xbt_dict_t) simcall_storage_get_content(smx_storage_t storage);
XBT_PUBLIC(const char*) SIMIX_storage_get_name(smx_storage_t storage);
-XBT_PUBLIC(sg_size_t) SIMIX_storage_get_size(smx_storage_t storage);
-XBT_PUBLIC(const char*) SIMIX_storage_get_host(smx_storage_t storage);
/************************** MC simcalls **********************************/
XBT_PUBLIC(int) simcall_mc_random(int min, int max);
typedef simgrid::smpi::Group SMPI_Group;
typedef simgrid::smpi::Info SMPI_Info;
typedef simgrid::smpi::Op SMPI_Op;
+typedef simgrid::smpi::Process SMPI_Process;
typedef simgrid::smpi::Request SMPI_Request;
typedef simgrid::smpi::Topo SMPI_Topology;
typedef simgrid::smpi::Topo_Cart SMPI_Cart_topology;
typedef struct SMPI_Group SMPI_Group;
typedef struct SMPI_Info SMPI_Info;
typedef struct SMPI_Op SMPI_Op;
+typedef struct SMPI_Process SMPI_Process;
typedef struct SMPI_Request SMPI_Request;
typedef struct SMPI_Topology SMPI_Topology;
typedef struct SMPI_Win SMPI_Win;
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
XBT_PUBLIC_DATA( MPI_Op ) MPI_BXOR;
//For accumulate
XBT_PUBLIC_DATA( MPI_Op ) MPI_REPLACE;
+XBT_PUBLIC_DATA( MPI_Op ) MPI_NO_OP;
typedef SMPI_Group* MPI_Group;
MPI_CALL(XBT_PUBLIC(int), MPI_Testall, (int count, MPI_Request* requests, int* flag, MPI_Status* statuses));
MPI_CALL(XBT_PUBLIC(int), MPI_Op_create, (MPI_User_function * function, int commute, MPI_Op * op));
MPI_CALL(XBT_PUBLIC(int), MPI_Op_free, (MPI_Op * op));
+MPI_CALL(XBT_PUBLIC(int), MPI_Op_commutative, (MPI_Op op, int *commute));
MPI_CALL(XBT_PUBLIC(int), MPI_Group_free, (MPI_Group * group));
MPI_CALL(XBT_PUBLIC(int), MPI_Group_size, (MPI_Group group, int *size));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_free,( MPI_Win* win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_create,( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm,
MPI_Win *win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_allocate,( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base,
+ MPI_Win *win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_create_dynamic,( MPI_Info info, MPI_Comm comm,
+ MPI_Win *win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_attach,(MPI_Win win, void *base, MPI_Aint size));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_detach,(MPI_Win win, void *base));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_set_name,(MPI_Win win, char * name));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_get_name,(MPI_Win win, char * name, int* len));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_set_info,(MPI_Win win, MPI_Info info));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_get_info,(MPI_Win win, MPI_Info* info));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_get_group,(MPI_Win win, MPI_Group * group));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_fence,( int assert, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_get_attr, (MPI_Win type, int type_keyval, void *attribute_val, int* flag));
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Accumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Get_accumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+ void* result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+ int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win));
+
+MPI_CALL(XBT_PUBLIC(int), MPI_Rget,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC(int), MPI_Rput,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC(int), MPI_Raccumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+ int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request));
+MPI_CALL(XBT_PUBLIC(int), MPI_Rget_accumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+ void* result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+ int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request));
+
+MPI_CALL(XBT_PUBLIC(int), MPI_Fetch_and_op,( void *origin_addr, void* result_addr, MPI_Datatype datatype,
+ int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Compare_and_swap, (void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win));
+
MPI_CALL(XBT_PUBLIC(int), MPI_Alloc_mem, (MPI_Aint size, MPI_Info info, void *baseptr));
MPI_CALL(XBT_PUBLIC(int), MPI_Free_mem, (void *base));
MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes));
MPI_CALL(XBT_PUBLIC(int), MPI_Comm_get_parent,( MPI_Comm *parent));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_complete,(MPI_Win win));
-MPI_CALL(XBT_PUBLIC(int), MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win));
+
MPI_CALL(XBT_PUBLIC(int), MPI_Win_post,(MPI_Group group, int assert, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_start,(MPI_Group group, int assert, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_test,(MPI_Win win, int *flag));
-MPI_CALL(XBT_PUBLIC(int), MPI_Win_unlock,(int rank, MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_Win_wait,(MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_lock_all,(int assert, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_unlock,(int rank, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_unlock_all,(MPI_Win win));
+
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush,(int rank, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush_local,(int rank, MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush_all,(MPI_Win win));
+MPI_CALL(XBT_PUBLIC(int), MPI_Win_flush_local_all,(MPI_Win win));
MPI_CALL(XBT_PUBLIC(int), MPI_File_get_errhandler , (MPI_File file, MPI_Errhandler *errhandler));
MPI_CALL(XBT_PUBLIC(int), MPI_File_set_errhandler, (MPI_File file, MPI_Errhandler errhandler));
XBT_PUBLIC(void *) smpi_shared_malloc(size_t size, const char *file, int line);
#define SMPI_SHARED_MALLOC(size) smpi_shared_malloc(size, __FILE__, __LINE__)
+XBT_PUBLIC(void*) smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks);
+#define SMPI_PARTIAL_SHARED_MALLOC(size, shared_block_offsets, nb_shared_blocks) \
+ smpi_shared_malloc_partial(size, shared_block_offsets, nb_shared_blocks)
XBT_PUBLIC(void) smpi_shared_free(void *data);
#define SMPI_SHARED_FREE(data) smpi_shared_free(data)
/* Fortran specific stuff */
-XBT_PUBLIC(int) __attribute__((weak)) smpi_simulated_main_(int argc, char** argv);
-XBT_PUBLIC(int) __attribute__((weak)) MAIN__();
-XBT_PUBLIC(int) smpi_main(int (*realmain) (int argc, char *argv[]),int argc, char *argv[]);
-XBT_PUBLIC(void) __attribute__((weak)) user_main_();
+XBT_PUBLIC(int) smpi_main(const char* program, int argc, char *argv[]);
XBT_PUBLIC(int) smpi_process_index();
XBT_PUBLIC(void) smpi_process_init(int *argc, char ***argv);
#define MPI_Testall(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Testall(__VA_ARGS__); })
#define MPI_Op_create(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Op_create(__VA_ARGS__); })
#define MPI_Op_free(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Op_free(__VA_ARGS__); })
+#define MPI_Op_commutative(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Op_commutative(__VA_ARGS__); })
#define MPI_Group_free(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Group_free(__VA_ARGS__); })
#define MPI_Group_size(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Group_size(__VA_ARGS__); })
#define MPI_Group_rank(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Group_rank(__VA_ARGS__); })
#define MPI_Reduce_local(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Reduce_local(__VA_ARGS__); })
#define MPI_Win_free(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_free(__VA_ARGS__); })
#define MPI_Win_create(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_create(__VA_ARGS__); })
+#define MPI_Win_allocate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_allocate(__VA_ARGS__); })
+#define MPI_Win_create_dynamic(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_create_dynamic(__VA_ARGS__); })
+#define MPI_Win_attach(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_attach(__VA_ARGS__); })
+#define MPI_Win_detach(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_detach(__VA_ARGS__); })
#define MPI_Win_set_name(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_set_name(__VA_ARGS__); })
#define MPI_Win_get_name(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_name(__VA_ARGS__); })
+#define MPI_Win_set_info(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_set_info(__VA_ARGS__); })
+#define MPI_Win_get_info(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_info(__VA_ARGS__); })
#define MPI_Win_get_group(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_group(__VA_ARGS__); })
#define MPI_Win_fence(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_fence(__VA_ARGS__); })
#define MPI_Win_get_attr(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_get_attr(__VA_ARGS__); })
#define MPI_Get(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Get(__VA_ARGS__); })
#define MPI_Put(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Put(__VA_ARGS__); })
#define MPI_Accumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Accumulate(__VA_ARGS__); })
+#define MPI_Get_accumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Get_accumulate(__VA_ARGS__); })
+#define MPI_Rget(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Rget(__VA_ARGS__); })
+#define MPI_Rput(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Rput(__VA_ARGS__); })
+#define MPI_Raccumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Raccumulate(__VA_ARGS__); })
+#define MPI_Rget_accumulate(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Rget_accumulate(__VA_ARGS__); })
+#define MPI_Fetch_and_op(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Fetch_and_op(__VA_ARGS__); })
+#define MPI_Compare_and_swap(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Compare_and_swap(__VA_ARGS__); })
#define MPI_Alloc_mem(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Alloc_mem(__VA_ARGS__); })
#define MPI_Free_mem(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Free_mem(__VA_ARGS__); })
#define MPI_Type_f2c(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Type_f2c(__VA_ARGS__); })
#define MPI_Comm_spawn_multiple(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Comm_spawn_multiple(__VA_ARGS__); })
#define MPI_Comm_get_parent(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Comm_get_parent(__VA_ARGS__); })
#define MPI_Win_complete(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_complete(__VA_ARGS__); })
-#define MPI_Win_lock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_lock(__VA_ARGS__); })
#define MPI_Win_post(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_post(__VA_ARGS__); })
#define MPI_Win_start(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_start(__VA_ARGS__); })
#define MPI_Win_test(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_test(__VA_ARGS__); })
-#define MPI_Win_unlock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_unlock(__VA_ARGS__); })
#define MPI_Win_wait(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_wait(__VA_ARGS__); })
+#define MPI_Win_lock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_lock(__VA_ARGS__); })
+#define MPI_Win_lock_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_lock_all(__VA_ARGS__); })
+#define MPI_Win_unlock(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_unlock(__VA_ARGS__); })
+#define MPI_Win_unlock_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_unlock_all(__VA_ARGS__); })
+#define MPI_Win_flush(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush(__VA_ARGS__); })
+#define MPI_Win_flush_local(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush_local(__VA_ARGS__); })
+#define MPI_Win_flush_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush_all(__VA_ARGS__); })
+#define MPI_Win_flush_local_all(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_Win_flush_local_all(__VA_ARGS__); })
#define MPI_File_get_errhandler(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_File_get_errhandler(__VA_ARGS__); })
#define MPI_File_set_errhandler(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_File_set_errhandler(__VA_ARGS__); })
#define MPI_File_open(...) ({ smpi_trace_set_call_location(__FILE__,__LINE__); MPI_File_open(__VA_ARGS__); })
#define MPI_TESTALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_testall
#define MPI_OP_CREATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_op_create
#define MPI_OP_FREE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_op_free
+#define MPI_OP_COMMUTATIVE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_op_commutative
#define MPI_GROUP_FREE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_group_free
#define MPI_GROUP_SIZE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_group_size
#define MPI_GROUP_RANK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_group_rank
#define MPI_REDUCE_LOCAL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_reduce_local
#define MPI_WIN_FREE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_free
#define MPI_WIN_CREATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_create
+#define MPI_WIN_ALLOCATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_allocate
+#define MPI_WIN_CREATE_DYNAMIC smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_create_dynamic
+#define MPI_WIN_ATTACH smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_attach
+#define MPI_WIN_DETACH smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_detach
#define MPI_WIN_SET_NAME smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_set_name
#define MPI_WIN_GET_NAME smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_name
+#define MPI_WIN_SET_INFO smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_set_info
+#define MPI_WIN_GET_INFO smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_info
#define MPI_WIN_GET_GROUP smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_group
#define MPI_WIN_FENCE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_fence
#define MPI_WIN_GET_ATTR smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_get_attr
#define MPI_GET smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_get
#define MPI_PUT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_put
#define MPI_ACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_accumulate
+#define MPI_GET_ACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_get_accumulate
+#define MPI_RGET smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_rget
+#define MPI_RPUT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_rput
+#define MPI_RACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_raccumulate
+#define MPI_RGET_ACCUMULATE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_rget_accumulate
+#define MPI_FETCH_AND_OP smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_fetch_and_op
+#define MPI_COMPARE_AND_SWAP smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_compare_and_swap
#define MPI_ALLOC_MEM smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_alloc_mem
#define MPI_FREE_MEM smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_free_mem
#define MPI_TYPE_F2C smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_type_f2c
#define MPI_COMM_SPAWN_MULTIPLE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_comm_spawn_multiple
#define MPI_COMM_GET_PARENT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_comm_get_parent
#define MPI_WIN_COMPLETE smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_complete
-#define MPI_WIN_LOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_lock
#define MPI_WIN_POST smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_post
#define MPI_WIN_START smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_start
#define MPI_WIN_TEST smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_test
-#define MPI_WIN_UNLOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_unlock
#define MPI_WIN_WAIT smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_wait
+#define MPI_WIN_LOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_lock
+#define MPI_WIN_LOCK_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_lock_all
+#define MPI_WIN_UNLOCK smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_unlock
+#define MPI_WIN_UNLOCK_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_unlock_all
+#define MPI_WIN_FLUSH smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush
+#define MPI_WIN_FLUSH_LOCAL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush_local
+#define MPI_WIN_FLUSH_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush_all
+#define MPI_WIN_FLUSH_LOCAL_ALL smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_win_flush_local_all
#define MPI_FILE_GET_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_file_get_errhandler
#define MPI_FILE_SET_ERRHANDLER smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_file_set_errhandler
#define MPI_FILE_OPEN smpi_trace_set_call_location(__FILE__,__LINE__); call mpi_file_open
--- /dev/null
+#ifndef SMPI_SHARED_HPP
+#define SMPI_SHARED_HPP
+#include <xbt/function_types.h>
+#include <xbt/misc.h>
+#include <vector>
+
+
+/*
+ * We cannot put this declaration in smpi.h, since we use C++ features.
+ */
+
+
+XBT_PUBLIC(int) smpi_is_shared(void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset);
+
+std::vector<std::pair<size_t, size_t>> shift_and_frame_private_blocks(const std::vector<std::pair<size_t, size_t>> vec, size_t offset, size_t buff_size);
+std::vector<std::pair<size_t, size_t>> merge_private_blocks(std::vector<std::pair<size_t, size_t>> src, std::vector<std::pair<size_t, size_t>> dst);
+
+#endif
XBT_PUBLIC_DATA(xbt_lib_t) storage_lib;
XBT_PUBLIC_DATA(int) ROUTING_STORAGE_LEVEL; //Routing storage level
XBT_PUBLIC_DATA(int) SURF_STORAGE_LEVEL; // Surf storage level
-XBT_PUBLIC_DATA(xbt_lib_t) file_lib;
XBT_PUBLIC_DATA(xbt_lib_t) storage_type_lib;
XBT_PUBLIC_DATA(int) ROUTING_STORAGE_TYPE_LEVEL; //Routing storage_type level
/* Define _GNU_SOURCE for getline, isfinite, etc. */
#ifndef _GNU_SOURCE
- #define _GNU_SOURCE
+# define _GNU_SOURCE
#endif
// Teach the compiler that some code path is unreacheable:
#if defined(__has_builtin)
- #if __has_builtin(__builtin_unreachable)
- #define XBT_UNREACHABLE() __builtin_unreachable()
- #else
- #include <stdlib.h>
- #define XBT_UNREACHABLE() abort()
- #endif
+# if __has_builtin(__builtin_unreachable)
+# define XBT_UNREACHABLE() __builtin_unreachable()
+# else
+# include <stdlib.h>
+# define XBT_UNREACHABLE() abort()
+# endif
#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
- #define XBT_UNREACHABLE() __builtin_unreachable()
+# define XBT_UNREACHABLE() __builtin_unreachable()
#else
- #include <stdlib.h>
- #define XBT_UNREACHABLE() abort()
+# include <stdlib.h>
+# define XBT_UNREACHABLE() abort()
#endif
/* On MinGW, stdio.h defines __MINGW_PRINTF_FORMAT and __MINGW_SCANF_FORMAT
which are the suitable format style (either gnu_printf or ms_printf)
depending on which version is available (__USE_MINGW_ANSI_STDIO): */
#ifdef __MINGW32__
- #include <stdio.h>
+# include <stdio.h>
- # define XBT_ATTRIB_PRINTF( format_idx, arg_idx ) \
+# define XBT_ATTRIB_PRINTF( format_idx, arg_idx ) \
__attribute__((__format__ (__MINGW_PRINTF_FORMAT, format_idx, arg_idx)))
- # define XBT_ATTRIB_SCANF( format_idx, arg_idx ) \
- __attribute__((__MINGW_SCANF_FORMAT (__scanf__, format_idx, arg_idx)))
+# define XBT_ATTRIB_SCANF( format_idx, arg_idx ) \
+ __attribute__((__MINGW_SCANF_FORMAT (__scanf__, format_idx, arg_idx)))
#else
- # define XBT_ATTRIB_PRINTF( format_idx, arg_idx ) \
+# define XBT_ATTRIB_PRINTF( format_idx, arg_idx ) \
__attribute__((__format__ (__printf__, format_idx, arg_idx)))
- # define XBT_ATTRIB_SCANF( format_idx, arg_idx ) \
- __attribute__((__format__ (__scanf__, format_idx, arg_idx)))
+# define XBT_ATTRIB_SCANF( format_idx, arg_idx ) \
+ __attribute__((__format__ (__scanf__, format_idx, arg_idx)))
#endif
-# define XBT_ATTRIB_NORETURN __attribute__((__noreturn__))
-# define XBT_ATTRIB_UNUSED __attribute__((__unused__))
-# define XBT_ATTRIB_DEPRECATED(m) __attribute__((__deprecated__(m)))
+#define XBT_ATTRIB_NORETURN __attribute__((__noreturn__))
+#define XBT_ATTRIB_UNUSED __attribute__((__unused__))
+#define XBT_ATTRIB_DEPRECATED(m) __attribute__((__deprecated__(m)))
/* Constructor priorities exist since gcc 4.3. Apparently, they are however not
* supported on Macs. */
-# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__APPLE__)
+#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__APPLE__)
# define _XBT_GNUC_CONSTRUCTOR(prio) __attribute__((__constructor__ (prio)))
# define _XBT_GNUC_DESTRUCTOR(prio) __attribute__((__destructor__ (prio)))
-# else
+#else
# define _XBT_GNUC_CONSTRUCTOR(prio) __attribute__((__constructor__))
# define _XBT_GNUC_DESTRUCTOR(prio) __attribute__((__destructor__))
-# endif
+#endif
#if defined(__GNUC__)
-# define XBT_ALWAYS_INLINE inline __attribute__ ((always_inline))
+# define XBT_ALWAYS_INLINE inline __attribute__ ((always_inline))
#else
-# define XBT_ALWAYS_INLINE inline
+# define XBT_ALWAYS_INLINE inline
#endif
#if defined(__GNUC__)
-# define XBT_THREAD_LOCAL __thread
+# define XBT_THREAD_LOCAL __thread
#else
-# define XBT_THREAD_LOCAL No thread local on this architecture
+# define XBT_THREAD_LOCAL No thread local on this architecture
#endif
/* improvable on gcc (by evaluating arguments only once), but wouldn't be portable */
#ifdef MIN
-# undef MIN
+# undef MIN
#endif
#define MIN(a,b) ((a)<(b)?(a):(b))
#ifdef MAX
-# undef MAX
+# undef MAX
#endif
#define MAX(a,b) ((a)>(b)?(a):(b))
~xbt_ex() override;
/** Category (what went wrong) */
- xbt_errcat_t category;
+ xbt_errcat_t category = unknown_error;
/** Why did it went wrong */
- int value;
+ int value = 0;
};
char *data;
void_f_pvoid_t free_f;
-} s_xbt_matrix_t, *xbt_matrix_t;
-
+} s_xbt_matrix_t;
+typedef s_xbt_matrix_t* xbt_matrix_t;
/** @brief Retrieve the address of a cell (not its content)
* @hideinitializer */
typedef void (*action_fun)(const char* const* args);
XBT_PUBLIC(void) xbt_replay_action_register(const char* action_name, action_fun function);
+XBT_PUBLIC(action_fun) xbt_replay_action_get(const char* action_name);
SG_END_DECL()
XBT_PUBLIC(void) xbt_str_subst(char *str, char from, char to, int amount);
XBT_PUBLIC(char *) xbt_str_varsubst(const char *str, xbt_dict_t patterns);
-XBT_PUBLIC(char *) xbt_str_from_file(FILE * file);
-
XBT_PUBLIC(long int) xbt_str_parse_int(const char* str, const char* error_msg);
XBT_PUBLIC(double) xbt_str_parse_double(const char* str, const char* error_msg);
*/
/** @brief Buffer data container **/
-struct xbt_strbuff {
+typedef struct xbt_strbuff {
char *data;
- int used, size;
-};
-typedef struct xbt_strbuff s_xbt_strbuff_t;
-typedef struct xbt_strbuff* xbt_strbuff_t;
+ int used;
+ int size;
+} s_xbt_strbuff_t;
+typedef s_xbt_strbuff_t* xbt_strbuff_t;
XBT_PUBLIC(void) xbt_strbuff_clear(xbt_strbuff_t b);
XBT_PUBLIC(xbt_strbuff_t) xbt_strbuff_new(void);
# Disable some rules on some files
-sonar.issue.ignore.multicriteria=j1,jni1,jni2,c1,c2a,c2b,c3
+sonar.issue.ignore.multicriteria=j1,jni1,jni2,c1,c2a,c2b,c3,c4a,c4b
# The Object.finalize() method should not be overriden
# But we need to clean the native memory with JNI
sonar.issue.ignore.multicriteria.c3.ruleKey=c:PPMacroName
sonar.issue.ignore.multicriteria.c3.resourceKey=include/smpi/smpi_extended_traces.h
+# Declarations should be placed in a namespace
+# But examples are intended to remain small and simple
+sonar.issue.ignore.multicriteria.c4a.ruleKey=cpp:GlobalNamespaceMembers
+sonar.issue.ignore.multicriteria.c4a.resourceKey=examples/**/*.cpp
+sonar.issue.ignore.multicriteria.c4b.ruleKey=cpp:GlobalNamespaceMembers
+sonar.issue.ignore.multicriteria.c4b.resourceKey=examples/**/*.hpp
# Exclude some files from the analysis:
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <functional>
-#include <utility>
-
#include "JavaContext.hpp"
#include "jxbt_utilities.h"
#include "src/simix/smx_private.h"
#include "xbt/ex.hpp"
+#include <functional>
+#include <utility>
+
extern "C" JavaVM* __java_vm;
XBT_LOG_NEW_DEFAULT_CATEGORY(java, "MSG for Java(TM)");
// (as the ones created for the VM migration). The Java exception will not be catched anywhere.
// Bad things happen currently if these actors get killed, unfortunately.
jxbt_throw_by_name(env, "org/simgrid/msg/ProcessKilledError",
- bprintf("Process %s killed from file JavaContext.cpp)", this->process()->name.c_str()));
+ std::string("Process ") + this->process()->cname() + " killed from file JavaContext.cpp");
// (remember that throwing a java exception from C does not break the C execution path.
// Instead, it marks the exception to be raised when returning to the Java world and
#include <locale.h>
-#include <simgrid/msg.h>
-#include <simgrid/simix.h>
-#include <simgrid/plugins/energy.h>
+#include "simgrid/msg.h"
+#include "simgrid/plugins/energy.h"
+#include "simgrid/simix.h"
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
-#include <src/simix/smx_private.h>
+#include "src/simix/smx_private.h"
#include "jmsg_process.h"
#include "jmsg_as.h"
setlocale(LC_NUMERIC,"C");
if (jargs)
- argc = (int) env->GetArrayLength(jargs);
+ argc = static_cast<int>(env->GetArrayLength(jargs));
argc++;
argv = xbt_new(char *, argc + 1);
JAVA_HOST_LEVEL = simgrid::s4u::Host::extension_create(__JAVA_host_priv_free);
JAVA_STORAGE_LEVEL = xbt_lib_add_level(storage_lib, __JAVA_storage_priv_free);
- for (index = 0; index < argc; index++)
+ for (index = 0; index < argc - 1; index++) {
+ env->SetObjectArrayElement(jargs, index, (jstring)env->NewStringUTF(argv[index + 1]));
free(argv[index]);
-
+ }
+ free(argv[argc]);
free(argv);
}
JNIEnv *env = get_current_thread_env();
simgrid::kernel::context::JavaContext* context = static_cast<simgrid::kernel::context::JavaContext*>(SIMIX_context_self());
context->jprocess = jprocess;
- msg_process_t process = MSG_process_self();
- jprocess_bind(context->jprocess, process, env);
+ jprocess_bind(context->jprocess, MSG_process_self(), env);
- // Adrien, ugly path, just to bypass creation of context at low levels (i.e such as for the VM migration for instance)
- if (context->jprocess != nullptr)
- run_jprocess(env, context->jprocess);
+ run_jprocess(env, context->jprocess);
}
}}}
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/NetZone.hpp"
-#include "simgrid/s4u/host.hpp"
#include "src/kernel/routing/NetZoneImpl.hpp"
#include "jmsg_as.h"
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "simgrid/plugins/energy.h"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "jmsg.h"
#include "jmsg_host.h"
JNIEXPORT jobjectArray JNICALL Java_org_simgrid_msg_Host_all(JNIEnv * env, jclass cls_arg)
{
- int index;
- jobjectArray jtable;
- jobject jhost;
- jstring jname;
- msg_host_t host;
xbt_dynar_t table = MSG_hosts_as_dynar();
int count = xbt_dynar_length(table);
jclass cls = jxbt_get_class(env, "org/simgrid/msg/Host");
-
- if (!cls) {
+ if (!cls)
return nullptr;
- }
- jtable = env->NewObjectArray((jsize) count, cls, nullptr);
+ jobjectArray jtable = env->NewObjectArray((jsize)count, cls, nullptr);
if (!jtable) {
jxbt_throw_jni(env, "Hosts table allocation failed");
return nullptr;
}
- for (index = 0; index < count; index++) {
- host = xbt_dynar_get_as(table,index,msg_host_t);
- jhost = (jobject) host->extension(JAVA_HOST_LEVEL);
+ for (int index = 0; index < count; index++) {
+ msg_host_t host = xbt_dynar_get_as(table, index, msg_host_t);
+ jobject jhost = static_cast<jobject>(host->extension(JAVA_HOST_LEVEL));
if (!jhost) {
- jname = env->NewStringUTF(host->cname());
-
- jhost = Java_org_simgrid_msg_Host_getByName(env, cls_arg, jname);
- /* FIXME: leak of jname ? */
+ jstring jname = env->NewStringUTF(host->cname());
+ jhost = Java_org_simgrid_msg_Host_getByName(env, cls_arg, jname);
}
env->SetObjectArrayElement(jtable, index, jhost);
env->SetLongField(jprocess, jprocess_field_Process_bind, (intptr_t)process);
}
-jstring jprocess_get_name(jobject jprocess, JNIEnv * env)
-{
- jstring jname = (jstring) env->GetObjectField(jprocess, jprocess_field_Process_name);
- return (jstring) env->NewGlobalRef(jname);
-}
-
JNIEXPORT void JNICALL Java_org_simgrid_msg_Process_nativeInit(JNIEnv *env, jclass cls) {
jclass jprocess_class_Process = env->FindClass("org/simgrid/msg/Process");
xbt_assert(jprocess_class_Process, "Native initialization of msg/Process failed. Please report that bug");
"Native initialization of msg/Process failed. Please report that bug");
}
-JNIEXPORT void JNICALL Java_org_simgrid_msg_Process_create(JNIEnv * env, jobject jprocess_arg, jobject jhostname)
+JNIEXPORT void JNICALL Java_org_simgrid_msg_Process_create(JNIEnv* env, jobject jprocess_arg, jobject jhost)
{
- jobject jprocess; /* the global reference to the java process instance */
- jstring jname; /* the name of the java process instance */
- msg_process_t process; /* the native process to create */
- msg_host_t host; /* Where that process lives */
-
-
- /* get the name of the java process */
- jname = jprocess_get_name(jprocess_arg, env);
- if (!jname) {
- jxbt_throw_null(env, xbt_strdup("Process name cannot be nullptr"));
- return;
- }
- const char* name = env->GetStringUTFChars(jname, 0);
-
- /* bind/retrieve the msg host */
- const char* hostname = env->GetStringUTFChars((jstring)jhostname, 0);
- host = MSG_host_by_name(hostname);
- if (!(host)) { /* not bound */
- jxbt_throw_host_not_found(env, hostname);
- return;
- }
- env->ReleaseStringUTFChars((jstring)jhostname, hostname);
-
/* create a global java process instance */
- jprocess = jprocess_ref(jprocess_arg, env);
- if (!jprocess) {
- jxbt_throw_jni(env, "Can't get a global ref to the java process");
- return;
- }
+ jobject jprocess = jprocess_ref(jprocess_arg, env);
/* Actually build the MSG process */
- process = MSG_process_create_with_environment(name, [](int argc, char** argv) -> int {
- msg_process_t process = MSG_process_self();
- // This is the jprocess passed as process data.
- // It would be simpler if we could use a closure.
- jobject jprocess = (jobject) MSG_process_get_data(process);
- simgrid::kernel::context::java_main_jprocess(jprocess);
- return 0;
- }, jprocess,
- host,
- /*argc, argv, properties*/
- 0, nullptr, nullptr);
+ jstring jname = (jstring)env->GetObjectField(jprocess, jprocess_field_Process_name);
+ const char* name = env->GetStringUTFChars(jname, 0);
+ msg_process_t process = MSG_process_create_from_stdfunc(
+ name, [jprocess]() -> void { simgrid::kernel::context::java_main_jprocess(jprocess); },
+ /*data*/ nullptr, jhost_get_native(env, jhost), /* properties*/ nullptr);
env->ReleaseStringUTFChars(jname, name);
+
/* bind the java process instance to the native process */
jprocess_bind(jprocess, process, env);
/* sets the PID and the PPID of the process */
env->SetIntField(jprocess, jprocess_field_Process_pid,(jint) MSG_process_get_PID(process));
env->SetIntField(jprocess, jprocess_field_Process_ppid, (jint) MSG_process_get_PPID(process));
- /* sets the Host of the process */
- jobject jhost = Java_org_simgrid_msg_Host_getByName(env,nullptr, (jstring)jhostname);
-
- env->SetObjectField(jprocess, jprocess_field_Process_host, jhost);
}
JNIEXPORT jint JNICALL Java_org_simgrid_msg_Process_killAll(JNIEnv * env, jclass cls, jint jresetPID)
const char *name = env->GetStringUTFChars((jstring)jname, 0);
const char *property = MSG_process_get_property_value(process, name);
- if (!property) {
+ if (!property)
return nullptr;
- }
jobject jproperty = env->NewStringUTF(property);
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Process_getCurrentProcess(JNIEnv * env, jclass cls)
{
- msg_process_t process = MSG_process_self();
- jobject jprocess;
-
- if (!process) {
- jxbt_throw_jni(env, xbt_strdup("MSG_process_self() failed"));
- return nullptr;
- }
-
- jprocess = jprocess_from_native(process);
-
+ jobject jprocess = jprocess_from_native(MSG_process_self());
if (!jprocess)
jxbt_throw_jni(env, xbt_strdup("SIMIX_process_get_jprocess() failed"));
}
/* try to resume the process */
- msg_error_t rv = MSG_process_resume(process);
-
- jxbt_check_res("MSG_process_resume()", rv, MSG_OK, bprintf("unexpected error , please report this bug"));
+ msg_error_t res = MSG_process_resume(process);
+ jxbt_check_res("MSG_process_resume()", res, MSG_OK, bprintf("unexpected error , please report this bug"));
}
+
JNIEXPORT void
JNICALL Java_org_simgrid_msg_Process_setAutoRestart (JNIEnv *env, jobject jprocess, jboolean jauto_restart) {
- msg_process_t process = jprocess_to_native(jprocess, env);
- xbt_ex_t e;
-
- int auto_restart = jauto_restart == JNI_TRUE ? 1 : 0;
+ msg_process_t process = jprocess_to_native(jprocess, env);
if (!process) {
jxbt_throw_notbound(env, "process", jprocess);
return;
}
- try {
- MSG_process_auto_restart_set(process,auto_restart);
- }
- catch (xbt_ex& e) {
- // Nothing to do
- }
+ MSG_process_auto_restart_set(process, (jauto_restart == JNI_TRUE));
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Process_restart (JNIEnv *env, jobject jprocess) {
jxbt_throw_notbound(env, "process", jprocess);
return;
}
-
try {
MSG_process_kill(process);
} catch (xbt_ex& ex) {
- XBT_VERB("This process just killed itself.");
+ XBT_VERB("Process %s just committed a suicide", MSG_process_get_name(process));
+ xbt_assert(process == MSG_process_self(),
+ "Killing a process should not raise an exception if it's not a suicide. Please report that bug.");
}
}
/** Extract the native instance from the java one */
msg_process_t jprocess_to_native(jobject jprocess, JNIEnv* env);
-/** Get the name of a java instance. */
-jstring jprocess_get_name(jobject jprocess, JNIEnv* env);
-
/** Initialize the native world, called from the Java world at startup */
JNIEXPORT void JNICALL Java_org_simgrid_msg_Process_nativeInit(JNIEnv *env, jclass cls);
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include "jmsg.h"
#include "jmsg_host.h"
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_create(JNIEnv * env, jobject jtask, jstring jname,
jdouble jflopsAmount, jdouble jbytesAmount)
{
- msg_task_t task; /* the native task to create */
const char *name = nullptr; /* the name of the task */
- if (jflopsAmount < 0) {
- jxbt_throw_illegal(env, bprintf("Task flopsAmount (%f) cannot be negative", static_cast<double>(jflopsAmount)));
- return;
- }
-
- if (jbytesAmount < 0) {
- jxbt_throw_illegal(env, bprintf("Task bytesAmount (%f) cannot be negative", static_cast<double>(jbytesAmount)));
- return;
- }
-
- if (jname) {
- /* get the C string from the java string */
+ if (jname)
name = env->GetStringUTFChars(jname, 0);
- }
-
- /* create the task */
- task = MSG_task_create(name, static_cast<double>(jflopsAmount), static_cast<double>(jbytesAmount), nullptr);
+ msg_task_t task = MSG_task_create(name, static_cast<double>(jflopsAmount), static_cast<double>(jbytesAmount), jtask);
if (jname)
env->ReleaseStringUTFChars(jname, name);
- /* sets the task name */
- env->SetObjectField(jtask, jtask_field_Task_name, jname);
+
/* bind & store the task */
jtask_bind(jtask, task, env);
- MSG_task_set_data(task, jtask);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_parallelCreate(JNIEnv * env, jobject jtask, jstring jname,
jobjectArray jhosts, jdoubleArray jcomputeDurations_arg,
jdoubleArray jmessageSizes_arg)
{
- msg_task_t task; /* the native parallel task to create */
- const char *name; /* the name of the task */
- int host_count;
- msg_host_t *hosts;
- double *computeDurations;
- double *messageSizes;
- jdouble *jcomputeDurations;
- jdouble *jmessageSizes;
- jobject jhost;
- int index;
-
- if (!jcomputeDurations_arg) {
- jxbt_throw_null(env, xbt_strdup("Parallel task flops amounts cannot be null"));
- return;
- }
-
- if (!jmessageSizes_arg) {
- jxbt_throw_null(env, xbt_strdup("Parallel task bytes amounts cannot be null"));
- return;
- }
-
- if (!jname) {
- jxbt_throw_null(env, xbt_strdup("Parallel task name cannot be null"));
- return;
- }
-
- host_count = static_cast<int>(env->GetArrayLength(jhosts));
-
- hosts = xbt_new0(msg_host_t, host_count);
- computeDurations = xbt_new0(double, host_count);
- messageSizes = xbt_new0(double, host_count * host_count);
+ int host_count = static_cast<int>(env->GetArrayLength(jhosts));
- jcomputeDurations = env->GetDoubleArrayElements(jcomputeDurations_arg, 0);
- jmessageSizes = env->GetDoubleArrayElements(jmessageSizes_arg, 0);
-
- for (index = 0; index < host_count; index++) {
- jhost = env->GetObjectArrayElement(jhosts, index);
+ jdouble* jcomputeDurations = env->GetDoubleArrayElements(jcomputeDurations_arg, 0);
+ msg_host_t* hosts = xbt_new0(msg_host_t, host_count);
+ double* computeDurations = xbt_new0(double, host_count);
+ for (int index = 0; index < host_count; index++) {
+ jobject jhost = env->GetObjectArrayElement(jhosts, index);
hosts[index] = jhost_get_native(env, jhost);
computeDurations[index] = jcomputeDurations[index];
}
- for (index = 0; index < host_count * host_count; index++) {
+ env->ReleaseDoubleArrayElements(jcomputeDurations_arg, jcomputeDurations, 0);
+
+ jdouble* jmessageSizes = env->GetDoubleArrayElements(jmessageSizes_arg, 0);
+ double* messageSizes = xbt_new0(double, host_count* host_count);
+ for (int index = 0; index < host_count * host_count; index++) {
messageSizes[index] = jmessageSizes[index];
}
-
- env->ReleaseDoubleArrayElements(jcomputeDurations_arg, jcomputeDurations, 0);
env->ReleaseDoubleArrayElements(jmessageSizes_arg, jmessageSizes, 0);
/* get the C string from the java string */
- name = env->GetStringUTFChars(jname, 0);
-
- task = MSG_parallel_task_create(name, host_count, hosts, computeDurations, messageSizes, nullptr);
-
+ const char* name = env->GetStringUTFChars(jname, 0);
+ msg_task_t task = MSG_parallel_task_create(name, host_count, hosts, computeDurations, messageSizes, jtask);
env->ReleaseStringUTFChars(jname, name);
- /* sets the task name */
- env->SetObjectField(jtask, jtask_field_Task_name, jname);
+
/* associate the java task object and the native task */
jtask_bind(jtask, task, env);
-
- MSG_task_set_data(task, (void *) jtask);
-
- if (!MSG_task_get_data(task))
- jxbt_throw_jni(env, "global ref allocation failed");
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_cancel(JNIEnv * env, jobject jtask)
MSG_task_set_bytes_amount(task, static_cast<double>(dataSize));
}
-JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_send(JNIEnv * env,jobject jtask, jstring jalias, jdouble jtimeout)
-{
- msg_error_t rv;
- const char *alias = env->GetStringUTFChars(jalias, 0);
-
- msg_task_t task = jtask_to_native(jtask, env);
-
- if (!task) {
- env->ReleaseStringUTFChars(jalias, alias);
- jxbt_throw_notbound(env, "task", jtask);
- return;
- }
-
- /* Pass a global ref to the Jtask into the Ctask so that the receiver can use it */
- MSG_task_set_data(task, (void *) env->NewGlobalRef(jtask));
- rv = MSG_task_send_with_timeout(task, alias, static_cast<double>(jtimeout));
- env->ReleaseStringUTFChars(jalias, alias);
-
- if (rv != MSG_OK) {
- jmsg_throw_status(env, rv);
- }
-}
-
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_sendBounded(JNIEnv * env,jobject jtask, jstring jalias,
jdouble jtimeout,jdouble maxrate)
{
- msg_error_t rv;
- const char *alias = env->GetStringUTFChars(jalias, 0);
-
msg_task_t task = jtask_to_native(jtask, env);
-
if (!task) {
- env->ReleaseStringUTFChars(jalias, alias);
jxbt_throw_notbound(env, "task", jtask);
return;
}
- /* Pass a global ref to the Jtask into the Ctask so that the receiver can use it */
+ /* Add a global ref into the Ctask so that the receiver can use it */
MSG_task_set_data(task, (void *) env->NewGlobalRef(jtask));
- rv = MSG_task_send_with_timeout_bounded(task, alias, static_cast<double>(jtimeout), static_cast<double>(maxrate));
+
+ const char* alias = env->GetStringUTFChars(jalias, 0);
+ msg_error_t res =
+ MSG_task_send_with_timeout_bounded(task, alias, static_cast<double>(jtimeout), static_cast<double>(maxrate));
env->ReleaseStringUTFChars(jalias, alias);
- if (rv != MSG_OK) {
- jmsg_throw_status(env, rv);
- }
+ if (res != MSG_OK)
+ jmsg_throw_status(env, res);
}
-JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receive(JNIEnv * env, jclass cls, jstring jalias, jdouble jtimeout,
- jobject jhost)
+JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receive(JNIEnv* env, jclass cls, jstring jalias, jdouble jtimeout)
{
msg_task_t task = nullptr;
- msg_host_t host = nullptr;
-
- if (jhost) {
- host = jhost_get_native(env, jhost);
-
- if (!host) {
- jxbt_throw_notbound(env, "host", jhost);
- return nullptr;
- }
- }
const char *alias = env->GetStringUTFChars(jalias, 0);
- msg_error_t rv = MSG_task_receive_ext(&task, alias, (double) jtimeout, host);
+ msg_error_t rv = MSG_task_receive_ext(&task, alias, (double)jtimeout, /*host*/ nullptr);
env->ReleaseStringUTFChars(jalias, alias);
if (env->ExceptionOccurred())
return nullptr;
env->DeleteGlobalRef(jtask_global);
MSG_task_set_data(task, nullptr);
-
return (jobject) jtask_local;
}
return jcomm;
}
-JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receiveBounded(JNIEnv * env, jclass cls, jstring jalias,
- jdouble jtimeout, jobject jhost, jdouble rate)
+JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receiveBounded(JNIEnv* env, jclass cls, jstring jalias,
+ jdouble jtimeout, jdouble rate)
{
- msg_error_t rv;
- msg_task_t *task = xbt_new(msg_task_t,1);
- *task = nullptr;
-
- msg_host_t host = nullptr;
-
- if (jhost) {
- host = jhost_get_native(env, jhost);
-
- if (!host) {
- jxbt_throw_notbound(env, "host", jhost);
- return nullptr;
- }
- }
+ msg_task_t task = nullptr;
const char *alias = env->GetStringUTFChars(jalias, 0);
- rv = MSG_task_receive_ext_bounded(task, alias, static_cast<double>(jtimeout), host, static_cast<double>(rate));
+ msg_error_t res = MSG_task_receive_ext_bounded(&task, alias, static_cast<double>(jtimeout), /*host*/ nullptr,
+ static_cast<double>(rate));
if (env->ExceptionOccurred())
return nullptr;
- if (rv != MSG_OK) {
- jmsg_throw_status(env,rv);
+ if (res != MSG_OK) {
+ jmsg_throw_status(env, res);
return nullptr;
}
- jobject jtask_global = (jobject) MSG_task_get_data(*task);
+ jobject jtask_global = (jobject)MSG_task_get_data(task);
/* Convert the global ref into a local ref so that the JVM can free the stuff */
jobject jtask_local = env->NewLocalRef(jtask_global);
env->DeleteGlobalRef(jtask_global);
- MSG_task_set_data(*task, nullptr);
+ MSG_task_set_data(task, nullptr);
env->ReleaseStringUTFChars(jalias, alias);
- xbt_free(task);
-
return (jobject) jtask_local;
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_setFlopsAmount(JNIEnv* env, jobject jtask, jdouble computationAmount);
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_setBytesAmount(JNIEnv* env, jobject jtask, jdouble dataSize);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_send(JNIEnv* env, jobject jtask, jstring jalias, jdouble jtimeout);
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_sendBounded(JNIEnv* env, jobject jtask, jstring jalias,
jdouble jtimeout, jdouble maxrate);
-JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receive(JNIEnv* env, jclass cls, jstring jalias, jdouble jtimeout,
- jobject jhost);
+JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receive(JNIEnv* env, jclass cls, jstring jalias, jdouble jtimeout);
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_irecv(JNIEnv* env, jclass cls, jstring jmailbox);
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_receiveBounded(JNIEnv* env, jclass cls, jstring jalias,
- jdouble jtimeout, jobject jhost, jdouble rate);
+ jdouble jtimeout, jdouble rate);
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_irecvBounded(JNIEnv* env, jclass cls, jstring jmailbox,
jdouble rate);
JNIEXPORT jobject JNICALL Java_org_simgrid_msg_Task_isend(JNIEnv* env, jobject jtask, jstring jmailbox);
SG_BEGIN_DECL()
+extern int JAVA_HOST_LEVEL;
static jfieldID jvm_field_bind;
void jvm_bind(JNIEnv *env, jobject jvm, msg_vm_t vm)
env->ReleaseStringUTFChars(jname, name);
jvm_bind(env, jvm, vm);
+ jvm = env->NewWeakGlobalRef(jvm);
+ // We use the extension level of the host, even if that's somehow disturbing
+ vm->extension_set(JAVA_HOST_LEVEL, (void*)jvm);
+}
+
+JNIEXPORT jobjectArray JNICALL Java_org_simgrid_msg_VM_all(JNIEnv* env, jclass cls_arg)
+{
+ xbt_dynar_t hosts = MSG_hosts_as_dynar();
+ std::vector<jobject> vms;
+
+ unsigned int it;
+ msg_host_t h;
+ xbt_dynar_foreach (hosts, it, h) {
+ simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(h);
+ if (vm != nullptr && vm->getState() != SURF_VM_STATE_DESTROYED) {
+ jobject jvm = static_cast<jobject>(vm->extension(JAVA_HOST_LEVEL));
+ vms.push_back(jvm);
+ }
+ }
+ xbt_dynar_free(&hosts);
+
+ vms.shrink_to_fit();
+ int count = vms.size();
+
+ jclass cls = jxbt_get_class(env, "org/simgrid/msg/VM");
+ if (!cls)
+ return nullptr;
+
+ jobjectArray jtable = env->NewObjectArray((jsize)count, cls, nullptr);
+ if (!jtable) {
+ jxbt_throw_jni(env, "Hosts table allocation failed");
+ return nullptr;
+ }
+
+ for (int index = 0; index < count; index++) {
+ jobject jhost = vms.at(index);
+ env->SetObjectArrayElement(jtable, index, jhost);
+ }
+ return jtable;
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeFinalize(JNIEnv *env, jobject jvm)
MSG_vm_shutdown(vm);
auto vmList = &simgrid::vm::VirtualMachineImpl::allVms_;
vmList->erase(
- std::remove_if(vmList->begin(), vmList->end(), [vm](simgrid::s4u::VirtualMachine* it) { return vm == it; }),
+ std::remove_if(vmList->begin(), vmList->end(), [vm](simgrid::s4u::VirtualMachine* it) {
+ return vm == it;
+ }),
vmList->end());
}
}
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_internalmig(JNIEnv *env, jobject jvm, jobject jhost)
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeMigration(JNIEnv* env, jobject jvm, jobject jhost)
{
msg_vm_t vm = jvm_get_native(env,jvm);
msg_host_t host = jhost_get_native(env, jhost);
MSG_vm_resume(vm);
}
+JNIEXPORT jobject JNICALL Java_org_simgrid_msg_VM_getVMByName(JNIEnv* env, jclass cls, jstring jname)
+{
+
+ /* get the C string from the java string */
+ if (jname == nullptr) {
+ jxbt_throw_null(env, bprintf("No VM can have a null name"));
+ return nullptr;
+ }
+ const char* name = env->GetStringUTFChars(jname, 0);
+ /* get the VM by name (VMs are just special hosts, unfortunately) */
+ msg_host_t host = MSG_host_by_name(name);
+
+ if (!host) { /* invalid name */
+ jxbt_throw_host_not_found(env, name);
+ env->ReleaseStringUTFChars(jname, name);
+ return nullptr;
+ }
+ env->ReleaseStringUTFChars(jname, name);
+
+ return static_cast<jobject>(host->extension(JAVA_HOST_LEVEL));
+}
SG_END_DECL()
void jvm_bind(JNIEnv *env, jobject jvm, msg_vm_t vm);
msg_vm_t jvm_get_native(JNIEnv *env, jobject jvm);
-/*
- * Class org_simgrid_msg_VM
- * Method nativeInit
- * Signature ()V
- */
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeInit(JNIEnv *env, jclass cls);
-
-/**
- * Class org_simgrid_msg_VM
- * Method isCreated
- * Signature ()B
- */
-JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isCreated(JNIEnv *env, jobject jvm);
-
-JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isRunning(JNIEnv *env, jobject jvm);
-JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isMigrating(JNIEnv *env, jobject jvm);
-JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isSuspended(JNIEnv *env, jobject jvm);
-JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isResuming(JNIEnv *env, jobject jvm);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_setBound(JNIEnv *env, jobject jvm, jdouble bound);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeInit(JNIEnv* env, jclass cls);
+
+JNIEXPORT jobjectArray JNICALL Java_org_simgrid_msg_VM_all(JNIEnv* env, jclass cls_arg);
+
+JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isCreated(JNIEnv* env, jobject jvm);
+JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isRunning(JNIEnv* env, jobject jvm);
+JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isMigrating(JNIEnv* env, jobject jvm);
+JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isSuspended(JNIEnv* env, jobject jvm);
+JNIEXPORT jint JNICALL Java_org_simgrid_msg_VM_isResuming(JNIEnv* env, jobject jvm);
+
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_setBound(JNIEnv* env, jobject jvm, jdouble bound);
+
JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_create(JNIEnv* env, jobject jvm, jobject jhost, jstring jname,
jint jramsize, jint dprate, jint mig_netspeed);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeFinalize(JNIEnv *env, jobject jvm);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_start(JNIEnv *env, jobject jvm);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_internalmig(JNIEnv *env, jobject jvm, jobject jhost);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_suspend(JNIEnv *env, jobject jvm);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_resume(JNIEnv *env, jobject jvm);
-JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_shutdown(JNIEnv *env, jobject jvm);
+JNIEXPORT jobject JNICALL Java_org_simgrid_msg_VM_getVMByName(JNIEnv* env, jclass cls, jstring jname);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeFinalize(JNIEnv* env, jobject jvm);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_start(JNIEnv* env, jobject jvm);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_nativeMigration(JNIEnv* env, jobject jvm, jobject jhost);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_suspend(JNIEnv* env, jobject jvm);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_resume(JNIEnv* env, jobject jvm);
+JNIEXPORT void JNICALL Java_org_simgrid_msg_VM_shutdown(JNIEnv* env, jobject jvm);
SG_END_DECL()
/** Class initializer, to initialize various JNI stuff */
- public static native void nativeInit();
+ private static native void nativeInit();
static {
nativeInit();
}
*/
private long bind = 0;
/** Indicates if the process is started */
- boolean started;
- /**
- * Even if this attribute is public you must never access to it.
- * It is used to compute the id of an MSG process.
- */
- private static long nextProcessId = 0;
-
- /**
- * Even if this attribute is public you must never access to it.
- * It is compute automatically during the creation of the object.
- * The native functions use this identifier to synchronize the process.
- */
- private long id;
/** Time at which the process should be created */
protected double startTime = 0;
if (name == null)
throw new NullPointerException("Process name cannot be null");
- this.id = nextProcessId++;
this.host = host;
this.name = name;
this.killTime = killTime;
}
/**
- * The natively implemented method to create an MSG process.
- * @param hostName A valid (bound) host where create the process.
+ * The native method to create an MSG process.
+ * @param host where to create the process.
*/
- protected native void create(String hostName) throws HostNotFoundException;
+ protected native void create(Host host);
+
/**
* This method kills all running process of the simulation.
*
* @throws HostNotFoundException
*/
public final void start() throws HostNotFoundException {
- if (!started) {
- started = true;
- create(host.getName());
- }
+ if (bind == 0)
+ create(host);
}
/** This method runs the process. It calls the method function that you must overwrite. */
* This value has to be ≥ 0.
*/
public Task(String name, double flopsAmount, double bytesAmount) {
+ if (flopsAmount<0)
+ throw new IllegalArgumentException("Task flopsAmount (" + flopsAmount + ") cannot be negative");
+ if (bytesAmount<0)
+ throw new IllegalArgumentException("Task bytesAmount (" + bytesAmount + ") cannot be negative");
+
create(name, flopsAmount, bytesAmount);
+
+ this.name = name;
this.messageSize = bytesAmount;
}
/**
* the destination of the communications.
*/
public Task(String name, Host[]hosts, double[]flopsAmount, double[]bytesAmount) {
+ if (flopsAmount == null)
+ throw new NullPointerException("Parallel task flops amounts is null");
+ if (bytesAmount == null)
+ throw new NullPointerException("Parallel task bytes amounts is null");
+ if (hosts == null)
+ throw new NullPointerException("Host list is null");
+ if (name == null)
+ throw new NullPointerException("Parallel task name is null");
+
parallelCreate(name, hosts, flopsAmount, bytesAmount);
+ this.name = name;
}
/**
* @throws HostFailureException
* @throws TransferFailureException
*/
- public native void send(String mailbox, double timeout) throws TransferFailureException, HostFailureException, TimeoutException;
+ public void send(String mailbox, double timeout) throws TransferFailureException, HostFailureException, TimeoutException {
+ sendBounded(mailbox, timeout, -1);
+ }
/** Sends the task on the specified mailbox (capping the sending rate to \a maxrate)
*
* @throws TimeoutException
*/
public void sendBounded(String mailbox, double maxrate) throws TransferFailureException, HostFailureException, TimeoutException {
- sendBounded(mailbox,-1,maxrate);
+ sendBounded(mailbox, -1, maxrate);
}
* @return a Comm handler
*/
public static native Comm irecv(String mailbox);
- /**
- * Retrieves next task from the mailbox identified by the specified name
- *
- * @param mailbox
- * @return a Task
- */
-
- public static Task receive(String mailbox) throws TransferFailureException, HostFailureException, TimeoutException {
- return receive(mailbox, -1.0, null);
- }
-
- /**
- * Retrieves next task on the mailbox identified by the specified name (wait at most \a timeout seconds)
- *
- * @param mailbox
- * @param timeout
- * @return a Task
- */
- public static Task receive(String mailbox, double timeout) throws TransferFailureException, HostFailureException, TimeoutException {
- return receive(mailbox, timeout, null);
- }
/**
- * Retrieves next task sent by a given host on the mailbox identified by the specified alias
+ * Retrieves next task on the mailbox identified by the specified alias
*
* @param mailbox
- * @param host
* @return a Task
*/
- public static Task receive(String mailbox, Host host) throws TransferFailureException, HostFailureException, TimeoutException {
- return receive(mailbox, -1.0, host);
+ public static Task receive(String mailbox) throws TransferFailureException, HostFailureException, TimeoutException {
+ return receive(mailbox, -1.0);
}
/**
- * Retrieves next task sent by a given host on the mailbox identified by the specified alias (wait at most \a timeout seconds)
+ * Retrieves next task on the mailbox identified by the specified alias (wait at most \a timeout seconds)
*
* @param mailbox
* @param timeout
- * @param host
* @return a Task
*/
- public static native Task receive(String mailbox, double timeout, Host host) throws TransferFailureException, HostFailureException, TimeoutException;
+ public static native Task receive(String mailbox, double timeout) throws TransferFailureException, HostFailureException, TimeoutException;
/**
* Starts listening for receiving a task from an asynchronous communication with a capped rate
*/
public static Task receiveBounded(String mailbox, double rate) throws TransferFailureException, HostFailureException, TimeoutException {
- return receiveBounded(mailbox, -1.0, null, rate);
+ return receiveBounded(mailbox, -1.0, rate);
}
/**
* @param timeout
* @return a Task
*/
- public static Task receiveBounded(String mailbox, double timeout, double rate) throws TransferFailureException, HostFailureException, TimeoutException {
- return receiveBounded(mailbox, timeout, null, rate);
- }
-
- /**
- * Retrieves next task sent by a given host on the mailbox identified by the specified alias with a capped rate
- *
- * @param mailbox
- * @param host
- * @return a Task
- */
-
- public static Task receiveBounded(String mailbox, Host host, double rate) throws TransferFailureException, HostFailureException, TimeoutException {
- return receiveBounded(mailbox, -1.0, host, rate);
- }
-
- /**
- * Retrieves next task sent by a given host on the mailbox identified by the specified alias (wait at most \a timeout seconds)
- * with a capped rate
- *
- * @param mailbox
- * @param timeout
- * @param host
- * @return a Task
- */
- public static native Task receiveBounded(String mailbox, double timeout, Host host, double rate) throws TransferFailureException, HostFailureException, TimeoutException;
+ public static native Task receiveBounded(String mailbox, double timeout, double rate) throws TransferFailureException, HostFailureException, TimeoutException;
-/* JNI interface to virtual machine in Simgrid */
+/* Java bindings of the s4u::VirtualMachine */
-/* Copyright (c) 2006-2014. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
package org.simgrid.msg;
-import java.util.ArrayList;
public class VM extends Host {
// No need to declare a new bind variable: we use the one inherited from the super class Host
- /* Static functions */
-
- private static ArrayList<VM> vms= new ArrayList<>();
private Host currentHost;
/** Create a `basic' VM (i.e. 1GB of RAM, other values are not taken into account). */
super.name = name;
this.currentHost = host;
create(host, name, ramSize, migNetSpeed, dpIntensity);
- vms.add(this);
}
- public static VM[] all(){
- VM[] allvms = new VM[vms.size()];
- vms.toArray(allvms);
- return allvms;
- }
+ /** Retrieve the list of all existing VMs */
+ public static native VM[] all();
- public static VM getVMByName(String name){
- for (VM vm : vms){
- if (vm.getName().equals(name))
- return vm;
- }
- return null;
- }
+ /** Retrieve a VM from its name */
+ public static native VM getVMByName(String name);
- /** Kills all the actors running on that VM
+ /** Shutdown and unref the VM.
*
* Actually, this strictly equivalent to shutdown().
* In C and in libvirt, the destroy function also releases the memory associated to the VM,
protected void finalize() throws Throwable {
nativeFinalize();
}
- public native void nativeFinalize();
+ private native void nativeFinalize();
/** Returns whether the given VM is currently suspended */
public native int isCreated();
*/
public native void shutdown();
- /** Invoke native migration routine */
- public native void internalmig(Host destination) throws Exception; // TODO add throws DoubleMigrationException (i.e. when you call migrate on a VM that is already migrating);
-
-
-
/** Change the host on which all processes are running
* (pre-copy is implemented)
*/
public void migrate(Host destination) throws HostFailureException{
try {
- this.internalmig(destination);
+ this.nativeMigration(destination);
} catch (Exception e){
Msg.info("Migration of VM "+this.getName()+" to "+destination.getName()+" is impossible ("+e.getMessage()+")");
throw new HostFailureException();
// If the migration correcly returned, then we should change the currentHost value.
this.currentHost = destination;
}
+ private native void nativeMigration(Host destination) throws MsgException;
/** Immediately suspend the execution of all processes within the given VM
*
public native void resume();
/** Class initializer (for JNI), don't do it yourself */
- public static native void nativeInit();
+ private static native void nativeInit();
static {
nativeInit();
}
-/* Copyright (c) 2010, 2012-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
/* SimGrid Lua bindings */
#include "lua_private.h"
-#include <simgrid/host.h>
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
extern "C" {
#include <lauxlib.h>
}
XBT_PUBLIC(void) TRACE_help(int detailed);
XBT_PUBLIC(void) TRACE_surf_resource_utilization_alloc();
XBT_PUBLIC(void) TRACE_surf_resource_utilization_release();
-XBT_PUBLIC(void) TRACE_add_start_function(void (*func)(void));
-XBT_PUBLIC(void) TRACE_add_end_function(void (*func)(void));
SG_END_DECL()
-#endif
\ No newline at end of file
+#endif
typedef simgrid::surf::Cpu surf_Cpu;
typedef simgrid::surf::HostModel surf_HostModel;
typedef simgrid::surf::NetworkModel surf_NetworkModel;
+typedef simgrid::surf::Storage surf_Storage;
typedef simgrid::surf::StorageModel surf_StorageModel;
typedef simgrid::surf::Resource surf_Resource;
typedef simgrid::surf::HostImpl surf_Host;
typedef struct surf_Cpu surf_Cpu;
typedef struct surf_HostModel surf_HostModel;
typedef struct surf_NetworkModel surf_NetworkModel;
+typedef struct surf_Storage surf_Storage;
typedef struct surf_StorageModel surf_StorageModel;
typedef struct surf_Resource surf_Resource;
typedef struct surf_Host surf_Host;
typedef surf_HostModel *surf_host_model_t;
typedef surf_NetworkModel *surf_network_model_t;
typedef surf_StorageModel *surf_storage_model_t;
+typedef surf_Storage* surf_storage_t;
typedef xbt_dictelm_t surf_resource_t;
/* Generic model object */
/***************************/
-static inline void *surf_storage_resource_priv(const void *storage){
- return (void*)xbt_lib_get_level((xbt_dictelm_t)storage, SURF_STORAGE_LEVEL);
+static inline surf_storage_t surf_storage_resource_priv(const void* storage)
+{
+ return (surf_storage_t)xbt_lib_get_level((xbt_dictelm_t)storage, SURF_STORAGE_LEVEL);
}
static inline void *surf_storage_resource_by_name(const char *name){
*/
XBT_PUBLIC(int) surf_host_file_seek(sg_host_t host, surf_file_t fd, sg_offset_t offset, int origin);
-/**
- * @brief Get the content of a storage
- *
- * @param resource The surf storage
- * @return A xbt_dict_t with path as keys and size in bytes as values
- */
-XBT_PUBLIC(xbt_dict_t) surf_storage_get_content(surf_resource_t resource);
-
/**
* @brief Get the size in bytes of a storage
*
/* Prototypes of the functions that handle the properties */
XBT_PUBLIC_DATA(xbt_dict_t) current_property_set;// the prop set for the currently parsed element (also used in SIMIX)
-/* The same for model_prop set*/
-XBT_PUBLIC_DATA(xbt_dict_t) current_model_property_set;
/* surf parse file related (public because called from a test suite) */
XBT_PUBLIC(void) parse_platform_file(const char *file);
fprintf(trace_file, "%s send %d %d %s\n", process_id, extra->dst, extra->send_size, extra->datatype1);
break;
case TRACING_ISEND:
- fprintf(trace_file, "%s isend %d %d %s\n", process_id, extra->dst, extra->send_size, extra->datatype1);
+ fprintf(trace_file, "%s Isend %d %d %s\n", process_id, extra->dst, extra->send_size, extra->datatype1);
break;
case TRACING_RECV:
fprintf(trace_file, "%s recv %d %d %s\n", process_id, extra->src, extra->send_size, extra->datatype1);
break;
case TRACING_IRECV:
- fprintf(trace_file, "%s irecv %d %d %s\n", process_id, extra->src, extra->send_size, extra->datatype1);
+ fprintf(trace_file, "%s Irecv %d %d %s\n", process_id, extra->src, extra->send_size, extra->datatype1);
break;
case TRACING_TEST:
fprintf(trace_file, "%s test\n", process_id);
fprintf(trace_file, "%s wait\n", process_id);
break;
case TRACING_WAITALL:
- fprintf(trace_file, "%s waitall\n", process_id);
+ fprintf(trace_file, "%s waitAll\n", process_id);
break;
case TRACING_BARRIER:
fprintf(trace_file, "%s barrier\n", process_id);
fprintf(trace_file, "\n");
break;
case TRACING_ALLREDUCE: // rank allreduce comm_size comp_size (datatype)
- fprintf(trace_file, "%s allreduce %d %f %s\n", process_id, extra->send_size, extra->comp_size, extra->datatype1);
+ fprintf(trace_file, "%s allReduce %d %f %s\n", process_id, extra->send_size, extra->comp_size, extra->datatype1);
break;
case TRACING_ALLTOALL: // rank alltoall send_size recv_size (sendtype) (recvtype)
- fprintf(trace_file, "%s alltoall %d %d %s %s\n", process_id, extra->send_size, extra->recv_size, extra->datatype1,
+ fprintf(trace_file, "%s allToAll %d %d %s %s\n", process_id, extra->send_size, extra->recv_size, extra->datatype1,
extra->datatype2);
break;
case TRACING_ALLTOALLV: // rank alltoallv send_size [sendcounts] recv_size [recvcounts] (sendtype) (recvtype)
- fprintf(trace_file, "%s alltoallv %d ", process_id, extra->send_size);
+ fprintf(trace_file, "%s allToAllV %d ", process_id, extra->send_size);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->sendcounts[i]);
fprintf(trace_file, "%d ", extra->recv_size);
extra->datatype1, extra->datatype2);
break;
case TRACING_ALLGATHERV: // rank allgatherv send_size [recvcounts] (sendtype) (recvtype)
- fprintf(trace_file, "%s allgatherv %d ", process_id, extra->send_size);
+ fprintf(trace_file, "%s allGatherV %d ", process_id, extra->send_size);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->recvcounts[i]);
fprintf(trace_file, "%s %s \n", extra->datatype1, extra->datatype2);
break;
case TRACING_REDUCE_SCATTER: // rank reducescatter [recvcounts] comp_size (sendtype)
- fprintf(trace_file, "%s reducescatter ", process_id);
+ fprintf(trace_file, "%s reduceScatter ", process_id);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->recvcounts[i]);
fprintf(trace_file, "%f %s\n", extra->comp_size, extra->datatype1);
fprintf(trace_file, "%s sleep %f\n", process_id, extra->sleep_duration);
break;
case TRACING_GATHERV: // rank gatherv send_size [recvcounts] root (sendtype) (recvtype)
- fprintf(trace_file, "%s gatherv %d ", process_id, extra->send_size);
+ fprintf(trace_file, "%s gatherV %d ", process_id, extra->send_size);
for (i = 0; i < extra->num_processes; i++)
fprintf(trace_file, "%d ", extra->recvcounts[i]);
fprintf(trace_file, "%d %s %s\n", extra->root, extra->datatype1, extra->datatype2);
trace_precision = xbt_cfg_get_int(OPT_TRACING_PRECISION);
}
-static std::vector<std::function<void()>> TRACE_start_functions;
-
-void TRACE_add_start_function(void (*func) ())
-{
- TRACE_start_functions.push_back(func);
-}
-
int TRACE_start()
{
if (TRACE_is_configured())
user_host_variables = xbt_dict_new_homogeneous(xbt_free_f);
user_vm_variables = xbt_dict_new_homogeneous(xbt_free_f);
user_link_variables = xbt_dict_new_homogeneous(xbt_free_f);
-
- for (auto func: TRACE_start_functions)
- func();
}
- TRACE_start_functions.clear();
return 0;
}
-static std::vector<std::function<void()>> TRACE_end_functions;
-void TRACE_add_end_function(void (*func) (void))
-{
- TRACE_end_functions.push_back(func);
-}
-
int TRACE_end()
{
int retval;
PJ_container_release();
PJ_type_release();
- for (auto func: TRACE_end_functions)
- func();
- TRACE_start_functions.clear();
-
xbt_dict_free(&user_link_variables);
xbt_dict_free(&user_host_variables);
xbt_dict_free(&user_vm_variables);
-/* Copyright (c) 2010, 2012-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
-#include <xbt/dict.h>
-#include <xbt/lib.h>
-#include <xbt/log.h>
-
-#include <surf/surf.h>
-#include <surf/surf_routing.h>
+#include "surf/surf.h"
#include "src/instr/instr_private.h"
-/* Copyright (c) 2010-2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "xbt/asserts.h"
#include "simgrid/jedule/jedule_sd_binding.h"
-#include "simgrid/forward.h"
-
-#include "../../simdag/simdag_private.hpp"
#include "simgrid/jedule/jedule.hpp"
+#include "src/simdag/simdag_private.hpp"
+
+#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/NetZone.hpp"
-#include "simgrid/s4u/engine.hpp"
#if HAVE_JEDULE
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/kernel/EngineImpl.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/kernel/routing/NetZoneImpl.hpp"
-#include <simgrid/s4u/host.hpp>
namespace simgrid {
namespace kernel {
this->permanent_receiver = actor.get()->getImpl();
}
/** @brief Pushes a communication activity into a mailbox
- * @param activity What to add
+ * @param comm What to add
*/
-void MailboxImpl::push(smx_activity_t synchro)
+void MailboxImpl::push(activity::Comm* comm)
{
- simgrid::kernel::activity::Comm* comm = static_cast<simgrid::kernel::activity::Comm*>(synchro);
this->comm_queue.push_back(comm);
comm->mbox = this;
}
#include <boost/circular_buffer.hpp>
#include "simgrid/s4u/Mailbox.hpp"
+#include "src/kernel/activity/SynchroComm.hpp"
#include "src/simix/ActorImpl.hpp"
#define MAX_MAILBOX_SIZE 10000000
static MailboxImpl* byNameOrNull(const char* name);
static MailboxImpl* byNameOrCreate(const char* name);
void setReceiver(s4u::ActorPtr actor);
- void push(smx_activity_t synchro);
+ void push(activity::Comm* comm);
void remove(smx_activity_t activity);
simgrid::s4u::Mailbox piface_; // Our interface
char* name_;
-/* Copyright (c) 2007-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include "src/kernel/activity/SynchroExec.hpp"
#include "src/surf/surf_interface.hpp"
-/* Copyright (c) 2007-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <xbt/log.h>
-
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include "src/kernel/context/Context.hpp"
#include "src/kernel/activity/SynchroSleep.hpp"
-/* Copyright (c) 2009-2011, 2013-2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2009-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/kernel/routing/NetPoint.hpp"
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "surf/surf_routing.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_route, surf, "Routing part of surf");
-/* Copyright (c) 2006-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/kernel/routing/NetZoneImpl.hpp"
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/surf/cpu_interface.hpp"
#include "src/surf/network_interface.hpp"
-/* Copyright (c) 2013-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2013-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <boost/algorithm/string.hpp>
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/kernel/routing/VivaldiZone.hpp"
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
/** Take a per-page snapshot of a region
*
- * @param data The start of the region (must be at the beginning of a page)
+ * @param addr The start of the region (must be at the beginning of a page)
* @param page_count Number of pages of the region
* @return Snapshot page numbers of this new snapshot
*/
* @param data Memory page
* @return hash off the page
*/
-static inline __attribute__ ((always_inline))
-PageStore::hash_type mc_hash_page(const void* data)
+static XBT_ALWAYS_INLINE PageStore::hash_type mc_hash_page(const void* data)
{
const std::uint64_t* values = (const uint64_t*) data;
std::size_t n = xbt_pagesize / sizeof(uint64_t);
/** @brief Get a page from its page number
*
- * @param Number of the memory page in the store
+ * @param pageno Number of the memory page in the store
* @return Start of the page
*/
const void* get_page(std::size_t pageno) const;
};
-inline __attribute__((always_inline))
-void PageStore::unref_page(std::size_t pageno) {
+XBT_ALWAYS_INLINE void PageStore::unref_page(std::size_t pageno)
+{
if ((--this->page_counts_[pageno]) == 0)
this->remove_page(pageno);
}
-inline __attribute__((always_inline))
-void PageStore::ref_page(size_t pageno)
+XBT_ALWAYS_INLINE void PageStore::ref_page(size_t pageno)
{
++this->page_counts_[pageno];
}
-inline __attribute__((always_inline))
-const void* PageStore::get_page(std::size_t pageno) const
+XBT_ALWAYS_INLINE const void* PageStore::get_page(std::size_t pageno) const
{
return (void*) simgrid::mc::mmu::join(pageno, (std::uintptr_t) this->memory_);
}
-inline __attribute__((always_inline))
-std::size_t PageStore::get_ref(std::size_t pageno)
+XBT_ALWAYS_INLINE std::size_t PageStore::get_ref(std::size_t pageno)
{
return this->page_counts_[pageno];
}
-inline __attribute__((always_inline))
-std::size_t PageStore::size() {
+XBT_ALWAYS_INLINE std::size_t PageStore::size()
+{
return this->top_index_ - this->free_pages_.size();
}
-inline __attribute__((always_inline))
-std::size_t PageStore::capacity()
+XBT_ALWAYS_INLINE std::size_t PageStore::capacity()
{
return this->capacity_;
}
close(this->memory_file);
if (this->unw_underlying_addr_space != unw_local_addr_space) {
- unw_destroy_addr_space(this->unw_underlying_addr_space);
- _UPT_destroy(this->unw_underlying_context);
+ if (this->unw_underlying_addr_space)
+ unw_destroy_addr_space(this->unw_underlying_addr_space);
+ if (this->unw_underlying_context)
+ _UPT_destroy(this->unw_underlying_context);
}
unw_destroy_addr_space(this->unw_addr_space);
{
std::unique_ptr<simgrid::mc::Process> process(new simgrid::mc::Process(pid, socket));
// TODO, automatic detection of the config from the process
- process->privatized(
- xbt_cfg_get_boolean("smpi/privatize-global-variables"));
+ process->privatized(smpi_privatize_global_variables != SMPI_PRIVATIZE_NONE);
modelChecker_ = std::unique_ptr<ModelChecker>(
new simgrid::mc::ModelChecker(std::move(process)));
xbt_assert(mc_model_checker == nullptr);
-/* Copyright (c) 2011-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2011-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/mc/mc_smx.h"
#include "src/mc/VisitedState.hpp"
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_VisitedState, mc,
- "Logging specific to state equaity detection mechanisms");
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_VisitedState, mc, "Logging specific to state equality detection mechanisms");
namespace simgrid {
namespace mc {
return snapshot_compare(num1, s1, num2, s2);
}
-/**
- * \brief Save the current state
- * \return Snapshot of the current state.
- */
+/** @brief Save the current state */
VisitedState::VisitedState(unsigned long state_number)
{
simgrid::mc::Process* process = &(mc_model_checker->process());
this->checkNonTermination(next_state.get());
/* Check whether we already explored next_state in the past (but only if interested in state-equality reduction) */
- if (_sg_mc_max_visited_states == true)
+ if (_sg_mc_max_visited_states > 0)
visitedState_ = visitedStates_.addVisitedState(expandedStatesCount_, next_state.get(), true);
/* If this is a new state (or if we don't care about state-equality reduction) */
} else {
const smx_actor_t previous_issuer = MC_smx_simcall_get_issuer(&prev_state->internal_req);
- XBT_DEBUG("Simcall %d, process %lu (state %d) and simcall %d, process %lu (state %d) are independant",
+ XBT_DEBUG("Simcall %d, process %lu (state %d) and simcall %d, process %lu (state %d) are independent",
req->call, issuer->pid, state->num,
prev_state->internal_req.call,
previous_issuer->pid,
void ProcessComparisonState::initHeapInformation(xbt_mheap_t heap,
std::vector<simgrid::mc::IgnoredHeapRegion>* i)
{
- auto heaplimit = ((struct mdesc *) heap)->heaplimit;
- this->heapsize = ((struct mdesc *) heap)->heapsize;
+ auto heaplimit = heap->heaplimit;
+ this->heapsize = heap->heapsize;
this->to_ignore = i;
this->equals_to.assign(heaplimit * MAX_FRAGMENT_PER_BLOCK, HeapArea());
this->types.assign(heaplimit * MAX_FRAGMENT_PER_BLOCK, nullptr);
std::vector<simgrid::mc::IgnoredHeapRegion>* i1,
std::vector<simgrid::mc::IgnoredHeapRegion>* i2)
{
- if ((((struct mdesc *) heap1)->heaplimit !=
- ((struct mdesc *) heap2)->heaplimit)
- ||
- ((((struct mdesc *) heap1)->heapsize !=
- ((struct mdesc *) heap2)->heapsize)))
+ if ((heap1->heaplimit != heap2->heaplimit) || (heap1->heapsize != heap2->heapsize))
return -1;
- this->heaplimit = ((struct mdesc *) heap1)->heaplimit;
+ this->heaplimit = heap1->heaplimit;
this->std_heap_copy = *mc_model_checker->process().get_heap();
this->processStates[0].initHeapInformation(heap1, i1);
this->processStates[1].initHeapInformation(heap2, i2);
int equal, res_compare = 0;
/* Check busy blocks */
-
i1 = 1;
malloc_info heapinfo_temp1, heapinfo_temp2;
}
i2++;
-
}
if (!equal) {
}
if (heapinfo2b->type < 0) {
- fprintf(stderr, "Unkown mmalloc block type.\n");
+ fprintf(stderr, "Unknown mmalloc block type.\n");
abort();
}
equal = 1;
break;
}
-
}
i2++;
-
}
if (!equal) {
nb_diff1++;
break;
}
-
}
i1++;
-
}
-
}
/* All blocks/fragments are equal to another block/fragment ? */
* @param snapshot1 Snapshot of state 1
* @param snapshot2 Snapshot of state 2
* @param previous
- * @param type_id
+ * @param type
* @param area_size either a byte_size or an elements_count (?)
* @param check_ignore
* @param pointer_level
*
* TODO, handle subfields ((*p).bar.foo, (*p)[5].bar…)
*
- * @param type_id DWARF type ID of the root address
+ * @param type DWARF type ID of the root address
* @param area_size
* @return DWARF type ID for given offset
*/
// Compare the global variables separately for each simulates process:
for (size_t process_index = 0; process_index < process_count; process_index++) {
- int is_diff = compare_global_variables(state,
- object_info, process_index,
- &r1->privatized_data()[process_index],
- &r2->privatized_data()[process_index],
- snapshot1, snapshot2);
- if (is_diff) return 1;
+ if (compare_global_variables(state,
+ object_info, process_index,
+ &r1->privatized_data()[process_index],
+ &r2->privatized_data()[process_index],
+ snapshot1, snapshot2))
+ return 1;
}
return 0;
}
simgrid::mc::Process* process = &mc_model_checker->process();
int errors = 0;
- int res_init;
int hash_result = 0;
if (_sg_mc_hash) {
/* Compare enabled processes */
if (s1->enabled_processes != s2->enabled_processes) {
- XBT_VERB("(%d - %d) Different enabled processes", num1, num2);
- // return 1; ??
+ XBT_VERB("(%d - %d) Different amount of enabled processes", num1, num2);
+ return 1;
}
- unsigned long i = 0;
- size_t size_used1, size_used2;
- int is_diff = 0;
-
/* Compare size of stacks */
- while (i < s1->stacks.size()) {
- size_used1 = s1->stack_sizes[i];
- size_used2 = s2->stack_sizes[i];
+ int is_diff = 0;
+ for (unsigned long i = 0; i < s1->stacks.size(); i++) {
+ size_t size_used1 = s1->stack_sizes[i];
+ size_t size_used2 = s2->stack_sizes[i];
if (size_used1 != size_used2) {
#ifdef MC_DEBUG
XBT_DEBUG("(%d - %d) Different size used in stacks: %zu - %zu", num1, num2, size_used1, size_used2);
return 1;
#endif
}
- i++;
}
+ if (is_diff) // do not proceed if there is any stacks that don't match
+ return 1;
/* Init heap information used in heap comparison algorithm */
xbt_mheap_t heap1 = (xbt_mheap_t)s1->read_bytes(
alloca(sizeof(struct mdesc)), sizeof(struct mdesc),
remote(process->heap_address),
simgrid::mc::ProcessIndexMissing, simgrid::mc::ReadOptions::lazy());
- res_init = state_comparator->initHeapInformation(
- heap1, heap2, &s1->to_ignore, &s2->to_ignore);
+ int res_init = state_comparator->initHeapInformation(heap1, heap2, &s1->to_ignore, &s2->to_ignore);
if (res_init == -1) {
#ifdef MC_DEBUG
/* Stacks comparison */
int diff_local = 0;
-#ifdef MC_DEBUG
- is_diff = 0;
-#endif
for (unsigned int cursor = 0; cursor < s1->stacks.size(); cursor++) {
mc_snapshot_stack_t stack1 = &s1->stacks[cursor];
mc_snapshot_stack_t stack2 = &s2->stacks[cursor];
std::string const& name = region1->object_info()->file_name;
/* Compare global variables */
- is_diff =
- compare_global_variables(*state_comparator,
- region1->object_info(), simgrid::mc::ProcessIndexDisabled,
- region1, region2, s1, s2);
+ if (compare_global_variables(*state_comparator, region1->object_info(), simgrid::mc::ProcessIndexDisabled, region1,
+ region2, s1, s2)) {
- if (is_diff != 0) {
#ifdef MC_DEBUG
XBT_DEBUG("(%d - %d) Different global variables in %s",
num1, num2, name.c_str());
}
#if HAVE_MC
xbt_dynar_reset(simix_global->actors_vector);
- for (std::pair<int, smx_actor_t> kv : simix_global->process_list) {
+ for (std::pair<aid_t, smx_actor_t> kv : simix_global->process_list) {
xbt_dynar_push_as(simix_global->actors_vector, smx_actor_t, kv.second);
}
#endif
* @param size Byte size
* @return Number of memory pages
*/
-static inline __attribute__ ((always_inline))
-std::size_t chunkCount(std::size_t size)
+static XBT_ALWAYS_INLINE std::size_t chunkCount(std::size_t size)
{
size_t page_count = size >> xbt_pagebits;
if (size & (xbt_pagesize-1))
}
/** @brief Split into chunk number and remaining offset */
-static inline __attribute__ ((always_inline))
-std::pair<std::size_t, std::uintptr_t> split(std::uintptr_t offset)
+static XBT_ALWAYS_INLINE std::pair<std::size_t, std::uintptr_t> split(std::uintptr_t offset)
{
return {
offset >> xbt_pagebits,
}
/** Merge chunk number and remaining offset info a global offset */
-static inline __attribute__ ((always_inline))
-std::uintptr_t join(std::size_t page, std::uintptr_t offset)
+static XBT_ALWAYS_INLINE std::uintptr_t join(std::size_t page, std::uintptr_t offset)
{
return ((std::uintptr_t) page << xbt_pagebits) + offset;
}
-static inline __attribute__ ((always_inline))
-std::uintptr_t join(std::pair<std::size_t,std::uintptr_t> value)
+static XBT_ALWAYS_INLINE std::uintptr_t join(std::pair<std::size_t, std::uintptr_t> value)
{
return join(value.first, value.second);
}
-static inline __attribute__ ((always_inline))
-bool sameChunk(std::uintptr_t a, std::uintptr_t b)
+static XBT_ALWAYS_INLINE bool sameChunk(std::uintptr_t a, std::uintptr_t b)
{
return (a >> xbt_pagebits) == (b >> xbt_pagebits);
}
if (r1->issuer == r2->issuer)
return false;
- /* Wait with timeout transitions are not considered by the independence theorem, thus we consider them as dependant with all other transitions */
+ /* Wait with timeout transitions are not considered by the independence theorem, thus we consider them as dependent with all other transitions */
if ((r1->call == SIMCALL_COMM_WAIT && simcall_comm_wait__get__timeout(r1) > 0)
|| (r2->call == SIMCALL_COMM_WAIT
&& simcall_comm_wait__get__timeout(r2) > 0))
break;
default:
- THROW_UNIMPLEMENTED;
+ type = SIMIX_simcall_name(req->call);
+ args = bprintf("??");
+ break;
}
std::string str;
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <cassert>
-#include <cstddef>
-#include <cstdlib>
+#include "simgrid/s4u/Host.hpp"
-#include <memory>
-#include <type_traits>
-#include <utility>
-#include <vector>
-
-#include <xbt/log.h>
-#include <xbt/str.h>
-#include <xbt/swag.h>
-
-#include <simgrid/s4u/host.hpp>
-
-#include "src/simix/smx_private.h"
#include "src/mc/mc_smx.h"
#include "src/mc/ModelChecker.hpp"
XBT_PRIVATE void mc_region_restore_sparse(simgrid::mc::Process* process, mc_mem_region_t reg);
-static inline __attribute__((always_inline))
-void* mc_translate_address_region_chunked(uintptr_t addr, mc_mem_region_t region)
+static XBT_ALWAYS_INLINE void* mc_translate_address_region_chunked(uintptr_t addr, mc_mem_region_t region)
{
auto split = simgrid::mc::mmu::split(addr - region->start().address());
auto pageno = split.first;
return (char*) snapshot_page + offset;
}
-static inline __attribute__((always_inline))
-void* mc_translate_address_region(uintptr_t addr, mc_mem_region_t region, int process_index)
+static XBT_ALWAYS_INLINE void* mc_translate_address_region(uintptr_t addr, mc_mem_region_t region, int process_index)
{
switch (region->storage_type()) {
case simgrid::mc::StorageType::NoData:
extern "C" {
-static inline __attribute__ ((always_inline))
-mc_mem_region_t mc_get_region_hinted(void* addr, simgrid::mc::Snapshot* snapshot, int process_index, mc_mem_region_t region)
+static XBT_ALWAYS_INLINE mc_mem_region_t mc_get_region_hinted(void* addr, simgrid::mc::Snapshot* snapshot,
+ int process_index, mc_mem_region_t region)
{
if (region->contain(simgrid::mc::remote(addr)))
return region;
const void* addr1, simgrid::mc::Snapshot* snapshot1,
const void* addr2, simgrid::mc::Snapshot* snapshot2, int process_index, std::size_t size);
-static inline __attribute__ ((always_inline))
-const void* mc_snapshot_get_heap_end(simgrid::mc::Snapshot* snapshot)
+static XBT_ALWAYS_INLINE const void* mc_snapshot_get_heap_end(simgrid::mc::Snapshot* snapshot)
{
if(snapshot==nullptr)
xbt_die("snapshot is nullptr");
* @param size Size of the data to read in bytes
* @return Pointer where the data is located (target buffer of original location)
*/
-static inline __attribute__((always_inline))
-const void* MC_region_read(
- mc_mem_region_t region, void* target, const void* addr, std::size_t size)
+static XBT_ALWAYS_INLINE const void* MC_region_read(mc_mem_region_t region, void* target, const void* addr,
+ std::size_t size)
{
xbt_assert(region);
}
}
-static inline __attribute__ ((always_inline))
-void* MC_region_read_pointer(mc_mem_region_t region, const void* addr)
+static XBT_ALWAYS_INLINE void* MC_region_read_pointer(mc_mem_region_t region, const void* addr)
{
void* res;
return *(void**) MC_region_read(region, &res, addr, sizeof(void*));
region.size = size;
region.block = ((char*)stack - (char*)heap->heapbase) / BLOCKSIZE + 1;
#if HAVE_SMPI
- if (smpi_privatize_global_variables && process)
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP && process)
region.process_index = smpi_process_index_of_smx_process(process);
else
#endif
-/* Copyright (c) 2010, 2012-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010, 2012-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/instr/instr_private.h"
#include "src/msg/msg_private.h"
#include "src/simix/ActorImpl.hpp"
-/* Copyright (c) 2004-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/NetZone.hpp"
-#include "simgrid/s4u/engine.hpp"
#include "src/msg/msg_private.h"
#if HAVE_LUA
-/* Copyright (c) 2004-2015. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "instr/instr_interface.h"
#include "mc/mc.h"
XBT_DEBUG("ADD MSG LEVELS");
MSG_STORAGE_LEVEL = xbt_lib_add_level(storage_lib, (void_f_pvoid_t) __MSG_storage_destroy);
- MSG_FILE_LEVEL = xbt_lib_add_level(file_lib, (void_f_pvoid_t) __MSG_file_destroy);
if(xbt_cfg_get_boolean("clean-atexit"))
atexit(MSG_exit);
}
int MSG_task_listen(const char *alias)
{
simgrid::s4u::MailboxPtr mbox = simgrid::s4u::Mailbox::byName(alias);
- return !mbox->empty() ||
- (mbox->getImpl()->permanent_receiver && !mbox->getImpl()->done_comm_queue.empty());
+ return mbox->listen() ? 1 : 0;
}
/** \ingroup msg_task_usage
-/* Copyright (c) 2004-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/host.hpp"
-#include "simgrid/s4u/storage.hpp"
+#include "simgrid/s4u/Host.hpp"
+#include "simgrid/s4u/Storage.hpp"
#include "src/msg/msg_private.h"
#include "src/simix/ActorImpl.hpp"
#include "src/simix/smx_host_private.h"
}
/** \ingroup m_host_management
- * \brief Return a dynar containing all the hosts declared at a given point of time
+ * \brief Return a dynar containing all the hosts declared at a given point of time (including VMs)
* \remark The host order in the returned array is generally different from the host creation/declaration order in the
* XML platform (we use a hash table internally)
*/
xbt_dict_foreach(storage_list,cursor,mount_name,storage_name){
storage = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib,storage_name));
- xbt_dict_t content = simcall_storage_get_content(storage);
+ xbt_dict_t content = MSG_storage_get_content(storage);
xbt_dict_set(contents,mount_name, content,nullptr);
}
xbt_dict_free(&storage_list);
-/* Copyright (c) 2004-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/msg/msg_private.h"
+#include "src/surf/storage_interface.hpp"
#include <numeric>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_io, msg, "Logging specific to MSG (io)");
/********************************* File **************************************/
void __MSG_file_get_info(msg_file_t fd){
- msg_file_priv_t priv = MSG_file_priv(fd);
- xbt_dynar_t info = simcall_file_get_info(priv->simdata->smx_file);
+ xbt_dynar_t info = simcall_file_get_info(fd->simdata->smx_file);
sg_size_t *psize;
- priv->content_type = xbt_dynar_pop_as(info, char *);
- priv->storage_type = xbt_dynar_pop_as(info, char *);
- priv->storageId = xbt_dynar_pop_as(info, char *);
- priv->mount_point = xbt_dynar_pop_as(info, char *);
- psize = xbt_dynar_pop_as(info, sg_size_t*);
- priv->size = *psize;
+ fd->content_type = xbt_dynar_pop_as(info, char*);
+ fd->storage_type = xbt_dynar_pop_as(info, char*);
+ fd->storageId = xbt_dynar_pop_as(info, char*);
+ fd->mount_point = xbt_dynar_pop_as(info, char*);
+ psize = xbt_dynar_pop_as(info, sg_size_t*);
+ fd->size = *psize;
xbt_free(psize);
xbt_dynar_free_container(&info);
}
*/
msg_error_t MSG_file_set_data(msg_file_t fd, void *data)
{
- msg_file_priv_t priv = MSG_file_priv(fd);
- priv->data = data;
+ fd->data = data;
return MSG_OK;
}
*/
void *MSG_file_get_data(msg_file_t fd)
{
- msg_file_priv_t priv = MSG_file_priv(fd);
- return priv->data;
+ return fd->data;
}
/** \ingroup msg_file
/* Update the cached information first */
__MSG_file_get_info(fd);
- msg_file_priv_t priv = MSG_file_priv(fd);
XBT_INFO("File Descriptor information:\n"
"\t\tFull path: '%s'\n"
"\t\tSize: %llu\n"
"\t\tStorage Type: '%s'\n"
"\t\tContent Type: '%s'\n"
"\t\tFile Descriptor Id: %d",
- priv->fullpath, priv->size, priv->mount_point,
- priv->storageId, priv->storage_type,
- priv->content_type, priv->desc_id);
+ fd->fullpath, fd->size, fd->mount_point, fd->storageId, fd->storage_type, fd->content_type, fd->desc_id);
}
/** \ingroup msg_file
*/
sg_size_t MSG_file_read(msg_file_t fd, sg_size_t size)
{
- msg_file_priv_t file_priv = MSG_file_priv(fd);
sg_size_t read_size;
- if (file_priv->size == 0) /* Nothing to read, return */
+ if (fd->size == 0) /* Nothing to read, return */
return 0;
/* Find the host where the file is physically located and read it */
- msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, file_priv->storageId));
+ msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, fd->storageId));
msg_storage_priv_t storage_priv_src = MSG_storage_priv(storage_src);
- msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
- read_size = simcall_file_read(file_priv->simdata->smx_file, size, attached_host);
+ msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
+ read_size = simcall_file_read(fd->simdata->smx_file, size, attached_host);
if (strcmp(storage_priv_src->hostname, MSG_host_self()->cname())) {
/* the file is hosted on a remote host, initiate a communication between src and dest hosts for data transfer */
*/
sg_size_t MSG_file_write(msg_file_t fd, sg_size_t size)
{
- msg_file_priv_t file_priv = MSG_file_priv(fd);
-
if (size == 0) /* Nothing to write, return */
return 0;
/* Find the host where the file is physically located (remote or local)*/
- msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, file_priv->storageId));
+ msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, fd->storageId));
msg_storage_priv_t storage_priv_src = MSG_storage_priv(storage_src);
- msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
+ msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
if (strcmp(storage_priv_src->hostname, MSG_host_self()->cname())) {
/* the file is hosted on a remote host, initiate a communication between src and dest hosts for data transfer */
}
}
/* Write file on local or remote host */
- sg_size_t offset = simcall_file_tell(file_priv->simdata->smx_file);
- sg_size_t write_size = simcall_file_write(file_priv->simdata->smx_file, size, attached_host);
- file_priv->size = offset+write_size;
+ sg_size_t offset = simcall_file_tell(fd->simdata->smx_file);
+ sg_size_t write_size = simcall_file_write(fd->simdata->smx_file, size, attached_host);
+ fd->size = offset + write_size;
return write_size;
}
*/
msg_file_t MSG_file_open(const char* fullpath, void* data)
{
- char *name;
- msg_file_priv_t priv = xbt_new(s_msg_file_priv_t, 1);
- priv->data = data;
- priv->fullpath = xbt_strdup(fullpath);
- priv->simdata = xbt_new0(s_simdata_file_t,1);
- priv->simdata->smx_file = simcall_file_open(fullpath, MSG_host_self());
- priv->desc_id = MSG_host_get_file_descriptor_id(MSG_host_self());
-
- name = bprintf("%s:%s:%d", priv->fullpath, MSG_host_self()->cname(), priv->desc_id);
+ msg_file_t fd = xbt_new(s_msg_file_priv_t, 1);
+ fd->data = data;
+ fd->fullpath = xbt_strdup(fullpath);
+ fd->simdata = xbt_new0(s_simdata_file_t, 1);
+ fd->simdata->smx_file = simcall_file_open(fullpath, MSG_host_self());
+ fd->desc_id = MSG_host_get_file_descriptor_id(MSG_host_self());
- xbt_lib_set(file_lib, name, MSG_FILE_LEVEL, priv);
- msg_file_t fd = static_cast<msg_file_t>(xbt_lib_get_elm_or_null(file_lib, name));
__MSG_file_get_info(fd);
- xbt_free(name);
return fd;
}
*/
int MSG_file_close(msg_file_t fd)
{
- char *name;
- msg_file_priv_t priv = MSG_file_priv(fd);
- if (priv->data)
- xbt_free(priv->data);
-
- int res = simcall_file_close(priv->simdata->smx_file, MSG_host_self());
- name = bprintf("%s:%s:%d", priv->fullpath, MSG_host_self()->cname(), priv->desc_id);
- MSG_host_release_file_descriptor_id(MSG_host_self(), priv->desc_id);
- xbt_lib_unset(file_lib, name, MSG_FILE_LEVEL, 1);
- xbt_free(name);
+ if (fd->data)
+ xbt_free(fd->data);
+
+ int res = simcall_file_close(fd->simdata->smx_file, MSG_host_self());
+ MSG_host_release_file_descriptor_id(MSG_host_self(), fd->desc_id);
+ __MSG_file_destroy(fd);
+
return res;
}
*/
msg_error_t MSG_file_unlink(msg_file_t fd)
{
- msg_file_priv_t file_priv = MSG_file_priv(fd);
/* Find the host where the file is physically located (remote or local)*/
- msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, file_priv->storageId));
+ msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, fd->storageId));
msg_storage_priv_t storage_priv_src = MSG_storage_priv(storage_src);
- msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
- int res = simcall_file_unlink(file_priv->simdata->smx_file, attached_host);
+ msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
+ int res = simcall_file_unlink(fd->simdata->smx_file, attached_host);
+ __MSG_file_destroy(fd);
return static_cast<msg_error_t>(res);
}
* \return the size of the file (as a #sg_size_t)
*/
sg_size_t MSG_file_get_size(msg_file_t fd){
- msg_file_priv_t priv = MSG_file_priv(fd);
- return simcall_file_get_size(priv->simdata->smx_file);
+ return simcall_file_get_size(fd->simdata->smx_file);
}
/**
*/
msg_error_t MSG_file_seek(msg_file_t fd, sg_offset_t offset, int origin)
{
- msg_file_priv_t priv = MSG_file_priv(fd);
- return static_cast<msg_error_t>(simcall_file_seek(priv->simdata->smx_file, offset, origin));
+ return static_cast<msg_error_t>(simcall_file_seek(fd->simdata->smx_file, offset, origin));
}
/**
*/
sg_size_t MSG_file_tell(msg_file_t fd)
{
- msg_file_priv_t priv = MSG_file_priv(fd);
- return simcall_file_tell(priv->simdata->smx_file);
+ return simcall_file_tell(fd->simdata->smx_file);
}
const char *MSG_file_get_name(msg_file_t fd) {
xbt_assert((fd != nullptr), "Invalid parameters");
- msg_file_priv_t priv = MSG_file_priv(fd);
- return priv->fullpath;
+ return fd->fullpath;
}
/**
*/
msg_error_t MSG_file_move (msg_file_t fd, const char* fullpath)
{
- msg_file_priv_t priv = MSG_file_priv(fd);
- return static_cast<msg_error_t>(simcall_file_move(priv->simdata->smx_file, fullpath));
+ return static_cast<msg_error_t>(simcall_file_move(fd->simdata->smx_file, fullpath));
}
/**
*/
msg_error_t MSG_file_rcopy (msg_file_t file, msg_host_t host, const char* fullpath)
{
- msg_file_priv_t file_priv = MSG_file_priv(file);
- sg_size_t read_size;
-
/* Find the host where the file is physically located and read it */
- msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, file_priv->storageId));
+ msg_storage_t storage_src = static_cast<msg_storage_t>(xbt_lib_get_elm_or_null(storage_lib, file->storageId));
msg_storage_priv_t storage_priv_src = MSG_storage_priv(storage_src);
msg_host_t attached_host = MSG_host_by_name(storage_priv_src->hostname);
MSG_file_seek(file, 0, SEEK_SET);
- read_size = simcall_file_read(file_priv->simdata->smx_file, file_priv->size, attached_host);
+ sg_size_t read_size = simcall_file_read(file->simdata->smx_file, file->size, attached_host);
/* Find the real host destination where the file will be physically stored */
- xbt_dict_cursor_t cursor = nullptr;
+ xbt_dict_cursor_t cursor = nullptr;
msg_storage_t storage_dest = nullptr;
msg_host_t host_dest;
size_t longest_prefix_length = 0;
/**
* \brief Destroys a file (internal call only)
*/
-void __MSG_file_destroy(msg_file_priv_t file) {
+void __MSG_file_destroy(msg_file_t file)
+{
xbt_free(file->fullpath);
xbt_free(file->simdata);
xbt_free(file);
msg_storage_t __MSG_storage_create(smx_storage_t storage)
{
- const char *name = SIMIX_storage_get_name(storage);
- const char *host = SIMIX_storage_get_host(storage);
msg_storage_priv_t storage_private = xbt_new0(s_msg_storage_priv_t, 1);
- storage_private->hostname = host;
- xbt_lib_set(storage_lib,name,MSG_STORAGE_LEVEL,storage_private);
- return xbt_lib_get_elm_or_null(storage_lib, name);
+
+ storage_private->name = SIMIX_storage_get_name(storage);
+ storage_private->hostname = surf_storage_get_host(storage);
+ storage_private->size = surf_storage_get_size(storage);
+
+ xbt_lib_set(storage_lib, storage_private->name, MSG_STORAGE_LEVEL, storage_private);
+ return xbt_lib_get_elm_or_null(storage_lib, storage_private->name);
}
/**
*/
const char *MSG_storage_get_name(msg_storage_t storage) {
xbt_assert((storage != nullptr), "Invalid parameters");
- return SIMIX_storage_get_name(storage);
+ msg_storage_priv_t priv = MSG_storage_priv(storage);
+ return priv->name;
}
/** \ingroup msg_storage_management
* \return the free space size of the storage element (as a #sg_size_t)
*/
sg_size_t MSG_storage_get_free_size(msg_storage_t storage){
- return simcall_storage_get_free_size(storage);
+ return simgrid::simix::kernelImmediate([storage] { return surf_storage_resource_priv(storage)->getFreeSize(); });
}
/** \ingroup msg_storage_management
* \return the used space size of the storage element (as a #sg_size_t)
*/
sg_size_t MSG_storage_get_used_size(msg_storage_t storage){
- return simcall_storage_get_used_size(storage);
+ return simgrid::simix::kernelImmediate([storage] { return surf_storage_resource_priv(storage)->getUsedSize(); });
}
/** \ingroup msg_storage_management
*/
xbt_dict_t MSG_storage_get_content(msg_storage_t storage)
{
- return SIMIX_storage_get_content(storage);
+ std::map<std::string, sg_size_t*>* content =
+ simgrid::simix::kernelImmediate([storage] { return surf_storage_resource_priv(storage)->getContent(); });
+ xbt_dict_t content_dict = xbt_dict_new_homogeneous(nullptr);
+
+ for (auto entry : *content) {
+ xbt_dict_set(content_dict, entry.first.c_str(), entry.second, nullptr);
+ }
+ return content_dict;
}
/** \ingroup msg_storage_management
*/
sg_size_t MSG_storage_get_size(msg_storage_t storage)
{
- return SIMIX_storage_get_size(storage);
+ msg_storage_priv_t priv = MSG_storage_priv(storage);
+ return priv->size;
}
/** \ingroup msg_storage_management
msg_process_t sender = nullptr;
msg_process_t receiver = nullptr;
msg_host_t source = nullptr;
- double priority = 0.0;
- double bound = 0.0; /* Capping for CPU resource */
- double rate = 0.0; /* Capping for network resource */
+
+ double priority = 1.0;
+ double bound = 0.0; /* Capping for CPU resource, or 0 for no capping */
+ double rate = -1; /* Capping for network resource, or -1 for no capping*/
bool isused = false; /* Indicates whether the task is used in SIMIX currently */
int host_nb = 0; /* ==0 if sequential task; parallel task if not */
};
}
-typedef struct process_arg {
- const char *name;
- xbt_main_func_t code;
- void *data;
- msg_host_t m_host;
- int argc;
- char **argv;
- double kill_time;
-} s_process_arg_t, *process_arg_t;
-
typedef struct msg_comm {
smx_activity_t s_comm; /* SIMIX communication object encapsulated (the same for both processes) */
msg_task_t task_sent; /* task sent (NULL for the receiver) */
unsigned long int sent_msg; /* Total amount of messages sent during the simulation */
void (*task_copy_callback) (msg_task_t task, msg_process_t src, msg_process_t dst);
void_f_pvoid_t process_data_cleanup;
-} s_MSG_Global_t, *MSG_Global_t;
+} s_MSG_Global_t;
+typedef s_MSG_Global_t* MSG_Global_t;
SG_BEGIN_DECL()
XBT_PRIVATE msg_host_t __MSG_host_create(sg_host_t host);
XBT_PRIVATE msg_storage_t __MSG_storage_create(smx_storage_t storage);
XBT_PRIVATE void __MSG_storage_destroy(msg_storage_priv_t host);
-XBT_PRIVATE void __MSG_file_destroy(msg_file_priv_t host);
+XBT_PRIVATE void __MSG_file_destroy(msg_file_t file);
XBT_PRIVATE void MSG_process_cleanup_from_SIMIX(smx_actor_t smx_proc);
XBT_PRIVATE smx_actor_t MSG_process_create_from_SIMIX(const char* name, std::function<void()> code, void* data,
SG_END_DECL()
-XBT_PUBLIC(msg_process_t)
-MSG_process_create_from_stdfunc(const char* name, std::function<void()> code, void* data, msg_host_t host,
- xbt_dict_t properties);
-
inline void simdata_task::setUsed()
{
if (this->isused)
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "msg_private.h"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/simix/ActorImpl.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(msg_process, msg, "Logging specific to MSG (process)");
/******************************** Process ************************************/
/**
- * \brief Cleans the MSG data of a process.
- * \param smx_proc a SIMIX process
+ * \brief Cleans the MSG data of an actor
+ * \param smx_actor a SIMIX actor
*/
void MSG_process_cleanup_from_SIMIX(smx_actor_t smx_actor)
{
{
std::function<void()> function;
if (code)
- function = simgrid::xbt::wrapMain(code, argc, const_cast<const char*const*>(argv));
+ function = simgrid::xbt::wrapMain(code, argc, static_cast<const char* const*>(argv));
+
msg_process_t res = MSG_process_create_from_stdfunc(name, std::move(function), data, host, properties);
for (int i = 0; i != argc; ++i)
xbt_free(argv[i]);
task->data = data;
/* Simulator Data */
- simdata->compute = nullptr;
- simdata->comm = nullptr;
simdata->bytes_amount = message_size;
simdata->flops_amount = flop_amount;
- simdata->sender = nullptr;
- simdata->receiver = nullptr;
- simdata->source = nullptr;
- simdata->priority = 1.0;
- simdata->bound = 0;
- simdata->rate = -1.0;
- simdata->isused = 0;
-
- simdata->host_nb = 0;
- simdata->host_list = nullptr;
- simdata->flops_parallel_amount = nullptr;
- simdata->bytes_parallel_amount = nullptr;
+
TRACE_msg_task_create(task);
return task;
if (MSG_vm_is_running(vm))
MSG_vm_shutdown(vm);
- xbt_assert(MSG_vm_is_created(vm), "shutdown the given VM before destroying it");
+ xbt_assert(MSG_vm_is_created(vm) || __MSG_vm_is_state(vm, SURF_VM_STATE_DESTROYED),
+ "shutdown the given VM before destroying it");
/* Then, destroy the VM object */
simgrid::simix::kernelImmediate([vm]() {
SIMIX_process_kill(smx_process, issuer);
}
- setState(SURF_VM_STATE_CREATED);
+ setState(SURF_VM_STATE_DESTROYED);
/* FIXME: we may have to do something at the surf layer, e.g., vcpu action */
}
/* create a cpu action bound to the pm model at the destination. */
surf::CpuAction* new_cpu_action = static_cast<surf::CpuAction*>(destination->pimpl_cpu->execution_start(0));
- surf::Action::State state = action_->getState();
- if (state != surf::Action::State::done)
- XBT_CRITICAL("FIXME: may need a proper handling, %d", static_cast<int>(state));
if (action_->getRemainsNoUpdate() > 0)
XBT_CRITICAL("FIXME: need copy the state(?), %f", action_->getRemainsNoUpdate());
{
return pimpl_vm_->getPm();
}
+e_surf_vm_state_t VirtualMachine::getState()
+{
+ return pimpl_vm_->getState();
+}
/** @brief Retrieve a copy of the parameters of that VM/PM
* @details The ramsize and overcommit fields are used on the PM too */
-/* Copyright (c) 2006-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "xbt/log.h"
#include "simgrid/s4u/Actor.hpp"
-#include "simgrid/s4u/comm.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Comm.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/Mailbox.hpp"
#include "src/kernel/context/Context.hpp"
-XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor,"S4U actors");
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor, "S4U actors");
namespace simgrid {
namespace s4u {
simcall_process_auto_restart_set(pimpl_,autorestart);
}
+void Actor::onExit(int_f_pvoid_pvoid_t fun, void* data)
+{
+ simcall_process_on_exit(pimpl_, fun, data);
+}
+
+void Actor::migrate(Host* new_host)
+{
+ simcall_process_set_host(pimpl_, new_host);
+}
+
s4u::Host* Actor::host()
{
return this->pimpl_->host;
return this->pimpl_->name;
}
-int Actor::pid()
+aid_t Actor::pid()
{
return this->pimpl_->pid;
}
-int Actor::ppid()
+aid_t Actor::ppid()
{
return this->pimpl_->ppid;
}
+void Actor::suspend()
+{
+ simcall_process_suspend(pimpl_);
+}
+
+void Actor::resume()
+{
+ simcall_process_resume(pimpl_);
+}
+
+int Actor::isSuspended()
+{
+ return simcall_process_is_suspended(pimpl_);
+}
+
void Actor::setKillTime(double time) {
simcall_process_set_kill_time(pimpl_,time);
}
return simcall_process_get_kill_time(pimpl_);
}
-void Actor::kill(int pid) {
+void Actor::kill(aid_t pid)
+{
smx_actor_t process = SIMIX_process_from_PID(pid);
if(process != nullptr) {
simcall_process_kill(process);
// ***** Static functions *****
-ActorPtr Actor::byPid(int pid)
+ActorPtr Actor::byPid(aid_t pid)
{
smx_actor_t process = SIMIX_process_from_PID(pid);
if (process != nullptr)
return ActorPtr();
}
-void Actor::killAll() {
+void Actor::killAll()
+{
simcall_process_killall(1);
}
+void Actor::killAll(int resetPid)
+{
+ simcall_process_killall(resetPid);
+}
+
// ***** this_actor *****
namespace this_actor {
c.wait();
}
-int pid()
+void send(MailboxPtr chan, void* payload, double simulatedSize, double timeout)
+{
+ Comm& c = Comm::send_init(chan);
+ c.setRemains(simulatedSize);
+ c.setSrcData(payload);
+ // c.start() is optional.
+ c.wait(timeout);
+}
+
+Comm& isend(MailboxPtr chan, void* payload, double simulatedSize)
+{
+ return Comm::send_async(chan, payload, simulatedSize);
+}
+
+Comm& irecv(MailboxPtr chan, void** data)
+{
+ return Comm::recv_async(chan, data);
+}
+
+aid_t pid()
{
return SIMIX_process_self()->pid;
}
-int ppid()
+aid_t ppid()
{
return SIMIX_process_self()->ppid;
}
{
return SIMIX_process_self()->name;
}
+
+Host* host()
+{
+ return SIMIX_process_self()->host;
+}
+
+void suspend()
+{
+ simcall_process_suspend(SIMIX_process_self());
+}
+
+void resume()
+{
+ simcall_process_resume(SIMIX_process_self());
+}
+
+int isSuspended()
+{
+ return simcall_process_is_suspended(SIMIX_process_self());
+}
+
+void kill()
+{
+ simcall_process_kill(SIMIX_process_self());
+}
+
+void onExit(int_f_pvoid_pvoid_t fun, void* data)
+{
+ simcall_process_on_exit(SIMIX_process_self(), fun, data);
+}
+
+void migrate(Host* new_host)
+{
+ simcall_process_set_host(SIMIX_process_self(), new_host);
+}
}
}
}
-/* Copyright (c) 2006-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "xbt/log.h"
#include "src/msg/msg_private.h"
-#include "simgrid/s4u/comm.hpp"
-#include <simgrid/s4u/Mailbox.hpp>
-
+#include "simgrid/s4u/Comm.hpp"
+#include "simgrid/s4u/Mailbox.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(s4u_comm,s4u_activity,"S4U asynchronous communications");
s4u::Comm &Comm::recv_async(MailboxPtr dest, void **data) {
s4u::Comm &res = s4u::Comm::recv_init(dest);
- res.setDstData(data);
+ res.setDstData(data, sizeof(*data));
res.start();
return res;
}
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
#include <exception>
#include <mutex>
#include <xbt/ex.hpp>
#include <xbt/log.hpp>
-#include "src/simix/smx_synchro_private.h"
-#include "simgrid/s4u/conditionVariable.hpp"
+#include "simgrid/s4u/ConditionVariable.hpp"
#include "simgrid/simix.h"
+#include "src/simix/smx_synchro_private.h"
namespace simgrid {
namespace s4u {
/**
* Wait functions
*/
+void ConditionVariable::wait(MutexPtr lock)
+{
+ simcall_cond_wait(cond_, lock->mutex_);
+}
+
void ConditionVariable::wait(std::unique_lock<Mutex>& lock) {
simcall_cond_wait(cond_, lock.mutex()->mutex_);
}
/* s4u::Engine Simulation Engine and global functions. */
-/* Copyright (c) 2006-2015. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "instr/instr_interface.h"
#include "mc/mc.h"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/Mailbox.hpp"
#include "simgrid/s4u/NetZone.hpp"
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
-#include "simgrid/s4u/storage.hpp"
+#include "simgrid/s4u/Storage.hpp"
#include "simgrid/simix.h"
#include "src/kernel/EngineImpl.hpp"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/kernel/routing/NetZoneImpl.hpp"
#include "src/surf/network_interface.hpp"
-#include "surf/surf.h" // routing_platf. FIXME:KILLME. SOON
+#include "surf/surf.h" // routing_platf. FIXME:KILLME. SOON
XBT_LOG_NEW_CATEGORY(s4u,"Log channels of the S4U (Simgrid for you) interface");
delete point;
});
}
+
+bool Engine::isInitialized()
+{
+ return Engine::instance_ != nullptr;
}
}
+} // namespace
-/* Copyright (c) 2015. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2015-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "xbt/log.h"
+#include "simgrid/simix.h"
#include "src/msg/msg_private.h"
+#include "xbt/log.h"
#include "simgrid/s4u/Actor.hpp"
-#include "simgrid/s4u/comm.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Comm.hpp"
+#include "simgrid/s4u/File.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/Mailbox.hpp"
XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_file,"S4U files");
-#include "simgrid/s4u/file.hpp"
-#include "simgrid/s4u/host.hpp"
-#include "simgrid/simix.h"
namespace simgrid {
namespace s4u {
-File::File(const char* fullpath, void* userdata) : path_(fullpath)
+File::File(const char* fullpath, void* userdata) : path_(fullpath), userdata_(userdata)
{
// this cannot fail because we get a xbt_die if the mountpoint does not exist
pimpl_ = simcall_file_open(fullpath, Host::current());
#include <map>
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
-#include "simgrid/s4u/storage.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
+#include "simgrid/s4u/Storage.hpp"
#include "simgrid/simix.hpp"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/msg/msg_private.h"
return this->pimpl_cpu->getNbPStates();
}
+/**
+ * \brief Return the list of actors attached to an host.
+ *
+ * \param whereto a vector in which we should push actors living on that host
+ */
+void Host::actorList(std::vector<ActorPtr>* whereto)
+{
+ smx_actor_t actor = NULL;
+ xbt_swag_foreach(actor, this->extension<simgrid::simix::Host>()->process_list)
+ {
+ whereto->push_back(actor->ciface());
+ }
+}
+
/**
* \brief Find a route toward another host
*
return pimpl_->comm_queue.empty();
}
+bool Mailbox::listen()
+{
+ return !this->empty() || (pimpl_->permanent_receiver && !pimpl_->done_comm_queue.empty());
+}
+
smx_activity_t Mailbox::front()
{
return pimpl_->comm_queue.empty() ? nullptr : pimpl_->comm_queue.front();
-/* Copyright (c) 2006-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "xbt/log.h"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/s4u/NetZone.hpp"
-#include "simgrid/s4u/host.hpp"
#include "simgrid/simix.hpp"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/surf/network_interface.hpp" // Link FIXME: move to proper header
-/* Copyright (c) 2006-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/storage.hpp"
-
+#include "simgrid/s4u/Storage.hpp"
+#include "simgrid/simix.hpp"
+#include "src/surf/storage_interface.hpp"
#include "xbt/lib.h"
+#include <unordered_map>
+
extern xbt_lib_t storage_lib;
namespace simgrid {
namespace s4u {
-boost::unordered_map <std::string, Storage *> *Storage::storages_ = new boost::unordered_map<std::string, Storage*> ();
+std::unordered_map<std::string, Storage*>* Storage::storages_ = new std::unordered_map<std::string, Storage*>();
+
Storage::Storage(std::string name, smx_storage_t inferior) :
name_(name), pimpl_(inferior)
{
+ hostname_ = surf_storage_get_host(pimpl_);
+ size_ = surf_storage_get_size(pimpl_);
storages_->insert({name, this});
}
Storage::~Storage() = default;
-smx_storage_t Storage::inferior() {
+smx_storage_t Storage::inferior()
+{
return pimpl_;
}
-Storage &Storage::byName(const char*name) {
- s4u::Storage *res = nullptr;
+
+Storage& Storage::byName(const char* name)
+{
+ s4u::Storage* res = nullptr;
try {
res = storages_->at(name);
} catch (std::out_of_range& e) {
return *res;
}
-const char*Storage::name() {
+const char* Storage::name()
+{
return name_.c_str();
}
-sg_size_t Storage::sizeFree() {
- return simcall_storage_get_free_size(pimpl_);
+const char* Storage::host()
+{
+ return hostname_.c_str();
}
-sg_size_t Storage::sizeUsed() {
- return simcall_storage_get_used_size(pimpl_);
+
+sg_size_t Storage::sizeFree()
+{
+ return simgrid::simix::kernelImmediate([this] { return surf_storage_resource_priv(pimpl_)->getFreeSize(); });
}
+
+sg_size_t Storage::sizeUsed()
+{
+ return simgrid::simix::kernelImmediate([this] { return surf_storage_resource_priv(pimpl_)->getUsedSize(); });
+}
+
sg_size_t Storage::size() {
- return SIMIX_storage_get_size(pimpl_);
+ return size_;
+}
+
+xbt_dict_t Storage::properties()
+{
+ return simcall_storage_get_properties(pimpl_);
+}
+
+const char* Storage::property(const char* key)
+{
+ return static_cast<const char*>(xbt_dict_get_or_null(this->properties(), key));
+}
+
+void Storage::setProperty(const char* key, char* value)
+{
+ xbt_dict_set(this->properties(), key, value, nullptr);
+}
+
+std::map<std::string, sg_size_t*>* Storage::content()
+{
+ return simgrid::simix::kernelImmediate([this] { return surf_storage_resource_priv(this->pimpl_)->getContent(); });
+}
+
+std::unordered_map<std::string, Storage*>* Storage::allStorages()
+{
+ return storages_;
}
} /* namespace s4u */
-/* Copyright (c) 2009-2016. The SimGrid Team.
+/* Copyright (c) 2009-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "src/internal_config.h"
+#include "simdag_private.hpp"
#include "simgrid/simdag.h"
+#include "src/internal_config.h"
#include "xbt/file.h"
#include <string.h>
-#include "simdag_private.hpp"
+#include <unordered_map>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(sd_dotparse, sd, "Parsing DOT files");
#if HAVE_GRAPHVIZ
#include <graphviz/cgraph.h>
-typedef enum {
- sequential =0,
- parallel
-} seq_par_t;
-
-xbt_dynar_t SD_dotload_generic(const char * filename, seq_par_t seq_or_par, bool schedule);
+xbt_dynar_t SD_dotload_generic(const char* filename, bool sequential, bool schedule);
static void dot_task_p_free(void *task) {
SD_task_destroy(*(SD_task_t *)task);
* If this attribute is ommited, the default value is zero.
*/
xbt_dynar_t SD_dotload(const char *filename) {
- return SD_dotload_generic(filename, sequential, false);
+ return SD_dotload_generic(filename, true, false);
}
xbt_dynar_t SD_PTG_dotload(const char * filename) {
- return SD_dotload_generic(filename, parallel, false);
+ return SD_dotload_generic(filename, false, false);
}
xbt_dynar_t SD_dotload_with_sched(const char *filename) {
- return SD_dotload_generic(filename, sequential, true);
+ return SD_dotload_generic(filename, true, true);
}
static int edge_compare(const void *a, const void *b)
{
unsigned va = AGSEQ(*(Agedge_t **)a);
unsigned vb = AGSEQ(*(Agedge_t **)b);
- return va == vb ? 0 : (va < vb ? -1 : 1);
+ if (va == vb)
+ return 0;
+ else
+ return (va < vb ? -1 : 1);
}
-xbt_dynar_t SD_dotload_generic(const char * filename, seq_par_t seq_or_par, bool schedule){
+xbt_dynar_t SD_dotload_generic(const char* filename, bool sequential, bool schedule)
+{
xbt_assert(filename, "Unable to use a null file descriptor\n");
FILE *in_file = fopen(filename, "r");
xbt_assert(in_file != nullptr, "Failed to open file: %s", filename);
SD_task_t root;
SD_task_t end;
SD_task_t task;
- xbt_dict_t computers;
xbt_dynar_t computer = nullptr;
xbt_dict_cursor_t dict_cursor;
bool schedule_success = true;
- xbt_dict_t jobs = xbt_dict_new_homogeneous(nullptr);
+ std::unordered_map<std::string, SD_task_t> jobs;
xbt_dynar_t result = xbt_dynar_new(sizeof(SD_task_t), dot_task_p_free);
Agraph_t * dag_dot = agread(in_file, NIL(Agdisc_t *));
- if (schedule)
- computers = xbt_dict_new_homogeneous(nullptr);
+ xbt_dict_t computers = xbt_dict_new_homogeneous(nullptr);
/* Create all the nodes */
Agnode_t *node = nullptr;
for (node = agfstnode(dag_dot); node; node = agnxtnode(dag_dot, node)) {
char *name = agnameof(node);
double amount = atof(agget(node, (char*)"size"));
- task = static_cast<SD_task_t>(xbt_dict_get_or_null(jobs, name));
- if (task == nullptr) {
- if (seq_or_par == sequential){
+ if (jobs.find(name) == jobs.end()) {
+ if (sequential) {
XBT_DEBUG("See <job id=%s amount =%.0f>", name, amount);
task = SD_task_create_comp_seq(name, nullptr , amount);
} else {
task = SD_task_create_comp_par_amdahl(name, nullptr , amount, alpha);
}
- xbt_dict_set(jobs, name, task, nullptr);
+ jobs.insert({std::string(name), task});
if (strcmp(name,"root") && strcmp(name,"end"))
xbt_dynar_push(result, &task);
- if((seq_or_par == sequential) &&
- ((schedule && schedule_success) || XBT_LOG_ISENABLED(sd_dotparse, xbt_log_priority_verbose))){
+ if ((sequential) &&
+ ((schedule && schedule_success) || XBT_LOG_ISENABLED(sd_dotparse, xbt_log_priority_verbose))) {
/* try to take the information to schedule the task only if all is right*/
char *char_performer = agget(node, (char *) "performer");
char *char_order = agget(node, (char *) "order");
int performer = ((!char_performer || !strcmp(char_performer,"")) ? -1:atoi(char_performer));
int order = ((!char_order || !strcmp(char_order, ""))? -1:atoi(char_order));
- if((performer != -1 && order != -1) && performer < (int) sg_host_count()){
+ if ((performer != -1 && order != -1) && performer < static_cast<int>(sg_host_count())) {
/* required parameters are given and less performers than hosts are required */
XBT_DEBUG ("Task '%s' is scheduled on workstation '%d' in position '%d'", task->name, performer, order);
computer = static_cast<xbt_dynar_t> (xbt_dict_get_or_null(computers, char_performer));
}
/*Check if 'root' and 'end' nodes have been explicitly declared. If not, create them. */
- root = static_cast<SD_task_t>(xbt_dict_get_or_null(jobs, "root"));
- if (root == nullptr)
- root = (seq_or_par == sequential?SD_task_create_comp_seq("root", nullptr, 0):
- SD_task_create_comp_par_amdahl("root", nullptr, 0, 0));
+ if (jobs.find("root") == jobs.end())
+ root = (sequential ? SD_task_create_comp_seq("root", nullptr, 0)
+ : SD_task_create_comp_par_amdahl("root", nullptr, 0, 0));
+ else
+ root = jobs.at("root");
SD_task_set_state(root, SD_SCHEDULABLE); /* by design the root task is always SCHEDULABLE */
xbt_dynar_insert_at(result, 0, &root); /* Put it at the beginning of the dynar */
- end = static_cast<SD_task_t>(xbt_dict_get_or_null(jobs, "end"));
- if (end == nullptr)
- end = (seq_or_par == sequential?SD_task_create_comp_seq("end", nullptr, 0):
- SD_task_create_comp_par_amdahl("end", nullptr, 0, 0));
+ if (jobs.find("end") == jobs.end())
+ end = (sequential ? SD_task_create_comp_seq("end", nullptr, 0)
+ : SD_task_create_comp_par_amdahl("end", nullptr, 0, 0));
+ else
+ end = jobs.at("end");
/* Create edges */
xbt_dynar_t edges = xbt_dynar_new(sizeof(Agedge_t*), nullptr);
char *dst_name=agnameof(aghead(edge));
double size = atof(agget(edge, (char *) "size"));
- SD_task_t src = static_cast<SD_task_t>(xbt_dict_get_or_null(jobs, src_name));
- SD_task_t dst = static_cast<SD_task_t>(xbt_dict_get_or_null(jobs, dst_name));
+ SD_task_t src = jobs.at(src_name);
+ SD_task_t dst = jobs.at(dst_name);
if (size > 0) {
- char *name = bprintf("%s->%s", src_name, dst_name);
- XBT_DEBUG("See <transfer id=%s amount = %.0f>", name, size);
- task = static_cast<SD_task_t>(xbt_dict_get_or_null(jobs, name));
- if (task == nullptr) {
- if (seq_or_par == sequential)
- task = SD_task_create_comm_e2e(name, nullptr , size);
+ std::string name = std::string(src_name) + "->" + dst_name;
+ XBT_DEBUG("See <transfer id=%s amount = %.0f>", name.c_str(), size);
+ if (jobs.find(name) == jobs.end()) {
+ if (sequential)
+ task = SD_task_create_comm_e2e(name.c_str(), nullptr, size);
else
- task = SD_task_create_comm_par_mxn_1d_block(name, nullptr , size);
+ task = SD_task_create_comm_par_mxn_1d_block(name.c_str(), nullptr, size);
SD_task_dependency_add(nullptr, nullptr, src, task);
SD_task_dependency_add(nullptr, nullptr, task, dst);
- xbt_dict_set(jobs, name, task, nullptr);
+ jobs.insert({name, task});
xbt_dynar_push(result, &task);
} else {
- XBT_WARN("Task '%s' is defined more than once", name);
+ XBT_WARN("Task '%s' is defined more than once", name.c_str());
}
- xbt_free(name);
} else {
SD_task_dependency_add(nullptr, nullptr, src, dst);
}
}
agclose(dag_dot);
- xbt_dict_free(&jobs);
fclose(in_file);
if(schedule){
}
xbt_dynar_free(&computer);
}
- xbt_dict_free(&computers);
} else {
XBT_WARN("The scheduling is ignored");
xbt_dict_foreach(computers,dict_cursor,computer_name,computer)
xbt_dynar_free(&computer);
- xbt_dict_free(&computers);
xbt_dynar_free(&result);
result = nullptr;
}
}
+ xbt_dict_free(&computers);
+
if (result && !acyclic_graph_detail(result)) {
char* base = xbt_basename(filename);
XBT_ERROR("The DOT described in %s is not a DAG. It contains a cycle.", base);
-/* Copyright (c) 2006-2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2006-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "simdag_private.hpp"
-#include "simgrid/host.h"
-#include "simgrid/s4u/engine.hpp"
+#include "simgrid/s4u/Engine.hpp"
#include "simgrid/sg_config.h"
#include "src/include/instr/instr_interface.h"
#include "src/surf/surf_interface.hpp"
* \return a dynar of \ref SD_task_t whose state has changed.
* \see SD_task_schedule(), SD_task_watch()
*/
+void SD_simulate(double how_long)
+{
+ simgrid::sd::simulate(how_long);
+}
-xbt_dynar_t SD_simulate(double how_long) {
+void SD_simulate_with_update(double how_long, xbt_dynar_t changed_tasks_dynar)
+{
std::set<SD_task_t> *changed_tasks = simgrid::sd::simulate(how_long);
- xbt_dynar_t changed_tasks_dynar = xbt_dynar_new(sizeof(SD_task_t), nullptr);
for (const auto& task : *changed_tasks)
xbt_dynar_push(changed_tasks_dynar, &task);
- return changed_tasks_dynar;
}
/** @brief Returns the current clock, in seconds */
-/* Copyright (c) 2013-2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2013-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <vector>
-#include "xbt/dict.h"
#include "simgrid/host.h"
-#include <xbt/Extendable.hpp>
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
+#include "xbt/Extendable.hpp"
+#include "xbt/dict.h"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/simix/smx_host_private.h"
xbt_cfg_register_alias("smpi/host-speed","smpi/running_power");
xbt_cfg_register_alias("smpi/host-speed","smpi/running-power");
+ xbt_cfg_register_boolean("smpi/keep-temps", "no", nullptr, "Whether we should keep the generated temporary files.");
+
xbt_cfg_register_boolean("smpi/display-timing", "no", nullptr, "Whether we should display the timing after simulation.");
xbt_cfg_register_alias("smpi/display-timing", "smpi/display_timing");
"Whether SMPI_SHARED_MALLOC is enabled. Disable it for debugging purposes.");
xbt_cfg_register_alias("smpi/shared-malloc", "smpi/use-shared-malloc");
xbt_cfg_register_alias("smpi/shared-malloc", "smpi/use_shared_malloc");
+ xbt_cfg_register_double("smpi/shared-malloc-blocksize", 1UL << 20, nullptr, "Size of the bogus file which will be created for global shared allocations");
xbt_cfg_register_double("smpi/cpu-threshold", 1e-6, nullptr, "Minimal computation time (in seconds) not discarded, or -1 for infinity.");
xbt_cfg_register_alias("smpi/cpu-threshold", "smpi/cpu_threshold");
xbt_cfg_register_alias("smpi/send-is-detached-thresh","smpi/send_is_detached_thresh");
xbt_cfg_register_alias("smpi/send-is-detached-thresh","smpi/send_is_detached_thres");
- xbt_cfg_register_boolean("smpi/privatize-global-variables", "no", nullptr, "Whether we should privatize global variable at runtime.");
- xbt_cfg_register_alias("smpi/privatize-global-variables", "smpi/privatize_global_variables");
+ const char* default_privatization = std::getenv("SMPI_PRIVATIZATION");
+ if (default_privatization == nullptr)
+ default_privatization = "no";
+
+ xbt_cfg_register_string("smpi/privatization", default_privatization, nullptr,
+ "How we should privatize global variable at runtime (no, yes, mmap, dlopen).");
+
+ xbt_cfg_register_alias("smpi/privatization", "smpi/privatize-global-variables");
+ xbt_cfg_register_alias("smpi/privatization", "smpi/privatize_global_variables");
xbt_cfg_register_boolean("smpi/grow-injected-times", "yes", nullptr, "Whether we want to make the injected time in MPI_Iprobe and MPI_Test grow, to allow faster simulation. This can make simulation less precise, though.");
-/* Copyright (c) 2007-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <xbt/log.h>
#include <xbt/dict.h>
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include <mc/mc.h>
/**
* Garbage collection
*
- * Should be called some time to time to free the memory allocated for processes
- * that have finished (or killed).
+ * Should be called some time to time to free the memory allocated for processes that have finished (or killed).
*/
void SIMIX_process_empty_trash()
{
- smx_actor_t process = nullptr;
+ smx_actor_t process = static_cast<smx_actor_t>(xbt_swag_extract(simix_global->process_to_destroy));
- while ((process = (smx_actor_t) xbt_swag_extract(simix_global->process_to_destroy))) {
+ while (process) {
XBT_DEBUG("Getting rid of %p",process);
intrusive_ptr_release(process);
+ process = static_cast<smx_actor_t>(xbt_swag_extract(simix_global->process_to_destroy));
}
}
*
* \return the process created
*/
-smx_actor_t SIMIX_process_create(
- const char *name,
- std::function<void()> code,
- void *data,
- sg_host_t host,
- xbt_dict_t properties,
- smx_actor_t parent_process)
+smx_actor_t SIMIX_process_create(const char* name, std::function<void()> code, void* data, simgrid::s4u::Host* host,
+ xbt_dict_t properties, smx_actor_t parent_process)
{
XBT_DEBUG("Start process %s on host '%s'", name, host->cname());
process->ppid = parent_process->pid;
/* SMPI process have their own data segment and each other inherit from their father */
#if HAVE_SMPI
- if (smpi_privatize_global_variables) {
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
if (parent_process->pid != 0) {
SIMIX_segment_index_set(process, parent_process->segment_index);
} else {
/* Add properties */
process->properties = properties;
+ /* Make sure that the process is initialized for simix, in case we are called from the Host::onCreation signal */
+ if (host->extension<simgrid::simix::Host>() == nullptr)
+ host->extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
+
/* Add the process to it's host process list */
xbt_swag_insert(process, host->extension<simgrid::simix::Host>()->process_list);
process->ppid = parent_process->pid;
/* SMPI process have their own data segment and each other inherit from their father */
#if HAVE_SMPI
- if (smpi_privatize_global_variables) {
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
if (parent_process->pid != 0) {
SIMIX_segment_index_set(process, parent_process->segment_index);
} else {
{
process->new_host = dest;
}
+
void SIMIX_process_change_host(smx_actor_t process, sg_host_t dest)
{
xbt_assert((process != nullptr), "Invalid parameters");
{
XBT_IN("process = %p", process);
- if(process->context->iwannadie) {
+ if (process->context->iwannadie) {
XBT_VERB("Ignoring request to suspend a process that is currently dying.");
return;
}
- if(!process->suspended) return;
+ if (!process->suspended)
+ return;
process->suspended = 0;
/* resume the synchronization that was blocking the resumed process. */
/**
* \brief Returns the process from PID.
*/
-smx_actor_t SIMIX_process_from_PID(int PID)
+smx_actor_t SIMIX_process_from_PID(aid_t PID)
{
if (simix_global->process_list.find(PID) == simix_global->process_list.end())
return nullptr;
class ActorImpl {
public:
ActorImpl() : piface_(this) {}
+ ~ActorImpl();
// TODO, replace with boost intrusive container hooks
s_xbt_swag_hookup_t process_hookup = { nullptr, nullptr }; /* simix_global->process_list */
s_xbt_swag_hookup_t host_proc_hookup = { nullptr, nullptr }; /* smx_host->process_lis */
s_xbt_swag_hookup_t destroy_hookup = { nullptr, nullptr }; /* simix_global->process_to_destroy */
- unsigned long pid = 0;
- unsigned long ppid = -1;
+ aid_t pid = 0;
+ aid_t ppid = -1;
simgrid::xbt::string name;
const char* cname() { return name.c_str(); }
s4u::Host* host = nullptr; /* the host on which the process is running */
sg_host_t new_host = nullptr; /* if not null, the host on which the process must migrate to */
smx_activity_t waiting_synchro = nullptr; /* the current blocking synchro if any */
- std::list<smx_activity_t> comms ; /* the current non-blocking communication synchros */
+ std::list<smx_activity_t> comms; /* the current non-blocking communication synchros */
xbt_dict_t properties = nullptr;
s_smx_simcall_t simcall;
void *data = nullptr; /* kept for compatibility, it should be replaced with moddata */
smx_timer_t kill_timer = nullptr;
int segment_index = -1; /* Reference to an SMPI process' data segment. Default value is -1 if not in SMPI context*/
+ /* Refcounting */
+private:
+ std::atomic_int_fast32_t refcount_{1};
+public:
friend void intrusive_ptr_add_ref(ActorImpl* process)
{
- // Atomic operation! Do not split in two instructions!
- auto previous = (process->refcount_)++;
- xbt_assert(previous != 0);
- (void) previous;
+ // std::memory_order_relaxed ought to be enough here instead of std::memory_order_seq_cst
+ // But then, we have a threading issue when an actor commits a suicide:
+ // it seems that in this case, the worker thread kills the last occurrence of the actor
+ // while usually, the maestro does so. FIXME: we should change how actors suicide
+ process->refcount_.fetch_add(1, std::memory_order_seq_cst);
}
friend void intrusive_ptr_release(ActorImpl* process)
{
- // Atomic operation! Do not split in two instructions!
- auto count = --(process->refcount_);
- if (count == 0)
+ // inspired from http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ if (process->refcount_.fetch_sub(1, std::memory_order_release) == 1) {
+ // Make sure that any changes done on other threads before their acquire are committed before our delete
+ // http://stackoverflow.com/questions/27751025/why-is-an-acquire-barrier-needed-before-deleting-the-data-in-an-atomically-refer
+ std::atomic_thread_fence(std::memory_order_acquire);
delete process;
+ }
}
- ~ActorImpl();
-
+ /* S4U/implem interfaces */
+private:
+ simgrid::s4u::Actor piface_; // Our interface is part of ourselves
+public:
simgrid::s4u::ActorPtr iface() { return s4u::ActorPtr(&piface_); }
simgrid::s4u::Actor* ciface() { return &piface_; }
- void daemonize();
- bool isDaemon();
-
+ /* Daemon actors are automatically killed when the last non-daemon leaves */
private:
bool daemon = false;
- std::atomic_int_fast32_t refcount_ { 1 };
- simgrid::s4u::Actor piface_; // Our interface is part of ourselves
+public:
+ void daemonize();
+ bool isDaemon();
};
}
return simcall_BODY_file_move(fd, fullpath);
}
-/**
- * \ingroup simix_storage_management
- * \brief Returns the free space size on a given storage element.
- * \param storage a storage
- * \return Return the free space size on a given storage element (as sg_size_t)
- */
-sg_size_t simcall_storage_get_free_size (smx_storage_t storage){
- return simcall_BODY_storage_get_free_size(storage);
-}
-
-/**
- * \ingroup simix_storage_management
- * \brief Returns the used space size on a given storage element.
- * \param storage a storage
- * \return Return the used space size on a given storage element (as sg_size_t)
- */
-sg_size_t simcall_storage_get_used_size (smx_storage_t storage){
- return simcall_BODY_storage_get_used_size(storage);
-}
-
/**
* \ingroup simix_storage_management
* \brief Returns a dict of the properties assigned to a storage element.
return simcall_BODY_storage_get_properties(storage);
}
-/**
- * \ingroup simix_storage_management
- * \brief Returns a dict containing the content of a storage element.
- *
- * \param storage A storage element
- * \return The content of this storage element as a dict (full path file => size)
- */
-xbt_dict_t simcall_storage_get_content(smx_storage_t storage)
-{
- return simcall_BODY_storage_get_content(storage);
-}
-
void simcall_run_kernel(std::function<void()> const& code)
{
simcall_BODY_run_kernel(&code);
simgrid::simix::marshal<int>(simcall->result, result);
}
-static inline smx_storage_t simcall_storage_get_free_size__get__storage(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0]);
-}
-static inline void simcall_storage_get_free_size__set__storage(smx_simcall_t simcall, smx_storage_t arg) {
- simgrid::simix::marshal<smx_storage_t>(simcall->args[0], arg);
-}
-static inline sg_size_t simcall_storage_get_free_size__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<sg_size_t>(simcall->result);
-}
-static inline void simcall_storage_get_free_size__set__result(smx_simcall_t simcall, sg_size_t result){
- simgrid::simix::marshal<sg_size_t>(simcall->result, result);
-}
-
-static inline smx_storage_t simcall_storage_get_used_size__get__name(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0]);
-}
-static inline void simcall_storage_get_used_size__set__name(smx_simcall_t simcall, smx_storage_t arg) {
- simgrid::simix::marshal<smx_storage_t>(simcall->args[0], arg);
-}
-static inline sg_size_t simcall_storage_get_used_size__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<sg_size_t>(simcall->result);
-}
-static inline void simcall_storage_get_used_size__set__result(smx_simcall_t simcall, sg_size_t result){
- simgrid::simix::marshal<sg_size_t>(simcall->result, result);
-}
-
static inline smx_storage_t simcall_storage_get_properties__get__storage(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0]);
}
simgrid::simix::marshal<xbt_dict_t>(simcall->result, result);
}
-static inline smx_storage_t simcall_storage_get_content__get__storage(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0]);
-}
-static inline void simcall_storage_get_content__set__storage(smx_simcall_t simcall, smx_storage_t arg) {
- simgrid::simix::marshal<smx_storage_t>(simcall->args[0], arg);
-}
-static inline xbt_dict_t simcall_storage_get_content__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<xbt_dict_t>(simcall->result);
-}
-static inline void simcall_storage_get_content__set__result(smx_simcall_t simcall, xbt_dict_t result){
- simgrid::simix::marshal<xbt_dict_t>(simcall->result, result);
-}
-
static inline int simcall_mc_random__get__min(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[0]);
}
XBT_PRIVATE int simcall_HANDLER_file_seek(smx_simcall_t simcall, smx_file_t fd, sg_offset_t offset, int origin);
XBT_PRIVATE xbt_dynar_t simcall_HANDLER_file_get_info(smx_simcall_t simcall, smx_file_t fd);
XBT_PRIVATE int simcall_HANDLER_file_move(smx_simcall_t simcall, smx_file_t fd, const char* fullpath);
-XBT_PRIVATE sg_size_t simcall_HANDLER_storage_get_free_size(smx_simcall_t simcall, smx_storage_t storage);
-XBT_PRIVATE sg_size_t simcall_HANDLER_storage_get_used_size(smx_simcall_t simcall, smx_storage_t name);
XBT_PRIVATE int simcall_HANDLER_mc_random(smx_simcall_t simcall, int min, int max);
\ No newline at end of file
return simcall<int, smx_file_t, const char*>(SIMCALL_FILE_MOVE, fd, fullpath);
}
-inline static sg_size_t simcall_BODY_storage_get_free_size(smx_storage_t storage) {
- /* Go to that function to follow the code flow through the simcall barrier */
- if (0) simcall_HANDLER_storage_get_free_size(&SIMIX_process_self()->simcall, storage);
- return simcall<sg_size_t, smx_storage_t>(SIMCALL_STORAGE_GET_FREE_SIZE, storage);
- }
-
-inline static sg_size_t simcall_BODY_storage_get_used_size(smx_storage_t name) {
- /* Go to that function to follow the code flow through the simcall barrier */
- if (0) simcall_HANDLER_storage_get_used_size(&SIMIX_process_self()->simcall, name);
- return simcall<sg_size_t, smx_storage_t>(SIMCALL_STORAGE_GET_USED_SIZE, name);
- }
-
inline static xbt_dict_t simcall_BODY_storage_get_properties(smx_storage_t storage) {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) SIMIX_storage_get_properties(storage);
return simcall<xbt_dict_t, smx_storage_t>(SIMCALL_STORAGE_GET_PROPERTIES, storage);
}
-inline static xbt_dict_t simcall_BODY_storage_get_content(smx_storage_t storage) {
- /* Go to that function to follow the code flow through the simcall barrier */
- if (0) SIMIX_storage_get_content(storage);
- return simcall<xbt_dict_t, smx_storage_t>(SIMCALL_STORAGE_GET_CONTENT, storage);
- }
-
inline static int simcall_BODY_mc_random(int min, int max) {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_mc_random(&SIMIX_process_self()->simcall, min, max);
SIMCALL_FILE_SEEK,
SIMCALL_FILE_GET_INFO,
SIMCALL_FILE_MOVE,
- SIMCALL_STORAGE_GET_FREE_SIZE,
- SIMCALL_STORAGE_GET_USED_SIZE,
SIMCALL_STORAGE_GET_PROPERTIES,
- SIMCALL_STORAGE_GET_CONTENT,
SIMCALL_MC_RANDOM,
SIMCALL_SET_CATEGORY,
SIMCALL_RUN_KERNEL,
"SIMCALL_FILE_SEEK",
"SIMCALL_FILE_GET_INFO",
"SIMCALL_FILE_MOVE",
- "SIMCALL_STORAGE_GET_FREE_SIZE",
- "SIMCALL_STORAGE_GET_USED_SIZE",
"SIMCALL_STORAGE_GET_PROPERTIES",
- "SIMCALL_STORAGE_GET_CONTENT",
"SIMCALL_MC_RANDOM",
"SIMCALL_SET_CATEGORY",
"SIMCALL_RUN_KERNEL",
SIMIX_simcall_answer(simcall);
break;
-case SIMCALL_STORAGE_GET_FREE_SIZE:
- simgrid::simix::marshal<sg_size_t>(simcall->result, simcall_HANDLER_storage_get_free_size(simcall, simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0])));
- SIMIX_simcall_answer(simcall);
- break;
-
-case SIMCALL_STORAGE_GET_USED_SIZE:
- simgrid::simix::marshal<sg_size_t>(simcall->result, simcall_HANDLER_storage_get_used_size(simcall, simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0])));
- SIMIX_simcall_answer(simcall);
- break;
-
case SIMCALL_STORAGE_GET_PROPERTIES:
simgrid::simix::marshal<xbt_dict_t>(simcall->result, SIMIX_storage_get_properties(simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0])));
SIMIX_simcall_answer(simcall);
break;
-case SIMCALL_STORAGE_GET_CONTENT:
- simgrid::simix::marshal<xbt_dict_t>(simcall->result, SIMIX_storage_get_content(simgrid::simix::unmarshal<smx_storage_t>(simcall->args[0])));
- SIMIX_simcall_answer(simcall);
- break;
-
case SIMCALL_MC_RANDOM:
simgrid::simix::marshal<int>(simcall->result, simcall_HANDLER_mc_random(simcall, simgrid::simix::unmarshal<int>(simcall->args[0]), simgrid::simix::unmarshal<int>(simcall->args[1])));
SIMIX_simcall_answer(simcall);
xbt_dynar_t file_get_info(smx_file_t fd);
int file_move(smx_file_t fd, const char* fullpath);
-sg_size_t storage_get_free_size(smx_storage_t storage);
-sg_size_t storage_get_used_size(smx_storage_t name);
xbt_dict_t storage_get_properties(smx_storage_t storage) [[nohandler]];
-xbt_dict_t storage_get_content(smx_storage_t storage) [[nohandler]];
int mc_random(int min, int max);
void set_category(smx_activity_t synchro, const char* category) [[nohandler]];
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright (c) 2014-2016. The SimGrid Team. All rights reserved.
+# Copyright (c) 2014-2017. The SimGrid Team. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the terms of the license (GNU LGPL) which comes with this package.
def handle(fd, func, simcalls, guarded_simcalls):
- def nonempty(e): return e != ''
- fd.write(
- '\n'.join(filter(nonempty, (func(simcall) for simcall in simcalls))))
+ def nonempty(e):
+ return e != ''
+ fd.write('\n'.join(filter(nonempty, (func(simcall) for simcall in simcalls))))
for guard, list in guarded_simcalls.items():
fd.write('\n#if %s\n' % (guard))
-/* Copyright (c) 2007, 2009-2016. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <string>
#include <vector>
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "smx_private.h"
#include "src/surf/xml/platf_private.hpp" // FIXME: KILLME. There must be a better way than mimicking XML here
-#include "xbt/dict.h"
-#include "xbt/log.h"
-#include "xbt/sysdep.h"
#include <xbt/ex.hpp>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(simix_deployment, simix, "Logging specific to SIMIX (deployment)");
#include <xbt/functional.hpp>
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/surf/surf_interface.hpp"
#include "src/surf/storage_interface.hpp"
#include "src/surf/xml/platf.hpp"
#include "smx_private.h"
-#include "xbt/str.h"
#include "xbt/ex.h" /* ex_backtrace_display */
#include "mc/mc.h"
#include "src/mc/mc_replay.h"
} else if (siginfo->si_signo == SIGSEGV) {
fprintf(stderr, "Segmentation fault.\n");
#if HAVE_SMPI
- if (smpi_enabled() && !smpi_privatize_global_variables) {
+ if (smpi_enabled() && smpi_privatize_global_variables == SMPI_PRIVATIZE_NONE) {
#if HAVE_PRIVATIZATION
- fprintf(stderr, "Try to enable SMPI variable privatization with --cfg=smpi/privatize-global-variables:yes.\n");
+ fprintf(stderr, "Try to enable SMPI variable privatization with --cfg=smpi/privatization:yes.\n");
#else
- fprintf(stderr, "Sadly, your system does not support --cfg=smpi/privatize-global-variables:yes (yet).\n");
+ fprintf(stderr, "Sadly, your system does not support --cfg=smpi/privatization:yes (yet).\n");
#endif /* HAVE_PRIVATIZATION */
}
#endif /* HAVE_SMPI */
sg_platf_init();
simgrid::s4u::onPlatformCreated.connect(SIMIX_post_create_environment);
simgrid::s4u::Host::onCreation.connect([](simgrid::s4u::Host& host) {
- host.extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
+ if (host.extension<simgrid::simix::Host>() == nullptr) // another callback to the same signal may have created it
+ host.extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
});
simgrid::surf::storageCreatedCallbacks.connect([](simgrid::surf::Storage* storage) {
xbt_os_mutex_destroy(simix_global->mutex);
simix_global->mutex = nullptr;
+#if HAVE_MC
+ xbt_dynar_free(&simix_global->actors_vector);
+#endif
/* Let's free maestro now */
delete simix_global->maestro_process->context;
SIMIX_wake_processes();
} while (SIMIX_execute_tasks());
+ /* If only daemon processes remain, cancel their actions, mark them to die and reschedule them */
+ if (simix_global->process_list.size() == simix_global->daemons.size())
+ for (const auto& dmon : simix_global->daemons) {
+ XBT_DEBUG("Kill %s", dmon->cname());
+ SIMIX_process_kill(dmon, simix_global->maestro_process);
+ }
}
time = SIMIX_timer_next();
XBT_DEBUG("### time %f, #processes %zu, #to_run %lu", time, simix_global->process_list.size(),
xbt_dynar_length(simix_global->process_to_run));
- /* If only daemon processes remain, cancel their actions, mark them to die and reschedule them */
- if (simix_global->process_list.size() == simix_global->daemons.size())
- for (const auto& dmon : simix_global->daemons) {
- XBT_DEBUG("Kill %s", dmon->cname());
- SIMIX_process_kill(dmon, simix_global->maestro_process);
- }
if (xbt_dynar_is_empty(simix_global->process_to_run) &&
!simix_global->process_list.empty())
int SIMIX_is_maestro()
{
- return simix_global==nullptr /*SimDag*/|| SIMIX_process_self() == simix_global->maestro_process;
+ smx_actor_t self = SIMIX_process_self();
+ return simix_global == nullptr /*SimDag*/ || self == nullptr || self == simix_global->maestro_process;
}
};
}
}
-typedef simgrid::simix::Host s_smx_host_priv_t;
SG_BEGIN_DECL()
XBT_PRIVATE void SIMIX_host_add_auto_restart_process(sg_host_t host,
-/* Copyright (c) 2007-2010, 2012-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <xbt/log.h>
#include <xbt/dict.h>
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include <mc/mc.h>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(simix_io, simix, "Logging specific to SIMIX (io)");
-
/**
* \brief Internal function to create a SIMIX storage.
* \param name name of the storage to create
return surf_host_file_move(host, file->surf_file, fullpath);
}
-sg_size_t SIMIX_storage_get_size(smx_storage_t storage){
- return surf_storage_get_size(storage);
-}
-
-sg_size_t simcall_HANDLER_storage_get_free_size(smx_simcall_t simcall, smx_storage_t storage)
-{
- return SIMIX_storage_get_free_size(simcall->issuer, storage);
-}
-
-sg_size_t SIMIX_storage_get_free_size(smx_actor_t process, smx_storage_t storage)
-{
- return surf_storage_get_free_size(storage);
-}
-
-sg_size_t simcall_HANDLER_storage_get_used_size(smx_simcall_t simcall, smx_storage_t storage)
-{
- return SIMIX_storage_get_used_size(simcall->issuer, storage);
-}
-
-sg_size_t SIMIX_storage_get_used_size(smx_actor_t process, smx_storage_t storage)
-{
- return surf_storage_get_used_size(storage);
-}
-
xbt_dict_t SIMIX_storage_get_properties(smx_storage_t storage){
return surf_storage_get_properties(storage);
}
return sg_storage_name(storage);
}
-xbt_dict_t SIMIX_storage_get_content(smx_storage_t storage){
- return surf_storage_get_content(storage);
-}
-
-const char* SIMIX_storage_get_host(smx_storage_t storage){
- return surf_storage_get_host(storage);
-}
-
void SIMIX_io_destroy(smx_activity_t synchro)
{
simgrid::kernel::activity::Io *io = static_cast<simgrid::kernel::activity::Io*>(synchro);
XBT_PRIVATE int SIMIX_file_seek(smx_actor_t process, smx_file_t fd, sg_offset_t offset, int origin);
XBT_PRIVATE int SIMIX_file_move(smx_actor_t process, smx_file_t fd, const char* fullpath);
-XBT_PRIVATE sg_size_t SIMIX_storage_get_free_size(smx_actor_t process, smx_storage_t storage);
-XBT_PRIVATE sg_size_t SIMIX_storage_get_used_size(smx_actor_t process, smx_storage_t storage);
-
XBT_PRIVATE xbt_dict_t SIMIX_storage_get_properties(smx_storage_t storage);
XBT_PRIVATE void SIMIX_io_destroy(smx_activity_t synchro);
#include <xbt/ex.hpp>
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include "mc/mc.h"
#include "simgrid/s4u/Mailbox.hpp"
#include "src/simix/smx_private.h"
#include "src/surf/cpu_interface.hpp"
#include "src/surf/surf_interface.hpp"
-#include "xbt/dict.h"
-#include "xbt/log.h"
#include "src/kernel/activity/SynchroComm.hpp"
#include "src/surf/network_interface.hpp"
static void SIMIX_waitany_remove_simcall_from_actions(smx_simcall_t simcall);
static void SIMIX_comm_copy_data(smx_activity_t comm);
static void SIMIX_comm_start(smx_activity_t synchro);
-static smx_activity_t _find_matching_comm(boost::circular_buffer_space_optimized<smx_activity_t> *deque, e_smx_comm_type_t type,
- int (*match_fun)(void *, void *,smx_activity_t), void *user_data, smx_activity_t my_synchro, bool remove_matching);
+static simgrid::kernel::activity::Comm*
+_find_matching_comm(boost::circular_buffer_space_optimized<smx_activity_t>* deque, e_smx_comm_type_t type,
+ int (*match_fun)(void*, void*, smx_activity_t), void* user_data, smx_activity_t my_synchro,
+ bool remove_matching);
/**
- * \brief Checks if there is a communication synchro queued in a deque matching our needs
+ * \brief Checks if there is a communication activity queued in a deque matching our needs
* \param type The type of communication we are looking for (comm_send, comm_recv)
- * \return The communication synchro if found, nullptr otherwise
+ * \return The communication activity if found, nullptr otherwise
*/
-static smx_activity_t _find_matching_comm(boost::circular_buffer_space_optimized<smx_activity_t> *deque, e_smx_comm_type_t type,
- int (*match_fun)(void *, void *,smx_activity_t), void *this_user_data, smx_activity_t my_synchro, bool remove_matching)
+static simgrid::kernel::activity::Comm*
+_find_matching_comm(boost::circular_buffer_space_optimized<smx_activity_t>* deque, e_smx_comm_type_t type,
+ int (*match_fun)(void*, void*, smx_activity_t), void* this_user_data, smx_activity_t my_synchro,
+ bool remove_matching)
{
void* other_user_data = nullptr;
XBT_DEBUG("send from %p", mbox);
/* Prepare a synchro describing us, so that it gets passed to the user-provided filter of other side */
- simgrid::kernel::activity::Comm* this_synchro = new simgrid::kernel::activity::Comm(SIMIX_COMM_SEND);
+ simgrid::kernel::activity::Comm* this_comm = new simgrid::kernel::activity::Comm(SIMIX_COMM_SEND);
/* Look for communication synchro matching our needs. We also provide a description of
* ourself so that the other side also gets a chance of choosing if it wants to match with us.
*
* If it is not found then push our communication into the rendez-vous point */
- smx_activity_t other_synchro =
- _find_matching_comm(&mbox->comm_queue, SIMIX_COMM_RECEIVE, match_fun, data, this_synchro, /*remove_matching*/true);
- simgrid::kernel::activity::Comm *other_comm = static_cast<simgrid::kernel::activity::Comm*>(other_synchro);
+ simgrid::kernel::activity::Comm* other_comm =
+ _find_matching_comm(&mbox->comm_queue, SIMIX_COMM_RECEIVE, match_fun, data, this_comm, /*remove_matching*/ true);
-
- if (!other_synchro) {
- other_synchro = this_synchro;
- other_comm = static_cast<simgrid::kernel::activity::Comm*>(other_synchro);
+ if (!other_comm) {
+ other_comm = this_comm;
if (mbox->permanent_receiver!=nullptr){
//this mailbox is for small messages, which have to be sent right now
- other_synchro->state = SIMIX_READY;
+ other_comm->state = SIMIX_READY;
other_comm->dst_proc=mbox->permanent_receiver.get();
other_comm->ref();
- mbox->done_comm_queue.push_back(other_synchro);
+ mbox->done_comm_queue.push_back(other_comm);
XBT_DEBUG("pushing a message into the permanent receive list %p, comm %p", mbox, &(other_comm));
}else{
- mbox->push(this_synchro);
+ mbox->push(this_comm);
}
} else {
XBT_DEBUG("Receive already pushed");
- this_synchro->unref();
+ this_comm->unref();
other_comm->state = SIMIX_READY;
other_comm->type = SIMIX_COMM_READY;
}
- src_proc->comms.push_back(other_synchro);
-
+ src_proc->comms.push_back(other_comm);
if (detached) {
other_comm->detached = true;
XBT_DEBUG("recv from %p %p", mbox, &mbox->comm_queue);
simgrid::kernel::activity::Comm* this_synchro = new simgrid::kernel::activity::Comm(SIMIX_COMM_RECEIVE);
- smx_activity_t other_synchro;
+ simgrid::kernel::activity::Comm* other_comm;
//communication already done, get it inside the list of completed comms
if (mbox->permanent_receiver != nullptr && ! mbox->done_comm_queue.empty()) {
XBT_DEBUG("We have a comm that has probably already been received, trying to match it, to skip the communication");
//find a match in the list of already received comms
- other_synchro = _find_matching_comm(&mbox->done_comm_queue, SIMIX_COMM_SEND, match_fun, data, this_synchro,/*remove_matching*/true);
+ other_comm = _find_matching_comm(&mbox->done_comm_queue, SIMIX_COMM_SEND, match_fun, data, this_synchro,
+ /*remove_matching*/ true);
//if not found, assume the receiver came first, register it to the mailbox in the classical way
- if (!other_synchro) {
+ if (!other_comm) {
XBT_DEBUG("We have messages in the permanent receive list, but not the one we are looking for, pushing request into list");
- other_synchro = this_synchro;
+ other_comm = this_synchro;
mbox->push(this_synchro);
} else {
- simgrid::kernel::activity::Comm *other_comm = static_cast<simgrid::kernel::activity::Comm*>(other_synchro);
-
if(other_comm->surf_comm && other_comm->remains() < 1e-12) {
XBT_DEBUG("comm %p has been already sent, and is finished, destroy it",other_comm);
other_comm->state = SIMIX_DONE;
static_cast<simgrid::kernel::activity::Comm*>(this_synchro)->unref();
}
} else {
- /* Prepare a synchro describing us, so that it gets passed to the user-provided filter of other side */
+ /* Prepare a comm describing us, so that it gets passed to the user-provided filter of other side */
- /* Look for communication synchro matching our needs. We also provide a description of
+ /* Look for communication activity matching our needs. We also provide a description of
* ourself so that the other side also gets a chance of choosing if it wants to match with us.
*
* If it is not found then push our communication into the rendez-vous point */
- other_synchro = _find_matching_comm(&mbox->comm_queue, SIMIX_COMM_SEND, match_fun, data, this_synchro,/*remove_matching*/true);
+ other_comm = _find_matching_comm(&mbox->comm_queue, SIMIX_COMM_SEND, match_fun, data, this_synchro,
+ /*remove_matching*/ true);
- if (!other_synchro) {
+ if (!other_comm) {
XBT_DEBUG("Receive pushed first %zu", mbox->comm_queue.size());
- other_synchro = this_synchro;
+ other_comm = this_synchro;
mbox->push(this_synchro);
} else {
this_synchro->unref();
- simgrid::kernel::activity::Comm *other_comm = static_cast<simgrid::kernel::activity::Comm*>(other_synchro);
other_comm->state = SIMIX_READY;
other_comm->type = SIMIX_COMM_READY;
}
- dst_proc->comms.push_back(other_synchro);
+ dst_proc->comms.push_back(other_comm);
}
/* Setup communication synchro */
- simgrid::kernel::activity::Comm *other_comm = static_cast<simgrid::kernel::activity::Comm*>(other_synchro);
other_comm->dst_proc = dst_proc;
other_comm->dst_buff = dst_buff;
other_comm->dst_buff_size = dst_buff_size;
other_comm->copy_data_fun = copy_data_fun;
if (MC_is_active() || MC_record_replay_is_active()) {
- other_synchro->state = SIMIX_RUNNING;
- return other_synchro;
+ other_comm->state = SIMIX_RUNNING;
+ return other_comm;
}
- SIMIX_comm_start(other_synchro);
- return other_synchro;
+ SIMIX_comm_start(other_comm);
+ return other_comm;
}
smx_activity_t simcall_HANDLER_comm_iprobe(smx_simcall_t simcall, smx_mailbox_t mbox,
smx_context_factory_t context_factory = nullptr;
xbt_dynar_t process_to_run = nullptr;
xbt_dynar_t process_that_ran = nullptr;
- std::map<int, smx_actor_t> process_list;
+ std::map<aid_t, smx_actor_t> process_list;
#if HAVE_MC
/* MCer cannot read the std::map above in the remote process, so we copy the info it needs in a dynar.
* FIXME: This is supposed to be a temporary hack.
#ifndef SIMIX_SYNCHRO_PRIVATE_H
#define SIMIX_SYNCHRO_PRIVATE_H
-#include "simgrid/s4u/conditionVariable.hpp"
+#include "simgrid/s4u/ConditionVariable.hpp"
#include "xbt/swag.h"
namespace simgrid {
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "src/smpi/SmpiHost.hpp"
+#include "simgrid/s4u/VirtualMachine.hpp"
+#include "smpi/smpi_utils.hpp"
+
+#include <string>
+#include <vector>
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_host, smpi, "Logging specific to SMPI (host)");
+
+namespace simgrid {
+namespace smpi {
+
+simgrid::xbt::Extension<simgrid::s4u::Host, SmpiHost> SmpiHost::EXTENSION_ID;
+
+/** Receive overhead (smpi/or) for a message of 'size' bytes.
+ *  The parsed sections form a piecewise-linear cost model; we return the cost
+ *  of the last section whose lower bound (fact.factor) is below 'size'. */
+double SmpiHost::orecv(size_t size)
+{
+  double current = orecv_parsed_values.empty() ? 0.0 : orecv_parsed_values.front().values[0] +
+                                                       orecv_parsed_values.front().values[1] * size;
+
+  // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
+  // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
+  // NOTE(review): iterate by const reference -- the previous by-value loop copied each
+  // s_smpi_factor_t (and its values vector) on every iteration, and was inconsistent
+  // with the reference iteration used in osend()/oisend() below.
+  for (auto const& fact : orecv_parsed_values) {
+    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+      XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
+      return current;
+    } else {
+      // If the next section is too large, the current section must be used.
+      // Hence, save the cost, as we might have to use it.
+      current = fact.values[0] + fact.values[1] * size;
+    }
+  }
+  XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current);
+
+  return current;
+}
+
+/** Send overhead (smpi/os) for a message of 'size' bytes (same piecewise-linear
+ *  lookup as orecv(), on the osend sections). */
+double SmpiHost::osend(size_t size)
+{
+  double current =
+      osend_parsed_values.empty() ? 0.0 : osend_parsed_values[0].values[0] + osend_parsed_values[0].values[1] * size;
+  // Iterate over all the sections that were specified and find the right
+  // value. (fact.factor represents the interval sizes; we want to find the
+  // section that has fact.factor <= size and no other such fact.factor <= size)
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
+  for (auto const& fact : osend_parsed_values) {
+    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+      XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
+      return current;
+    } else {
+      // If the next section is too large, the current section must be used.
+      // Hence, save the cost, as we might have to use it.
+      current = fact.values[0] + fact.values[1] * size;
+    }
+  }
+  XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
+
+  return current;
+}
+
+/** Asynchronous send overhead (smpi/ois) for a message of 'size' bytes (same
+ *  piecewise-linear lookup, on the oisend sections). */
+double SmpiHost::oisend(size_t size)
+{
+  double current =
+      oisend_parsed_values.empty() ? 0.0 : oisend_parsed_values[0].values[0] + oisend_parsed_values[0].values[1] * size;
+
+  // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
+  // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
+  // Note: parse_factor() (used before) already sorts the vector we iterate over!
+  for (auto const& fact : oisend_parsed_values) {
+    if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
+      XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
+      return current;
+    } else {
+      // If the next section is too large, the current section must be used.
+      // Hence, save the cost, as we might have to use it.
+      current = fact.values[0] + fact.values[1] * size;
+    }
+  }
+  XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
+
+  return current;
+}
+
+/** Parse the overhead factors once at construction time.
+ *  A per-host property (smpi/or, smpi/os, smpi/ois) takes precedence over the
+ *  corresponding global configuration value. */
+SmpiHost::SmpiHost(simgrid::s4u::Host *ptr) : host(ptr)
+{
+  if (!SmpiHost::EXTENSION_ID.valid())
+    SmpiHost::EXTENSION_ID = simgrid::s4u::Host::extension_create<SmpiHost>();
+
+  const char* orecv_string = host->property("smpi/or");
+  if (orecv_string != nullptr) {
+    orecv_parsed_values = parse_factor(orecv_string);
+  } else {
+    orecv_parsed_values = parse_factor(xbt_cfg_get_string("smpi/or"));
+  }
+
+  const char* osend_string = host->property("smpi/os");
+  if (osend_string != nullptr) {
+    osend_parsed_values = parse_factor(osend_string);
+  } else {
+    osend_parsed_values = parse_factor(xbt_cfg_get_string("smpi/os"));
+  }
+
+  const char* oisend_string = host->property("smpi/ois");
+  if (oisend_string != nullptr) {
+    oisend_parsed_values = parse_factor(oisend_string);
+  } else {
+    oisend_parsed_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
+  }
+}
+
+SmpiHost::~SmpiHost() = default;
+
+// Placeholder: nothing to do yet when a host appears.
+static void onCreation(simgrid::s4u::Host& host)
+{
+}
+
+static void onHostDestruction(simgrid::s4u::Host& host)
+{
+  // Ignore virtual machines
+  if (dynamic_cast<simgrid::s4u::VirtualMachine*>(&host))
+    return;
+}
+
+/** Hook the file-local callbacks above onto the s4u::Host lifecycle signals. */
+void sg_smpi_host_init()
+{
+  simgrid::s4u::Host::onCreation.connect(&onCreation);
+  simgrid::s4u::Host::onDestruction.connect(&onHostDestruction);
+}
+
+}
+}
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef SMPI_HOST_HPP_
+#define SMPI_HOST_HPP_
+
+#include "src/include/smpi/smpi_utils.hpp"
+
+#include "simgrid/s4u/Host.hpp"
+#include <string>
+#include <vector>
+#include <xbt/Extendable.hpp>
+#include <xbt/config.hpp>
+
+namespace simgrid {
+namespace smpi {
+
+/* NOTE(review): the 'static' declarations of onCreation()/onHostDestruction()
+ * were removed from this header: internal-linkage helpers must live only in
+ * the .cpp (which defines its own file-local versions); declaring them here
+ * gave every includer a never-defined static function. */
+
+// Connect the SMPI host-lifecycle callbacks to the s4u::Host signals.
+void sg_smpi_host_init();
+
+/** Host extension carrying the per-host SMPI overhead model.
+ *  Holds the piecewise-linear factors for smpi/or (receive overhead),
+ *  smpi/os (send overhead) and smpi/ois (asynchronous send overhead),
+ *  parsed once at construction from the host properties or, as a
+ *  fallback, from the global configuration. */
+class SmpiHost {
+
+  private:
+  // Sections sorted by parse_factor(); each entry maps an interval bound
+  // (factor) to the affine cost coefficients (values[0] + values[1] * size).
+  std::vector<s_smpi_factor_t> orecv_parsed_values;
+  std::vector<s_smpi_factor_t> osend_parsed_values;
+  std::vector<s_smpi_factor_t> oisend_parsed_values;
+  simgrid::s4u::Host *host = nullptr;
+
+  public:
+  static simgrid::xbt::Extension<simgrid::s4u::Host, SmpiHost> EXTENSION_ID;
+
+  explicit SmpiHost(simgrid::s4u::Host *ptr);
+  ~SmpiHost();
+
+  // Overheads (in seconds) for a message of 'size' bytes.
+  double orecv(size_t size);
+  double osend(size_t size);
+  double oisend(size_t size);
+};
+
+}
+}
+#endif
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
+namespace simgrid{
+namespace smpi{
// Allgather - gather/bcast algorithm
int Coll_allgather_GB::allgather(void *send_buff, int send_count,
MPI_Datatype send_type, void *recv_buff,
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* See COPYRIGHT in top-level directory.
*/
#include "../colls_private.h"
+namespace simgrid{
+namespace smpi{
int Coll_allgather_mvapich2_smp::allgather(void *sendbuf,int sendcnt, MPI_Datatype sendtype,
void *recvbuf, int recvcnt,MPI_Datatype recvtype,
mpi_errno = Colls::bcast (recvbuf, recvcnt * size, recvtype, 0, shmem_comm);
return mpi_errno;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
*/
//#include <star-reduction.c>
-
+namespace simgrid{
+namespace smpi{
int
Coll_allreduce_lr::allreduce(void *sbuf, void *rbuf, int rcount,
MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
return 0;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
*/
#include "../colls_private.h"
-
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf,
void *recvbuf,
int count,
return (mpi_errno);
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
MPI_Datatype datatype,
MPI_Op op, MPI_Comm comm);
+
+namespace simgrid{
+namespace smpi{
static int MPIR_Allreduce_reduce_p2p_MV2( void *sendbuf,
void *recvbuf,
int count,
Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
return MPI_SUCCESS;
}
-
+
/* general two level allreduce helper function */
int Coll_allreduce_mvapich2_two_level::allreduce(void *sendbuf,
return (mpi_errno);
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
} \
#include "../colls_private.h"
+namespace simgrid{
+namespace smpi{
int
Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count,
MPI_Datatype dtype,
if (NULL != inbuf[1]) smpi_free_tmp_buffer(inbuf[1]);
return ret;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "../colls_private.h"
-
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_rab_rdb::allreduce(void *sbuff, void *rbuff, int count,
MPI_Datatype dtype, MPI_Op op,
MPI_Comm comm)
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
//#include <star-reduction.c>
-
+namespace simgrid{
+namespace smpi{
// NP pow of 2 for now
int Coll_allreduce_rab1::allreduce(void *sbuff, void *rbuff,
int count, MPI_Datatype dtype,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
//#include <star-reduction.c>
+namespace simgrid{
+namespace smpi{
// this requires that count >= NP
int Coll_allreduce_rab2::allreduce(void *sbuff, void *rbuff,
int count, MPI_Datatype dtype,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
//#include <star-reduction.c>
-
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_rdb::allreduce(void *sbuff, void *rbuff, int count,
MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "../colls_private.h"
-
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_redbcast::allreduce(void *buf, void *buf2, int count,
MPI_Datatype datatype, MPI_Op op,
MPI_Comm comm)
Colls::bcast(buf2, count, datatype, 0, comm);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
3) binomial_tree bcast intra-communication between root of each SMP node
4) binomial_tree bcast inside each SMP node
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_smp_binomial_pipeline::allreduce(void *send_buf,
void *recv_buf, int count,
MPI_Datatype dtype,
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
3) binomial_tree bcast intra-communication between root of each SMP node
4) binomial_tree bcast inside each SMP node
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_smp_binomial::allreduce(void *send_buf, void *recv_buf,
int count, MPI_Datatype dtype,
MPI_Op op, MPI_Comm comm)
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
2) Recursive doubling intra-communication between root of each SMP node
3) binomial_tree bcast inside each SMP node
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_smp_rdb::allreduce(void *send_buf, void *recv_buf, int count,
MPI_Datatype dtype, MPI_Op op,
MPI_Comm comm)
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
3) allgather - inter between root of each SMP node
4) binomial_tree bcast inside each SMP node
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_smp_rsag_lr::allreduce(void *send_buf, void *recv_buf,
int count, MPI_Datatype dtype,
MPI_Op op, MPI_Comm comm)
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
3) allgather - inter between root of each SMP node
4) binomial_tree bcast inside each SMP node
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_smp_rsag_rab::allreduce(void *sbuf, void *rbuf, int count,
MPI_Datatype dtype, MPI_Op op,
MPI_Comm comm)
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
3) allgather - inter between root of each SMP node
4) binomial_tree bcast inside each SMP node
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_smp_rsag::allreduce(void *send_buf, void *recv_buf,
int count, MPI_Datatype dtype, MPI_Op op,
MPI_Comm comm)
smpi_free_tmp_buffer(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
}
return 0;
}
+namespace simgrid{
+namespace smpi{
int Coll_alltoall_2dmesh::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
smpi_free_tmp_buffer(tmp_buff2);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
}
return 0;
}
-
+namespace simgrid{
+namespace smpi{
int Coll_alltoall_3dmesh::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
void *recv_buff, int recv_count,
smpi_free_tmp_buffer(tmp_buff2);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#define MV2_ALLTOALL_THROTTLE_FACTOR 4
#include "../colls_private.h"
-
+namespace simgrid{
+namespace smpi{
int Coll_alltoall_mvapich2_scatter_dest::alltoall(
void *sendbuf,
int sendcount,
return (mpi_errno);
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_pair_light_barrier::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_pair_mpi_barrier::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_pair_one_barrier::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
-
+namespace simgrid{
+namespace smpi{
int Coll_alltoall_pair_rma::alltoall(void *send_buff, int send_count, MPI_Datatype send_type,
void *recv_buff, int recv_count, MPI_Datatype recv_type,
MPI_Comm comm)
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: MPICH / slightly modified by Ahmad Faraj.
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int Coll_alltoall_rdb::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
void *recv_buff, int recv_count,
smpi_free_tmp_buffer(tmp_buff);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_ring_light_barrier::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_ring_mpi_barrier::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_ring_one_barrier::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoall_ring::alltoall(void *send_buff, int send_count,
MPI_Datatype send_type, void *recv_buff,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not
* less...
**/
+namespace simgrid{
+namespace smpi{
int Coll_alltoallv_bruck::alltoallv(void *sendbuf, int *sendcounts, int *senddisps,
MPI_Datatype sendtype, void *recvbuf,
int *recvcounts, int *recvdisps, MPI_Datatype recvtype,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* differently and so will not have to duplicate code.
* GEF Oct05 after asking Jeff.
*/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_ompi_basic_linear::alltoallv(void *sbuf, int *scounts, int *sdisps,
MPI_Datatype sdtype,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_pair_light_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_pair_mpi_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_pair_one_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int Coll_alltoallv_pair::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
void *recv_buff, int *recv_counts, int *recv_disps,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_ring_light_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_ring_mpi_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_ring_one_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_alltoallv_ring::alltoallv(void *send_buff, int *send_counts, int *send_disps,
MPI_Datatype send_type,
}
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
#include "../coll_tuned_topo.h"
-
+namespace simgrid{
+namespace smpi{
int Coll_barrier_mvapich2_pair::barrier(MPI_Comm comm)
{
return mpi_errno;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* synchronous gurantee made by last ring of sends are synchronous
*
*/
+namespace simgrid{
+namespace smpi{
int Coll_barrier_ompi_doublering::barrier(MPI_Comm comm
)
{
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
int bcast_NTSB_segment_size_in_byte = 8192;
-
+namespace simgrid{
+namespace smpi{
int Coll_bcast_NTSB::bcast(void *buf, int count, MPI_Datatype datatype,
int root, MPI_Comm comm)
{
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* Non-topology-specific pipelined linear-bcast function
0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion
*/
+namespace simgrid{
+namespace smpi{
int Coll_bcast_NTSL_Isend::bcast(void *buf, int count, MPI_Datatype datatype,
int root, MPI_Comm comm)
{
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* Non-topology-specific pipelined linear-bcast function
0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion
*/
+namespace simgrid{
+namespace smpi{
int Coll_bcast_NTSL::bcast(void *buf, int count, MPI_Datatype datatype,
int root, MPI_Comm comm)
{
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
int bcast_SMP_binary_segment_byte = 8192;
-
+namespace simgrid{
+namespace smpi{
int Coll_bcast_SMP_binary::bcast(void *buf, int count,
MPI_Datatype datatype, int root,
MPI_Comm comm)
return 1;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "../colls_private.h"
-
+namespace simgrid{
+namespace smpi{
int Coll_bcast_SMP_binomial::bcast(void *buf, int count,
MPI_Datatype datatype, int root,
MPI_Comm comm)
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
int bcast_SMP_linear_segment_byte = 8192;
-
+namespace simgrid{
+namespace smpi{
int Coll_bcast_SMP_linear::bcast(void *buf, int count,
MPI_Datatype datatype, int root,
MPI_Comm comm)
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
#define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
#endif
-
+namespace simgrid{
+namespace smpi{
/* Non-topology-specific pipelined linear-bcast function */
int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count,
MPI_Datatype datatype,
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#define HEADER_SIZE 1024
#define MAX_NODE 1024
+namespace simgrid{
+namespace smpi{
/* Non-topology-specific pipelined linear-bcast function */
int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count,
MPI_Datatype datatype, int root,
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE
#define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128
#endif
-
+namespace simgrid{
+namespace smpi{
/* Non-topology-specific pipelined linear-bcast function */
int Coll_bcast_arrival_scatter::bcast(void *buf, int count,
MPI_Datatype datatype, int root,
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: MPIH / modified by Ahmad Faraj
****************************************************************************/
-
+namespace simgrid{
+namespace smpi{
int
Coll_bcast_binomial_tree::bcast(void *buff, int count,
MPI_Datatype data_type, int root,
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
int flattree_segment_in_byte = 8192;
-
+namespace simgrid{
+namespace smpi{
int
Coll_bcast_flattree_pipeline::bcast(void *buff, int count,
MPI_Datatype data_type, int root,
free(status_array);
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "../colls_private.h"
-
+namespace simgrid{
+namespace smpi{
int
Coll_bcast_flattree::bcast(void *buff, int count, MPI_Datatype data_type,
int root, MPI_Comm comm)
}
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#define mv2_bcast_large_msg 512*1024
#define mv2_knomial_intra_node_threshold 131072
#define mv2_scatter_rd_inter_leader_bcast 1
+namespace simgrid{
+namespace smpi{
int Coll_bcast_mvapich2_inter_node::bcast(void *buffer,
int count,
MPI_Datatype datatype,
return mpi_errno;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#define MAXTREEFANOUT 32
-
+namespace simgrid{
+namespace smpi{
int Coll_bcast_ompi_pipeline::bcast( void* buffer,
int original_count,
MPI_Datatype datatype,
return (MPI_SUCCESS);
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
#include "../coll_tuned_topo.h"
#define MAXTREEFANOUT 32
+namespace simgrid{
+namespace smpi{
int
Coll_bcast_ompi_split_bintree::bcast ( void* buffer,
}
+}
+}
+
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Auther: MPIH / modified by Ahmad Faraj
****************************************************************************/
+namespace simgrid{
+namespace smpi{
int
Coll_bcast_scatter_LR_allgather::bcast(void *buff, int count,
MPI_Datatype data_type, int root,
return MPI_SUCCESS;
}
+
+}
+}
#include "../colls_private.h"
+namespace simgrid{
+namespace smpi{
static int scatter_for_bcast(
int root,
return mpi_errno;
}
+
int
Coll_bcast_scatter_rdb_allgather::bcast (
void *buffer,
/* xbt_free(tmp_buf);*/
return mpi_errno;
}
+
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* Non-topology-specific pipelined linear-bcast function
0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion
*/
+namespace simgrid{
+namespace smpi{
int Coll_reduce_NTSL::reduce(void *buf, void *rbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root,
MPI_Comm comm)
free(tmp_buf);
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#ifndef MAX_NODE
#define MAX_NODE 1024
#endif
-
+namespace simgrid{
+namespace smpi{
/* Non-topology-specific pipelined linear-reduce function */
int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
int count,
return MPI_SUCCESS;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
//#include <star-reduction.c>
-
+namespace simgrid{
+namespace smpi{
int Coll_reduce_binomial::reduce(void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, int root,
MPI_Comm comm)
return 0;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
//#include <star-reduction.c>
-
+namespace simgrid{
+namespace smpi{
int
Coll_reduce_flat_tree::reduce(void *sbuf, void *rbuf, int count,
MPI_Datatype dtype, MPI_Op op,
/* All done */
return 0;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
return 0;
}
+namespace simgrid{
+namespace smpi{
int Coll_reduce_mvapich2_knomial::reduce (
void *sendbuf,
void *recvbuf,
return mpi_errno;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
int count,
MPI_Datatype datatype,
MPI_Op op, int root, MPI_Comm comm);
-
+namespace simgrid{
+namespace smpi{
int Coll_reduce_mvapich2_two_level::reduce( void *sendbuf,
void *recvbuf,
int count,
fn_exit:
return mpi_errno;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
#include "../coll_tuned_topo.h"
-
+namespace simgrid{
+namespace smpi{
int smpi_coll_tuned_ompi_reduce_generic( void* sendbuf, void* recvbuf, int original_count,
MPI_Datatype datatype, MPI_Op op,
meaning that at least one datatype must fit in the segment !
*/
+
int Coll_reduce_ompi_chain::reduce( void *sendbuf, void *recvbuf, int count,
MPI_Datatype datatype,
MPI_Op op, int root,
/* copied function (with appropriate renaming) ends here */
+}
+}
}
REDUCE_LIMITS
-
+namespace simgrid{
+namespace smpi{
static int MPI_I_anyReduce(void* Sendbuf, void* Recvbuf, int count, MPI_Datatype mpi_datatype, MPI_Op mpi_op, int root, MPI_Comm comm, int is_all)
{
char *scr1buf, *scr2buf, *scr3buf, *xxx, *sendbuf, *recvbuf;
{
return( MPI_I_anyReduce(Sendbuf, Recvbuf, count, datatype, op, -1, comm, 1) );
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
reduce
Author: MPICH
*/
-
+namespace simgrid{
+namespace smpi{
int Coll_reduce_scatter_gather::reduce(void *sendbuf, void *recvbuf,
int count, MPI_Datatype datatype,
MPI_Op op, int root, MPI_Comm comm)
return 0;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
return retval;
}
-
+namespace simgrid{
+namespace smpi{
int Coll_reduce_scatter_mpich_pair::reduce_scatter(void *sendbuf, void *recvbuf, int recvcounts[],
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
smpi_free_tmp_buffer(tmp_results);
return MPI_SUCCESS;
}
-
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* Returns: - MPI_SUCCESS or error code
* Limitation: - Works only for commutative operations.
*/
+namespace simgrid{
+namespace smpi{
int
Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter(void *sbuf,
void *rbuf,
if (NULL != inbuf_free[1]) smpi_free_tmp_buffer(inbuf_free[1]);
return ret;
}
+}
+}
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm);
+namespace simgrid{
+namespace smpi{
+
int Coll_scatter_mvapich2_two_level_direct::scatter(void *sendbuf,
int sendcnt,
MPI_Datatype sendtype,
return (mpi_errno);
}
+}
+}
+
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "../colls_private.h"
#include "../coll_tuned_topo.h"
+namespace simgrid{
+namespace smpi{
+
int
Coll_scatter_ompi_binomial::scatter(void *sbuf, int scount,
return MPI_SUCCESS;
}
+
+}
+}
-/* Copyright (c) 2013-2015. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* selector with default/naive Simgrid algorithms. These should not be trusted for performance evaluations */
-/* Copyright (c) 2009-2010, 2013-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* selector for collective algorithms based on openmpi's default coll_tuned_decision_fixed selector */
-/* Copyright (c) 2009-2010, 2013-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
as Shumilin's ring algorithm is unknown, default to ring'
*/
+namespace simgrid{
+namespace smpi{
int (*intel_allreduce_functions_table[])(void *sendbuf,
void *recvbuf,
args2);\
}
-namespace simgrid{
-namespace smpi{
COLL_APPLY(IMPI_COLL_SELECT, COLL_ALLGATHERV_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_disps, recv_type, comm));
COLL_APPLY(IMPI_COLL_SELECT, COLL_ALLREDUCE_SIG, (sbuf, rbuf, rcount, dtype, op, comm));
/* selector for collective algorithms based on mpich decision logic */
-/* Copyright (c) 2009-2010, 2013-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
End Algorithm: MPI_Allreduce
*/
+namespace simgrid{
+namespace smpi{
int Coll_allreduce_mpich::allreduce(void *sbuf, void *rbuf, int count,
MPI_Datatype dtype, MPI_Op op, MPI_Comm comm)
{
End Algorithm: MPI_Gather
*/
-namespace simgrid{
-namespace smpi{
int Coll_gather_mpich::gather(void *sbuf, int scount,
MPI_Datatype sdtype,
root, comm);
}
-}
-}
-
/* This is the default implementation of scatter. The algorithm is:
Algorithm: MPI_Scatter
}
return ret;
}
+}
+}
/* selector for collective algorithms based on mvapich decision logic */
-/* Copyright (c) 2009-2010, 2013-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "smpi_mvapich2_selector_stampede.h"
-
namespace simgrid{
namespace smpi{
}
}
+
+
void smpi_coll_cleanup_mvapich2(void){
int i=0;
if(mv2_alltoall_thresholds_table)
mv2_alltoall_tuning_table **mv2_alltoall_thresholds_table = NULL;
-#define MPIR_Alltoall_bruck_MV2 Coll_alltoall_bruck::alltoall
-#define MPIR_Alltoall_RD_MV2 Coll_alltoall_rdb::alltoall
-#define MPIR_Alltoall_Scatter_dest_MV2 Coll_alltoall_mvapich2_scatter_dest::alltoall
-#define MPIR_Alltoall_pairwise_MV2 Coll_alltoall_pair::alltoall
-#define MPIR_Alltoall_inplace_MV2 Coll_alltoall_ring::alltoall
+#define MPIR_Alltoall_bruck_MV2 simgrid::smpi::Coll_alltoall_bruck::alltoall
+#define MPIR_Alltoall_RD_MV2 simgrid::smpi::Coll_alltoall_rdb::alltoall
+#define MPIR_Alltoall_Scatter_dest_MV2 simgrid::smpi::Coll_alltoall_mvapich2_scatter_dest::alltoall
+#define MPIR_Alltoall_pairwise_MV2 simgrid::smpi::Coll_alltoall_pair::alltoall
+#define MPIR_Alltoall_inplace_MV2 simgrid::smpi::Coll_alltoall_ring::alltoall
static void init_mv2_alltoall_tables_stampede(){
int agg_table_sum = 0;
mv2_alltoall_tuning_table **table_ptrs = NULL;
mv2_alltoall_num_ppn_conf = 3;
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_alltoall_thresholds_table = static_cast<mv2_alltoall_tuning_table**>(xbt_malloc(sizeof(mv2_alltoall_tuning_table *)
* mv2_alltoall_num_ppn_conf));
table_ptrs = static_cast<mv2_alltoall_tuning_table**>(xbt_malloc(sizeof(mv2_alltoall_tuning_table *)
return 0;
}
-#define MPIR_Allgather_Bruck_MV2 Coll_allgather_bruck::allgather
-#define MPIR_Allgather_RD_MV2 Coll_allgather_rdb::allgather
-#define MPIR_Allgather_Ring_MV2 Coll_allgather_ring::allgather
-#define MPIR_2lvl_Allgather_MV2 Coll_allgather_mvapich2_smp::allgather
+#define MPIR_Allgather_Bruck_MV2 simgrid::smpi::Coll_allgather_bruck::allgather
+#define MPIR_Allgather_RD_MV2 simgrid::smpi::Coll_allgather_rdb::allgather
+#define MPIR_Allgather_Ring_MV2 simgrid::smpi::Coll_allgather_ring::allgather
+#define MPIR_2lvl_Allgather_MV2 simgrid::smpi::Coll_allgather_mvapich2_smp::allgather
static void init_mv2_allgather_tables_stampede(){
int i;
int agg_table_sum = 0;
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_allgather_tuning_table **table_ptrs = NULL;
mv2_allgather_num_ppn_conf = 3;
mv2_allgather_thresholds_table
-#define MPIR_Gather_MV2_Direct Coll_gather_ompi_basic_linear::gather
-#define MPIR_Gather_MV2_two_level_Direct Coll_gather_mvapich2_two_level::gather
-#define MPIR_Gather_intra Coll_gather_mpich::gather
+#define MPIR_Gather_MV2_Direct simgrid::smpi::Coll_gather_ompi_basic_linear::gather
+#define MPIR_Gather_MV2_two_level_Direct simgrid::smpi::Coll_gather_mvapich2_two_level::gather
+#define MPIR_Gather_intra simgrid::smpi::Coll_gather_mpich::gather
static void init_mv2_gather_tables_stampede(){
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_size_gather_tuning_table=7;
mv2_gather_thresholds_table = static_cast<mv2_gather_tuning_table*>(xbt_malloc(mv2_size_gather_tuning_table*
sizeof (mv2_gather_tuning_table)));
int mv2_size_allgatherv_tuning_table = 0;
mv2_allgatherv_tuning_table *mv2_allgatherv_thresholds_table = NULL;
-#define MPIR_Allgatherv_Rec_Doubling_MV2 Coll_allgatherv_mpich_rdb::allgatherv
-#define MPIR_Allgatherv_Bruck_MV2 Coll_allgatherv_ompi_bruck::allgatherv
-#define MPIR_Allgatherv_Ring_MV2 Coll_allgatherv_mpich_ring::allgatherv
+#define MPIR_Allgatherv_Rec_Doubling_MV2 simgrid::smpi::Coll_allgatherv_mpich_rdb::allgatherv
+#define MPIR_Allgatherv_Bruck_MV2 simgrid::smpi::Coll_allgatherv_ompi_bruck::allgatherv
+#define MPIR_Allgatherv_Ring_MV2 simgrid::smpi::Coll_allgatherv_mpich_ring::allgatherv
static void init_mv2_allgatherv_tables_stampede(){
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_size_allgatherv_tuning_table = 6;
mv2_allgatherv_thresholds_table = static_cast<mv2_allgatherv_tuning_table*>(xbt_malloc(mv2_size_allgatherv_tuning_table *
sizeof (mv2_allgatherv_tuning_table)));
MPI_Datatype datatype,
MPI_Op op, MPI_Comm comm)
{
- Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
+ simgrid::smpi::Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
return MPI_SUCCESS;
}
MPI_Datatype datatype,
MPI_Op op, MPI_Comm comm)
{
- Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
+ simgrid::smpi::Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm);
return MPI_SUCCESS;
}
-#define MPIR_Allreduce_pt2pt_rd_MV2 Coll_allreduce_rdb::allreduce
-#define MPIR_Allreduce_pt2pt_rs_MV2 Coll_allreduce_mvapich2_rs::allreduce
-#define MPIR_Allreduce_two_level_MV2 Coll_allreduce_mvapich2_two_level::allreduce
+#define MPIR_Allreduce_pt2pt_rd_MV2 simgrid::smpi::Coll_allreduce_rdb::allreduce
+#define MPIR_Allreduce_pt2pt_rs_MV2 simgrid::smpi::Coll_allreduce_mvapich2_rs::allreduce
+#define MPIR_Allreduce_two_level_MV2 simgrid::smpi::Coll_allreduce_mvapich2_two_level::allreduce
static void init_mv2_allreduce_tables_stampede(){
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_size_allreduce_tuning_table = 8;
mv2_allreduce_thresholds_table = static_cast<mv2_allreduce_tuning_table*>(xbt_malloc(mv2_size_allreduce_tuning_table *
sizeof (mv2_allreduce_tuning_table)));
#define INTRA_NODE_ROOT 0
-#define MPIR_Pipelined_Bcast_Zcpy_MV2 Coll_bcast_mpich::bcast
-#define MPIR_Pipelined_Bcast_MV2 Coll_bcast_mpich::bcast
-#define MPIR_Bcast_binomial_MV2 Coll_bcast_binomial_tree::bcast
-#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 Coll_bcast_scatter_LR_allgather::bcast
-#define MPIR_Bcast_scatter_doubling_allgather_MV2 Coll_bcast_scatter_rdb_allgather::bcast
-#define MPIR_Bcast_scatter_ring_allgather_MV2 Coll_bcast_scatter_LR_allgather::bcast
-#define MPIR_Shmem_Bcast_MV2 Coll_bcast_mpich::bcast
-#define MPIR_Bcast_tune_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast
-#define MPIR_Bcast_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast
-#define MPIR_Knomial_Bcast_intra_node_MV2 Coll_bcast_mvapich2_knomial_intra_node::bcast
-#define MPIR_Bcast_intra_MV2 Coll_bcast_mvapich2_intra_node::bcast
+#define MPIR_Pipelined_Bcast_Zcpy_MV2 simgrid::smpi::Coll_bcast_mpich::bcast
+#define MPIR_Pipelined_Bcast_MV2 simgrid::smpi::Coll_bcast_mpich::bcast
+#define MPIR_Bcast_binomial_MV2 simgrid::smpi::Coll_bcast_binomial_tree::bcast
+#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 simgrid::smpi::Coll_bcast_scatter_LR_allgather::bcast
+#define MPIR_Bcast_scatter_doubling_allgather_MV2 simgrid::smpi::Coll_bcast_scatter_rdb_allgather::bcast
+#define MPIR_Bcast_scatter_ring_allgather_MV2 simgrid::smpi::Coll_bcast_scatter_LR_allgather::bcast
+#define MPIR_Shmem_Bcast_MV2 simgrid::smpi::Coll_bcast_mpich::bcast
+#define MPIR_Bcast_tune_inter_node_helper_MV2 simgrid::smpi::Coll_bcast_mvapich2_inter_node::bcast
+#define MPIR_Bcast_inter_node_helper_MV2 simgrid::smpi::Coll_bcast_mvapich2_inter_node::bcast
+#define MPIR_Knomial_Bcast_intra_node_MV2 simgrid::smpi::Coll_bcast_mvapich2_knomial_intra_node::bcast
+#define MPIR_Bcast_intra_MV2 simgrid::smpi::Coll_bcast_mvapich2_intra_node::bcast
static void init_mv2_bcast_tables_stampede(){
//Stampede,
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_size_bcast_tuning_table=8;
mv2_bcast_thresholds_table = static_cast<mv2_bcast_tuning_table*>(xbt_malloc(mv2_size_bcast_tuning_table *
sizeof (mv2_bcast_tuning_table)));
MPI_Comm comm_ptr)=NULL;
-#define MPIR_Reduce_inter_knomial_wrapper_MV2 Coll_reduce_mvapich2_knomial::reduce
-#define MPIR_Reduce_intra_knomial_wrapper_MV2 Coll_reduce_mvapich2_knomial::reduce
-#define MPIR_Reduce_binomial_MV2 Coll_reduce_binomial::reduce
-#define MPIR_Reduce_redscat_gather_MV2 Coll_reduce_scatter_gather::reduce
-#define MPIR_Reduce_shmem_MV2 Coll_reduce_ompi_basic_linear::reduce
-#define MPIR_Reduce_two_level_helper_MV2 Coll_reduce_mvapich2_two_level::reduce
+#define MPIR_Reduce_inter_knomial_wrapper_MV2 simgrid::smpi::Coll_reduce_mvapich2_knomial::reduce
+#define MPIR_Reduce_intra_knomial_wrapper_MV2 simgrid::smpi::Coll_reduce_mvapich2_knomial::reduce
+#define MPIR_Reduce_binomial_MV2 simgrid::smpi::Coll_reduce_binomial::reduce
+#define MPIR_Reduce_redscat_gather_MV2 simgrid::smpi::Coll_reduce_scatter_gather::reduce
+#define MPIR_Reduce_shmem_MV2 simgrid::smpi::Coll_reduce_ompi_basic_linear::reduce
+#define MPIR_Reduce_two_level_helper_MV2 simgrid::smpi::Coll_reduce_mvapich2_two_level::reduce
static void init_mv2_reduce_tables_stampede(){
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
/*Stampede*/
mv2_size_reduce_tuning_table = 8;
mv2_reduce_thresholds_table = static_cast<mv2_reduce_tuning_table*>(xbt_malloc(mv2_size_reduce_tuning_table *
MPI_Op op,
MPI_Comm comm)
{
- Coll_reduce_scatter_default::reduce_scatter(sendbuf,recvbuf,recvcnts,datatype,op,comm);
+ simgrid::smpi::Coll_reduce_scatter_default::reduce_scatter(sendbuf,recvbuf,recvcnts,datatype,op,comm);
return MPI_SUCCESS;
}
-#define MPIR_Reduce_scatter_non_comm_MV2 Coll_reduce_scatter_mpich_noncomm::reduce_scatter
-#define MPIR_Reduce_scatter_Rec_Halving_MV2 Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter
-#define MPIR_Reduce_scatter_Pair_Wise_MV2 Coll_reduce_scatter_mpich_pair::reduce_scatter
+#define MPIR_Reduce_scatter_non_comm_MV2 simgrid::smpi::Coll_reduce_scatter_mpich_noncomm::reduce_scatter
+#define MPIR_Reduce_scatter_Rec_Halving_MV2 simgrid::smpi::Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter
+#define MPIR_Reduce_scatter_Pair_Wise_MV2 simgrid::smpi::Coll_reduce_scatter_mpich_pair::reduce_scatter
static void init_mv2_reduce_scatter_tables_stampede(){
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
mv2_size_red_scat_tuning_table = 6;
mv2_red_scat_thresholds_table = static_cast<mv2_red_scat_tuning_table*>(xbt_malloc(mv2_size_red_scat_tuning_table *
sizeof (mv2_red_scat_tuning_table)));
return 0;
}
-#define MPIR_Scatter_MV2_Binomial Coll_scatter_ompi_binomial::scatter
-#define MPIR_Scatter_MV2_Direct Coll_scatter_ompi_basic_linear::scatter
-#define MPIR_Scatter_MV2_two_level_Binomial Coll_scatter_mvapich2_two_level_binomial::scatter
-#define MPIR_Scatter_MV2_two_level_Direct Coll_scatter_mvapich2_two_level_direct::scatter
+#define MPIR_Scatter_MV2_Binomial simgrid::smpi::Coll_scatter_ompi_binomial::scatter
+#define MPIR_Scatter_MV2_Direct simgrid::smpi::Coll_scatter_ompi_basic_linear::scatter
+#define MPIR_Scatter_MV2_two_level_Binomial simgrid::smpi::Coll_scatter_mvapich2_two_level_binomial::scatter
+#define MPIR_Scatter_MV2_two_level_Direct simgrid::smpi::Coll_scatter_mvapich2_two_level_direct::scatter
static void init_mv2_scatter_tables_stampede(){
- if(Colls::smpi_coll_cleanup_callback==NULL)
- Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2;
int agg_table_sum = 0;
int i;
/* selector for collective algorithms based on openmpi's default coll_tuned_decision_fixed selector */
-/* Copyright (c) 2009-2010, 2013-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2010, 2012-2015. The SimGrid Team.
+/* Copyright (c) 2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
SG_BEGIN_DECL()
-using namespace simgrid::smpi;
-
-
#define PERSISTENT 0x1
#define NON_PERSISTENT 0x2
#define SEND 0x4
#define COLL_TAG_GATHERV -2223
#define COLL_TAG_BCAST -3334
#define COLL_TAG_ALLREDUCE -4445
-#define SMPI_RMA_TAG -1234
+//SMPI_RMA_TAG has to be the smallest one, as it will be decremented for accumulate ordering.
+#define SMPI_RMA_TAG -6666
extern XBT_PRIVATE MPI_Comm MPI_COMM_UNINITIALIZED;
typedef SMPI_Dist_Graph_topology *MPIR_Dist_Graph_Topology;
-XBT_PRIVATE Process* smpi_process();
-XBT_PRIVATE Process* smpi_process_remote(int index);
+XBT_PRIVATE SMPI_Process* smpi_process();
+XBT_PRIVATE SMPI_Process* smpi_process_remote(int index);
XBT_PRIVATE int smpi_process_count();
XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, int index);
// utilities
extern XBT_PRIVATE double smpi_cpu_threshold;
extern XBT_PRIVATE double smpi_host_speed;
-extern XBT_PRIVATE bool smpi_privatize_global_variables;
+
+#define SMPI_PRIVATIZE_NONE 0
+#define SMPI_PRIVATIZE_MMAP 1
+#define SMPI_PRIVATIZE_DLOPEN 2
+#define SMPI_PRIVATIZE_DEFAULT SMPI_PRIVATIZE_MMAP
+extern XBT_PRIVATE int smpi_privatize_global_variables;
+
extern XBT_PRIVATE char* smpi_start_data_exe; //start of the data+bss segment of the executable
extern XBT_PRIVATE int smpi_size_data_exe; //size of the data+bss segment of the executable
XBT_PRIVATE void smpi_bench_destroy();
XBT_PRIVATE void smpi_bench_begin();
XBT_PRIVATE void smpi_bench_end();
+XBT_PRIVATE void smpi_shared_destroy();
XBT_PRIVATE void* smpi_get_tmp_sendbuffer(int size);
XBT_PRIVATE void* smpi_get_tmp_recvbuffer(int size);
#ifndef WIN32
#include <sys/mman.h>
#endif
-#include <sys/stat.h>
-#include <errno.h>
-#include <fcntl.h>
#include <math.h> // sqrt
-#include <unistd.h>
-#include <string.h>
#include <stdio.h>
#if HAVE_PAPI
#include <papi.h>
#endif
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#ifndef MAP_POPULATE
-#define MAP_POPULATE 0
-#endif
-
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_bench, smpi, "Logging specific to SMPI (benchmarking)");
-/* Shared allocations are handled through shared memory segments.
- * Associated data and metadata are used as follows:
- *
- * mmap #1
- * `allocs' dict ---- -.
- * ---------- shared_data_t shared_metadata_t / | | |
- * .->| <name> | ---> -------------------- <--. ----------------- | | | |
- * | ---------- | fd of <name> | | | size of mmap | --| | | |
- * | | count (2) | |-- | data | \ | | |
- * `----------------- | <name> | | ----------------- ---- |
- * -------------------- | ^ |
- * | | |
- * | | `allocs_metadata' dict |
- * | | ---------------------- |
- * | `-- | <addr of mmap #1> |<-'
- * | .-- | <addr of mmap #2> |<-.
- * | | ---------------------- |
- * | | |
- * | | |
- * | | |
- * | | mmap #2 |
- * | v ---- -'
- * | shared_metadata_t / | |
- * | ----------------- | | |
- * | | size of mmap | --| | |
- * `-- | data | | | |
- * ----------------- | | |
- * \ | |
- * ----
- */
-
-#define PTR_STRLEN (2 + 2 * sizeof(void*) + 1)
xbt_dict_t samples = nullptr; /* Allocated on first use */
-xbt_dict_t calls = nullptr; /* Allocated on first use */
-double smpi_cpu_threshold;
+double smpi_cpu_threshold = -1;
double smpi_host_speed;
-int smpi_loaded_page = -1;
-char* smpi_start_data_exe = nullptr;
-int smpi_size_data_exe = 0;
-bool smpi_privatize_global_variables;
shared_malloc_type smpi_cfg_shared_malloc = shmalloc_global;
double smpi_total_benched_time = 0;
smpi_privatisation_region_t smpi_privatisation_regions;
-namespace {
-
-/** Some location in the source code
- *
- * This information is used by SMPI_SHARED_MALLOC to allocate some shared memory for all simulated processes.
- */
-class smpi_source_location {
-public:
- smpi_source_location(const char* filename, int line)
- : filename(xbt_strdup(filename)), filename_length(strlen(filename)), line(line)
- {
- }
-
- /** Pointer to a static string containing the file name */
- char* filename = nullptr;
- int filename_length = 0;
- int line = 0;
-
- bool operator==(smpi_source_location const& that) const
- {
- return filename_length == that.filename_length && line == that.line &&
- std::memcmp(filename, that.filename, filename_length) == 0;
- }
- bool operator!=(smpi_source_location const& that) const { return !(*this == that); }
-};
-}
-
-namespace std {
-
-template <> class hash<smpi_source_location> {
-public:
- typedef smpi_source_location argument_type;
- typedef std::size_t result_type;
- result_type operator()(smpi_source_location const& loc) const
- {
- return xbt_str_hash_ext(loc.filename, loc.filename_length) ^
- xbt_str_hash_ext((const char*)&loc.line, sizeof(loc.line));
- }
-};
-}
-
-namespace {
-
-typedef struct {
- int fd = -1;
- int count = 0;
-} shared_data_t;
-
-std::unordered_map<smpi_source_location, shared_data_t> allocs;
-typedef std::unordered_map<smpi_source_location, shared_data_t>::value_type shared_data_key_type;
-
-typedef struct {
- size_t size;
- shared_data_key_type* data;
-} shared_metadata_t;
-
-std::unordered_map<void*, shared_metadata_t> allocs_metadata;
-}
-
-static size_t shm_size(int fd) {
- struct stat st;
-
- if(fstat(fd, &st) < 0) {
- xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
- }
- return static_cast<size_t>(st.st_size);
-}
-
-#ifndef WIN32
-static void* shm_map(int fd, size_t size, shared_data_key_type* data) {
- char loc[PTR_STRLEN];
- shared_metadata_t meta;
-
- if(size > shm_size(fd) && (ftruncate(fd, static_cast<off_t>(size)) < 0)) {
- xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));
- }
-
- void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if(mem == MAP_FAILED) {
- xbt_die(
- "Failed to map fd %d with size %zu: %s\n"
- "If you are running a lot of ranks, you may be exceeding the amount of mappings allowed per process.\n"
- "On Linux systems, change this value with sudo sysctl -w vm.max_map_count=newvalue (default value: 65536)\n"
- "Please see http://simgrid.gforge.inria.fr/simgrid/latest/doc/html/options.html#options_virt for more info.",
- fd, size, strerror(errno));
- }
- snprintf(loc, PTR_STRLEN, "%p", mem);
- meta.size = size;
- meta.data = data;
- allocs_metadata[mem] = meta;
- XBT_DEBUG("MMAP %zu to %p", size, mem);
- return mem;
-}
-#endif
-
void smpi_bench_destroy()
{
- allocs.clear();
- allocs_metadata.clear();
xbt_dict_free(&samples);
- xbt_dict_free(&calls);
}
extern "C" XBT_PUBLIC(void) smpi_execute_flops_(double *flops);
void smpi_bench_begin()
{
- if (smpi_privatize_global_variables) {
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) {
smpi_switch_data_segment(smpi_process()->index());
}
smpi_bench_end();
double now = SIMIX_get_clock();
- unsigned long long sec = (unsigned long long)now;
+ unsigned long long sec = static_cast<unsigned long long>(now);
unsigned long long pre = (now - sec) * smpi_rastro_resolution();
smpi_bench_begin();
return static_cast<unsigned long long>(sec) * smpi_rastro_resolution() + pre;
data->benching = 0;
}
-#ifndef WIN32
-static int smpi_shared_malloc_bogusfile = -1;
-static unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
-void *smpi_shared_malloc(size_t size, const char *file, int line)
-{
- void* mem;
- if (size > 0 && smpi_cfg_shared_malloc == shmalloc_local) {
- smpi_source_location loc(file, line);
- auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
- auto data = res.first;
- if (res.second) {
- // The insertion did not take place.
- // Generate a shared memory name from the address of the shared_data:
- char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on Mac OS X (shm_open raises ENAMETOOLONG otherwise)
- snprintf(shmname, 31, "/shmalloc%p", &*data);
- int fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
- if (fd < 0) {
- if (errno == EEXIST)
- xbt_die("Please cleanup /dev/shm/%s", shmname);
- else
- xbt_die("An unhandled error occurred while opening %s. shm_open: %s", shmname, strerror(errno));
- }
- data->second.fd = fd;
- data->second.count = 1;
- mem = shm_map(fd, size, &*data);
- if (shm_unlink(shmname) < 0) {
- XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
- }
- XBT_DEBUG("Mapping %s at %p through %d", shmname, mem, fd);
- } else {
- mem = shm_map(data->second.fd, size, &*data);
- data->second.count++;
- }
- XBT_DEBUG("Shared malloc %zu in %p (metadata at %p)", size, mem, &*data);
-
- } else if (smpi_cfg_shared_malloc == shmalloc_global) {
- /* First reserve memory area */
- mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-
- xbt_assert(mem != MAP_FAILED, "Failed to allocate %luMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
- "to allow big allocations.\n",
- (unsigned long)(size >> 20));
-
- /* Create bogus file if not done already */
- if (smpi_shared_malloc_bogusfile == -1) {
- /* Create a fd to a new file on disk, make it smpi_shared_malloc_blocksize big, and unlink it.
- * It still exists in memory but not in the file system (thus it cannot be leaked). */
- char* name = xbt_strdup("/tmp/simgrid-shmalloc-XXXXXX");
- smpi_shared_malloc_bogusfile = mkstemp(name);
- unlink(name);
- free(name);
- char* dumb = (char*)calloc(1, smpi_shared_malloc_blocksize);
- ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
- if(err<0)
- xbt_die("Could not write bogus file for shared malloc");
- free(dumb);
- }
-
- /* Map the bogus file in place of the anonymous memory */
- unsigned int i;
- for (i = 0; i < size / smpi_shared_malloc_blocksize; i++) {
- void* pos = (void*)((unsigned long)mem + i * smpi_shared_malloc_blocksize);
- void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED | MAP_POPULATE,
- smpi_shared_malloc_bogusfile, 0);
- xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
- "STARPU_MALLOC_SIMULATION_FOLD environment variable or the sysctl vm.max_map_count?",
- strerror(errno));
- }
- if (size % smpi_shared_malloc_blocksize) {
- void* pos = (void*)((unsigned long)mem + i * smpi_shared_malloc_blocksize);
- void* res = mmap(pos, size % smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_SHARED | MAP_POPULATE, smpi_shared_malloc_bogusfile, 0);
- xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
- "STARPU_MALLOC_SIMULATION_FOLD environment variable or the sysctl vm.max_map_count?",
- strerror(errno));
- }
-
- } else {
- mem = xbt_malloc(size);
- XBT_DEBUG("Classic malloc %zu in %p", size, mem);
- }
-
- return mem;
-}
-
-void smpi_shared_free(void *ptr)
-{
- if (smpi_cfg_shared_malloc == shmalloc_local) {
- char loc[PTR_STRLEN];
- snprintf(loc, PTR_STRLEN, "%p", ptr);
- auto meta = allocs_metadata.find(ptr);
- if (meta == allocs_metadata.end()) {
- XBT_WARN("Cannot free: %p was not shared-allocated by SMPI - maybe its size was 0?", ptr);
- return;
- }
- shared_data_t* data = &meta->second.data->second;
- if (munmap(ptr, meta->second.size) < 0) {
- XBT_WARN("Unmapping of fd %d failed: %s", data->fd, strerror(errno));
- }
- data->count--;
- if (data->count <= 0) {
- close(data->fd);
- allocs.erase(allocs.find(meta->second.data->first));
- XBT_DEBUG("Shared free - with removal - of %p", ptr);
- } else {
- XBT_DEBUG("Shared free - no removal - of %p, count = %d", ptr, data->count);
- }
-
- } else if (smpi_cfg_shared_malloc == shmalloc_global) {
- munmap(ptr, 0); // the POSIX says that I should not give 0 as a length, but it seems to work OK
-
- } else {
- XBT_DEBUG("Classic free of %p", ptr);
- xbt_free(ptr);
- }
-}
-#endif
-
-int smpi_shared_known_call(const char* func, const char* input)
-{
- char* loc = bprintf("%s:%s", func, input);
- int known = 0;
-
- if (calls==nullptr) {
- calls = xbt_dict_new_homogeneous(nullptr);
- }
- try {
- xbt_dict_get(calls, loc); /* Succeed or throw */
- known = 1;
- xbt_free(loc);
- }
- catch (xbt_ex& ex) {
- xbt_free(loc);
- if (ex.category != not_found_error)
- throw;
- }
- catch(...) {
- xbt_free(loc);
- throw;
- }
- return known;
-}
-
-void* smpi_shared_get_call(const char* func, const char* input) {
- char* loc = bprintf("%s:%s", func, input);
-
- if (calls == nullptr)
- calls = xbt_dict_new_homogeneous(nullptr);
- void* data = xbt_dict_get(calls, loc);
- xbt_free(loc);
- return data;
-}
-
-void* smpi_shared_set_call(const char* func, const char* input, void* data) {
- char* loc = bprintf("%s:%s", func, input);
-
- if (calls == nullptr)
- calls = xbt_dict_new_homogeneous(nullptr);
- xbt_dict_set(calls, loc, data, nullptr);
- xbt_free(loc);
- return data;
-}
-
-
-/** Map a given SMPI privatization segment (make a SMPI process active) */
-void smpi_switch_data_segment(int dest) {
- if (smpi_loaded_page == dest)//no need to switch, we've already loaded the one we want
- return;
-
- // So the job:
- smpi_really_switch_data_segment(dest);
-}
-
-/** Map a given SMPI privatization segment (make a SMPI process active) even if SMPI thinks it is already active
- *
- * When doing a state restoration, the state of the restored variables might not be consistent with the state of the
- * virtual memory. In this case, we to change the data segment.
- */
-void smpi_really_switch_data_segment(int dest)
-{
- if(smpi_size_data_exe == 0)//no need to switch
- return;
-
-#if HAVE_PRIVATIZATION
- if(smpi_loaded_page==-1){//initial switch, do the copy from the real page here
- for (int i=0; i< smpi_process_count(); i++){
- memcpy(smpi_privatisation_regions[i].address, TOPAGE(smpi_start_data_exe), smpi_size_data_exe);
- }
- }
-
- // FIXME, cross-process support (mmap across process when necessary)
- int current = smpi_privatisation_regions[dest].file_descriptor;
- XBT_DEBUG("Switching data frame to the one of process %d", dest);
- void* tmp =
- mmap(TOPAGE(smpi_start_data_exe), smpi_size_data_exe, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, current, 0);
- if (tmp != TOPAGE(smpi_start_data_exe))
- xbt_die("Couldn't map the new region");
- smpi_loaded_page = dest;
-#endif
-}
-
-int smpi_is_privatisation_file(char* file)
-{
- return strncmp("/dev/shm/my-buffer-", file, std::strlen("/dev/shm/my-buffer-")) == 0;
-}
-
-void smpi_initialize_global_memory_segments()
-{
-
-#if !HAVE_PRIVATIZATION
- smpi_privatize_global_variables=false;
- xbt_die("You are trying to use privatization on a system that does not support it. Don't.");
- return;
-#else
-
- smpi_get_executable_global_size();
-
- XBT_DEBUG ("bss+data segment found : size %d starting at %p", smpi_size_data_exe, smpi_start_data_exe );
-
- if (smpi_size_data_exe == 0){//no need to switch
- smpi_privatize_global_variables=false;
- return;
- }
-
- smpi_privatisation_regions = static_cast<smpi_privatisation_region_t>(
- xbt_malloc(smpi_process_count() * sizeof(struct s_smpi_privatisation_region)));
-
- for (int i=0; i< smpi_process_count(); i++){
- // create SIMIX_process_count() mappings of this size with the same data inside
- int file_descriptor;
- void* address = nullptr;
- char path[24];
- int status;
-
- do {
- snprintf(path, sizeof(path), "/smpi-buffer-%06x", rand() % 0xffffff);
- file_descriptor = shm_open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
- } while (file_descriptor == -1 && errno == EEXIST);
- if (file_descriptor < 0) {
- if (errno == EMFILE) {
- xbt_die("Impossible to create temporary file for memory mapping: %s\n\
-The open() system call failed with the EMFILE error code (too many files). \n\n\
-This means that you reached the system limits concerning the amount of files per process. \
-This is not a surprise if you are trying to virtualize many processes on top of SMPI. \
-Don't panic -- you should simply increase your system limits and try again. \n\n\
-First, check what your limits are:\n\
- cat /proc/sys/fs/file-max # Gives you the system-wide limit\n\
- ulimit -Hn # Gives you the per process hard limit\n\
- ulimit -Sn # Gives you the per process soft limit\n\
- cat /proc/self/limits # Displays any per-process limitation (including the one given above)\n\n\
-If one of these values is less than the amount of MPI processes that you try to run, then you got the explanation of this error. \
-Ask the Internet about tutorials on how to increase the files limit such as: https://rtcamp.com/tutorials/linux/increase-open-files-limit/",
- strerror(errno));
- }
- xbt_die("Impossible to create temporary file for memory mapping: %s", strerror(errno));
- }
-
- status = ftruncate(file_descriptor, smpi_size_data_exe);
- if (status)
- xbt_die("Impossible to set the size of the temporary file for memory mapping");
-
- /* Ask for a free region */
- address = mmap(nullptr, smpi_size_data_exe, PROT_READ | PROT_WRITE, MAP_SHARED, file_descriptor, 0);
- if (address == MAP_FAILED)
- xbt_die("Couldn't find a free region for memory mapping");
-
- status = shm_unlink(path);
- if (status)
- xbt_die("Impossible to unlink temporary file for memory mapping");
-
- // initialize the values
- memcpy(address, TOPAGE(smpi_start_data_exe), smpi_size_data_exe);
-
- // store the address of the mapping for further switches
- smpi_privatisation_regions[i].file_descriptor = file_descriptor;
- smpi_privatisation_regions[i].address = address;
- }
-#endif
-}
-
-void smpi_destroy_global_memory_segments(){
- if (smpi_size_data_exe == 0)//no need to switch
- return;
-#if HAVE_PRIVATIZATION
- for (int i=0; i< smpi_process_count(); i++) {
- if (munmap(smpi_privatisation_regions[i].address, smpi_size_data_exe) < 0)
- XBT_WARN("Unmapping of fd %d failed: %s", smpi_privatisation_regions[i].file_descriptor, strerror(errno));
- close(smpi_privatisation_regions[i].file_descriptor);
- }
- xbt_free(smpi_privatisation_regions);
-#endif
-}
-
extern "C" { /** These functions will be called from the user code **/
smpi_trace_call_location_t* smpi_trace_get_call_location() {
return smpi_process()->call_location();
/* smpi_coll.c -- various optimized routing for collectives */
-/* Copyright (c) 2009-2015. The SimGrid Team.
+/* Copyright (c) 2009-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/*High level handling of collective algorithms*/
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2010-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <stdlib.h>
-#include <limits.h>
-
-#include <xbt/dict.h>
-#include <xbt/ex.h>
-#include <xbt/ex.hpp>
-
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
#include "private.h"
#include "src/simix/smx_private.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)");
- Comm mpi_MPI_COMM_UNINITIALIZED;
+ simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED;
MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED;
/* Support for cartesian topology was added, but there are 2 other types of topology, graph et dist graph. In order to
}
int Comm::dup(MPI_Comm* newcomm){
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
MPI_Group cp = new Group(this->group());
smpi_process()->set_replaying(false);
}
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
//identify neighbours in comm
Coll_allgather_mpich::allgather(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this);
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
}
Coll_bcast_mpich::bcast(&(is_uniform_),1, MPI_INT, 0, comm_intra );
- if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){ //we need to switch as the called function may silently touch global variables
smpi_switch_data_segment(smpi_process()->index());
}
// Are the ranks blocked ? = allocated contiguously on the SMP nodes
-/* Copyright (c) 2010-2015. The SimGrid Team.
+/* Copyright (c) 2010-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* smpi_datatype.cpp -- MPI primitives to handle datatypes */
-/* Copyright (c) 2009-2015. The SimGrid Team.
+/* Copyright (c) 2009-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi, "Logging specific to SMPI (datatype)");
#define CREATE_MPI_DATATYPE(name, type) \
- static Datatype mpi_##name ( \
+ static simgrid::smpi::Datatype mpi_##name ( \
(char*) # name, \
sizeof(type), /* size */ \
0, /* lb */ \
const MPI_Datatype name = &mpi_##name;
#define CREATE_MPI_DATATYPE_NULL(name) \
- static Datatype mpi_##name ( \
+ static simgrid::smpi::Datatype mpi_##name ( \
(char*) # name, \
0, /* size */ \
0, /* lb */ \
int Datatype::copy(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype){
int count;
- if(smpi_privatize_global_variables){
+
+// FIXME Handle the case of a partial shared malloc.
+
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
smpi_switch_data_segment(smpi_process()->index());
}
/* First check if we really have something to do */
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* smpi_datatype.cpp -- MPI primitives to handle datatypes */
-/* Copyright (c) 2009-2015. The SimGrid Team.
+/* Copyright (c) 2009-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2004-2014. The SimGrid Team.
+/* Copyright (c) 2004-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "private.h"
#include "simgrid/msg.h" /* barrier */
-#include "xbt/dict.h"
+#include "src/smpi/SmpiHost.hpp"
#include "xbt/log.h"
-#include "xbt/sysdep.h"
-static xbt_dict_t smpi_instances = nullptr;
-extern int process_count;
-extern int* index_to_process_data;
+namespace simgrid {
+namespace smpi {
+namespace app {
+
+class Instance {
+public:
+ Instance(const char* name, int max_no_processes, int process_count, MPI_Comm comm, msg_bar_t finalization_barrier)
+ : name(name)
+ , size(max_no_processes)
+ , present_processes(0)
+ , index(process_count)
+ , comm_world(comm)
+ , finalization_barrier(finalization_barrier)
+ {
+ }
-typedef struct s_smpi_mpi_instance{
const char* name;
int size;
int present_processes;
- int index;
+ int index; // Badly named. This should be "no_processes_when_registering" ;)
MPI_Comm comm_world;
msg_bar_t finalization_barrier;
-} s_smpi_mpi_instance_t;
+};
+}
+}
+namespace s4u {
+extern std::map<std::string, simgrid::s4u::Host*> host_list;
+}
+}
+
+using simgrid::smpi::app::Instance;
+
+static std::map<std::string, Instance> smpi_instances;
+extern int process_count; // How many processes have been allocated over all instances?
+extern int* index_to_process_data;
/** \ingroup smpi_simulation
* \brief Registers a running instance of a MPI program.
{
SIMIX_function_register(name, code);
- s_smpi_mpi_instance_t* instance = (s_smpi_mpi_instance_t*)xbt_malloc(sizeof(s_smpi_mpi_instance_t));
+ static int already_called = 0;
+ if (!already_called) {
+ already_called = 1;
+ for (auto& item : simgrid::s4u::host_list) {
+ simgrid::s4u::Host* host = item.second;
+ host->extension_set(new simgrid::smpi::SmpiHost(host));
+ }
+ }
- instance->name = name;
- instance->size = num_processes;
- instance->present_processes = 0;
- instance->index = process_count;
- instance->comm_world = MPI_COMM_NULL;
- instance->finalization_barrier = MSG_barrier_init(num_processes);
+ Instance instance(name, num_processes, process_count, MPI_COMM_NULL, MSG_barrier_init(num_processes));
process_count+=num_processes;
- if(smpi_instances==nullptr){
- smpi_instances = xbt_dict_new_homogeneous(xbt_free_f);
- }
-
- xbt_dict_set(smpi_instances, name, (void*)instance, nullptr);
+ smpi_instances.insert(std::pair<std::string, Instance>(name, instance));
}
//get the index of the process in the process_data array
void smpi_deployment_register_process(const char* instance_id, int rank, int index)
{
-
- if(smpi_instances==nullptr){//no instance registered, we probably used smpirun.
+ if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
index_to_process_data[index]=index;
return;
}
- s_smpi_mpi_instance_t* instance =
- static_cast<s_smpi_mpi_instance_t*>(xbt_dict_get_or_null(smpi_instances, instance_id));
- xbt_assert(instance, "Error, unknown instance %s", instance_id);
+ Instance& instance = smpi_instances.at(instance_id);
- if(instance->comm_world == MPI_COMM_NULL){
- MPI_Group group = new Group(instance->size);
- instance->comm_world = new Comm(group, nullptr);
+ if (instance.comm_world == MPI_COMM_NULL) {
+ MPI_Group group = new simgrid::smpi::Group(instance.size);
+ instance.comm_world = new simgrid::smpi::Comm(group, nullptr);
}
- instance->present_processes++;
- index_to_process_data[index]=instance->index+rank;
- instance->comm_world->group()->set_mapping(index, rank);
+ instance.present_processes++;
+ index_to_process_data[index] = instance.index + rank;
+ instance.comm_world->group()->set_mapping(index, rank);
}
//get the index of the process in the process_data array
MPI_Comm* smpi_deployment_comm_world(const char* instance_id)
{
- if(smpi_instances==nullptr){//no instance registered, we probably used smpirun.
+ if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
return nullptr;
}
- s_smpi_mpi_instance_t* instance =
- static_cast<s_smpi_mpi_instance_t*>(xbt_dict_get_or_null(smpi_instances, instance_id));
- xbt_assert(instance, "Error, unknown instance %s", instance_id);
- return &instance->comm_world;
+ Instance& instance = smpi_instances.at(instance_id);
+ return &instance.comm_world;
}
msg_bar_t smpi_deployment_finalization_barrier(const char* instance_id)
{
- if(smpi_instances==nullptr){//no instance registered, we probably used smpirun.
+ if (smpi_instances.empty()) { // no instance registered, we probably used smpirun.
return nullptr;
}
- s_smpi_mpi_instance_t* instance =
- static_cast<s_smpi_mpi_instance_t*>(xbt_dict_get_or_null(smpi_instances, instance_id));
- xbt_assert(instance, "Error, unknown instance %s", instance_id);
- return instance->finalization_barrier;
+ Instance& instance = smpi_instances.at(instance_id);
+ return instance.finalization_barrier;
}
void smpi_deployment_cleanup_instances(){
- xbt_dict_cursor_t cursor = nullptr;
- s_smpi_mpi_instance_t* instance = nullptr;
- char *name = nullptr;
- xbt_dict_foreach(smpi_instances, cursor, name, instance) {
- if(instance->comm_world!=MPI_COMM_NULL)
- delete instance->comm_world->group();
- delete instance->comm_world;
- MSG_barrier_destroy(instance->finalization_barrier);
+ for (auto& item : smpi_instances) {
+ Instance instance = item.second;
+ if (instance.comm_world != MPI_COMM_NULL)
+ delete instance.comm_world->group();
+ delete instance.comm_world;
+ MSG_barrier_destroy(instance.finalization_barrier);
}
- xbt_dict_free(&smpi_instances);
}
-/* Copyright (c) 2013-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2013-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <xbt/log.h>
// FIXME: this plugin should be separated from the core
+#include "simgrid/s4u/Host.hpp"
#include <simgrid/plugins/energy.h>
#include <simgrid/simix.h>
-#include <simgrid/s4u/host.hpp>
#include <smpi/smpi.h>
-/* Copyright (c) 2007-2015. The SimGrid Team.
+/* Copyright (c) 2007-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/* Handle Fortan - C conversion for MPI Types*/
-/* Copyright (c) 2010, 2013-2015. The SimGrid Team.
+/* Copyright (c) 2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2010-2015. The SimGrid Team.
+/* Copyright (c) 2010-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
}
static void smpi_init_fortran_types(){
- if(F2C::lookup() == nullptr){
+ if(simgrid::smpi::F2C::lookup() == nullptr){
MPI_COMM_WORLD->add_f();
MPI_BYTE->add_f();//MPI_BYTE
MPI_CHAR->add_f();//MPI_CHARACTER
*ierr = MPI_Finalize();
running_processes--;
if(running_processes==0){
- F2C::delete_lookup();
+ simgrid::smpi::F2C::delete_lookup();
}
}
void mpi_abort_(int* comm, int* errorcode, int* ierr) {
- *ierr = MPI_Abort(Comm::f2c(*comm), *errorcode);
+ *ierr = MPI_Abort(simgrid::smpi::Comm::f2c(*comm), *errorcode);
}
void mpi_comm_rank_(int* comm, int* rank, int* ierr) {
- *ierr = MPI_Comm_rank(Comm::f2c(*comm), rank);
+ *ierr = MPI_Comm_rank(simgrid::smpi::Comm::f2c(*comm), rank);
}
void mpi_comm_size_(int* comm, int* size, int* ierr) {
- *ierr = MPI_Comm_size(Comm::f2c(*comm), size);
+ *ierr = MPI_Comm_size(simgrid::smpi::Comm::f2c(*comm), size);
}
double mpi_wtime_() {
void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Comm_dup(Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Comm_dup(simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*newcomm = tmp->add_f();
}
void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Comm_create(Comm::f2c(*comm),Group::f2c(*group), &tmp);
+ *ierr = MPI_Comm_create(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), &tmp);
if(*ierr == MPI_SUCCESS) {
*newcomm = tmp->add_f();
}
}
void mpi_comm_free_(int* comm, int* ierr) {
- MPI_Comm tmp = Comm::f2c(*comm);
+ MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
*ierr = MPI_Comm_free(&tmp);
if(*ierr == MPI_SUCCESS) {
- Comm::free_f(*comm);
+ simgrid::smpi::Comm::free_f(*comm);
}
}
void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Comm_split(Comm::f2c(*comm), *color, *key, &tmp);
+ *ierr = MPI_Comm_split(simgrid::smpi::Comm::f2c(*comm), *color, *key, &tmp);
if(*ierr == MPI_SUCCESS) {
*comm_out = tmp->add_f();
}
void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) {
MPI_Group tmp;
- *ierr = MPI_Group_incl(Group::f2c(*group), *n, ranks, &tmp);
+ *ierr = MPI_Group_incl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp);
if(*ierr == MPI_SUCCESS) {
*group_out = tmp->add_f();
}
void mpi_comm_group_(int* comm, int* group_out, int* ierr) {
MPI_Group tmp;
- *ierr = MPI_Comm_group(Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Comm_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*group_out = tmp->c2f();
}
void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
MPI_Request req;
buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Send_init(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm), &req);
+ *ierr = MPI_Send_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
if(*ierr == MPI_SUCCESS) {
*request = req->add_f();
}
void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
MPI_Request req;
buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Isend(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm), &req);
+ *ierr = MPI_Isend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
if(*ierr == MPI_SUCCESS) {
*request = req->add_f();
}
void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) {
MPI_Request req;
buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Irsend(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm), &req);
+ *ierr = MPI_Irsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
if(*ierr == MPI_SUCCESS) {
*request = req->add_f();
}
void mpi_send_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Send(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm));
+ *ierr = MPI_Send(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_rsend_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) {
buf = static_cast<char *>(FORT_BOTTOM(buf));
- *ierr = MPI_Rsend(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm));
+ *ierr = MPI_Rsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount,
int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) {
sendbuf = static_cast<char *>( FORT_BOTTOM(sendbuf));
recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Sendrecv(sendbuf, *sendcount, Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount,
- Datatype::f2c(*recvtype), *src, *recvtag, Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+ *ierr = MPI_Sendrecv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount,
+ simgrid::smpi::Datatype::f2c(*recvtype), *src, *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
}
void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
MPI_Request req;
buf = static_cast<char *>( FORT_BOTTOM(buf));
- *ierr = MPI_Recv_init(buf, *count, Datatype::f2c(*datatype), *src, *tag, Comm::f2c(*comm), &req);
+ *ierr = MPI_Recv_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
if(*ierr == MPI_SUCCESS) {
*request = req->add_f();
}
void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) {
MPI_Request req;
buf = static_cast<char *>( FORT_BOTTOM(buf));
- *ierr = MPI_Irecv(buf, *count, Datatype::f2c(*datatype), *src, *tag, Comm::f2c(*comm), &req);
+ *ierr = MPI_Irecv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req);
if(*ierr == MPI_SUCCESS) {
*request = req->add_f();
}
void mpi_recv_(void* buf, int* count, int* datatype, int* src, int* tag, int* comm, MPI_Status* status, int* ierr) {
buf = static_cast<char *>( FORT_BOTTOM(buf));
- *ierr = MPI_Recv(buf, *count, Datatype::f2c(*datatype), *src, *tag, Comm::f2c(*comm), status);
+ *ierr = MPI_Recv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), status);
}
void mpi_start_(int* request, int* ierr) {
- MPI_Request req = Request::f2c(*request);
+ MPI_Request req = simgrid::smpi::Request::f2c(*request);
*ierr = MPI_Start(&req);
}
reqs = xbt_new(MPI_Request, *count);
for(i = 0; i < *count; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
}
*ierr = MPI_Startall(*count, reqs);
xbt_free(reqs);
}
void mpi_wait_(int* request, MPI_Status* status, int* ierr) {
- MPI_Request req = Request::f2c(*request);
+ MPI_Request req = simgrid::smpi::Request::f2c(*request);
*ierr = MPI_Wait(&req, FORT_STATUS_IGNORE(status));
if(req==MPI_REQUEST_NULL){
- Request::free_f(*request);
+ simgrid::smpi::Request::free_f(*request);
*request=MPI_FORTRAN_REQUEST_NULL;
}
}
reqs = xbt_new(MPI_Request, *count);
for(i = 0; i < *count; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
}
*ierr = MPI_Waitany(*count, reqs, index, status);
if(reqs[*index]==MPI_REQUEST_NULL){
- Request::free_f(requests[*index]);
+ simgrid::smpi::Request::free_f(requests[*index]);
requests[*index]=MPI_FORTRAN_REQUEST_NULL;
}
xbt_free(reqs);
reqs = xbt_new(MPI_Request, *count);
for(i = 0; i < *count; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
}
*ierr = MPI_Waitall(*count, reqs, FORT_STATUSES_IGNORE(status));
for(i = 0; i < *count; i++) {
if(reqs[i]==MPI_REQUEST_NULL){
- Request::free_f(requests[i]);
+ simgrid::smpi::Request::free_f(requests[i]);
requests[i]=MPI_FORTRAN_REQUEST_NULL;
}
}
}
void mpi_barrier_(int* comm, int* ierr) {
- *ierr = MPI_Barrier(Comm::f2c(*comm));
+ *ierr = MPI_Barrier(simgrid::smpi::Comm::f2c(*comm));
}
void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) {
- *ierr = MPI_Bcast(buf, *count, Datatype::f2c(*datatype), *root, Comm::f2c(*comm));
+ *ierr = MPI_Bcast(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *root, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr) {
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
sendbuf = static_cast<char *>( FORT_BOTTOM(sendbuf));
recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Reduce(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op), *root, Comm::f2c(*comm));
+ *ierr = MPI_Reduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), *root, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op), Comm::f2c(*comm));
+ *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr) {
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, Datatype::f2c(*datatype),
- Op::f2c(*op), Comm::f2c(*comm));
+ *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, simgrid::smpi::Datatype::f2c(*datatype),
+ simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_scatter_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
int* root, int* comm, int* ierr) {
recvbuf = static_cast<char *>( FORT_IN_PLACE(recvbuf));
- *ierr = MPI_Scatter(sendbuf, *sendcount, Datatype::f2c(*sendtype),
- recvbuf, *recvcount, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm));
+ *ierr = MPI_Scatter(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype,
void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) {
recvbuf = static_cast<char *>( FORT_IN_PLACE(recvbuf));
- *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, Datatype::f2c(*sendtype),
- recvbuf, *recvcount, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm));
+ *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<char *>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Gather(sendbuf, *sendcount, Datatype::f2c(*sendtype),
- recvbuf, *recvcount, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm));
+ *ierr = MPI_Gather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype,
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast<char *>( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE;
recvbuf = static_cast<char *>( FORT_BOTTOM(recvbuf));
- *ierr = MPI_Gatherv(sendbuf, *sendcount, Datatype::f2c(*sendtype),
- recvbuf, recvcounts, displs, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm));
+ *ierr = MPI_Gatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_allgather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype,
int* comm, int* ierr) {
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Allgather(sendbuf, *sendcount, Datatype::f2c(*sendtype),
- recvbuf, *recvcount, Datatype::f2c(*recvtype), Comm::f2c(*comm));
+ *ierr = MPI_Allgather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype,
void* recvbuf, int* recvcounts,int* displs, int* recvtype, int* comm, int* ierr) {
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Allgatherv(sendbuf, *sendcount, Datatype::f2c(*sendtype),
- recvbuf, recvcounts, displs, Datatype::f2c(*recvtype), Comm::f2c(*comm));
+ *ierr = MPI_Allgatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) {
- *ierr = MPI_Scan(sendbuf, recvbuf, *count, Datatype::f2c(*datatype),
- Op::f2c(*op), Comm::f2c(*comm));
+ *ierr = MPI_Scan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype),
+ simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_alltoall_(void* sendbuf, int* sendcount, int* sendtype,
void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) {
- *ierr = MPI_Alltoall(sendbuf, *sendcount, Datatype::f2c(*sendtype),
- recvbuf, *recvcount, Datatype::f2c(*recvtype), Comm::f2c(*comm));
+ *ierr = MPI_Alltoall(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype,
void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) {
- *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, Datatype::f2c(*sendtype),
- recvbuf, recvcounts, recvdisps, Datatype::f2c(*recvtype), Comm::f2c(*comm));
+ *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, simgrid::smpi::Datatype::f2c(*sendtype),
+ recvbuf, recvcounts, recvdisps, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_test_ (int * request, int *flag, MPI_Status * status, int* ierr){
- MPI_Request req = Request::f2c(*request);
+ MPI_Request req = simgrid::smpi::Request::f2c(*request);
*ierr= MPI_Test(&req, flag, FORT_STATUS_IGNORE(status));
if(req==MPI_REQUEST_NULL){
- Request::free_f(*request);
+ simgrid::smpi::Request::free_f(*request);
*request=MPI_FORTRAN_REQUEST_NULL;
}
}
int i;
MPI_Request* reqs = xbt_new(MPI_Request, *count);
for(i = 0; i < *count; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
}
*ierr= MPI_Testall(*count, reqs, flag, FORT_STATUSES_IGNORE(statuses));
for(i = 0; i < *count; i++) {
if(reqs[i]==MPI_REQUEST_NULL){
- Request::free_f(requests[i]);
+ simgrid::smpi::Request::free_f(requests[i]);
requests[i]=MPI_FORTRAN_REQUEST_NULL;
}
}
}
void mpi_get_count_(MPI_Status * status, int* datatype, int *count, int* ierr){
- *ierr = MPI_Get_count(FORT_STATUS_IGNORE(status), Datatype::f2c(*datatype), count);
+ *ierr = MPI_Get_count(FORT_STATUS_IGNORE(status), simgrid::smpi::Datatype::f2c(*datatype), count);
}
void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr ){
- *ierr = MPI_Attr_get(Comm::f2c(*comm), *keyval, attr_value, flag);
+ *ierr = MPI_Attr_get(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value, flag);
}
void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr){
- *ierr= MPI_Type_extent(Datatype::f2c(*datatype), extent);
+ *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
}
void mpi_type_commit_(int* datatype, int* ierr){
- MPI_Datatype tmp= Datatype::f2c(*datatype);
+ MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype);
*ierr= MPI_Type_commit(&tmp);
}
void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){
MPI_Datatype tmp;
- *ierr= MPI_Type_vector(*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp);
+ *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){
MPI_Datatype tmp;
- *ierr= MPI_Type_vector(*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp);
+ *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
MPI_Datatype tmp;
- *ierr= MPI_Type_hvector (*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp);
+ *ierr= MPI_Type_hvector (*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){
MPI_Datatype tmp;
- *ierr= MPI_Type_hvector(*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp);
+ *ierr= MPI_Type_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
}
void mpi_type_free_(int* datatype, int* ierr){
- MPI_Datatype tmp= Datatype::f2c(*datatype);
+ MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype);
*ierr= MPI_Type_free (&tmp);
if(*ierr == MPI_SUCCESS) {
- F2C::free_f(*datatype);
+ simgrid::smpi::F2C::free_f(*datatype);
}
}
void mpi_type_ub_(int* datatype, MPI_Aint * disp, int* ierr){
- *ierr= MPI_Type_ub(Datatype::f2c(*datatype), disp);
+ *ierr= MPI_Type_ub(simgrid::smpi::Datatype::f2c(*datatype), disp);
}
void mpi_type_lb_(int* datatype, MPI_Aint * extent, int* ierr){
- *ierr= MPI_Type_extent(Datatype::f2c(*datatype), extent);
+ *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent);
}
void mpi_type_size_(int* datatype, int *size, int* ierr)
{
- *ierr = MPI_Type_size(Datatype::f2c(*datatype), size);
+ *ierr = MPI_Type_size(simgrid::smpi::Datatype::f2c(*datatype), size);
}
void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr){
}
void mpi_win_fence_( int* assert, int* win, int* ierr){
- *ierr = MPI_Win_fence(* assert, Win::f2c(*win));
+ *ierr = MPI_Win_fence(* assert, simgrid::smpi::Win::f2c(*win));
}
void mpi_win_free_( int* win, int* ierr){
- MPI_Win tmp = Win::f2c(*win);
+ MPI_Win tmp = simgrid::smpi::Win::f2c(*win);
*ierr = MPI_Win_free(&tmp);
if(*ierr == MPI_SUCCESS) {
- F2C::free_f(*win);
+ simgrid::smpi::F2C::free_f(*win);
}
}
void mpi_win_create_( int *base, MPI_Aint* size, int* disp_unit, int* info, int* comm, int *win, int* ierr){
MPI_Win tmp;
- *ierr = MPI_Win_create( static_cast<void*>(base), *size, *disp_unit, Info::f2c(*info), Comm::f2c(*comm),&tmp);
+ *ierr = MPI_Win_create( static_cast<void*>(base), *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),&tmp);
if(*ierr == MPI_SUCCESS) {
*win = tmp->add_f();
}
}
void mpi_win_post_(int* group, int assert, int* win, int* ierr){
- *ierr = MPI_Win_post(Group::f2c(*group), assert, Win::f2c(*win));
+ *ierr = MPI_Win_post(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win));
}
void mpi_win_start_(int* group, int assert, int* win, int* ierr){
- *ierr = MPI_Win_start(Group::f2c(*group), assert, Win::f2c(*win));
+ *ierr = MPI_Win_start(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win));
}
void mpi_win_complete_(int* win, int* ierr){
- *ierr = MPI_Win_complete(Win::f2c(*win));
+ *ierr = MPI_Win_complete(simgrid::smpi::Win::f2c(*win));
}
void mpi_win_wait_(int* win, int* ierr){
- *ierr = MPI_Win_wait(Win::f2c(*win));
+ *ierr = MPI_Win_wait(simgrid::smpi::Win::f2c(*win));
}
void mpi_win_set_name_ (int* win, char * name, int* ierr, int size){
char* tname = xbt_new(char,size+1);
strncpy(tname, name, size);
tname[size]='\0';
- *ierr = MPI_Win_set_name(Win::f2c(*win), tname);
+ *ierr = MPI_Win_set_name(simgrid::smpi::Win::f2c(*win), tname);
xbt_free(tname);
}
void mpi_win_get_name_ (int* win, char * name, int* len, int* ierr){
- *ierr = MPI_Win_get_name(Win::f2c(*win),name,len);
+ *ierr = MPI_Win_get_name(simgrid::smpi::Win::f2c(*win),name,len);
if(*len>0)
name[*len]=' ';//blank padding, not \0
}
strncpy(tvalue, value, valuelen);
tvalue[valuelen]='\0';
- *ierr = MPI_Info_set( Info::f2c(*info), tkey, tvalue);
+ *ierr = MPI_Info_set( simgrid::smpi::Info::f2c(*info), tkey, tvalue);
xbt_free(tkey);
xbt_free(tvalue);
}
char* tkey = xbt_new(char,keylen+1);
strncpy(tkey, key, keylen);
tkey[keylen]='\0';
- *ierr = MPI_Info_get(Info::f2c(*info),tkey,*valuelen, value, flag);
+ *ierr = MPI_Info_get(simgrid::smpi::Info::f2c(*info),tkey,*valuelen, value, flag);
xbt_free(tkey);
if(*flag!=0){
int replace=0;
}
void mpi_info_free_(int* info, int* ierr){
- MPI_Info tmp = Info::f2c(*info);
+ MPI_Info tmp = simgrid::smpi::Info::f2c(*info);
*ierr = MPI_Info_free(&tmp);
if(*ierr == MPI_SUCCESS) {
- F2C::free_f(*info);
+ simgrid::smpi::F2C::free_f(*info);
}
}
void mpi_get_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){
- *ierr = MPI_Get( static_cast<void*>(origin_addr),*origin_count, Datatype::f2c(*origin_datatype),*target_rank,
- *target_disp, *target_count,Datatype::f2c(*tarsmpi_type_f2c), Win::f2c(*win));
+ *ierr = MPI_Get( static_cast<void*>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+ *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win));
}
void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* ierr){
- *ierr = MPI_Accumulate( static_cast<void *>(origin_addr),*origin_count, Datatype::f2c(*origin_datatype),*target_rank,
- *target_disp, *target_count,Datatype::f2c(*tarsmpi_type_f2c), Op::f2c(*op), Win::f2c(*win));
+ *ierr = MPI_Accumulate( static_cast<void *>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+ *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win));
}
void mpi_put_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank,
MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){
- *ierr = MPI_Put( static_cast<void *>(origin_addr),*origin_count, Datatype::f2c(*origin_datatype),*target_rank,
- *target_disp, *target_count,Datatype::f2c(*tarsmpi_type_f2c), Win::f2c(*win));
+ *ierr = MPI_Put( static_cast<void *>(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank,
+ *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win));
}
//following are automatically generated, and have to be checked
void mpi_type_dup_ (int* datatype, int* newdatatype, int* ierr){
MPI_Datatype tmp;
- *ierr = MPI_Type_dup(Datatype::f2c(*datatype), &tmp);
+ *ierr = MPI_Type_dup(simgrid::smpi::Datatype::f2c(*datatype), &tmp);
if(*ierr == MPI_SUCCESS) {
*newdatatype = tmp->add_f();
}
char* tname = xbt_new(char, size+1);
strncpy(tname, name, size);
tname[size]='\0';
- *ierr = MPI_Type_set_name(Datatype::f2c(*datatype), tname);
+ *ierr = MPI_Type_set_name(simgrid::smpi::Datatype::f2c(*datatype), tname);
xbt_free(tname);
}
void mpi_type_get_name_ (int* datatype, char * name, int* len, int* ierr){
- *ierr = MPI_Type_get_name(Datatype::f2c(*datatype),name,len);
+ *ierr = MPI_Type_get_name(simgrid::smpi::Datatype::f2c(*datatype),name,len);
if(*len>0)
name[*len]=' ';
}
void mpi_type_get_attr_ (int* type, int* type_keyval, void *attribute_val, int* flag, int* ierr){
- *ierr = MPI_Type_get_attr ( Datatype::f2c(*type), *type_keyval, attribute_val,flag);
+ *ierr = MPI_Type_get_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val,flag);
}
void mpi_type_set_attr_ (int* type, int* type_keyval, void *attribute_val, int* ierr){
- *ierr = MPI_Type_set_attr ( Datatype::f2c(*type), *type_keyval, attribute_val);
+ *ierr = MPI_Type_set_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val);
}
void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){
- *ierr = MPI_Type_delete_attr ( Datatype::f2c(*type), *type_keyval);
+ *ierr = MPI_Type_delete_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval);
}
void mpi_type_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
- *ierr = MPI_Type_get_extent(Datatype::f2c(*datatype), lb, extent);
+ *ierr = MPI_Type_get_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
}
void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){
- *ierr = MPI_Type_get_true_extent(Datatype::f2c(*datatype), lb, extent);
+ *ierr = MPI_Type_get_true_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent);
}
void mpi_op_create_ (void * function, int* commute, int* op, int* ierr){
}
void mpi_op_free_ (int* op, int* ierr){
- MPI_Op tmp=Op::f2c(*op);
+ MPI_Op tmp= simgrid::smpi::Op::f2c(*op);
*ierr = MPI_Op_free(& tmp);
if(*ierr == MPI_SUCCESS) {
- F2C::free_f(*op);
+ simgrid::smpi::F2C::free_f(*op);
}
}
void mpi_group_free_ (int* group, int* ierr){
- MPI_Group tmp=Group::f2c(*group);
+ MPI_Group tmp = simgrid::smpi::Group::f2c(*group);
*ierr = MPI_Group_free(&tmp);
if(*ierr == MPI_SUCCESS) {
- F2C::free_f(*group);
+ simgrid::smpi::F2C::free_f(*group);
}
}
void mpi_group_size_ (int* group, int *size, int* ierr){
- *ierr = MPI_Group_size(Group::f2c(*group), size);
+ *ierr = MPI_Group_size(simgrid::smpi::Group::f2c(*group), size);
}
void mpi_group_rank_ (int* group, int *rank, int* ierr){
- *ierr = MPI_Group_rank(Group::f2c(*group), rank);
+ *ierr = MPI_Group_rank(simgrid::smpi::Group::f2c(*group), rank);
}
void mpi_group_translate_ranks_ (int* group1, int* n, int *ranks1, int* group2, int *ranks2, int* ierr)
{
- *ierr = MPI_Group_translate_ranks(Group::f2c(*group1), *n, ranks1, Group::f2c(*group2), ranks2);
+ *ierr = MPI_Group_translate_ranks(simgrid::smpi::Group::f2c(*group1), *n, ranks1, simgrid::smpi::Group::f2c(*group2), ranks2);
}
void mpi_group_compare_ (int* group1, int* group2, int *result, int* ierr){
- *ierr = MPI_Group_compare(Group::f2c(*group1), Group::f2c(*group2), result);
+ *ierr = MPI_Group_compare(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), result);
}
void mpi_group_union_ (int* group1, int* group2, int* newgroup, int* ierr){
MPI_Group tmp;
- *ierr = MPI_Group_union(Group::f2c(*group1), Group::f2c(*group2), &tmp);
+ *ierr = MPI_Group_union(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp);
if(*ierr == MPI_SUCCESS) {
*newgroup = tmp->add_f();
}
void mpi_group_intersection_ (int* group1, int* group2, int* newgroup, int* ierr){
MPI_Group tmp;
- *ierr = MPI_Group_intersection(Group::f2c(*group1), Group::f2c(*group2), &tmp);
+ *ierr = MPI_Group_intersection(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp);
if(*ierr == MPI_SUCCESS) {
*newgroup = tmp->add_f();
}
void mpi_group_difference_ (int* group1, int* group2, int* newgroup, int* ierr){
MPI_Group tmp;
- *ierr = MPI_Group_difference(Group::f2c(*group1), Group::f2c(*group2), &tmp);
+ *ierr = MPI_Group_difference(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp);
if(*ierr == MPI_SUCCESS) {
*newgroup = tmp->add_f();
}
void mpi_group_excl_ (int* group, int* n, int *ranks, int* newgroup, int* ierr){
MPI_Group tmp;
- *ierr = MPI_Group_excl(Group::f2c(*group), *n, ranks, &tmp);
+ *ierr = MPI_Group_excl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp);
if(*ierr == MPI_SUCCESS) {
*newgroup = tmp->add_f();
}
void mpi_group_range_incl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr)
{
MPI_Group tmp;
- *ierr = MPI_Group_range_incl(Group::f2c(*group), *n, ranges, &tmp);
+ *ierr = MPI_Group_range_incl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp);
if(*ierr == MPI_SUCCESS) {
*newgroup = tmp->add_f();
}
void mpi_group_range_excl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr)
{
MPI_Group tmp;
- *ierr = MPI_Group_range_excl(Group::f2c(*group), *n, ranges, &tmp);
+ *ierr = MPI_Group_range_excl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp);
if(*ierr == MPI_SUCCESS) {
*newgroup = tmp->add_f();
}
void mpi_comm_get_attr_ (int* comm, int* comm_keyval, void *attribute_val, int *flag, int* ierr){
- *ierr = MPI_Comm_get_attr (Comm::f2c(*comm), *comm_keyval, attribute_val, flag);
+ *ierr = MPI_Comm_get_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val, flag);
}
void mpi_comm_set_attr_ (int* comm, int* comm_keyval, void *attribute_val, int* ierr){
- *ierr = MPI_Comm_set_attr ( Comm::f2c(*comm), *comm_keyval, attribute_val);
+ *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val);
}
void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){
- *ierr = MPI_Comm_delete_attr (Comm::f2c(*comm), *comm_keyval);
+ *ierr = MPI_Comm_delete_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval);
}
void mpi_comm_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){
}
void mpi_comm_get_name_ (int* comm, char* name, int* len, int* ierr){
- *ierr = MPI_Comm_get_name(Comm::f2c(*comm), name, len);
+ *ierr = MPI_Comm_get_name(simgrid::smpi::Comm::f2c(*comm), name, len);
if(*len>0)
name[*len]=' ';
}
void mpi_comm_compare_ (int* comm1, int* comm2, int *result, int* ierr){
- *ierr = MPI_Comm_compare(Comm::f2c(*comm1), Comm::f2c(*comm2), result);
+ *ierr = MPI_Comm_compare(simgrid::smpi::Comm::f2c(*comm1), simgrid::smpi::Comm::f2c(*comm2), result);
}
void mpi_comm_disconnect_ (int* comm, int* ierr){
- MPI_Comm tmp=Comm::f2c(*comm);
+ MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm);
*ierr = MPI_Comm_disconnect(&tmp);
if(*ierr == MPI_SUCCESS) {
- Comm::free_f(*comm);
+ simgrid::smpi::Comm::free_f(*comm);
}
}
void mpi_request_free_ (int* request, int* ierr){
- MPI_Request tmp=Request::f2c(*request);
+ MPI_Request tmp=simgrid::smpi::Request::f2c(*request);
*ierr = MPI_Request_free(&tmp);
if(*ierr == MPI_SUCCESS) {
- Request::free_f(*request);
+ simgrid::smpi::Request::free_f(*request);
}
}
void mpi_sendrecv_replace_ (void *buf, int* count, int* datatype, int* dst, int* sendtag, int* src, int* recvtag,
int* comm, MPI_Status* status, int* ierr)
{
- *ierr = MPI_Sendrecv_replace(buf, *count, Datatype::f2c(*datatype), *dst, *sendtag, *src,
- *recvtag, Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+ *ierr = MPI_Sendrecv_replace(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *sendtag, *src,
+ *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
}
void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* status, int* ierr)
reqs = xbt_new(MPI_Request, *count);
for(i = 0; i < *count; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
}
*ierr = MPI_Testany(*count, reqs, index, flag, FORT_STATUS_IGNORE(status));
if(*index!=MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){
- Request::free_f(requests[*index]);
+ simgrid::smpi::Request::free_f(requests[*index]);
requests[*index]=MPI_FORTRAN_REQUEST_NULL;
}
xbt_free(reqs);
reqs = xbt_new(MPI_Request, *incount);
for(i = 0; i < *incount; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
}
*ierr = MPI_Waitsome(*incount, reqs, outcount, indices, status);
for(i=0;i<*outcount;i++){
if(reqs[indices[i]]==MPI_REQUEST_NULL){
- Request::free_f(requests[indices[i]]);
+ simgrid::smpi::Request::free_f(requests[indices[i]]);
requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
}
}
void mpi_reduce_local_ (void *inbuf, void *inoutbuf, int* count, int* datatype, int* op, int* ierr){
- *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op));
+ *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op));
}
void mpi_reduce_scatter_block_ (void *sendbuf, void *recvbuf, int* recvcount, int* datatype, int* op, int* comm,
int* ierr)
{
sendbuf = static_cast<char *>( FORT_IN_PLACE(sendbuf));
- *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, Datatype::f2c(*datatype), Op::f2c(*op),
- Comm::f2c(*comm));
+ *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op),
+ simgrid::smpi::Comm::f2c(*comm));
}
void mpi_pack_size_ (int* incount, int* datatype, int* comm, int* size, int* ierr) {
- *ierr = MPI_Pack_size(*incount, Datatype::f2c(*datatype), Comm::f2c(*comm), size);
+ *ierr = MPI_Pack_size(*incount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Comm::f2c(*comm), size);
}
void mpi_cart_coords_ (int* comm, int* rank, int* maxdims, int* coords, int* ierr) {
- *ierr = MPI_Cart_coords(Comm::f2c(*comm), *rank, *maxdims, coords);
+ *ierr = MPI_Cart_coords(simgrid::smpi::Comm::f2c(*comm), *rank, *maxdims, coords);
}
void mpi_cart_create_ (int* comm_old, int* ndims, int* dims, int* periods, int* reorder, int* comm_cart, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Cart_create(Comm::f2c(*comm_old), *ndims, dims, periods, *reorder, &tmp);
+ *ierr = MPI_Cart_create(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, *reorder, &tmp);
if(*ierr == MPI_SUCCESS) {
*comm_cart = tmp->add_f();
}
}
void mpi_cart_get_ (int* comm, int* maxdims, int* dims, int* periods, int* coords, int* ierr) {
- *ierr = MPI_Cart_get(Comm::f2c(*comm), *maxdims, dims, periods, coords);
+ *ierr = MPI_Cart_get(simgrid::smpi::Comm::f2c(*comm), *maxdims, dims, periods, coords);
}
void mpi_cart_map_ (int* comm_old, int* ndims, int* dims, int* periods, int* newrank, int* ierr) {
- *ierr = MPI_Cart_map(Comm::f2c(*comm_old), *ndims, dims, periods, newrank);
+ *ierr = MPI_Cart_map(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, newrank);
}
void mpi_cart_rank_ (int* comm, int* coords, int* rank, int* ierr) {
- *ierr = MPI_Cart_rank(Comm::f2c(*comm), coords, rank);
+ *ierr = MPI_Cart_rank(simgrid::smpi::Comm::f2c(*comm), coords, rank);
}
void mpi_cart_shift_ (int* comm, int* direction, int* displ, int* source, int* dest, int* ierr) {
- *ierr = MPI_Cart_shift(Comm::f2c(*comm), *direction, *displ, source, dest);
+ *ierr = MPI_Cart_shift(simgrid::smpi::Comm::f2c(*comm), *direction, *displ, source, dest);
}
void mpi_cart_sub_ (int* comm, int* remain_dims, int* comm_new, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Cart_sub(Comm::f2c(*comm), remain_dims, &tmp);
+ *ierr = MPI_Cart_sub(simgrid::smpi::Comm::f2c(*comm), remain_dims, &tmp);
if(*ierr == MPI_SUCCESS) {
*comm_new = tmp->add_f();
}
}
void mpi_cartdim_get_ (int* comm, int* ndims, int* ierr) {
- *ierr = MPI_Cartdim_get(Comm::f2c(*comm), ndims);
+ *ierr = MPI_Cartdim_get(simgrid::smpi::Comm::f2c(*comm), ndims);
}
void mpi_graph_create_ (int* comm_old, int* nnodes, int* index, int* edges, int* reorder, int* comm_graph, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Graph_create(Comm::f2c(*comm_old), *nnodes, index, edges, *reorder, &tmp);
+ *ierr = MPI_Graph_create(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, *reorder, &tmp);
if(*ierr == MPI_SUCCESS) {
*comm_graph = tmp->add_f();
}
}
void mpi_graph_get_ (int* comm, int* maxindex, int* maxedges, int* index, int* edges, int* ierr) {
- *ierr = MPI_Graph_get(Comm::f2c(*comm), *maxindex, *maxedges, index, edges);
+ *ierr = MPI_Graph_get(simgrid::smpi::Comm::f2c(*comm), *maxindex, *maxedges, index, edges);
}
void mpi_graph_map_ (int* comm_old, int* nnodes, int* index, int* edges, int* newrank, int* ierr) {
- *ierr = MPI_Graph_map(Comm::f2c(*comm_old), *nnodes, index, edges, newrank);
+ *ierr = MPI_Graph_map(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, newrank);
}
void mpi_graph_neighbors_ (int* comm, int* rank, int* maxneighbors, int* neighbors, int* ierr) {
- *ierr = MPI_Graph_neighbors(Comm::f2c(*comm), *rank, *maxneighbors, neighbors);
+ *ierr = MPI_Graph_neighbors(simgrid::smpi::Comm::f2c(*comm), *rank, *maxneighbors, neighbors);
}
void mpi_graph_neighbors_count_ (int* comm, int* rank, int* nneighbors, int* ierr) {
- *ierr = MPI_Graph_neighbors_count(Comm::f2c(*comm), *rank, nneighbors);
+ *ierr = MPI_Graph_neighbors_count(simgrid::smpi::Comm::f2c(*comm), *rank, nneighbors);
}
void mpi_graphdims_get_ (int* comm, int* nnodes, int* nedges, int* ierr) {
- *ierr = MPI_Graphdims_get(Comm::f2c(*comm), nnodes, nedges);
+ *ierr = MPI_Graphdims_get(simgrid::smpi::Comm::f2c(*comm), nnodes, nedges);
}
void mpi_topo_test_ (int* comm, int* top_type, int* ierr) {
- *ierr = MPI_Topo_test(Comm::f2c(*comm), top_type);
+ *ierr = MPI_Topo_test(simgrid::smpi::Comm::f2c(*comm), top_type);
}
void mpi_error_class_ (int* errorcode, int* errorclass, int* ierr) {
}
void mpi_errhandler_get_ (int* comm, void* errhandler, int* ierr) {
- *ierr = MPI_Errhandler_get(Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
+ *ierr = MPI_Errhandler_get(simgrid::smpi::Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
}
void mpi_errhandler_set_ (int* comm, void* errhandler, int* ierr) {
- *ierr = MPI_Errhandler_set(Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
+ *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
}
void mpi_comm_set_errhandler_ (int* comm, void* errhandler, int* ierr) {
- *ierr = MPI_Errhandler_set(Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
+ *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast<MPI_Errhandler*>(errhandler));
}
void mpi_comm_get_errhandler_ (int* comm, void* errhandler, int* ierr) {
- *ierr = MPI_Errhandler_set(Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
+ *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), static_cast<MPI_Errhandler*>(errhandler));
}
void mpi_type_contiguous_ (int* count, int* old_type, int* newtype, int* ierr) {
MPI_Datatype tmp;
- *ierr = MPI_Type_contiguous(*count, Datatype::f2c(*old_type), &tmp);
+ *ierr = MPI_Type_contiguous(*count, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
}
void mpi_cancel_ (int* request, int* ierr) {
- MPI_Request tmp=Request::f2c(*request);
+ MPI_Request tmp=simgrid::smpi::Request::f2c(*request);
*ierr = MPI_Cancel(&tmp);
}
reqs = xbt_new(MPI_Request, *incount);
for(i = 0; i < *incount; i++) {
- reqs[i] = Request::f2c(requests[i]);
+ reqs[i] = simgrid::smpi::Request::f2c(requests[i]);
indices[i]=0;
}
*ierr = MPI_Testsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(statuses));
for(i=0;i<*incount;i++){
if(indices[i] && reqs[indices[i]]==MPI_REQUEST_NULL){
- Request::free_f(requests[indices[i]]);
+ simgrid::smpi::Request::free_f(requests[indices[i]]);
requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL;
}
}
}
void mpi_comm_test_inter_ (int* comm, int* flag, int* ierr) {
- *ierr = MPI_Comm_test_inter(Comm::f2c(*comm), flag);
+ *ierr = MPI_Comm_test_inter(simgrid::smpi::Comm::f2c(*comm), flag);
}
void mpi_unpack_ (void* inbuf, int* insize, int* position, void* outbuf, int* outcount, int* type, int* comm,
int* ierr) {
- *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, Datatype::f2c(*type), Comm::f2c(*comm));
+ *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*type), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_pack_external_size_ (char *datarep, int* incount, int* datatype, MPI_Aint *size, int* ierr){
- *ierr = MPI_Pack_external_size(datarep, *incount, Datatype::f2c(*datatype), size);
+ *ierr = MPI_Pack_external_size(datarep, *incount, simgrid::smpi::Datatype::f2c(*datatype), size);
}
void mpi_pack_external_ (char *datarep, void *inbuf, int* incount, int* datatype, void *outbuf, MPI_Aint* outcount,
MPI_Aint *position, int* ierr){
- *ierr = MPI_Pack_external(datarep, inbuf, *incount, Datatype::f2c(*datatype), outbuf, *outcount, position);
+ *ierr = MPI_Pack_external(datarep, inbuf, *incount, simgrid::smpi::Datatype::f2c(*datatype), outbuf, *outcount, position);
}
void mpi_unpack_external_ ( char *datarep, void *inbuf, MPI_Aint* insize, MPI_Aint *position, void *outbuf,
int* outcount, int* datatype, int* ierr){
- *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, Datatype::f2c(*datatype));
+ *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*datatype));
}
void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) {
MPI_Datatype tmp;
- *ierr = MPI_Type_hindexed(*count, blocklens, indices, Datatype::f2c(*old_type), &tmp);
+ *ierr = MPI_Type_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr){
MPI_Datatype tmp;
- *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, Datatype::f2c(*old_type), &tmp);
+ *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype,
int* ierr) {
MPI_Datatype tmp;
- *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, Datatype::f2c(*old_type), &tmp);
+ *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) {
MPI_Datatype tmp;
- *ierr = MPI_Type_indexed(*count, blocklens, indices, Datatype::f2c(*old_type), &tmp);
+ *ierr = MPI_Type_indexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype,
int* ierr){
MPI_Datatype tmp;
- *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, Datatype::f2c(*old_type), &tmp);
+ *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
int i=0;
MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
for(i=0; i< *count; i++){
- types[i] = Datatype::f2c(old_types[i]);
+ types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
}
*ierr = MPI_Type_struct(*count, blocklens, indices, types, &tmp);
if(*ierr == MPI_SUCCESS) {
int i=0;
MPI_Datatype* types = static_cast<MPI_Datatype*>(xbt_malloc(*count*sizeof(MPI_Datatype)));
for(i=0; i< *count; i++){
- types[i] = Datatype::f2c(old_types[i]);
+ types[i] = simgrid::smpi::Datatype::f2c(old_types[i]);
}
*ierr = MPI_Type_create_struct(*count, blocklens, indices, types, &tmp);
if(*ierr == MPI_SUCCESS) {
}
void mpi_ssend_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* ierr) {
- *ierr = MPI_Ssend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm));
+ *ierr = MPI_Ssend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* request, int* ierr) {
MPI_Request tmp;
- *ierr = MPI_Ssend_init(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Ssend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*request = tmp->add_f();
}
void mpi_intercomm_create_ (int* local_comm, int *local_leader, int* peer_comm, int* remote_leader, int* tag,
int* comm_out, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Intercomm_create(Comm::f2c(*local_comm), *local_leader,Comm::f2c(*peer_comm), *remote_leader,
+ *ierr = MPI_Intercomm_create(simgrid::smpi::Comm::f2c(*local_comm), *local_leader, simgrid::smpi::Comm::f2c(*peer_comm), *remote_leader,
*tag, &tmp);
if(*ierr == MPI_SUCCESS) {
*comm_out = tmp->add_f();
void mpi_intercomm_merge_ (int* comm, int* high, int* comm_out, int* ierr) {
MPI_Comm tmp;
- *ierr = MPI_Intercomm_merge(Comm::f2c(*comm), *high, &tmp);
+ *ierr = MPI_Intercomm_merge(simgrid::smpi::Comm::f2c(*comm), *high, &tmp);
if(*ierr == MPI_SUCCESS) {
*comm_out = tmp->add_f();
}
}
void mpi_bsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* ierr) {
- *ierr = MPI_Bsend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm));
+ *ierr = MPI_Bsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
MPI_Request tmp;
- *ierr = MPI_Bsend_init(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Bsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*request = tmp->add_f();
}
void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
MPI_Request tmp;
- *ierr = MPI_Ibsend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Ibsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*request = tmp->add_f();
}
void mpi_comm_remote_group_ (int* comm, int* group, int* ierr) {
MPI_Group tmp;
- *ierr = MPI_Comm_remote_group(Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Comm_remote_group(simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*group = tmp->c2f();
}
}
void mpi_comm_remote_size_ (int* comm, int* size, int* ierr) {
- *ierr = MPI_Comm_remote_size(Comm::f2c(*comm), size);
+ *ierr = MPI_Comm_remote_size(simgrid::smpi::Comm::f2c(*comm), size);
}
void mpi_issend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
MPI_Request tmp;
- *ierr = MPI_Issend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Issend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*request = tmp->add_f();
}
}
void mpi_probe_ (int* source, int* tag, int* comm, MPI_Status* status, int* ierr) {
- *ierr = MPI_Probe(*source, *tag, Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
+ *ierr = MPI_Probe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status));
}
void mpi_attr_delete_ (int* comm, int* keyval, int* ierr) {
- *ierr = MPI_Attr_delete(Comm::f2c(*comm), *keyval);
+ *ierr = MPI_Attr_delete(simgrid::smpi::Comm::f2c(*comm), *keyval);
}
void mpi_attr_put_ (int* comm, int* keyval, void* attr_value, int* ierr) {
- *ierr = MPI_Attr_put(Comm::f2c(*comm), *keyval, attr_value);
+ *ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value);
}
void mpi_rsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) {
MPI_Request tmp;
- *ierr = MPI_Rsend_init(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Rsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*request = tmp->add_f();
}
}
void mpi_pack_ (void* inbuf, int* incount, int* type, void* outbuf, int* outcount, int* position, int* comm, int* ierr) {
- *ierr = MPI_Pack(inbuf, *incount, Datatype::f2c(*type), outbuf, *outcount, position, Comm::f2c(*comm));
+ *ierr = MPI_Pack(inbuf, *incount, simgrid::smpi::Datatype::f2c(*type), outbuf, *outcount, position, simgrid::smpi::Comm::f2c(*comm));
}
void mpi_get_elements_ (MPI_Status* status, int* datatype, int* elements, int* ierr) {
- *ierr = MPI_Get_elements(status, Datatype::f2c(*datatype), elements);
+ *ierr = MPI_Get_elements(status, simgrid::smpi::Datatype::f2c(*datatype), elements);
}
void mpi_dims_create_ (int* nnodes, int* ndims, int* dims, int* ierr) {
}
void mpi_iprobe_ (int* source, int* tag, int* comm, int* flag, MPI_Status* status, int* ierr) {
- *ierr = MPI_Iprobe(*source, *tag, Comm::f2c(*comm), flag, status);
+ *ierr = MPI_Iprobe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), flag, status);
}
void mpi_type_get_envelope_ ( int* datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner,
int* ierr){
- *ierr = MPI_Type_get_envelope( Datatype::f2c(*datatype), num_integers,
+ *ierr = MPI_Type_get_envelope( simgrid::smpi::Datatype::f2c(*datatype), num_integers,
num_addresses, num_datatypes, combiner);
}
void mpi_type_get_contents_ (int* datatype, int* max_integers, int* max_addresses, int* max_datatypes,
int* array_of_integers, MPI_Aint* array_of_addresses,
int* array_of_datatypes, int* ierr){
- *ierr = MPI_Type_get_contents(Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes,
+ *ierr = MPI_Type_get_contents(simgrid::smpi::Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes,
array_of_integers, array_of_addresses, reinterpret_cast<MPI_Datatype*>(array_of_datatypes));
}
MPI_Datatype tmp;
*ierr = MPI_Type_create_darray(*size, *rank, *ndims, array_of_gsizes,
array_of_distribs, array_of_dargs, array_of_psizes,
- *order, Datatype::f2c(*oldtype), &tmp) ;
+ *order, simgrid::smpi::Datatype::f2c(*oldtype), &tmp) ;
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_type_create_resized_ (int* oldtype,MPI_Aint* lb, MPI_Aint* extent, int*newtype, int* ierr){
MPI_Datatype tmp;
- *ierr = MPI_Type_create_resized(Datatype::f2c(*oldtype),*lb, *extent, &tmp);
+ *ierr = MPI_Type_create_resized(simgrid::smpi::Datatype::f2c(*oldtype),*lb, *extent, &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
int* order, int* oldtype, int*newtype, int* ierr){
MPI_Datatype tmp;
*ierr = MPI_Type_create_subarray(*ndims,array_of_sizes, array_of_subsizes, array_of_starts, *order,
- Datatype::f2c(*oldtype), &tmp);
+ simgrid::smpi::Datatype::f2c(*oldtype), &tmp);
if(*ierr == MPI_SUCCESS) {
*newtype = tmp->add_f();
}
void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* sendtypes, void *recvbuf, int *recvcnts,
int *rdispls, int* recvtypes, int* comm, int* ierr){
*ierr = MPI_Alltoallw( sendbuf, sendcnts, sdispls, reinterpret_cast<MPI_Datatype*>(sendtypes), recvbuf, recvcnts, rdispls,
- reinterpret_cast<MPI_Datatype*>(recvtypes), Comm::f2c(*comm));
+ reinterpret_cast<MPI_Datatype*>(recvtypes), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_exscan_ (void *sendbuf, void *recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr){
- *ierr = MPI_Exscan(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op), Comm::f2c(*comm));
+ *ierr = MPI_Exscan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm));
}
void mpi_comm_set_name_ (int* comm, char* name, int* ierr, int size){
char* tname = xbt_new(char, size+1);
strncpy(tname, name, size);
tname[size]='\0';
- *ierr = MPI_Comm_set_name (Comm::f2c(*comm), tname);
+ *ierr = MPI_Comm_set_name (simgrid::smpi::Comm::f2c(*comm), tname);
xbt_free(tname);
}
void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){
MPI_Comm tmp;
- *ierr = MPI_Comm_dup_with_info(Comm::f2c(*comm),Info::f2c(*info),&tmp);
+ *ierr = MPI_Comm_dup_with_info(simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info),&tmp);
if(*ierr == MPI_SUCCESS) {
*newcomm = tmp->add_f();
}
void mpi_comm_split_type_ (int* comm, int* split_type, int* key, int* info, int* newcomm, int* ierr){
MPI_Comm tmp;
- *ierr = MPI_Comm_split_type(Comm::f2c(*comm), *split_type, *key, Info::f2c(*info), &tmp);
+ *ierr = MPI_Comm_split_type(simgrid::smpi::Comm::f2c(*comm), *split_type, *key, simgrid::smpi::Info::f2c(*info), &tmp);
if(*ierr == MPI_SUCCESS) {
*newcomm = tmp->add_f();
}
}
void mpi_comm_set_info_ (int* comm, int* info, int* ierr){
- *ierr = MPI_Comm_set_info (Comm::f2c(*comm), Info::f2c(*info));
+ *ierr = MPI_Comm_set_info (simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info));
}
void mpi_comm_get_info_ (int* comm, int* info, int* ierr){
MPI_Info tmp;
- *ierr = MPI_Comm_get_info (Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Comm_get_info (simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr==MPI_SUCCESS){
*info = tmp->c2f();
}
}
void mpi_comm_call_errhandler_ (int* comm,int* errorcode, int* ierr){
- *ierr = MPI_Comm_call_errhandler(Comm::f2c(*comm), *errorcode);
+ *ierr = MPI_Comm_call_errhandler(simgrid::smpi::Comm::f2c(*comm), *errorcode);
}
void mpi_info_dup_ (int* info, int* newinfo, int* ierr){
MPI_Info tmp;
- *ierr = MPI_Info_dup(Info::f2c(*info), &tmp);
+ *ierr = MPI_Info_dup(simgrid::smpi::Info::f2c(*info), &tmp);
if(*ierr==MPI_SUCCESS){
*newinfo= tmp->add_f();
}
char* tkey = xbt_new(char, keylen+1);
strncpy(tkey, key, keylen);
tkey[keylen]='\0';
- *ierr = MPI_Info_get_valuelen( Info::f2c(*info), tkey, valuelen, flag);
+ *ierr = MPI_Info_get_valuelen( simgrid::smpi::Info::f2c(*info), tkey, valuelen, flag);
xbt_free(tkey);
}
char* tkey = xbt_new(char, keylen+1);
strncpy(tkey, key, keylen);
tkey[keylen]='\0';
- *ierr = MPI_Info_delete(Info::f2c(*info), tkey);
+ *ierr = MPI_Info_delete(simgrid::smpi::Info::f2c(*info), tkey);
xbt_free(tkey);
}
void mpi_info_get_nkeys_ ( int* info, int *nkeys, int* ierr){
- *ierr = MPI_Info_get_nkeys( Info::f2c(*info), nkeys);
+ *ierr = MPI_Info_get_nkeys( simgrid::smpi::Info::f2c(*info), nkeys);
}
void mpi_info_get_nthkey_ ( int* info, int* n, char *key, int* ierr, unsigned int keylen){
- *ierr = MPI_Info_get_nthkey( Info::f2c(*info), *n, key);
+ *ierr = MPI_Info_get_nthkey( simgrid::smpi::Info::f2c(*info), *n, key);
unsigned int i = 0;
for (i=strlen(key); i<keylen; i++)
key[i]=' ';
}
void mpi_request_get_status_ ( int* request, int *flag, MPI_Status* status, int* ierr){
- *ierr = MPI_Request_get_status( Request::f2c(*request), flag, status);
+ *ierr = MPI_Request_get_status( simgrid::smpi::Request::f2c(*request), flag, status);
}
void mpi_grequest_start_ ( void *query_fn, void *free_fn, void *cancel_fn, void *extra_state, int*request, int* ierr){
}
void mpi_grequest_complete_ ( int* request, int* ierr){
- *ierr = MPI_Grequest_complete( Request::f2c(*request));
+ *ierr = MPI_Grequest_complete( simgrid::smpi::Request::f2c(*request));
}
void mpi_status_set_cancelled_ (MPI_Status* status,int* flag, int* ierr){
}
void mpi_status_set_elements_ ( MPI_Status* status, int* datatype, int* count, int* ierr){
- *ierr = MPI_Status_set_elements( status, Datatype::f2c(*datatype), *count);
+ *ierr = MPI_Status_set_elements( status, simgrid::smpi::Datatype::f2c(*datatype), *count);
}
void mpi_comm_connect_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
MPI_Comm tmp;
- *ierr = MPI_Comm_connect( port_name, *reinterpret_cast<MPI_Info*>(info), *root, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Comm_connect( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*newcomm = tmp->add_f();
}
void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){
MPI_Comm tmp;
- *ierr = MPI_Comm_accept( port_name, *reinterpret_cast<MPI_Info*>(info), *root, Comm::f2c(*comm), &tmp);
+ *ierr = MPI_Comm_accept( port_name, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp);
if(*ierr == MPI_SUCCESS) {
*newcomm = tmp->add_f();
}
void mpi_comm_spawn_ ( char *command, char *argv, int* maxprocs, int* info, int* root, int* comm, int* intercomm,
int* array_of_errcodes, int* ierr){
MPI_Comm tmp;
- *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast<MPI_Info*>(info), *root, Comm::f2c(*comm), &tmp,
+ *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast<MPI_Info*>(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp,
array_of_errcodes);
if(*ierr == MPI_SUCCESS) {
*intercomm = tmp->add_f();
int* comm, int* intercomm, int* array_of_errcodes, int* ierr){
MPI_Comm tmp;
*ierr = MPI_Comm_spawn_multiple(* count, &array_of_commands, &array_of_argv, array_of_maxprocs,
- reinterpret_cast<MPI_Info*>(array_of_info), *root, Comm::f2c(*comm), &tmp, array_of_errcodes);
+ reinterpret_cast<MPI_Info*>(array_of_info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes);
if(*ierr == MPI_SUCCESS) {
*intercomm = tmp->add_f();
}
}
void mpi_file_delete_ ( char* filename, int* info, int* ierr){
- *ierr= MPI_File_delete(filename, Info::f2c(*info));
+ *ierr= MPI_File_delete(filename, simgrid::smpi::Info::f2c(*info));
}
void mpi_file_open_ ( int* comm, char* filename, int* amode, int* info, int* fh, int* ierr){
- *ierr= MPI_File_open(Comm::f2c(*comm), filename, *amode, Info::f2c(*info), reinterpret_cast<MPI_File*>(*fh));
+ *ierr= MPI_File_open(simgrid::smpi::Comm::f2c(*comm), filename, *amode, simgrid::smpi::Info::f2c(*info), reinterpret_cast<MPI_File*>(*fh));
}
void mpi_file_set_view_ ( int* fh, long long int* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr){
- *ierr= MPI_File_set_view(reinterpret_cast<MPI_File>(*fh) , reinterpret_cast<MPI_Offset>(*offset), Datatype::f2c(*etype), Datatype::f2c(*filetype), datarep, Info::f2c(*info));
+ *ierr= MPI_File_set_view(reinterpret_cast<MPI_File>(*fh) , reinterpret_cast<MPI_Offset>(*offset), simgrid::smpi::Datatype::f2c(*etype), simgrid::smpi::Datatype::f2c(*filetype), datarep, simgrid::smpi::Info::f2c(*info));
}
void mpi_file_read_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){
- *ierr= MPI_File_read(reinterpret_cast<MPI_File>(*fh), buf, *count, Datatype::f2c(*datatype), status);
+ *ierr= MPI_File_read(reinterpret_cast<MPI_File>(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status);
}
void mpi_file_write_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){
- *ierr= MPI_File_write(reinterpret_cast<MPI_File>(*fh), buf, *count, Datatype::f2c(*datatype), status);
+ *ierr= MPI_File_write(reinterpret_cast<MPI_File>(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status);
}
} // extern "C"
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <spawn.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
#include "mc/mc.h"
#include "private.h"
#include "private.hpp"
#include "simgrid/s4u/Mailbox.hpp"
+#include "smpi/smpi_shared_malloc.hpp"
#include "simgrid/sg_config.h"
#include "src/kernel/activity/SynchroComm.hpp"
#include "src/mc/mc_record.h"
#include "src/mc/mc_replay.h"
#include "src/msg/msg_private.h"
#include "src/simix/smx_private.h"
+#include "src/surf/surf_interface.hpp"
+#include "src/smpi/SmpiHost.hpp"
#include "surf/surf.h"
#include "xbt/replay.hpp"
#include <xbt/config.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <string>
+#include <utility>
#include <vector>
+#include <memory>
+
+#if HAVE_SENDFILE
+#include <sys/sendfile.h>
+#endif
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_kernel, smpi, "Logging specific to SMPI (kernel)");
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp> /* trim_right / trim_left */
+#ifndef RTLD_DEEPBIND
+/* RTLD_DEEPBIND is a bad idea of GNU ld that obviously does not exist on other platforms
+ * See https://www.akkadia.org/drepper/dsohowto.pdf
+ * and https://lists.freebsd.org/pipermail/freebsd-current/2016-March/060284.html
+*/
+#define RTLD_DEEPBIND 0
+#endif
+
+/* Mac OSX does not have any header file providing that definition so we have to duplicate it here. Bummers. */
+extern char** environ; /* we use it in posix_spawnp below */
+
#if HAVE_PAPI
#include "papi.h"
const char* papi_default_config_name = "default";
#endif
std::unordered_map<std::string, double> location2speedup;
-Process **process_data = nullptr;
+simgrid::smpi::Process **process_data = nullptr;
int process_count = 0;
int smpi_universe_size = 0;
int* index_to_process_data = nullptr;
return process_count;
}
-Process* smpi_process()
+simgrid::smpi::Process* smpi_process()
{
- simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data);
- return static_cast<Process*>(msgExt->data);
+ smx_actor_t me = SIMIX_process_self();
+ if (me == nullptr) // This happens sometimes (eg, when linking against NS3 because it pulls openMPI...)
+ return nullptr;
+ simgrid::MsgActorExt* msgExt = static_cast<simgrid::MsgActorExt*>(me->data);
+ return static_cast<simgrid::smpi::Process*>(msgExt->data);
}
-Process* smpi_process_remote(int index)
+simgrid::smpi::Process* smpi_process_remote(int index)
{
return process_data[index_to_process_data[index]];
}
}
void smpi_process_init(int *argc, char ***argv){
- Process::init(argc, argv);
+ simgrid::smpi::Process::init(argc, argv);
}
int smpi_process_index(){
smpi_comm_copy_data_callback = callback;
}
+static void print(std::vector<std::pair<size_t, size_t>> vec) {
+ std::fprintf(stderr, "{");
+ for (auto elt : vec) {
+ std::fprintf(stderr, "(0x%zx, 0x%zx),", elt.first, elt.second);
+ }
+ std::fprintf(stderr, "}\n");
+}
+static void memcpy_private(void* dest, const void* src, std::vector<std::pair<size_t, size_t>>& private_blocks)
+{
+ for(auto block : private_blocks) {
+ memcpy((uint8_t*)dest+block.first, (uint8_t*)src+block.first, block.second-block.first);
+ }
+}
+
+static void check_blocks(std::vector<std::pair<size_t, size_t>> &private_blocks, size_t buff_size) {
+ for(auto block : private_blocks) {
+ xbt_assert(block.first <= block.second && block.second <= buff_size, "Oops, bug in shared malloc.");
+ }
+}
+
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
{
+ simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
+ int src_shared = 0;
+ int dst_shared = 0;
+ size_t src_offset = 0;
+ size_t dst_offset = 0;
+ std::vector<std::pair<size_t, size_t>> src_private_blocks;
+ std::vector<std::pair<size_t, size_t>> dst_private_blocks;
XBT_DEBUG("Copy the data over");
+ if((src_shared=smpi_is_shared(buff, src_private_blocks, &src_offset))) {
+ XBT_DEBUG("Sender %p is shared. Let's ignore it.", buff);
+ src_private_blocks = shift_and_frame_private_blocks(src_private_blocks, src_offset, buff_size);
+ }
+ else {
+ src_private_blocks.clear();
+ src_private_blocks.push_back(std::make_pair(0, buff_size));
+ }
+ if((dst_shared=smpi_is_shared((char*)comm->dst_buff, dst_private_blocks, &dst_offset))) {
+ XBT_DEBUG("Receiver %p is shared. Let's ignore it.", (char*)comm->dst_buff);
+ dst_private_blocks = shift_and_frame_private_blocks(dst_private_blocks, dst_offset, buff_size);
+ }
+ else {
+ dst_private_blocks.clear();
+ dst_private_blocks.push_back(std::make_pair(0, buff_size));
+ }
+ check_blocks(src_private_blocks, buff_size);
+ check_blocks(dst_private_blocks, buff_size);
+ auto private_blocks = merge_private_blocks(src_private_blocks, dst_private_blocks);
+ check_blocks(private_blocks, buff_size);
void* tmpbuff=buff;
- simgrid::kernel::activity::Comm *comm = dynamic_cast<simgrid::kernel::activity::Comm*>(synchro);
-
- if((smpi_privatize_global_variables) && (static_cast<char*>(buff) >= smpi_start_data_exe)
+ if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && (static_cast<char*>(buff) >= smpi_start_data_exe)
&& (static_cast<char*>(buff) < smpi_start_data_exe + smpi_size_data_exe )
){
XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
smpi_switch_data_segment(
- (static_cast<Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
+ (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->src_proc->data)->data))->index()));
tmpbuff = static_cast<void*>(xbt_malloc(buff_size));
- memcpy(tmpbuff, buff, buff_size);
+ memcpy_private(tmpbuff, buff, private_blocks);
}
- if((smpi_privatize_global_variables) && ((char*)comm->dst_buff >= smpi_start_data_exe)
+ if((smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP) && ((char*)comm->dst_buff >= smpi_start_data_exe)
&& ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){
XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
smpi_switch_data_segment(
- (static_cast<Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
+ (static_cast<simgrid::smpi::Process*>((static_cast<simgrid::MsgActorExt*>(comm->dst_proc->data)->data))->index()));
}
+ XBT_DEBUG("Copying %zu bytes from %p to %p", buff_size, tmpbuff,comm->dst_buff);
+ memcpy_private(comm->dst_buff, tmpbuff, private_blocks);
- memcpy(comm->dst_buff, tmpbuff, buff_size);
if (comm->detached) {
// if this is a detached send, the source buffer was duplicated by SMPI
// sender to make the original buffer available to the application ASAP
//xbt_free(comm->comm.src_data);// inside SMPI the request is kept inside the user data and should be free
comm->src_buff = nullptr;
}
-
if(tmpbuff!=buff)xbt_free(tmpbuff);
+
}
void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size)
void smpi_global_init()
{
- int i;
MPI_Group group;
- int smpirun=0;
if (!MC_is_active()) {
global_timer = xbt_os_timer_new();
}
}
#endif
+
+ int smpirun = 0;
+ msg_bar_t finalization_barrier = nullptr;
if (process_count == 0){
process_count = SIMIX_process_count();
smpirun=1;
+ finalization_barrier = MSG_barrier_init(process_count);
}
smpi_universe_size = process_count;
- process_data = new Process*[process_count];
- for (i = 0; i < process_count; i++) {
- process_data[i] = new Process(i);
+ process_data = new simgrid::smpi::Process*[process_count];
+ for (int i = 0; i < process_count; i++) {
+ process_data[i] = new simgrid::smpi::Process(i, finalization_barrier);
}
//if the process was launched through smpirun script we generate a global mpi_comm_world
//if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance
- if(smpirun){
- group = new Group(process_count);
- MPI_COMM_WORLD = new Comm(group, nullptr);
+ if (smpirun) {
+ group = new simgrid::smpi::Group(process_count);
+ MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr);
MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast<void *>(process_count));
- msg_bar_t bar = MSG_barrier_init(process_count);
- for (i = 0; i < process_count; i++) {
+ for (int i = 0; i < process_count; i++)
group->set_mapping(i, i);
- process_data[i]->set_finalization_barrier(bar);
- }
}
}
int count = smpi_process_count();
smpi_bench_destroy();
+ smpi_shared_destroy();
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
delete MPI_COMM_WORLD->group();
MSG_barrier_destroy(process_data[0]->finalization_barrier());
}
for (int i = 0; i < count; i++) {
if(process_data[i]->comm_self()!=MPI_COMM_NULL){
- Comm::destroy(process_data[i]->comm_self());
+ simgrid::smpi::Comm::destroy(process_data[i]->comm_self());
}
if(process_data[i]->comm_intra()!=MPI_COMM_NULL){
- Comm::destroy(process_data[i]->comm_intra());
+ simgrid::smpi::Comm::destroy(process_data[i]->comm_intra());
}
xbt_os_timer_free(process_data[i]->timer());
xbt_mutex_destroy(process_data[i]->mailboxes_mutex());
if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){
MPI_COMM_WORLD->cleanup_smp();
- MPI_COMM_WORLD->cleanup_attr<Comm>();
- if(Colls::smpi_coll_cleanup_callback!=nullptr)
- Colls::smpi_coll_cleanup_callback();
+ MPI_COMM_WORLD->cleanup_attr<simgrid::smpi::Comm>();
+ if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr)
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback();
delete MPI_COMM_WORLD;
}
}
xbt_free(index_to_process_data);
- if(smpi_privatize_global_variables)
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
smpi_destroy_global_memory_segments();
smpi_free_static();
}
extern "C" {
-#ifndef WIN32
-
-void __attribute__ ((weak)) user_main_()
-{
- xbt_die("Should not be in this smpi_simulated_main");
-}
-
-int __attribute__ ((weak)) smpi_simulated_main_(int argc, char **argv)
-{
- Process::init(&argc, &argv);
- user_main_();
- return 0;
-}
-
-inline static int smpi_main_wrapper(int argc, char **argv){
- int ret = smpi_simulated_main_(argc,argv);
- if(ret !=0){
- XBT_WARN("SMPI process did not return 0. Return value : %d", ret);
- smpi_process()->set_return_value(ret);
- }
- return 0;
-}
-
-int __attribute__ ((weak)) main(int argc, char **argv)
-{
- return smpi_main(smpi_main_wrapper, argc, argv);
-}
-
-#endif
-
static void smpi_init_logs(){
/* Connect log categories. See xbt/log.c */
XBT_LOG_CONNECT(smpi_request);
XBT_LOG_CONNECT(smpi_replay);
XBT_LOG_CONNECT(smpi_rma);
+ XBT_LOG_CONNECT(smpi_shared);
XBT_LOG_CONNECT(smpi_utils);
}
}
static void smpi_init_options(){
-
- Colls::set_collectives();
- Colls::smpi_coll_cleanup_callback=nullptr;
+ //return if already called
+ if (smpi_cpu_threshold > -1)
+ return;
+ simgrid::smpi::Colls::set_collectives();
+ simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr;
smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold");
smpi_host_speed = xbt_cfg_get_double("smpi/host-speed");
- smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables");
+ const char* smpi_privatize_option = xbt_cfg_get_string("smpi/privatization");
+ if (std::strcmp(smpi_privatize_option, "no") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+ else if (std::strcmp(smpi_privatize_option, "yes") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+ else if (std::strcmp(smpi_privatize_option, "mmap") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_MMAP;
+ else if (std::strcmp(smpi_privatize_option, "dlopen") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_DLOPEN;
+
+ // Some compatibility stuff:
+ else if (std::strcmp(smpi_privatize_option, "1") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_DEFAULT;
+ else if (std::strcmp(smpi_privatize_option, "0") == 0)
+ smpi_privatize_global_variables = SMPI_PRIVATIZE_NONE;
+
+ else
+ xbt_die("Invalid value for smpi/privatization: '%s'", smpi_privatize_option);
+
if (smpi_cpu_threshold < 0)
smpi_cpu_threshold = DBL_MAX;
}
}
-int smpi_main(int (*realmain) (int argc, char *argv[]), int argc, char *argv[])
+typedef std::function<int(int argc, char *argv[])> smpi_entry_point_type;
+typedef int (* smpi_c_entry_point_type)(int argc, char **argv);
+typedef void (*smpi_fortran_entry_point_type)();
+
+static int smpi_run_entry_point(smpi_entry_point_type entry_point, std::vector<std::string> args)
+{
+ const int argc = args.size();
+ std::unique_ptr<char*[]> argv(new char*[argc + 1]);
+ for (int i = 0; i != argc; ++i)
+ argv[i] = args[i].empty() ? const_cast<char*>(""): &args[i].front();
+ argv[argc] = nullptr;
+
+ int res = entry_point(argc, argv.get());
+ if (res != 0){
+ XBT_WARN("SMPI process did not return 0. Return value : %d", res);
+ smpi_process()->set_return_value(res);
+ }
+ return 0;
+}
+
+// TODO, remove the number of functions involved here
+static smpi_entry_point_type smpi_resolve_function(void* handle)
+{
+ smpi_fortran_entry_point_type entry_point_fortran = (smpi_fortran_entry_point_type)dlsym(handle, "user_main_");
+ if (entry_point_fortran != nullptr) {
+ return [entry_point_fortran](int argc, char** argv) {
+ smpi_process_init(&argc, &argv);
+ entry_point_fortran();
+ return 0;
+ };
+ }
+
+ smpi_c_entry_point_type entry_point = (smpi_c_entry_point_type)dlsym(handle, "main");
+ if (entry_point != nullptr) {
+ return entry_point;
+ }
+
+ return smpi_entry_point_type();
+}
+
+int smpi_main(const char* executable, int argc, char *argv[])
{
srand(SMPI_RAND_SEED);
* configuration tools */
return 0;
}
- smpi_init_logs();
TRACE_global_init(&argc, argv);
- TRACE_add_start_function(TRACE_smpi_alloc);
- TRACE_add_end_function(TRACE_smpi_release);
SIMIX_global_init(&argc, argv);
MSG_init(&argc,argv);
SMPI_switch_data_segment = &smpi_switch_data_segment;
- smpi_init_options();
+ simgrid::s4u::Host::onCreation.connect([](simgrid::s4u::Host& host) {
+ host.extension_set(new simgrid::smpi::SmpiHost(&host));
+ });
// parse the platform file: get the host list
SIMIX_create_environment(argv[1]);
- SIMIX_comm_set_copy_data_callback(smpi_comm_copy_data_callback);
- SIMIX_function_register_default(realmain);
- SIMIX_launch_application(argv[2]);
+ SIMIX_comm_set_copy_data_callback(smpi_comm_copy_buffer_callback);
- smpi_global_init();
+ static std::size_t rank = 0;
- smpi_check_options();
+ smpi_init_options();
- if(smpi_privatize_global_variables)
- smpi_initialize_global_memory_segments();
+ if (smpi_privatize_global_variables == SMPI_PRIVATIZE_DLOPEN) {
+
+ std::string executable_copy = executable;
+
+ // Prepare the copy of the binary (get its size)
+ struct stat fdin_stat;
+ stat(executable_copy.c_str(), &fdin_stat);
+ off_t fdin_size = fdin_stat.st_size;
+
+ simix_global->default_function = [executable_copy, fdin_size](std::vector<std::string> args) {
+ return std::function<void()>([executable_copy, fdin_size, args] {
+
+ // Copy the dynamic library:
+ std::string target_executable = executable_copy
+ + "_" + std::to_string(getpid())
+ + "_" + std::to_string(rank++) + ".so";
+
+ int fdin = open(executable_copy.c_str(), O_RDONLY);
+ xbt_assert(fdin >= 0, "Cannot read from %s", executable_copy.c_str());
+ int fdout = open(target_executable.c_str(), O_CREAT | O_RDWR, S_IRWXU);
+ xbt_assert(fdout >= 0, "Cannot write into %s", target_executable.c_str());
+
+#if HAVE_SENDFILE
+ ssize_t sent_size = sendfile(fdout, fdin, NULL, fdin_size);
+ xbt_assert(sent_size == fdin_size,
+ "Error while copying %s: only %zd bytes copied instead of %ld (errno: %d -- %s)",
+ target_executable.c_str(), sent_size, fdin_size, errno, strerror(errno));
+#else
+ XBT_VERB("Copy %d bytes into %s", static_cast<int>(fdin_size), target_executable.c_str());
+ const int bufsize = 1024 * 1024 * 4;
+ char buf[bufsize];
+ while (int got = read(fdin, buf, bufsize)) {
+ if (got == -1) {
+ xbt_assert(errno == EINTR, "Cannot read from %s", executable_copy.c_str());
+ } else {
+ char* p = buf;
+ int todo = got;
+ while (int done = write(fdout, p, todo)) {
+ if (done == -1) {
+ xbt_assert(errno == EINTR, "Cannot write into %s", target_executable.c_str());
+ } else {
+ p += done;
+ todo -= done;
+ }
+ }
+ }
+ }
+#endif
+ close(fdin);
+ close(fdout);
+
+ // Load the copy and resolve the entry point:
+ void* handle = dlopen(target_executable.c_str(), RTLD_LAZY | RTLD_LOCAL | RTLD_DEEPBIND);
+ if (xbt_cfg_get_boolean("smpi/keep-temps") == false)
+ unlink(target_executable.c_str());
+ if (handle == nullptr)
+ xbt_die("dlopen failed: %s (errno: %d -- %s)", dlerror(), errno, strerror(errno));
+ smpi_entry_point_type entry_point = smpi_resolve_function(handle);
+ if (!entry_point)
+ xbt_die("Could not resolve entry point");
+
+ smpi_run_entry_point(entry_point, args);
+ });
+ };
+
+ }
+ else {
+
+ // Load the dynamic library and resolve the entry point:
+ void* handle = dlopen(executable, RTLD_LAZY | RTLD_LOCAL | RTLD_DEEPBIND);
+ if (handle == nullptr)
+ xbt_die("dlopen failed for %s: %s (errno: %d -- %s)", executable, dlerror(), errno, strerror(errno));
+ smpi_entry_point_type entry_point = smpi_resolve_function(handle);
+ if (!entry_point)
+ xbt_die("main not found in %s", executable);
+ // TODO, register the executable for SMPI privatization
+
+ // Execute the same entry point for each simulated process:
+ simix_global->default_function = [entry_point](std::vector<std::string> args) {
+ return std::function<void()>([entry_point, args] {
+ smpi_run_entry_point(entry_point, args);
+ });
+ };
+
+ }
+
+ SIMIX_launch_application(argv[2]);
+
+ SMPI_init();
/* Clean IO before the run */
fflush(stdout);
return ret;
}
-// This function can be called from extern file, to initialize logs, options, and processes of smpi
-// without the need of smpirun
+// Called either directly from the user code, or from the code called by smpirun
void SMPI_init(){
smpi_init_logs();
smpi_init_options();
smpi_global_init();
smpi_check_options();
- if (TRACE_is_enabled() && TRACE_is_configured())
- TRACE_smpi_alloc();
- if(smpi_privatize_global_variables)
+ TRACE_smpi_alloc();
+ simgrid::surf::surfExitCallbacks.connect(TRACE_smpi_release);
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP)
smpi_initialize_global_memory_segments();
}
-/* Copyright (c) 2010, 2013-2015. The SimGrid Team.
+/* Copyright (c) 2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "private.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_group, smpi, "Logging specific to SMPI (group)");
- Group mpi_MPI_GROUP_EMPTY;
+
+simgrid::smpi::Group mpi_MPI_GROUP_EMPTY;
MPI_Group MPI_GROUP_EMPTY=&mpi_MPI_GROUP_EMPTY;
namespace simgrid{
-/* Copyright (c) 2010, 2013-2015. The SimGrid Team.
+/* Copyright (c) 2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2007-2015. The SimGrid Team.
+/* Copyright (c) 2007-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include <unordered_map>
#include <xbt/ex.hpp>
-namespace simgrid{
-namespace smpi{
-
-
typedef struct smpi_delete_fn{
MPI_Comm_delete_attr_function *comm_delete_fn;
MPI_Type_delete_attr_function *type_delete_fn;
smpi_delete_fn delete_fn;
int refcount;
} s_smpi_mpi_key_elem_t;
+
typedef struct s_smpi_key_elem *smpi_key_elem;
+namespace simgrid{
+namespace smpi{
+
class Keyval{
private:
std::unordered_map<int, void*> attributes_;
--- /dev/null
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <smpi/smpi.h>
+
+int main(int argc, char **argv)
+{
+ if (argc < 2) {
+ fprintf(stderr, "Usage: smpi_main <program to launch>\n");
+ exit(1);
+ }
+ return smpi_main(argv[1], argc - 1, argv + 1);
+}
#include <stdlib.h>
#include <sys/types.h>
+#include <string.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <errno.h>
#ifndef WIN32
#include <sys/mman.h>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_memory, smpi, "Memory layout support for SMPI");
+int smpi_loaded_page = -1;
+char* smpi_start_data_exe = nullptr;
+int smpi_size_data_exe = 0;
+int smpi_privatize_global_variables;
+
static const int PROT_RWX = (PROT_READ | PROT_WRITE | PROT_EXEC);
static const int PROT_RW = (PROT_READ | PROT_WRITE );
XBT_ATTRIB_UNUSED static const int PROT_RX = (PROT_READ | PROT_EXEC );
xbt_die("Did not find my data segment.");
}
#endif
+
+
+/** Map a given SMPI privatization segment (make a SMPI process active) */
+void smpi_switch_data_segment(int dest) {
+ if (smpi_loaded_page == dest)//no need to switch, we've already loaded the one we want
+ return;
+
+ // So the job:
+ smpi_really_switch_data_segment(dest);
+}
+
+/** Map a given SMPI privatization segment (make a SMPI process active) even if SMPI thinks it is already active
+ *
+ * When doing a state restoration, the state of the restored variables might not be consistent with the state of the
+ * virtual memory. In this case, we to change the data segment.
+ */
+void smpi_really_switch_data_segment(int dest)
+{
+ if(smpi_size_data_exe == 0)//no need to switch
+ return;
+
+#if HAVE_PRIVATIZATION
+ if(smpi_loaded_page==-1){//initial switch, do the copy from the real page here
+ for (int i=0; i< smpi_process_count(); i++){
+ memcpy(smpi_privatisation_regions[i].address, TOPAGE(smpi_start_data_exe), smpi_size_data_exe);
+ }
+ }
+
+ // FIXME, cross-process support (mmap across process when necessary)
+ int current = smpi_privatisation_regions[dest].file_descriptor;
+ XBT_DEBUG("Switching data frame to the one of process %d", dest);
+ void* tmp =
+ mmap(TOPAGE(smpi_start_data_exe), smpi_size_data_exe, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, current, 0);
+ if (tmp != TOPAGE(smpi_start_data_exe))
+ xbt_die("Couldn't map the new region (errno %d): %s", errno, strerror(errno));
+ smpi_loaded_page = dest;
+#endif
+}
+
+int smpi_is_privatisation_file(char* file)
+{
+ return strncmp("/dev/shm/my-buffer-", file, std::strlen("/dev/shm/my-buffer-")) == 0;
+}
+
+void smpi_initialize_global_memory_segments()
+{
+
+#if !HAVE_PRIVATIZATION
+ smpi_privatize_global_variables=false;
+ xbt_die("You are trying to use privatization on a system that does not support it. Don't.");
+ return;
+#else
+
+ smpi_get_executable_global_size();
+
+ XBT_DEBUG ("bss+data segment found : size %d starting at %p", smpi_size_data_exe, smpi_start_data_exe );
+
+ if (smpi_size_data_exe == 0){//no need to switch
+ smpi_privatize_global_variables=false;
+ return;
+ }
+
+ smpi_privatisation_regions = static_cast<smpi_privatisation_region_t>(
+ xbt_malloc(smpi_process_count() * sizeof(struct s_smpi_privatisation_region)));
+
+ for (int i=0; i< smpi_process_count(); i++){
+ // create SIMIX_process_count() mappings of this size with the same data inside
+ int file_descriptor;
+ void* address = nullptr;
+ char path[24];
+ int status;
+
+ do {
+ snprintf(path, sizeof(path), "/smpi-buffer-%06x", rand() % 0xffffff);
+ file_descriptor = shm_open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+ } while (file_descriptor == -1 && errno == EEXIST);
+ if (file_descriptor < 0) {
+ if (errno == EMFILE) {
+ xbt_die("Impossible to create temporary file for memory mapping: %s\n\
+The open() system call failed with the EMFILE error code (too many files). \n\n\
+This means that you reached the system limits concerning the amount of files per process. \
+This is not a surprise if you are trying to virtualize many processes on top of SMPI. \
+Don't panic -- you should simply increase your system limits and try again. \n\n\
+First, check what your limits are:\n\
+ cat /proc/sys/fs/file-max # Gives you the system-wide limit\n\
+ ulimit -Hn # Gives you the per process hard limit\n\
+ ulimit -Sn # Gives you the per process soft limit\n\
+ cat /proc/self/limits # Displays any per-process limitation (including the one given above)\n\n\
+If one of these values is less than the amount of MPI processes that you try to run, then you got the explanation of this error. \
+Ask the Internet about tutorials on how to increase the files limit such as: https://rtcamp.com/tutorials/linux/increase-open-files-limit/",
+ strerror(errno));
+ }
+ xbt_die("Impossible to create temporary file for memory mapping: %s", strerror(errno));
+ }
+
+ status = ftruncate(file_descriptor, smpi_size_data_exe);
+ if (status)
+ xbt_die("Impossible to set the size of the temporary file for memory mapping");
+
+ /* Ask for a free region */
+ address = mmap(nullptr, smpi_size_data_exe, PROT_READ | PROT_WRITE, MAP_SHARED, file_descriptor, 0);
+ if (address == MAP_FAILED)
+ xbt_die("Couldn't find a free region for memory mapping");
+
+ status = shm_unlink(path);
+ if (status)
+ xbt_die("Impossible to unlink temporary file for memory mapping");
+
+ // initialize the values
+ memcpy(address, TOPAGE(smpi_start_data_exe), smpi_size_data_exe);
+
+ // store the address of the mapping for further switches
+ smpi_privatisation_regions[i].file_descriptor = file_descriptor;
+ smpi_privatisation_regions[i].address = address;
+ }
+#endif
+}
+
+void smpi_destroy_global_memory_segments(){
+ if (smpi_size_data_exe == 0)//no need to switch
+ return;
+#if HAVE_PRIVATIZATION
+ for (int i=0; i< smpi_process_count(); i++) {
+ if (munmap(smpi_privatisation_regions[i].address, smpi_size_data_exe) < 0)
+ XBT_WARN("Unmapping of fd %d failed: %s", smpi_privatisation_regions[i].file_descriptor, strerror(errno));
+ close(smpi_privatisation_regions[i].file_descriptor);
+ }
+ xbt_free(smpi_privatisation_regions);
+#endif
+}
+
-/* Copyright ,(c) 2007-2014. The SimGrid Team.
- * All rights reserved.*/
+/* Copyright ,(c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license ,(GNU LGPL) which comes with this package. */
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_mpi, smpi, "Logging specific to SMPI ,(mpi)");
-#define NOT_YET_IMPLEMENTED { \
-XBT_WARN("Not yet implemented : %s. Please contact the Simgrid team if support is needed", __FUNCTION__); \
-return MPI_SUCCESS; \
-}
+#define NOT_YET_IMPLEMENTED \
+ { \
+ XBT_WARN("Not yet implemented : %s. Please contact the SimGrid team if support is needed", __FUNCTION__); \
+ return MPI_SUCCESS; \
+ }
#define WRAPPED_PMPI_CALL(type,name,args,args2) \
type name args { \
WRAPPED_PMPI_CALL(int,MPI_Comm_size,(MPI_Comm comm, int *size),(comm, size))
WRAPPED_PMPI_CALL(int,MPI_Comm_split,(MPI_Comm comm, int color, int key, MPI_Comm* comm_out),(comm, color, key, comm_out))
WRAPPED_PMPI_CALL(int,MPI_Comm_create_group,(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm* comm_out),(comm, group, tag, comm_out))
+WRAPPED_PMPI_CALL(int,MPI_Compare_and_swap,(void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Win win), (origin_addr, compare_addr, result_addr, datatype, target_rank, target_disp, win));
WRAPPED_PMPI_CALL(int,MPI_Dims_create,(int nnodes, int ndims, int* dims) ,(nnodes, ndims, dims))
WRAPPED_PMPI_CALL(int,MPI_Error_class,(int errorcode, int* errorclass) ,(errorcode, errorclass))
WRAPPED_PMPI_CALL(int,MPI_Exscan,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, comm))
WRAPPED_PMPI_CALL(int,MPI_Get_processor_name,(char *name, int *resultlen),(name, resultlen))
WRAPPED_PMPI_CALL(int,MPI_Get_version ,(int *version,int *subversion),(version,subversion))
WRAPPED_PMPI_CALL(int,MPI_Get,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win),(origin_addr,origin_count, origin_datatype,target_rank, target_disp, target_count,target_datatype,win))
+WRAPPED_PMPI_CALL(int,MPI_Get_accumulate, (void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win),(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win))
+WRAPPED_PMPI_CALL(int,MPI_Fetch_and_op, (void *origin_addr, void *result_addr, MPI_Datatype datatype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win),(origin_addr, result_addr, datatype, target_rank, target_disp, op, win))
WRAPPED_PMPI_CALL(int,MPI_Group_compare,(MPI_Group group1, MPI_Group group2, int *result),(group1, group2, result))
WRAPPED_PMPI_CALL(int,MPI_Group_difference,(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup),(group1, group2, newgroup))
WRAPPED_PMPI_CALL(int,MPI_Group_excl,(MPI_Group group, int n, int *ranks, MPI_Group * newgroup),(group, n, ranks, newgroup))
WRAPPED_PMPI_CALL(int,MPI_Keyval_free,(int* keyval) ,(keyval))
WRAPPED_PMPI_CALL(int,MPI_Op_create,(MPI_User_function * function, int commute, MPI_Op * op),(function, commute, op))
WRAPPED_PMPI_CALL(int,MPI_Op_free,(MPI_Op * op),(op))
+WRAPPED_PMPI_CALL(int,MPI_Op_commutative,(MPI_Op op, int *commute), (op, commute))
WRAPPED_PMPI_CALL(int,MPI_Pack_size,(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) ,(incount, datatype, comm, size))
WRAPPED_PMPI_CALL(int,MPI_Pack,(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) ,(inbuf, incount, type, outbuf, outcount, position, comm))
WRAPPED_PMPI_CALL(int,MPI_Probe,(int source, int tag, MPI_Comm comm, MPI_Status* status) ,(source, tag, comm, status))
WRAPPED_PMPI_CALL(int,MPI_Put,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win),(origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype, win))
WRAPPED_PMPI_CALL(int,MPI_Query_thread,(int *provided),(provided))
+WRAPPED_PMPI_CALL(int,MPI_Raccumulate,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request),( origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype,op, win, request))
WRAPPED_PMPI_CALL(int,MPI_Recv_init,(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request),(buf, count, datatype, src, tag, comm, request))
WRAPPED_PMPI_CALL(int,MPI_Recv,(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status * status),(buf, count, datatype, src, tag, comm, status))
WRAPPED_PMPI_CALL(int,MPI_Reduce_local,(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op),(inbuf, inoutbuf, count, datatype, op))
WRAPPED_PMPI_CALL(int,MPI_Reduce_scatter,(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, recvcounts, datatype, op, comm))
WRAPPED_PMPI_CALL(int,MPI_Reduce,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, root, comm))
WRAPPED_PMPI_CALL(int,MPI_Request_free,(MPI_Request * request),(request))
+WRAPPED_PMPI_CALL(int,MPI_Rget,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request),(origin_addr,origin_count, origin_datatype,target_rank, target_disp, target_count,target_datatype,win, request))
+WRAPPED_PMPI_CALL(int,MPI_Rget_accumulate, (void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr, int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request),(origin_addr, origin_count, origin_datatype, result_addr, result_count, result_datatype, target_rank, target_disp, target_count, target_datatype, op, win, request))
+WRAPPED_PMPI_CALL(int,MPI_Rput,( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request),(origin_addr,origin_count, origin_datatype,target_rank,target_disp, target_count,target_datatype, win, request))
WRAPPED_PMPI_CALL(int,MPI_Scan,(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm),(sendbuf, recvbuf, count, datatype, op, comm))
WRAPPED_PMPI_CALL(int,MPI_Scatter,(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,int root, MPI_Comm comm),(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm))
WRAPPED_PMPI_CALL(int,MPI_Scatterv,(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype sendtype, void *recvbuf, int recvcount,MPI_Datatype recvtype, int root, MPI_Comm comm),(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm))
WRAPPED_PMPI_CALL(int,MPI_Waitsome,(int incount, MPI_Request requests[], int *outcount, int *indices, MPI_Status status[]),(incount, requests, outcount, indices, status))
WRAPPED_PMPI_CALL(int,MPI_Win_complete,(MPI_Win win),(win))
WRAPPED_PMPI_CALL(int,MPI_Win_create,( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win),( base, size, disp_unit, info, comm,win))
+WRAPPED_PMPI_CALL(int,MPI_Win_allocate,(MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win),(size, disp_unit, info, comm, base, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_attach,(MPI_Win win, void *base, MPI_Aint size),(win, base, size))
+WRAPPED_PMPI_CALL(int,MPI_Win_detach,(MPI_Win win, void *base),(win, base))
+WRAPPED_PMPI_CALL(int,MPI_Win_create_dynamic,( MPI_Info info, MPI_Comm comm, MPI_Win *win),(info, comm,win))
WRAPPED_PMPI_CALL(int,MPI_Win_fence,( int assert,MPI_Win win),( assert, win))
WRAPPED_PMPI_CALL(int,MPI_Win_free,( MPI_Win* win),(win))
WRAPPED_PMPI_CALL(int,MPI_Win_get_group,(MPI_Win win, MPI_Group * group),(win, group))
WRAPPED_PMPI_CALL(int,MPI_Win_get_name,(MPI_Win win, char * name, int* len),(win,name,len))
+WRAPPED_PMPI_CALL(int,MPI_Win_get_info,(MPI_Win win, MPI_Info * info),(win,info))
WRAPPED_PMPI_CALL(int,MPI_Win_post,(MPI_Group group, int assert, MPI_Win win),(group, assert, win))
WRAPPED_PMPI_CALL(int,MPI_Win_set_name,(MPI_Win win, char * name),(win, name))
+WRAPPED_PMPI_CALL(int,MPI_Win_set_info,(MPI_Win win, MPI_Info info),(win,info))
WRAPPED_PMPI_CALL(int,MPI_Win_start,(MPI_Group group, int assert, MPI_Win win),(group, assert, win))
WRAPPED_PMPI_CALL(int,MPI_Win_wait,(MPI_Win win),(win))
WRAPPED_PMPI_CALL(int,MPI_Win_lock,(int lock_type, int rank, int assert, MPI_Win win) ,(lock_type, rank, assert, win))
WRAPPED_PMPI_CALL(int,MPI_Win_unlock,(int rank, MPI_Win win),(rank, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_lock_all,(int assert, MPI_Win win) ,(assert, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_unlock_all,(MPI_Win win),(win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush,(int rank, MPI_Win win),(rank, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush_local,(int rank, MPI_Win win),(rank, win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush_all,(MPI_Win win),(win))
+WRAPPED_PMPI_CALL(int,MPI_Win_flush_local_all,(MPI_Win win),(win))
WRAPPED_PMPI_CALL(int,MPI_Win_get_attr, (MPI_Win type, int type_keyval, void *attribute_val, int* flag), (type, type_keyval, attribute_val, flag))
WRAPPED_PMPI_CALL(int,MPI_Win_set_attr, (MPI_Win type, int type_keyval, void *att), (type, type_keyval, att))
WRAPPED_PMPI_CALL(int,MPI_Win_delete_attr, (MPI_Win type, int comm_keyval), (type, comm_keyval))
#define BAND_OP(a, b) (b) &= (a)
#define BOR_OP(a, b) (b) |= (a)
#define BXOR_OP(a, b) (b) ^= (a)
-#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : (a)
-#define MINLOC_OP(a, b) (b) = (a.value) < (b.value) ? (a) : (b)
+#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (a))
+#define MINLOC_OP(a, b) (b) = (a.value) < (b.value) ? (a) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (b))
#define APPLY_FUNC(a, b, length, type, func) \
{ \
memcpy(b, a, *length * (*datatype)->size());
}
+static void no_func(void *a, void *b, int *length, MPI_Datatype * datatype)
+{
+ /* obviously a no-op */
+}
+
#define CREATE_MPI_OP(name, func) \
static SMPI_Op mpi_##name (&(func) /* func */, true ); \
MPI_Op name = &mpi_##name;
CREATE_MPI_OP(MPI_MAXLOC, maxloc_func);
CREATE_MPI_OP(MPI_MINLOC, minloc_func);
CREATE_MPI_OP(MPI_REPLACE, replace_func);
+CREATE_MPI_OP(MPI_NO_OP, no_func);
namespace simgrid{
namespace smpi{
void Op::apply(void *invec, void *inoutvec, int *len, MPI_Datatype datatype)
{
- if(smpi_privatize_global_variables){//we need to switch as the called function may silently touch global variables
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){//we need to switch as the called function may silently touch global variables
XBT_DEBUG("Applying operation, switch to the right data frame ");
smpi_switch_data_segment(smpi_process()->index());
}
- if(!smpi_process()->replaying()){
+ if(!smpi_process()->replaying() && *len > 0){
if(! is_fortran_op_)
this->func_(invec, inoutvec, len, &datatype);
else{
+ XBT_DEBUG("Applying operation of length %d from %p and from/to %p", *len, invec, inoutvec);
int tmp = datatype->c2f();
/* Unfortunately, the C and Fortran version of the MPI standard do not agree on the type here,
thus the reinterpret_cast. */
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "private.h"
int PMPI_Init(int *argc, char ***argv)
{
- // PMPI_Init is call only one time by only by SMPI process
+ xbt_assert(simgrid::s4u::Engine::isInitialized(),
+ "Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
+ // PMPI_Init is called only once per SMPI process
int already_init;
MPI_Initialized(&already_init);
if(already_init == 0){
- Process::init(argc, argv);
+ simgrid::smpi::Process::init(argc, argv);
smpi_process()->mark_as_initialized();
int rank = smpi_process()->index();
TRACE_smpi_init(rank);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
TRACE_smpi_finalize(smpi_process()->index());
- smpi_process()->destroy();
return MPI_SUCCESS;
}
int PMPI_Get_library_version (char *version,int *len){
smpi_bench_end();
- snprintf(version,MPI_MAX_LIBRARY_VERSION_STRING,"SMPI Version %d.%d. Copyright The Simgrid Team 2007-2015",
+ snprintf(version, MPI_MAX_LIBRARY_VERSION_STRING, "SMPI Version %d.%d. Copyright The Simgrid Team 2007-2017",
SIMGRID_VERSION_MAJOR, SIMGRID_VERSION_MINOR);
*len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? MPI_MAX_LIBRARY_VERSION_STRING : strlen(version);
smpi_bench_begin();
int PMPI_Abort(MPI_Comm comm, int errorcode)
{
smpi_bench_end();
- smpi_process()->destroy();
// FIXME: should kill all processes in comm instead
simcall_process_kill(SIMIX_process_self());
return MPI_SUCCESS;
if (*datatype == MPI_DATATYPE_NULL) {
return MPI_ERR_ARG;
} else {
- Datatype::unref(*datatype);
+ simgrid::smpi::Datatype::unref(*datatype);
return MPI_SUCCESS;
}
}
if (datatype == MPI_DATATYPE_NULL) {
retval=MPI_ERR_TYPE;
} else {
- *newtype = new Datatype(datatype, &retval);
+ *newtype = new simgrid::smpi::Datatype(datatype, &retval);
//error when duplicating, free the new datatype
if(retval!=MPI_SUCCESS){
- Datatype::unref(*newtype);
+ simgrid::smpi::Datatype::unref(*newtype);
*newtype = MPI_DATATYPE_NULL;
}
}
if (function == nullptr || op == nullptr) {
return MPI_ERR_ARG;
} else {
- *op = new Op(function, (commute!=0));
+ *op = new simgrid::smpi::Op(function, (commute!=0));
return MPI_SUCCESS;
}
}
}
}
+int PMPI_Op_commutative(MPI_Op op, int* commute){
+ if (op == MPI_OP_NULL) {
+ return MPI_ERR_OP;
+ } else if (commute==nullptr){
+ return MPI_ERR_ARG;
+ } else {
+ *commute = op->is_commutative();
+ return MPI_SUCCESS;
+ }
+}
+
int PMPI_Group_free(MPI_Group * group)
{
if (group == nullptr) {
return MPI_ERR_ARG;
} else {
if(*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_EMPTY)
- Group::unref(*group);
+ simgrid::smpi::Group::unref(*group);
*group = MPI_GROUP_NULL;
return MPI_SUCCESS;
}
return MPI_SUCCESS;
}else{
group->ref();
- *newcomm = new Comm(group, nullptr);
+ *newcomm = new simgrid::smpi::Comm(group, nullptr);
return MPI_SUCCESS;
}
}
} else if (*comm == MPI_COMM_NULL) {
return MPI_ERR_COMM;
} else {
- Comm::destroy(*comm);
+ simgrid::smpi::Comm::destroy(*comm);
*comm = MPI_COMM_NULL;
return MPI_SUCCESS;
}
} else if (*comm == MPI_COMM_NULL) {
return MPI_ERR_COMM;
} else {
- Comm::destroy(*comm);
+ simgrid::smpi::Comm::destroy(*comm);
*comm = MPI_COMM_NULL;
return MPI_SUCCESS;
}
} else if (dst == MPI_PROC_NULL) {
retval = MPI_SUCCESS;
} else {
- *request = Request::send_init(buf, count, datatype, dst, tag, comm);
+ *request = simgrid::smpi::Request::send_init(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
} else if (src == MPI_PROC_NULL) {
retval = MPI_SUCCESS;
} else {
- *request = Request::recv_init(buf, count, datatype, src, tag, comm);
+ *request = simgrid::smpi::Request::recv_init(buf, count, datatype, src, tag, comm);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
} else if (dst == MPI_PROC_NULL) {
retval = MPI_SUCCESS;
} else {
- *request = Request::ssend_init(buf, count, datatype, dst, tag, comm);
+ *request = simgrid::smpi::Request::ssend_init(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
}
}
if(retval != MPI_ERR_REQUEST) {
- Request::startall(count, requests);
+ simgrid::smpi::Request::startall(count, requests);
}
}
smpi_bench_begin();
if (*request == MPI_REQUEST_NULL) {
retval = MPI_ERR_ARG;
} else {
- Request::unref(request);
+ simgrid::smpi::Request::unref(request);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
extra->send_size = count*dt_size_send;
TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
- *request = Request::irecv(buf, count, datatype, src, tag, comm);
+ *request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm);
retval = MPI_SUCCESS;
TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size());
- *request = Request::isend(buf, count, datatype, dst, tag, comm);
+ *request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size());
- *request = Request::issend(buf, count, datatype, dst, tag, comm);
+ *request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
} else if (src == MPI_PROC_NULL) {
- Status::empty(status);
+ simgrid::smpi::Status::empty(status);
status->MPI_SOURCE = MPI_PROC_NULL;
retval = MPI_SUCCESS;
} else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){
extra->send_size = count * dt_size_send;
TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
- Request::recv(buf, count, datatype, src, tag, comm, status);
+ simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status);
retval = MPI_SUCCESS;
// the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
}
- Request::send(buf, count, datatype, dst, tag, comm);
+ simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size());
- Request::ssend(buf, count, datatype, dst, tag, comm);
+ simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm);
retval = MPI_SUCCESS;
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
} else if (!sendtype->is_valid() || !recvtype->is_valid()) {
retval = MPI_ERR_TYPE;
} else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) {
- Status::empty(status);
+ simgrid::smpi::Status::empty(status);
status->MPI_SOURCE = MPI_PROC_NULL;
retval = MPI_SUCCESS;
}else if (dst >= comm->group()->size() || dst <0 ||
TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
TRACE_smpi_send(rank, rank, dst_traced, sendtag,sendcount*sendtype->size());
- Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm,
+ simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm,
status);
retval = MPI_SUCCESS;
void* recvbuf = xbt_new0(char, size);
retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status);
if(retval==MPI_SUCCESS){
- Datatype::copy(recvbuf, count, datatype, buf, count, datatype);
+ simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype);
}
xbt_free(recvbuf);
retval = MPI_ERR_ARG;
} else if (*request == MPI_REQUEST_NULL) {
*flag= true;
- Status::empty(status);
+ simgrid::smpi::Status::empty(status);
retval = MPI_SUCCESS;
} else {
int rank = ((*request)->comm() != MPI_COMM_NULL) ? smpi_process()->index() : -1;
extra->type = TRACING_TEST;
TRACE_smpi_testing_in(rank, extra);
- *flag = Request::test(request,status);
+ *flag = simgrid::smpi::Request::test(request,status);
TRACE_smpi_testing_out(rank);
retval = MPI_SUCCESS;
if (index == nullptr || flag == nullptr) {
retval = MPI_ERR_ARG;
} else {
- *flag = Request::testany(count, requests, index, status);
+ *flag = simgrid::smpi::Request::testany(count, requests, index, status);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
if (flag == nullptr) {
retval = MPI_ERR_ARG;
} else {
- *flag = Request::testall(count, requests, statuses);
+ *flag = simgrid::smpi::Request::testall(count, requests, statuses);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
} else if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
} else if (source == MPI_PROC_NULL) {
- Status::empty(status);
+ simgrid::smpi::Status::empty(status);
status->MPI_SOURCE = MPI_PROC_NULL;
retval = MPI_SUCCESS;
} else {
- Request::probe(source, tag, comm, status);
+ simgrid::smpi::Request::probe(source, tag, comm, status);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
int retval = 0;
smpi_bench_end();
- if ((flag == nullptr) || (status == nullptr)) {
+ if (flag == nullptr) {
retval = MPI_ERR_ARG;
} else if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
} else if (source == MPI_PROC_NULL) {
*flag=true;
- Status::empty(status);
+ simgrid::smpi::Status::empty(status);
status->MPI_SOURCE = MPI_PROC_NULL;
retval = MPI_SUCCESS;
} else {
- Request::iprobe(source, tag, comm, flag, status);
+ simgrid::smpi::Request::iprobe(source, tag, comm, flag, status);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
smpi_bench_end();
- Status::empty(status);
+ simgrid::smpi::Status::empty(status);
if (request == nullptr) {
retval = MPI_ERR_ARG;
extra->type = TRACING_WAIT;
TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
- Request::wait(request, status);
+ simgrid::smpi::Request::wait(request, status);
retval = MPI_SUCCESS;
//the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
extra->send_size=count;
TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
- *index = Request::waitany(count, requests, status);
+ *index = simgrid::smpi::Request::waitany(count, requests, status);
if(*index!=MPI_UNDEFINED){
int src_traced = savedvals[*index].src;
extra->send_size=count;
TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
- int retval =Request::waitall(count, requests, status);
+ int retval = simgrid::smpi::Request::waitall(count, requests, status);
for (int i = 0; i < count; i++) {
if(savedvals[i].valid){
if (outcount == nullptr) {
retval = MPI_ERR_ARG;
} else {
- *outcount = Request::waitsome(incount, requests, indices, status);
+ *outcount = simgrid::smpi::Request::waitsome(incount, requests, indices, status);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
if (outcount == nullptr) {
retval = MPI_ERR_ARG;
} else {
- *outcount = Request::testsome(incount, requests, indices, status);
+ *outcount = simgrid::smpi::Request::testsome(incount, requests, indices, status);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
extra->send_size = count * dt_size_send;
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
if (comm->size() > 1)
- Colls::bcast(buf, count, datatype, root, comm);
+ simgrid::smpi::Colls::bcast(buf, count, datatype, root, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
extra->type = TRACING_BARRIER;
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- Colls::barrier(comm);
+ simgrid::smpi::Colls::barrier(comm);
//Barrier can be used to synchronize RMA calls. Finish all requests from comm before.
comm->finish_rma_calls();
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
- Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm);
+ simgrid::smpi::Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
- int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_GATHERV;
int dt_size_recv = 1;
if (known == 0)
dt_size_recv = recvtype->size();
- if ((comm->rank() == root)) {
+ if (comm->rank() == root) {
extra->recvcounts = xbt_new(int, size);
- for (i = 0; i < size; i++) // copy data to avoid bad free
+ for (int i = 0; i < size; i++) // copy data to avoid bad free
extra->recvcounts[i] = recvcounts[i] * dt_size_recv;
}
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
- retval = Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
+ retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
}
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
+ simgrid::smpi::Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
}
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
+ simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
}
extra->recv_size = recvcount * dt_size_recv;
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
- Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
+ simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
}
}
int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
int root_traced = comm->group()->index(root);
- int i = 0;
int size = comm->size();
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
extra->type = TRACING_SCATTERV;
int dt_size_send = 1;
if (known == 0)
dt_size_send = sendtype->size();
- if ((comm->rank() == root)) {
+ if (comm->rank() == root) {
extra->sendcounts = xbt_new(int, size);
- for (i = 0; i < size; i++) // copy data to avoid bad free
+ for (int i = 0; i < size; i++) // copy data to avoid bad free
extra->sendcounts[i] = sendcounts[i] * dt_size_send;
}
extra->datatype2 = encode_datatype(recvtype, &known);
extra->recv_size = recvcount * dt_size_recv;
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
- retval = Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
+ retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
}
TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
- Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
+ simgrid::smpi::Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
char* sendtmpbuf = static_cast<char*>(sendbuf);
if( sendbuf == MPI_IN_PLACE ) {
sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
- Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
+ simgrid::smpi::Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
}
int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1;
instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1);
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm);
+ simgrid::smpi::Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm);
if( sendbuf == MPI_IN_PLACE )
xbt_free(sendtmpbuf);
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- retval = Colls::scan(sendbuf, recvbuf, count, datatype, op, comm);
+ retval = simgrid::smpi::Colls::scan(sendbuf, recvbuf, count, datatype, op, comm);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
}
}
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- retval = Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm);
+ retval = simgrid::smpi::Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
if (sendbuf == MPI_IN_PLACE)
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
+ simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
retval = MPI_SUCCESS;
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
int* recvcounts = static_cast<int*>(xbt_malloc(count * sizeof(int)));
for (int i = 0; i < count; i++)
recvcounts[i] = recvcount;
- Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
+ simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm);
xbt_free(recvcounts);
retval = MPI_SUCCESS;
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- retval = Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm);
+ retval = simgrid::smpi::Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
}
extra->num_processes = size;
TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
- retval = Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts,
+ retval = simgrid::smpi::Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts,
recvdisps, recvtype, comm);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
} else if (status->count % size != 0) {
return MPI_UNDEFINED;
} else {
- *count = Status::get_count(status, datatype);
+ *count = simgrid::smpi::Status::get_count(status, datatype);
return MPI_SUCCESS;
}
}
} else if (count<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_contiguous(count, old_type, 0, new_type);
+ return simgrid::smpi::Datatype::create_contiguous(count, old_type, 0, new_type);
}
}
} else if (count<0 || blocklen<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_vector(count, blocklen, stride, old_type, new_type);
+ return simgrid::smpi::Datatype::create_vector(count, blocklen, stride, old_type, new_type);
}
}
} else if (count<0 || blocklen<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_hvector(count, blocklen, stride, old_type, new_type);
+ return simgrid::smpi::Datatype::create_hvector(count, blocklen, stride, old_type, new_type);
}
}
} else if (count<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+ return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
}
}
} else if (count<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+ return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
}
}
int* blocklens=static_cast<int*>(xbt_malloc(blocklength*count*sizeof(int)));
for (int i = 0; i < count; i++)
blocklens[i]=blocklength;
- int retval = Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
+ int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type);
xbt_free(blocklens);
return retval;
}
} else if (count<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
+ return simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
}
}
int* blocklens=(int*)xbt_malloc(blocklength*count*sizeof(int));
for (int i = 0; i < count; i++)
blocklens[i] = blocklength;
- int retval = Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
+ int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type);
xbt_free(blocklens);
return retval;
}
if (count<0){
return MPI_ERR_COUNT;
} else {
- return Datatype::create_struct(count, blocklens, indices, old_types, new_type);
+ return simgrid::smpi::Datatype::create_struct(count, blocklens, indices, old_types, new_type);
}
}
} else if (ndims < 0 || (ndims > 0 && (dims == nullptr || periodic == nullptr)) || comm_cart == nullptr) {
return MPI_ERR_ARG;
} else{
- Topo_Cart* topo = new Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
+ simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart);
if(*comm_cart==MPI_COMM_NULL)
delete topo;
return MPI_SUCCESS;
if (ndims < 1 || nnodes < 1) {
return MPI_ERR_DIMS;
}
- return Dims_create(nnodes, ndims, dims);
+ return simgrid::smpi::Topo_Cart::Dims_create(nnodes, ndims, dims);
}
int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
MPI_Aint disps[3] = {lb, 0, lb + extent};
MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB};
- *newtype = new Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
+ *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types);
(*newtype)->addflag(~DT_FLAG_COMMITED);
return MPI_SUCCESS;
}else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0 ){
retval= MPI_ERR_OTHER;
}else{
- *win = new Win( base, size, disp_unit, info, comm);
+ *win = new simgrid::smpi::Win( base, size, disp_unit, info, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_allocate( MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, void *base, MPI_Win *win){
+ int retval = 0;
+ smpi_bench_end();
+ if (comm == MPI_COMM_NULL) {
+ retval= MPI_ERR_COMM;
+ }else if (disp_unit <= 0 || size < 0 ){
+ retval= MPI_ERR_OTHER;
+ }else{
+    void* ptr = xbt_malloc(size);
+    if(ptr==nullptr){ // re-enable benchmarking before the early exit, to balance smpi_bench_end() above
+      smpi_bench_begin(); return MPI_ERR_NO_MEM; }
+ *static_cast<void**>(base) = ptr;
+ *win = new simgrid::smpi::Win( ptr, size, disp_unit, info, comm,1);
retval = MPI_SUCCESS;
}
smpi_bench_begin();
return retval;
}
+int PMPI_Win_create_dynamic( MPI_Info info, MPI_Comm comm, MPI_Win *win){
+ int retval = 0;
+ smpi_bench_end();
+ if (comm == MPI_COMM_NULL) {
+ retval= MPI_ERR_COMM;
+ }else{
+ *win = new simgrid::smpi::Win(info, comm);
+ retval = MPI_SUCCESS;
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_attach(MPI_Win win, void *base, MPI_Aint size){
+ int retval = 0;
+ smpi_bench_end();
+ if(win == MPI_WIN_NULL){
+ retval = MPI_ERR_WIN;
+ } else if ((base == nullptr && size != 0) || size < 0 ){
+ retval= MPI_ERR_OTHER;
+ }else{
+ retval = win->attach(base, size);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_detach(MPI_Win win, void *base){
+ int retval = 0;
+ smpi_bench_end();
+ if(win == MPI_WIN_NULL){
+ retval = MPI_ERR_WIN;
+ } else if (base == nullptr){
+ retval= MPI_ERR_OTHER;
+ }else{
+ retval = win->detach(base);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+
int PMPI_Win_free( MPI_Win* win){
int retval = 0;
smpi_bench_end();
}
}
+int PMPI_Win_get_info(MPI_Win win, MPI_Info* info)
+{
+ if (win == MPI_WIN_NULL) {
+ return MPI_ERR_WIN;
+ } else {
+ *info = win->info();
+ return MPI_SUCCESS;
+ }
+}
+
+int PMPI_Win_set_info(MPI_Win win, MPI_Info info)
+{
+  if (win == MPI_WIN_NULL) {
+    return MPI_ERR_WIN; // error class for an invalid window, not MPI_ERR_TYPE
+  } else {
+    win->set_info(info);
+    return MPI_SUCCESS;
+  }
+}
+
int PMPI_Win_get_group(MPI_Win win, MPI_Group * group){
if (win == MPI_WIN_NULL) {
return MPI_ERR_WIN;
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0) ||
(origin_addr==nullptr && origin_count > 0)){
return retval;
}
+int PMPI_Rget( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){ //validate request before the MPI_PROC_NULL branch dereferences it
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                       target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
int retval = 0;
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0) ||
(origin_addr==nullptr && origin_count > 0)){
return retval;
}
+int PMPI_Rput( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){ //validate request before the MPI_PROC_NULL branch dereferences it
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) || (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int dst_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr);
+    TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*origin_datatype->size());
+
+    retval = win->put( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                       target_datatype, request);
+
+    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
int retval = 0;
retval = MPI_SUCCESS;
} else if (target_rank <0){
retval = MPI_ERR_RANK;
- } else if (target_disp <0){
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0) ||
(origin_addr==nullptr && origin_count > 0)){
return retval;
}
+int PMPI_Raccumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){ //validate request before the MPI_PROC_NULL branch dereferences it
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0) ||
+             (origin_addr==nullptr && origin_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((!origin_datatype->is_valid()) ||
+             (!target_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count,
+                              target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Get_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else if (target_rank == MPI_PROC_NULL) {
+ retval = MPI_SUCCESS;
+ } else if (target_rank <0){
+ retval = MPI_ERR_RANK;
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+ retval = MPI_ERR_ARG;
+ } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
+ (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
+ (result_addr==nullptr && result_count > 0)){
+ retval = MPI_ERR_COUNT;
+ } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
+ (!target_datatype->is_valid())||
+ (!result_datatype->is_valid())) {
+ retval = MPI_ERR_TYPE;
+ } else if (op == MPI_OP_NULL) {
+ retval = MPI_ERR_OP;
+ } else {
+ int rank = smpi_process()->index();
+ MPI_Group group;
+ win->get_group(&group);
+ int src_traced = group->index(target_rank);
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+ retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
+ result_count, result_datatype, target_rank, target_disp,
+ target_count, target_datatype, op);
+
+ TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+
+int PMPI_Rget_accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+MPI_Datatype target_datatype, MPI_Op op, MPI_Win win, MPI_Request* request){
+  int retval = 0;
+  smpi_bench_end();
+  if (win == MPI_WIN_NULL) {
+    retval = MPI_ERR_WIN;
+  } else if(request == nullptr){ //validate request before the MPI_PROC_NULL branch dereferences it
+    retval = MPI_ERR_REQUEST;
+  } else if (target_rank == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (target_rank <0){
+    retval = MPI_ERR_RANK;
+  } else if (win->dynamic()==0 && target_disp <0){
+    //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+    retval = MPI_ERR_ARG;
+  } else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
+             (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
+             (result_addr==nullptr && result_count > 0)){
+    retval = MPI_ERR_COUNT;
+  } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
+             (!target_datatype->is_valid())||
+             (!result_datatype->is_valid())) {
+    retval = MPI_ERR_TYPE;
+  } else if (op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    int rank = smpi_process()->index();
+    MPI_Group group;
+    win->get_group(&group);
+    int src_traced = group->index(target_rank);
+    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+    retval = win->get_accumulate( origin_addr, origin_count, origin_datatype, result_addr,
+                                  result_count, result_datatype, target_rank, target_disp,
+                                  target_count, target_datatype, op, request);
+
+    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+  }
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Fetch_and_op(void *origin_addr, void *result_addr, MPI_Datatype dtype, int target_rank, MPI_Aint target_disp, MPI_Op op, MPI_Win win){
+ return PMPI_Get_accumulate(origin_addr, origin_addr==nullptr?0:1, dtype, result_addr, 1, dtype, target_rank, target_disp, 1, dtype, op, win);
+}
+
+int PMPI_Compare_and_swap(void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank,
+ MPI_Aint target_disp, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else if (target_rank == MPI_PROC_NULL) {
+ retval = MPI_SUCCESS;
+ } else if (target_rank <0){
+ retval = MPI_ERR_RANK;
+ } else if (win->dynamic()==0 && target_disp <0){
+ //in case of dynamic window, target_disp can be mistakenly seen as negative, as it is an address
+ retval = MPI_ERR_ARG;
+ } else if (origin_addr==nullptr || result_addr==nullptr || compare_addr==nullptr){
+ retval = MPI_ERR_COUNT;
+ } else if (!datatype->is_valid()) {
+ retval = MPI_ERR_TYPE;
+ } else {
+ int rank = smpi_process()->index();
+ MPI_Group group;
+ win->get_group(&group);
+ int src_traced = group->index(target_rank);
+ TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr);
+
+ retval = win->compare_and_swap( origin_addr, compare_addr, result_addr, datatype,
+ target_rank, target_disp);
+
+ TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win){
int retval = 0;
smpi_bench_end();
return retval;
}
+int PMPI_Win_lock_all(int assert, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->lock_all(assert);
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_unlock_all(MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->unlock_all();
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_flush(int rank, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else if (rank == MPI_PROC_NULL){
+ retval = MPI_SUCCESS;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->flush(rank);
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_flush_local(int rank, MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else if (rank == MPI_PROC_NULL){
+ retval = MPI_SUCCESS;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->flush_local(rank);
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_flush_all(MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->flush_all();
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
+int PMPI_Win_flush_local_all(MPI_Win win){
+ int retval = 0;
+ smpi_bench_end();
+ if (win == MPI_WIN_NULL) {
+ retval = MPI_ERR_WIN;
+ } else {
+ int myrank = smpi_process()->index();
+ TRACE_smpi_collective_in(myrank, -1, __FUNCTION__, nullptr);
+ retval = win->flush_local_all();
+ TRACE_smpi_collective_out(myrank, -1, __FUNCTION__);
+ }
+ smpi_bench_begin();
+ return retval;
+}
+
int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
void *ptr = xbt_malloc(size);
if(ptr==nullptr)
}
MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype){
- return static_cast<MPI_Datatype>(F2C::f2c(datatype));
+ return static_cast<MPI_Datatype>(simgrid::smpi::F2C::f2c(datatype));
}
MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){
}
MPI_Group PMPI_Group_f2c(MPI_Fint group){
- return Group::f2c(group);
+ return simgrid::smpi::Group::f2c(group);
}
MPI_Fint PMPI_Group_c2f(MPI_Group group){
}
MPI_Request PMPI_Request_f2c(MPI_Fint request){
- return static_cast<MPI_Request>(Request::f2c(request));
+ return static_cast<MPI_Request>(simgrid::smpi::Request::f2c(request));
}
MPI_Fint PMPI_Request_c2f(MPI_Request request) {
}
MPI_Win PMPI_Win_f2c(MPI_Fint win){
- return static_cast<MPI_Win>(Win::f2c(win));
+ return static_cast<MPI_Win>(simgrid::smpi::Win::f2c(win));
}
MPI_Fint PMPI_Win_c2f(MPI_Win win){
}
MPI_Op PMPI_Op_f2c(MPI_Fint op){
- return static_cast<MPI_Op>(Op::f2c(op));
+ return static_cast<MPI_Op>(simgrid::smpi::Op::f2c(op));
}
MPI_Fint PMPI_Op_c2f(MPI_Op op){
}
MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){
- return static_cast<MPI_Comm>(Comm::f2c(comm));
+ return static_cast<MPI_Comm>(simgrid::smpi::Comm::f2c(comm));
}
MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){
}
MPI_Info PMPI_Info_f2c(MPI_Fint info){
- return static_cast<MPI_Info>(Info::f2c(info));
+ return static_cast<MPI_Info>(simgrid::smpi::Info::f2c(info));
}
MPI_Fint PMPI_Info_c2f(MPI_Info info){
int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr};
smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr};
- return Keyval::keyval_create<Comm>(_copy_fn, _delete_fn, keyval, extra_state);
+ return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Comm>(_copy_fn, _delete_fn, keyval, extra_state);
}
int PMPI_Keyval_free(int* keyval) {
- return Keyval::keyval_free<Comm>(keyval);
+ return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Comm>(keyval);
}
int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
else if (comm==MPI_COMM_NULL)
return MPI_ERR_COMM;
else
- return comm->attr_delete<Comm>(keyval);
+ return comm->attr_delete<simgrid::smpi::Comm>(keyval);
}
int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
static int one = 1;
static int zero = 0;
- static int tag_ub = 1000000;
+ static int tag_ub = INT_MAX;
static int last_used_code = MPI_ERR_LASTCODE;
if (comm==MPI_COMM_NULL){
*static_cast<int**>(attr_value) = &one;
return MPI_SUCCESS;
default:
- return comm->attr_get<Comm>(keyval, attr_value, flag);
+ return comm->attr_get<simgrid::smpi::Comm>(keyval, attr_value, flag);
}
}
else if (comm==MPI_COMM_NULL)
return MPI_ERR_COMM;
else
- return comm->attr_put<Comm>(keyval, attr_value);
+ return comm->attr_put<simgrid::smpi::Comm>(keyval, attr_value);
}
int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
if (type==MPI_DATATYPE_NULL)
return MPI_ERR_TYPE;
else
- return type->attr_get<Datatype>(type_keyval, attribute_val, flag);
+ return type->attr_get<simgrid::smpi::Datatype>(type_keyval, attribute_val, flag);
}
int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val)
if (type==MPI_DATATYPE_NULL)
return MPI_ERR_TYPE;
else
- return type->attr_put<Datatype>(type_keyval, attribute_val);
+ return type->attr_put<simgrid::smpi::Datatype>(type_keyval, attribute_val);
}
int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval)
if (type==MPI_DATATYPE_NULL)
return MPI_ERR_TYPE;
else
- return type->attr_delete<Datatype>(type_keyval);
+ return type->attr_delete<simgrid::smpi::Datatype>(type_keyval);
}
int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval,
{
smpi_copy_fn _copy_fn={nullptr,copy_fn,nullptr};
smpi_delete_fn _delete_fn={nullptr,delete_fn,nullptr};
- return Keyval::keyval_create<Datatype>(_copy_fn, _delete_fn, keyval, extra_state);
+ return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Datatype>(_copy_fn, _delete_fn, keyval, extra_state);
}
int PMPI_Type_free_keyval(int* keyval) {
- return Keyval::keyval_free<Datatype>(keyval);
+ return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Datatype>(keyval);
}
int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag)
*flag = 1;
return MPI_SUCCESS;
default:
- return win->attr_get<Win>(keyval, attribute_val, flag);
+ return win->attr_get<simgrid::smpi::Win>(keyval, attribute_val, flag);
}
}
if (win==MPI_WIN_NULL)
return MPI_ERR_TYPE;
else
- return win->attr_put<Win>(type_keyval, attribute_val);
+ return win->attr_put<simgrid::smpi::Win>(type_keyval, attribute_val);
}
int PMPI_Win_delete_attr (MPI_Win win, int type_keyval)
if (win==MPI_WIN_NULL)
return MPI_ERR_TYPE;
else
- return win->attr_delete<Win>(type_keyval);
+ return win->attr_delete<simgrid::smpi::Win>(type_keyval);
}
int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_attr_function* delete_fn, int* keyval,
{
smpi_copy_fn _copy_fn={nullptr, nullptr, copy_fn};
smpi_delete_fn _delete_fn={nullptr, nullptr, delete_fn};
- return Keyval::keyval_create<Win>(_copy_fn, _delete_fn, keyval, extra_state);
+ return simgrid::smpi::Keyval::keyval_create<simgrid::smpi::Win>(_copy_fn, _delete_fn, keyval, extra_state);
}
int PMPI_Win_free_keyval(int* keyval) {
- return Keyval::keyval_free<Win>(keyval);
+ return simgrid::smpi::Keyval::keyval_free<simgrid::smpi::Win>(keyval);
}
int PMPI_Info_create( MPI_Info *info){
if (info == nullptr)
return MPI_ERR_ARG;
- *info = new Info();
+ *info = new simgrid::smpi::Info();
return MPI_SUCCESS;
}
int PMPI_Info_free( MPI_Info *info){
if (info == nullptr || *info==nullptr)
return MPI_ERR_ARG;
- Info::unref(*info);
+ simgrid::smpi::Info::unref(*info);
*info=MPI_INFO_NULL;
return MPI_SUCCESS;
}
int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){
if (info == nullptr || newinfo==nullptr)
return MPI_ERR_ARG;
- *newinfo = new Info(info);
+ *newinfo = new simgrid::smpi::Info(info);
return MPI_SUCCESS;
}
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)");
//TODO : replace
-extern Process **process_data;
+extern simgrid::smpi::Process **process_data;
extern int* index_to_process_data;
#define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1)
namespace simgrid{
namespace smpi{
-Process::Process(int index)
+Process::Process(int index, msg_bar_t finalization_barrier)
+ : finalization_barrier_(finalization_barrier)
{
char name[MAILBOX_NAME_MAXLEN];
- index_ = MPI_UNDEFINED;
- argc_ = nullptr;
- argv_ = nullptr;
mailbox_ = simgrid::s4u::Mailbox::byName(get_mailbox_name(name, index));
mailbox_small_ = simgrid::s4u::Mailbox::byName(get_mailbox_name_small(name, index));
mailboxes_mutex_ = xbt_mutex_init();
timer_ = xbt_os_timer_new();
+ state_ = SMPI_UNINITIALIZED;
if (MC_is_active())
MC_ignore_heap(timer_, xbt_os_timer_size());
- comm_self_ = MPI_COMM_NULL;
- comm_intra_ = MPI_COMM_NULL;
- comm_world_ = nullptr;
- state_ = SMPI_UNINITIALIZED;
- sampling_ = 0;
- finalization_barrier_ = nullptr;
- return_value_ = 0;
#if HAVE_PAPI
if (xbt_cfg_get_string("smpi/papi-events")[0] != '\0') {
#endif
}
-void Process::set_data(int index, int *argc, char ***argv)
+void Process::set_data(int index, int* argc, char*** argv)
{
-
char* instance_id = (*argv)[1];
comm_world_ = smpi_deployment_comm_world(instance_id);
msg_bar_t bar = smpi_deployment_finalization_barrier(instance_id);
if (bar!=nullptr) // don't overwrite the default one
finalization_barrier_ = bar;
- index_ = index;
instance_id_ = instance_id;
- replaying_ = false;
+ index_ = index;
static_cast<simgrid::MsgActorExt*>(SIMIX_process_self()->data)->data = this;
argv_ = argv;
// set the process attached to the mailbox
mailbox_small_->setReceiver(simgrid::s4u::Actor::self());
- XBT_DEBUG("<%d> New process in the game: %p", index, SIMIX_process_self());
+ process_ = SIMIX_process_self();
+ XBT_DEBUG("<%d> New process in the game: %p", index_, SIMIX_process_self());
}
-void Process::destroy()
+/** @brief Prepares the current process for termination. */
+void Process::finalize()
{
- if(smpi_privatize_global_variables){
- smpi_switch_data_segment(index_);
- }
state_ = SMPI_FINALIZED;
XBT_DEBUG("<%d> Process left the game", index_);
-}
-/** @brief Prepares the current process for termination. */
-void Process::finalize()
-{
// This leads to an explosion of the search graph which cannot be reduced:
if(MC_is_active() || MC_record_replay_is_active())
return;
return false;
}
-void Process::set_user_data(void *data)
-{
- data_ = data;
-}
-
-void *Process::get_user_data()
-{
- return data_;
+smx_actor_t Process::process(){
+ return process_;
}
return sampling_;
}
-void Process::set_finalization_barrier(msg_bar_t bar){
- finalization_barrier_=bar;
-}
-
msg_bar_t Process::finalization_barrier(){
return finalization_barrier_;
}
int rank = xbt_str_parse_int((*argv)[2], "Invalid rank: %s");
smpi_deployment_register_process(instance_id, rank, index);
- if(smpi_privatize_global_variables){
+ if(smpi_privatize_global_variables == SMPI_PRIVATIZE_MMAP){
/* Now using segment index of the process */
index = proc->segment_index;
/* Done at the process's creation */
-/* Copyright (c) 2009-2010, 2012-2014. The SimGrid Team.
+/* Copyright (c) 2009-2010, 2012-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
class Process {
private:
- double simulated_;
- int *argc_;
- char ***argv_;
+ double simulated_ = 0 /* Used to time with simulated_start/elapsed */;
+ int* argc_ = nullptr;
+ char*** argv_ = nullptr;
simgrid::s4u::MailboxPtr mailbox_;
simgrid::s4u::MailboxPtr mailbox_small_;
xbt_mutex_t mailboxes_mutex_;
xbt_os_timer_t timer_;
- MPI_Comm comm_self_;
- MPI_Comm comm_intra_;
- MPI_Comm* comm_world_;
- void *data_; /* user data */
- int index_;
+ MPI_Comm comm_self_ = MPI_COMM_NULL;
+ MPI_Comm comm_intra_ = MPI_COMM_NULL;
+ MPI_Comm* comm_world_ = nullptr;
+ int index_ = MPI_UNDEFINED;
char state_;
- int sampling_; /* inside an SMPI_SAMPLE_ block? */
- char* instance_id_;
- bool replaying_; /* is the process replaying a trace */
+ int sampling_ = 0; /* inside an SMPI_SAMPLE_ block? */
+ char* instance_id_ = nullptr;
+ bool replaying_ = false; /* is the process replaying a trace */
msg_bar_t finalization_barrier_;
- int return_value_;
+ int return_value_ = 0;
smpi_trace_call_location_t trace_call_loc_;
+ smx_actor_t process_ = nullptr;
#if HAVE_PAPI
/** Contains hardware data as read by PAPI **/
int papi_event_set_;
papi_counter_t papi_counter_data_;
#endif
public:
- Process(int index);
- void destroy();
- void set_data(int index, int *argc, char ***argv);
+ explicit Process(int index, msg_bar_t barrier);
+ void set_data(int index, int* argc, char*** argv);
void finalize();
int finalized();
int initialized();
void mark_as_initialized();
void set_replaying(bool value);
bool replaying();
- void set_user_data(void *data);
- void *get_user_data();
smpi_trace_call_location_t* call_location();
int index();
MPI_Comm comm_world();
smx_mailbox_t mailbox();
smx_mailbox_t mailbox_small();
xbt_mutex_t mailboxes_mutex();
- #if HAVE_PAPI
+#if HAVE_PAPI
int papi_event_set(void);
papi_counter_t& papi_counters(void);
- #endif
+#endif
xbt_os_timer_t timer();
void simulated_start();
double simulated_elapsed();
void set_sampling(int s);
int sampling();
msg_bar_t finalization_barrier();
- void set_finalization_barrier(msg_bar_t bar);
int return_value();
void set_return_value(int val);
static void init(int *argc, char ***argv);
+ smx_actor_t process();
};
#define KEY_SIZE (sizeof(int) * 2 + 1)
-using namespace simgrid::smpi;
-
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_replay,smpi,"Trace Replay with SMPI");
int communicator_size = 0;
"Please contact the Simgrid team if support is needed", __FUNCTION__, i, mandatory, optional);\
}
+namespace simgrid {
+namespace smpi {
+
static void action_init(const char *const *action)
{
XBT_DEBUG("Initialize the counters");
log_timed_action (action, clock);
}
+}} // namespace simgrid::smpi
+
void smpi_replay_run(int *argc, char***argv){
/* First initializes everything */
- Process::init(argc, argv);
+ simgrid::smpi::Process::init(argc, argv);
smpi_process()->mark_as_initialized();
smpi_process()->set_replaying(true);
TRACE_smpi_collective_in(rank, -1, operation, extra);
TRACE_smpi_collective_out(rank, -1, operation);
xbt_free(operation);
- xbt_replay_action_register("init", action_init);
- xbt_replay_action_register("finalize", action_finalize);
- xbt_replay_action_register("comm_size", action_comm_size);
- xbt_replay_action_register("comm_split", action_comm_split);
- xbt_replay_action_register("comm_dup", action_comm_dup);
- xbt_replay_action_register("send", action_send);
- xbt_replay_action_register("Isend", action_Isend);
- xbt_replay_action_register("recv", action_recv);
- xbt_replay_action_register("Irecv", action_Irecv);
- xbt_replay_action_register("test", action_test);
- xbt_replay_action_register("wait", action_wait);
- xbt_replay_action_register("waitAll", action_waitall);
- xbt_replay_action_register("barrier", action_barrier);
- xbt_replay_action_register("bcast", action_bcast);
- xbt_replay_action_register("reduce", action_reduce);
- xbt_replay_action_register("allReduce", action_allReduce);
- xbt_replay_action_register("allToAll", action_allToAll);
- xbt_replay_action_register("allToAllV", action_allToAllv);
- xbt_replay_action_register("gather", action_gather);
- xbt_replay_action_register("gatherV", action_gatherv);
- xbt_replay_action_register("allGather", action_allgather);
- xbt_replay_action_register("allGatherV", action_allgatherv);
- xbt_replay_action_register("reduceScatter", action_reducescatter);
- xbt_replay_action_register("compute", action_compute);
+ xbt_replay_action_register("init", simgrid::smpi::action_init);
+ xbt_replay_action_register("finalize", simgrid::smpi::action_finalize);
+ xbt_replay_action_register("comm_size", simgrid::smpi::action_comm_size);
+ xbt_replay_action_register("comm_split", simgrid::smpi::action_comm_split);
+ xbt_replay_action_register("comm_dup", simgrid::smpi::action_comm_dup);
+ xbt_replay_action_register("send", simgrid::smpi::action_send);
+ xbt_replay_action_register("Isend", simgrid::smpi::action_Isend);
+ xbt_replay_action_register("recv", simgrid::smpi::action_recv);
+ xbt_replay_action_register("Irecv", simgrid::smpi::action_Irecv);
+ xbt_replay_action_register("test", simgrid::smpi::action_test);
+ xbt_replay_action_register("wait", simgrid::smpi::action_wait);
+ xbt_replay_action_register("waitAll", simgrid::smpi::action_waitall);
+ xbt_replay_action_register("barrier", simgrid::smpi::action_barrier);
+ xbt_replay_action_register("bcast", simgrid::smpi::action_bcast);
+ xbt_replay_action_register("reduce", simgrid::smpi::action_reduce);
+ xbt_replay_action_register("allReduce", simgrid::smpi::action_allReduce);
+ xbt_replay_action_register("allToAll", simgrid::smpi::action_allToAll);
+ xbt_replay_action_register("allToAllV", simgrid::smpi::action_allToAllv);
+ xbt_replay_action_register("gather", simgrid::smpi::action_gather);
+ xbt_replay_action_register("gatherV", simgrid::smpi::action_gatherv);
+ xbt_replay_action_register("allGather", simgrid::smpi::action_allgather);
+ xbt_replay_action_register("allGatherV", simgrid::smpi::action_allgatherv);
+ xbt_replay_action_register("reduceScatter", simgrid::smpi::action_reducescatter);
+ xbt_replay_action_register("compute", simgrid::smpi::action_compute);
//if we have a delayed start, sleep here.
if(*argc>2){
requests[i] = req;
i++;
}
- Request::waitall(count_requests, requests, status);
+ simgrid::smpi::Request::waitall(count_requests, requests, status);
}
delete get_reqq_self();
active_processes--;
TRACE_smpi_collective_out(rank, -1, operation);
TRACE_smpi_finalize(smpi_process()->index());
- smpi_process()->destroy();
xbt_free(operation);
}
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <xbt/config.hpp>
-#include <algorithm>
#include "private.h"
#include "mc/mc.h"
#include "src/mc/mc_replay.h"
-#include "src/simix/smx_private.h"
-#include "simgrid/sg_config.h"
-#include "smpi/smpi_utils.hpp"
-#include <simgrid/s4u/host.hpp>
+#include "src/smpi/SmpiHost.hpp"
#include "src/kernel/activity/SynchroComm.hpp"
+#include <algorithm>
+
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_request, smpi, "Logging specific to SMPI (reques)");
static simgrid::config::Flag<double> smpi_iprobe_sleep(
static simgrid::config::Flag<double> smpi_test_sleep(
"smpi/test", "Minimum time to inject inside a call to MPI_Test", 1e-4);
-std::vector<s_smpi_factor_t> smpi_os_values;
-std::vector<s_smpi_factor_t> smpi_or_values;
std::vector<s_smpi_factor_t> smpi_ois_values;
extern void (*smpi_comm_copy_data_callback) (smx_activity_t, void*, size_t);
-static double smpi_os(size_t size)
-{
- if (smpi_os_values.empty()) {
- smpi_os_values = parse_factor(xbt_cfg_get_string("smpi/os"));
- }
- double current=smpi_os_values.empty()?0.0:smpi_os_values[0].values[0]+smpi_os_values[0].values[1]*size;
- // Iterate over all the sections that were specified and find the right
- // value. (fact.factor represents the interval sizes; we want to find the
- // section that has fact.factor <= size and no other such fact.factor <= size)
- // Note: parse_factor() (used before) already sorts the vector we iterate over!
- for (auto& fact : smpi_os_values) {
- if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
- XBT_DEBUG("os : %zu <= %zu return %.10f", size, fact.factor, current);
- return current;
- }else{
- // If the next section is too large, the current section must be used.
- // Hence, save the cost, as we might have to use it.
- current = fact.values[0]+fact.values[1]*size;
- }
- }
- XBT_DEBUG("Searching for smpi/os: %zu is larger than the largest boundary, return %.10f", size, current);
-
- return current;
-}
-
-static double smpi_ois(size_t size)
-{
- if (smpi_ois_values.empty()) {
- smpi_ois_values = parse_factor(xbt_cfg_get_string("smpi/ois"));
- }
- double current=smpi_ois_values.empty()?0.0:smpi_ois_values[0].values[0]+smpi_ois_values[0].values[1]*size;
- // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
- // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
- // Note: parse_factor() (used before) already sorts the vector we iterate over!
- for (auto& fact : smpi_ois_values) {
- if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
- XBT_DEBUG("ois : %zu <= %zu return %.10f", size, fact.factor, current);
- return current;
- }else{
- // If the next section is too large, the current section must be used.
- // Hence, save the cost, as we might have to use it.
- current = fact.values[0]+fact.values[1]*size;
- }
- }
- XBT_DEBUG("Searching for smpi/ois: %zu is larger than the largest boundary, return %.10f", size, current);
-
- return current;
-}
-
-static double smpi_or(size_t size)
-{
- if (smpi_or_values.empty()) {
- smpi_or_values = parse_factor(xbt_cfg_get_string("smpi/or"));
- }
-
- double current=smpi_or_values.empty()?0.0:smpi_or_values.front().values[0]+smpi_or_values.front().values[1]*size;
-
- // Iterate over all the sections that were specified and find the right value. (fact.factor represents the interval
- // sizes; we want to find the section that has fact.factor <= size and no other such fact.factor <= size)
- // Note: parse_factor() (used before) already sorts the vector we iterate over!
- for (auto fact : smpi_or_values) {
- if (size <= fact.factor) { // Values already too large, use the previously computed value of current!
- XBT_DEBUG("or : %zu <= %zu return %.10f", size, fact.factor, current);
- return current;
- } else {
- // If the next section is too large, the current section must be used.
- // Hence, save the cost, as we might have to use it.
- current=fact.values[0]+fact.values[1]*size;
- }
- }
- XBT_DEBUG("smpi_or: %zu is larger than largest boundary, return %.10f", size, current);
-
- return current;
-}
-
namespace simgrid{
namespace smpi{
Request::Request(void *buf, int count, MPI_Datatype datatype, int src, int dst, int tag, MPI_Comm comm, unsigned flags) : buf_(buf), old_type_(datatype), src_(src), dst_(dst), tag_(tag), comm_(comm), flags_(flags)
{
void *old_buf = nullptr;
- if((((flags & RECV) != 0) && ((flags & ACCUMULATE) !=0)) || (datatype->flags() & DT_FLAG_DERIVED)){
+// FIXME Handle the case of a partial shared malloc.
+ if ((((flags & RECV) != 0) && ((flags & ACCUMULATE) != 0)) || (datatype->flags() & DT_FLAG_DERIVED)) {
// This part handles the problem of non-contiguous memory
old_buf = buf;
- buf_ = count==0 ? nullptr : xbt_malloc(count*datatype->size());
- if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
- datatype->serialize(old_buf, buf_, count);
+ if (count==0){
+ buf_ = nullptr;
+ }else {
+ buf_ = xbt_malloc(count*datatype->size());
+ if ((datatype->flags() & DT_FLAG_DERIVED) && ((flags & SEND) != 0)) {
+ datatype->serialize(old_buf, buf_, count);
+ }
}
}
// This part handles the problem of non-contiguous memory (for the unserialisation at the reception)
if ((flags_ & RECV) != 0) {
this->print_request("New recv");
+ simgrid::smpi::Process* process = smpi_process_remote(dst_);
+
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
- xbt_mutex_t mut = smpi_process()->mailboxes_mutex();
+ xbt_mutex_t mut = process->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
if (async_small_thresh == 0 && (flags_ & RMA) == 0 ) {
- mailbox = smpi_process()->mailbox();
+ mailbox = process->mailbox();
}
else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) {
//We have to check both mailboxes (because SSEND messages are sent to the large mbox).
//begin with the more appropriate one : the small one.
- mailbox = smpi_process()->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted in the small mailbox %p (in case of SSEND)?", mailbox);
smx_activity_t action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv,
static_cast<void*>(this));
if (action == nullptr) {
- mailbox = smpi_process()->mailbox();
+ mailbox = process->mailbox();
XBT_DEBUG("No, nothing in the small mailbox test the other one : %p", mailbox);
action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("Still nothing, switch back to the small mailbox : %p", mailbox);
- mailbox = smpi_process()->mailbox_small();
+ mailbox = process->mailbox_small();
}
} else {
XBT_DEBUG("yes there was something for us in the large mailbox");
}
} else {
- mailbox = smpi_process()->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("Is there a corresponding send already posted the small mailbox?");
smx_activity_t action = simcall_comm_iprobe(mailbox, 0, src_,tag_, &match_recv, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("No, nothing in the permanent receive mailbox");
- mailbox = smpi_process()->mailbox();
+ mailbox = process->mailbox();
} else {
XBT_DEBUG("yes there was something for us in the small mailbox");
}
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
real_size_=size_;
- action_ = simcall_comm_irecv(SIMIX_process_self(), mailbox, buf_, &real_size_, &match_recv,
- ! smpi_process()->replaying()? smpi_comm_copy_data_callback
+ action_ = simcall_comm_irecv(process->process(), mailbox, buf_, &real_size_, &match_recv,
+ ! process->replaying()? smpi_comm_copy_data_callback
: &smpi_comm_null_copy_buffer_callback, this, -1.0);
XBT_DEBUG("recv simcall posted");
if (async_small_thresh != 0 || (flags_ & RMA) != 0 )
xbt_mutex_release(mut);
} else { /* the RECV flag was not set, so this is a send */
- int receiver = dst_;
-
+ simgrid::smpi::Process* process = smpi_process_remote(dst_);
int rank = src_;
if (TRACE_smpi_view_internals()) {
- TRACE_smpi_send(rank, rank, receiver, tag_, size_);
+ TRACE_smpi_send(rank, rank, dst_, tag_, size_);
}
this->print_request("New send");
refcount_++;
if(!(old_type_->flags() & DT_FLAG_DERIVED)){
oldbuf = buf_;
- if (!smpi_process()->replaying() && oldbuf != nullptr && size_!=0){
+ if (!process->replaying() && oldbuf != nullptr && size_!=0){
if((smpi_privatize_global_variables != 0)
&& (static_cast<char*>(buf_) >= smpi_start_data_exe)
&& (static_cast<char*>(buf_) < smpi_start_data_exe + smpi_size_data_exe )){
//if we are giving back the control to the user without waiting for completion, we have to inject timings
double sleeptime = 0.0;
- if(detached_ != 0 || ((flags_ & (ISEND|SSEND)) != 0)){// issend should be treated as isend
- //isend and send timings may be different
- sleeptime = ((flags_ & ISEND) != 0) ? smpi_ois(size_) : smpi_os(size_);
+ if (detached_ != 0 || ((flags_ & (ISEND | SSEND)) != 0)) { // issend should be treated as isend
+ // isend and send timings may be different
+ sleeptime = ((flags_ & ISEND) != 0)
+ ? simgrid::s4u::Actor::self()->host()->extension<simgrid::smpi::SmpiHost>()->oisend(size_)
+ : simgrid::s4u::Actor::self()->host()->extension<simgrid::smpi::SmpiHost>()->osend(size_);
}
if(sleeptime > 0.0){
int async_small_thresh = xbt_cfg_get_int("smpi/async-small-thresh");
- xbt_mutex_t mut=smpi_process_remote(receiver)->mailboxes_mutex();
+ xbt_mutex_t mut=process->mailboxes_mutex();
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
if (!(async_small_thresh != 0 || (flags_ & RMA) !=0)) {
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
} else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
XBT_DEBUG("Is there a corresponding recv already posted in the large mailbox %p?", mailbox);
smx_activity_t action = simcall_comm_iprobe(mailbox, 1,dst_, tag_, &match_send,
static_cast<void*>(this));
if (action == nullptr) {
if ((flags_ & SSEND) == 0){
- mailbox = smpi_process_remote(receiver)->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("No, nothing in the large mailbox, message is to be sent on the small one %p", mailbox);
} else {
- mailbox = smpi_process_remote(receiver)->mailbox_small();
+ mailbox = process->mailbox_small();
XBT_DEBUG("SSEND : Is there a corresponding recv already posted in the small mailbox %p?", mailbox);
action = simcall_comm_iprobe(mailbox, 1,dst_, tag_, &match_send, static_cast<void*>(this));
if (action == nullptr) {
XBT_DEBUG("No, we are first, send to large mailbox");
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
}
}
} else {
XBT_DEBUG("Yes there was something for us in the large mailbox");
}
} else {
- mailbox = smpi_process_remote(receiver)->mailbox();
+ mailbox = process->mailbox();
XBT_DEBUG("Send request %p is in the large mailbox %p (buf: %p)",mailbox, this,buf_);
}
action_ = simcall_comm_isend(SIMIX_process_from_PID(src_+1), mailbox, size_, -1.0,
buf, real_size_, &match_send,
&xbt_free_f, // how to free the userdata if a detached send fails
- !smpi_process()->replaying() ? smpi_comm_copy_data_callback
+ !process->replaying() ? smpi_comm_copy_data_callback
: &smpi_comm_null_copy_buffer_callback, this,
// detach if msg size < eager/rdv switch limit
detached_);
int Request::testsome(int incount, MPI_Request requests[], int *indices, MPI_Status status[])
{
- int i;
int count = 0;
int count_dead = 0;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
- for(i = 0; i < incount; i++) {
- if((requests[i] != MPI_REQUEST_NULL)) {
- if(test(&requests[i], pstat)) {
- indices[i] = 1;
- count++;
- if(status != MPI_STATUSES_IGNORE) {
- status[i] = *pstat;
- }
- if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags_ & NON_PERSISTENT)
- requests[i]=MPI_REQUEST_NULL;
+ for (int i = 0; i < incount; i++) {
+ if (requests[i] != MPI_REQUEST_NULL) {
+ if (test(&requests[i], pstat)) {
+ indices[i] = 1;
+ count++;
+ if (status != MPI_STATUSES_IGNORE)
+ status[i] = *pstat;
+ if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags_ & NON_PERSISTENT)
+ requests[i] = MPI_REQUEST_NULL;
}
- }else{
+ } else {
count_dead++;
}
}
req->print_request("Finishing");
MPI_Datatype datatype = req->old_type_;
- if(((req->flags_ & ACCUMULATE) != 0) || (datatype->flags() & DT_FLAG_DERIVED)){
+// FIXME Handle the case of a partial shared malloc.
+ if (((req->flags_ & ACCUMULATE) != 0) ||
+ (datatype->flags() & DT_FLAG_DERIVED)) { // && (!smpi_is_shared(req->old_buf_))){
+
if (!smpi_process()->replaying()){
if( smpi_privatize_global_variables != 0 && (static_cast<char*>(req->old_buf_) >= smpi_start_data_exe)
&& ((char*)req->old_buf_ < smpi_start_data_exe + smpi_size_data_exe )){
datatype->unserialize(req->buf_, req->old_buf_, req->real_size_/datatype->size() , req->op_);
xbt_free(req->buf_);
}else if(req->flags_ & RECV){//apply op on contiguous buffer for accumulate
- int n =req->real_size_/datatype->size();
- req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ if(datatype->size()!=0){
+ int n =req->real_size_/datatype->size();
+ req->op_->apply(req->buf_, req->old_buf_, &n, datatype);
+ }
xbt_free(req->buf_);
}
}
}
if(req->detached_sender_ != nullptr){
//integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
- double sleeptime = smpi_or(req->real_size_);
+ double sleeptime = simgrid::s4u::Actor::self()->host()->extension<simgrid::smpi::SmpiHost>()->orecv(req->real_size());
if(sleeptime > 0.0){
simcall_process_sleep(sleeptime);
XBT_DEBUG("receiving size of %zu : sleep %f ", req->real_size_, sleeptime);
static int sort_accumulates(MPI_Request a, MPI_Request b)
{
- return (a->tag() < b->tag());
+ return (a->tag() > b->tag());
}
int Request::waitall(int count, MPI_Request requests[], MPI_Status status[])
-/* Copyright (c) 2010, 2013-2015. The SimGrid Team.
+/* Copyright (c) 2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
--- /dev/null
+/* Copyright (c) 2007, 2009-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* Shared allocations are handled through shared memory segments.
+ * Associated data and metadata are used as follows:
+ *
+ * mmap #1
+ * `allocs' dict ---- -.
+ * ---------- shared_data_t shared_metadata_t / | | |
+ * .->| <name> | ---> -------------------- <--. ----------------- | | | |
+ * | ---------- | fd of <name> | | | size of mmap | --| | | |
+ * | | count (2) | |-- | data | \ | | |
+ * `----------------- | <name> | | ----------------- ---- |
+ * -------------------- | ^ |
+ * | | |
+ * | | `allocs_metadata' dict |
+ * | | ---------------------- |
+ * | `-- | <addr of mmap #1> |<-'
+ * | .-- | <addr of mmap #2> |<-.
+ * | | ---------------------- |
+ * | | |
+ * | | |
+ * | | |
+ * | | mmap #2 |
+ * | v ---- -'
+ * | shared_metadata_t / | |
+ * | ----------------- | | |
+ * | | size of mmap | --| | |
+ * `-- | data | | | |
+ * ----------------- | | |
+ * \ | |
+ * ----
+ */
+#include <map>
+
+#include "private.h"
+#include "private.hpp"
+#include "smpi/smpi_shared_malloc.hpp"
+#include "xbt/dict.h"
+#include <errno.h>
+
+#include <sys/types.h>
+#ifndef WIN32
+#include <sys/mman.h>
+#endif
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifndef MAP_POPULATE
+#define MAP_POPULATE 0
+#endif
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_shared, smpi, "Logging specific to SMPI (shared memory macros)");
+
+#define PTR_STRLEN (2 + 2 * sizeof(void*) + 1)
+
+namespace{
+/** Some location in the source code
+ *
+ * This information is used by SMPI_SHARED_MALLOC to allocate some shared memory for all simulated processes.
+ */
+
+class smpi_source_location {
+public:
+ smpi_source_location(const char* filename, int line)
+ : filename(xbt_strdup(filename)), filename_length(strlen(filename)), line(line)
+ {
+ }
+
+ /** Pointer to a static string containing the file name */
+ char* filename = nullptr;
+ int filename_length = 0;
+ int line = 0;
+
+ bool operator==(smpi_source_location const& that) const
+ {
+ return filename_length == that.filename_length && line == that.line &&
+ std::memcmp(filename, that.filename, filename_length) == 0;
+ }
+ bool operator!=(smpi_source_location const& that) const { return !(*this == that); }
+};
+}
+
+namespace std {
+
+template <> class hash<smpi_source_location> {
+public:
+ typedef smpi_source_location argument_type;
+ typedef std::size_t result_type;
+ result_type operator()(smpi_source_location const& loc) const
+ {
+ return xbt_str_hash_ext(loc.filename, loc.filename_length) ^
+ xbt_str_hash_ext((const char*)&loc.line, sizeof(loc.line));
+ }
+};
+}
+
+namespace{
+
+typedef struct {
+ int fd = -1;
+ int count = 0;
+} shared_data_t;
+
+std::unordered_map<smpi_source_location, shared_data_t> allocs;
+typedef std::unordered_map<smpi_source_location, shared_data_t>::value_type shared_data_key_type;
+
+typedef struct {
+ size_t size;
+ std::vector<std::pair<size_t, size_t>> private_blocks;
+ shared_data_key_type* data;
+} shared_metadata_t;
+
+std::map<void*, shared_metadata_t> allocs_metadata;
+xbt_dict_t calls = nullptr; /* Allocated on first use */
+#ifndef WIN32
+static int smpi_shared_malloc_bogusfile = -1;
+static unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
+#endif
+}
+
+
+void smpi_shared_destroy()
+{
+ allocs.clear();
+ allocs_metadata.clear();
+ xbt_dict_free(&calls);
+}
+
+static size_t shm_size(int fd) {
+ struct stat st;
+
+ if(fstat(fd, &st) < 0) {
+ xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
+ }
+ return static_cast<size_t>(st.st_size);
+}
+
+#ifndef WIN32
+static void* shm_map(int fd, size_t size, shared_data_key_type* data) {
+ char loc[PTR_STRLEN];
+ shared_metadata_t meta;
+
+ if(size > shm_size(fd) && (ftruncate(fd, static_cast<off_t>(size)) < 0)) {
+ xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));
+ }
+
+ void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if(mem == MAP_FAILED) {
+ xbt_die(
+ "Failed to map fd %d with size %zu: %s\n"
+ "If you are running a lot of ranks, you may be exceeding the amount of mappings allowed per process.\n"
+ "On Linux systems, change this value with sudo sysctl -w vm.max_map_count=newvalue (default value: 65536)\n"
+ "Please see http://simgrid.gforge.inria.fr/simgrid/latest/doc/html/options.html#options_virt for more info.",
+ fd, size, strerror(errno));
+ }
+ snprintf(loc, PTR_STRLEN, "%p", mem);
+ meta.size = size;
+ meta.data = data;
+ allocs_metadata[mem] = meta;
+ XBT_DEBUG("MMAP %zu to %p", size, mem);
+ return mem;
+}
+
+static void *smpi_shared_malloc_local(size_t size, const char *file, int line)
+{
+ void* mem;
+ smpi_source_location loc(file, line);
+ auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
+ auto data = res.first;
+ if (res.second) {
+ // The insertion did not take place.
+ // Generate a shared memory name from the address of the shared_data:
+ char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on Mac OS X (shm_open raises ENAMETOOLONG otherwise)
+ snprintf(shmname, 31, "/shmalloc%p", &*data);
+ int fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0) {
+ if (errno == EEXIST)
+ xbt_die("Please cleanup /dev/shm/%s", shmname);
+ else
+ xbt_die("An unhandled error occurred while opening %s. shm_open: %s", shmname, strerror(errno));
+ }
+ data->second.fd = fd;
+ data->second.count = 1;
+ mem = shm_map(fd, size, &*data);
+ if (shm_unlink(shmname) < 0) {
+ XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
+ }
+ XBT_DEBUG("Mapping %s at %p through %d", shmname, mem, fd);
+ } else {
+ mem = shm_map(data->second.fd, size, &*data);
+ data->second.count++;
+ }
+ XBT_DEBUG("Shared malloc %zu in %p (metadata at %p)", size, mem, &*data);
+ return mem;
+}
+
+// Align functions, from http://stackoverflow.com/questions/4840410/how-to-align-a-pointer-in-c
+#define PAGE_SIZE 0x1000
+#define ALIGN_UP(n, align) (((n) + (align)-1) & -(align))
+#define ALIGN_DOWN(n, align) ((n) & -(align))
+
+/*
+ * Similar to smpi_shared_malloc, but only sharing the blocks described by shared_block_offsets.
+ * This array contains the offsets (in bytes) of the block to share.
+ * Even indices are the start offsets (included), odd indices are the stop offsets (excluded).
+ * For instance, if shared_block_offsets == {27, 42}, then the elements mem[27], mem[28], ..., mem[41] are shared. The others are not.
+ */
+void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks)
+{
+ void *mem;
+ xbt_assert(smpi_shared_malloc_blocksize % PAGE_SIZE == 0, "The block size of shared malloc should be a multiple of the page size.");
+ /* First reserve memory area */
+ mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+
+ xbt_assert(mem != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
+ "to allow big allocations.\n",
+ size >> 20);
+
+ /* Create bogus file if not done already */
+ if (smpi_shared_malloc_bogusfile == -1) {
+ /* Create a fd to a new file on disk, make it smpi_shared_malloc_blocksize big, and unlink it.
+ * It still exists in memory but not in the file system (thus it cannot be leaked). */
+ smpi_shared_malloc_blocksize = static_cast<unsigned long>(xbt_cfg_get_double("smpi/shared-malloc-blocksize"));
+ XBT_DEBUG("global shared allocation. Blocksize %lu", smpi_shared_malloc_blocksize);
+ char* name = xbt_strdup("/tmp/simgrid-shmalloc-XXXXXX");
+ smpi_shared_malloc_bogusfile = mkstemp(name);
+ unlink(name);
+ xbt_free(name);
+ char* dumb = (char*)calloc(1, smpi_shared_malloc_blocksize);
+ ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
+ if(err<0)
+ xbt_die("Could not write bogus file for shared malloc");
+ xbt_free(dumb);
+ }
+
+ /* Map the bogus file in place of the anonymous memory */
+ for(int i_block = 0; i_block < nb_shared_blocks; i_block ++) {
+ size_t start_offset = shared_block_offsets[2*i_block];
+ size_t stop_offset = shared_block_offsets[2*i_block+1];
+ xbt_assert(start_offset < stop_offset, "start_offset (%zu) should be lower than stop offset (%zu)", start_offset, stop_offset);
+ xbt_assert(stop_offset <= size, "stop_offset (%zu) should be lower than size (%zu)", stop_offset, size);
+ if(i_block < nb_shared_blocks-1)
+ xbt_assert(stop_offset < shared_block_offsets[2*i_block+2],
+ "stop_offset (%zu) should be lower than its successor start offset (%zu)", stop_offset, shared_block_offsets[2*i_block+2]);
+ size_t start_block_offset = ALIGN_UP(start_offset, smpi_shared_malloc_blocksize);
+ size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
+ unsigned int i;
+ for (i = start_block_offset / smpi_shared_malloc_blocksize; i < stop_block_offset / smpi_shared_malloc_blocksize; i++) {
+ void* pos = (void*)((unsigned long)mem + i * smpi_shared_malloc_blocksize);
+ void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED | MAP_POPULATE,
+ smpi_shared_malloc_bogusfile, 0);
+ xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
+ "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+ "You can also try using the sysctl vm.max_map_count",
+ strerror(errno));
+ }
+ size_t low_page_start_offset = ALIGN_UP(start_offset, PAGE_SIZE);
+ size_t low_page_stop_offset = start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
+ if(low_page_start_offset < low_page_stop_offset) {
+ void* pos = (void*)((unsigned long)mem + low_page_start_offset);
+ void* res = mmap(pos, low_page_stop_offset-low_page_start_offset, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED | MAP_POPULATE,
+ smpi_shared_malloc_bogusfile, 0);
+ xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
+ "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+ "You can also try using the sysctl vm.max_map_count",
+ strerror(errno));
+ }
+ if(low_page_stop_offset <= stop_block_offset) {
+ size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
+ if(high_page_stop_offset > stop_block_offset) {
+ void* pos = (void*)((unsigned long)mem + stop_block_offset);
+ void* res = mmap(pos, high_page_stop_offset-stop_block_offset, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED | MAP_POPULATE,
+ smpi_shared_malloc_bogusfile, 0);
+ xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
+ "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+ "You can also try using the sysctl vm.max_map_count",
+ strerror(errno));
+ }
+ }
+ }
+
+ shared_metadata_t newmeta;
+ //register metadata for memcpy avoidance
+ shared_data_key_type* data = (shared_data_key_type*)xbt_malloc(sizeof(shared_data_key_type));
+ data->second.fd = -1;
+ data->second.count = 1;
+ newmeta.size = size;
+ newmeta.data = data;
+ if(shared_block_offsets[0] > 0) {
+ newmeta.private_blocks.push_back(std::make_pair(0, shared_block_offsets[0]));
+ }
+ int i_block;
+ for(i_block = 0; i_block < nb_shared_blocks-1; i_block ++) {
+ newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], shared_block_offsets[2*i_block+2]));
+ }
+ if(shared_block_offsets[2*i_block+1] < size) {
+ newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], size));
+ }
+ allocs_metadata[mem] = newmeta;
+
+ return mem;
+}
+
+void *smpi_shared_malloc(size_t size, const char *file, int line) {
+ if (size > 0 && smpi_cfg_shared_malloc == shmalloc_local) {
+ return smpi_shared_malloc_local(size, file, line);
+ } else if (smpi_cfg_shared_malloc == shmalloc_global) {
+ int nb_shared_blocks = 1;
+ size_t shared_block_offsets[2] = {0, size};
+ return smpi_shared_malloc_partial(size, shared_block_offsets, nb_shared_blocks);
+ }
+ XBT_DEBUG("Classic malloc %zu", size);
+ return xbt_malloc(size);
+}
+
+int smpi_is_shared(void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset){
+ private_blocks.clear(); // being paranoid
+ if (allocs_metadata.empty())
+ return 0;
+ if ( smpi_cfg_shared_malloc == shmalloc_local || smpi_cfg_shared_malloc == shmalloc_global) {
+ auto low = allocs_metadata.lower_bound(ptr);
+ if (low->first==ptr) {
+ private_blocks = low->second.private_blocks;
+ *offset = 0;
+ return 1;
+ }
+ if (low == allocs_metadata.begin())
+ return 0;
+ low --;
+ if (ptr < (char*)low->first + low->second.size) {
+ xbt_assert(ptr > (char*)low->first, "Oops, there seems to be a bug in the shared memory metadata.");
+ *offset = ((uint8_t*)ptr) - ((uint8_t*) low->first);
+ private_blocks = low->second.private_blocks;
+ return 1;
+ }
+ return 0;
+ } else {
+ return 0;
+ }
+}
+
+std::vector<std::pair<size_t, size_t>> shift_and_frame_private_blocks(const std::vector<std::pair<size_t, size_t>> vec, size_t offset, size_t buff_size) {
+ std::vector<std::pair<size_t, size_t>> result;
+ for(auto block: vec) {
+ auto new_block = std::make_pair(std::min(std::max((size_t)0, block.first-offset), buff_size),
+ std::min(std::max((size_t)0, block.second-offset), buff_size));
+ if(new_block.second > 0 && new_block.first < buff_size)
+ result.push_back(new_block);
+ }
+ return result;
+}
+
+std::vector<std::pair<size_t, size_t>> merge_private_blocks(std::vector<std::pair<size_t, size_t>> src, std::vector<std::pair<size_t, size_t>> dst) {
+ std::vector<std::pair<size_t, size_t>> result;
+ unsigned i_src=0, i_dst=0;
+ while(i_src < src.size() && i_dst < dst.size()) {
+ std::pair<size_t, size_t> block;
+ if(src[i_src].second <= dst[i_dst].first) {
+ i_src++;
+ }
+ else if(dst[i_dst].second <= src[i_src].first) {
+ i_dst++;
+ }
+ else { // src.second > dst.first && dst.second > src.first → the blocks are overlapping
+ block = std::make_pair(std::max(src[i_src].first, dst[i_dst].first),
+ std::min(src[i_src].second, dst[i_dst].second));
+ result.push_back(block);
+ if(src[i_src].second < dst[i_dst].second)
+ i_src ++;
+ else
+ i_dst ++;
+ }
+ }
+ return result;
+}
+
+void smpi_shared_free(void *ptr)
+{
+ if (smpi_cfg_shared_malloc == shmalloc_local) {
+ char loc[PTR_STRLEN];
+ snprintf(loc, PTR_STRLEN, "%p", ptr);
+ auto meta = allocs_metadata.find(ptr);
+ if (meta == allocs_metadata.end()) {
+ XBT_WARN("Cannot free: %p was not shared-allocated by SMPI - maybe its size was 0?", ptr);
+ return;
+ }
+ shared_data_t* data = &meta->second.data->second;
+ if (munmap(ptr, meta->second.size) < 0) {
+ XBT_WARN("Unmapping of fd %d failed: %s", data->fd, strerror(errno));
+ }
+ data->count--;
+ if (data->count <= 0) {
+ close(data->fd);
+ allocs.erase(allocs.find(meta->second.data->first));
+ allocs_metadata.erase(ptr);
+ XBT_DEBUG("Shared free - with removal - of %p", ptr);
+ } else {
+ XBT_DEBUG("Shared free - no removal - of %p, count = %d", ptr, data->count);
+ }
+
+ } else if (smpi_cfg_shared_malloc == shmalloc_global) {
+ auto meta = allocs_metadata.find(ptr);
+ if (meta != allocs_metadata.end()){
+ meta->second.data->second.count--;
+ if(meta->second.data->second.count==0)
+ xbt_free(meta->second.data);
+ }
+
+ munmap(ptr, meta->second.size);
+ } else {
+ XBT_DEBUG("Classic free of %p", ptr);
+ xbt_free(ptr);
+ }
+}
+#endif
+
+int smpi_shared_known_call(const char* func, const char* input)
+{
+ char* loc = bprintf("%s:%s", func, input);
+ int known = 0;
+
+ if (calls==nullptr) {
+ calls = xbt_dict_new_homogeneous(nullptr);
+ }
+ try {
+ xbt_dict_get(calls, loc); /* Succeed or throw */
+ known = 1;
+ xbt_free(loc);
+ }
+ catch (xbt_ex& ex) {
+ xbt_free(loc);
+ if (ex.category != not_found_error)
+ throw;
+ }
+ catch(...) {
+ xbt_free(loc);
+ throw;
+ }
+ return known;
+}
+
+void* smpi_shared_get_call(const char* func, const char* input) {
+ char* loc = bprintf("%s:%s", func, input);
+
+ if (calls == nullptr)
+ calls = xbt_dict_new_homogeneous(nullptr);
+ void* data = xbt_dict_get(calls, loc);
+ xbt_free(loc);
+ return data;
+}
+
+void* smpi_shared_set_call(const char* func, const char* input, void* data) {
+ char* loc = bprintf("%s:%s", func, input);
+
+ if (calls == nullptr)
+ calls = xbt_dict_new_homogeneous(nullptr);
+ xbt_dict_set(calls, loc, data, nullptr);
+ xbt_free(loc);
+ return data;
+}
+
-/* Copyright (c) 2011-2014. The SimGrid Team.
+/* Copyright (c) 2011-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
-/* Copyright (c) 2007-2015. The SimGrid Team.
+/* Copyright (c) 2007-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "private.h"
#include "src/simix/smx_private.h"
+namespace simgrid{
+namespace smpi{
+
void Status::empty(MPI_Status * status)
{
if(status != MPI_STATUS_IGNORE) {
{
return status->count / datatype->size();
}
+
+}
+}
/*
* This is a utility function, no need to have anything in the lower layer for this at all
*/
-int Dims_create(int nnodes, int ndims, int dims[])
+int Topo_Cart::Dims_create(int nnodes, int ndims, int dims[])
{
/* Get # of free-to-be-assigned processes and # of free dimensions */
int freeprocs = nnodes;
-/* Copyright (c) 2010-2015. The SimGrid Team.
+/* Copyright (c) 2010-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
int rank(int* coords, int* rank);
int shift(int direction, int disp, int *rank_source, int *rank_dest) ;
int dim_get(int *ndims);
+ static int Dims_create(int nnodes, int ndims, int dims[]);
};
~Topo_Dist_Graph();
};
-/*
- * This is a utility function, no need to have anything in the lower layer for this at all
- */
-extern int Dims_create(int nnodes, int ndims, int dims[]);
-
}
}
-/* Copyright (c) 2007-2015. The SimGrid Team.
+/* Copyright (c) 2007-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
std::unordered_map<int, smpi_key_elem> Win::keyvals_;
int Win::keyval_id_=0;
-Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm){
+Win::Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated, int dynamic): base_(base), size_(size), disp_unit_(disp_unit), assert_(0), info_(info), comm_(comm), allocated_(allocated), dynamic_(dynamic){
int comm_size = comm->size();
rank_ = comm->rank();
XBT_DEBUG("Creating window");
requests_ = new std::vector<MPI_Request>();
mut_=xbt_mutex_init();
lock_mut_=xbt_mutex_init();
+ atomic_mut_=xbt_mutex_init();
connected_wins_ = new MPI_Win[comm_size];
connected_wins_[rank_] = this;
count_ = 0;
if(rank_==0){
bar_ = MSG_barrier_init(comm_size);
}
+ mode_=0;
comm->add_rma_win(this);
MSG_barrier_destroy(bar_);
xbt_mutex_destroy(mut_);
xbt_mutex_destroy(lock_mut_);
+ xbt_mutex_destroy(atomic_mut_);
+
+ if(allocated_ !=0)
+ xbt_free(base_);
cleanup_attr<Win>();
}
+int Win::attach (void *base, MPI_Aint size){
+ if (!(base_ == MPI_BOTTOM || base_ == 0))
+ return MPI_ERR_ARG;
+ base_=0;//actually the address will be given in the RMA calls, as being the disp.
+ size_+=size;
+ return MPI_SUCCESS;
+}
+
+int Win::detach (void *base){
+ base_=MPI_BOTTOM;
+ size_=-1;
+ return MPI_SUCCESS;
+}
+
void Win::get_name(char* name, int* length){
if(name_==nullptr){
*length=0;
}
}
+MPI_Info Win::info(){
+ if(info_== MPI_INFO_NULL)
+ info_ = new Info();
+ info_->ref();
+ return info_;
+}
+
int Win::rank(){
return rank_;
}
return disp_unit_;
}
+int Win::dynamic(){
+ return dynamic_;
+}
+
+void Win::set_info(MPI_Info info){
+ if(info_!= MPI_INFO_NULL)
+ info->ref();
+ info_=info;
+}
void Win::set_name(char* name){
name_ = xbt_strdup(name);
int size = static_cast<int>(reqs->size());
// start all requests that have been prepared by another process
if (size > 0) {
- for (const auto& req : *reqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
MPI_Request* treqs = &(*reqs)[0];
-
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
}
count_=0;
xbt_mutex_release(mut_);
}
+
+ if(assert==MPI_MODE_NOSUCCEED)//there should be no ops after this one, tell we are closed.
+ opened_=0;
assert_ = assert;
MSG_barrier_wait(bar_);
}
int Win::put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get receiver pointer
MPI_Win recv_win = connected_wins_[target_rank];
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype, smpi_process()->index(),
comm_->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm_, MPI_OP_NULL);
+ //start send
+ sreq->start();
+
+ if(request!=nullptr){
+ *request=sreq;
+ }else{
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(sreq);
+ xbt_mutex_release(mut_);
+ }
+
//push request to receiver's win
xbt_mutex_acquire(recv_win->mut_);
recv_win->requests_->push_back(rreq);
+ rreq->start();
xbt_mutex_release(recv_win->mut_);
- //start send
- sreq->start();
- //push request to sender's win
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
}else{
Datatype::copy(origin_addr, origin_count, origin_datatype, recv_addr, target_count, target_datatype);
+ if(request!=nullptr)
+ *request = MPI_REQUEST_NULL;
}
return MPI_SUCCESS;
}
int Win::get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype)
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request)
{
//get sender pointer
MPI_Win send_win = connected_wins_[target_rank];
//start recv
rreq->start();
- //push request to sender's win
- xbt_mutex_acquire(mut_);
- requests_->push_back(rreq);
- xbt_mutex_release(mut_);
+
+ if(request!=nullptr){
+ *request=rreq;
+ }else{
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(rreq);
+ xbt_mutex_release(mut_);
+ }
+
}else{
Datatype::copy(send_addr, target_count, target_datatype, origin_addr, origin_count, origin_datatype);
+ if(request!=nullptr)
+ *request=MPI_REQUEST_NULL;
}
return MPI_SUCCESS;
int Win::accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op)
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request)
{
//get receiver pointer
void* recv_addr = static_cast<void*>(static_cast<char*>(recv_win->base_) + target_disp * recv_win->disp_unit_);
XBT_DEBUG("Entering MPI_Accumulate to %d", target_rank);
- //As the tag will be used for ordering of the operations, add count to it
+ //As the tag will be used for ordering of the operations, substract count from it (to avoid collisions with other SMPI tags, SMPI_RMA_TAG is set below all the other ones we use )
//prepare send_request
+
MPI_Request sreq = Request::rma_send_init(origin_addr, origin_count, origin_datatype,
- smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, comm_, op);
+ smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, comm_, op);
//prepare receiver request
MPI_Request rreq = Request::rma_recv_init(recv_addr, target_count, target_datatype,
- smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG+3+count_, recv_win->comm_, op);
+ smpi_process()->index(), comm_->group()->index(target_rank), SMPI_RMA_TAG-3-count_, recv_win->comm_, op);
count_++;
+
+ //start send
+ sreq->start();
//push request to receiver's win
xbt_mutex_acquire(recv_win->mut_);
recv_win->requests_->push_back(rreq);
+ rreq->start();
xbt_mutex_release(recv_win->mut_);
- //start send
- sreq->start();
- //push request to sender's win
- xbt_mutex_acquire(mut_);
- requests_->push_back(sreq);
- xbt_mutex_release(mut_);
+ if(request!=nullptr){
+ *request=sreq;
+ }else{
+ xbt_mutex_acquire(mut_);
+ requests_->push_back(sreq);
+ xbt_mutex_release(mut_);
+ }
return MPI_SUCCESS;
}
+int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+ int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+ MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
+
+ //get sender pointer
+ MPI_Win send_win = connected_wins_[target_rank];
+
+ if(opened_==0){//check that post/start has been done
+ // no fence or start .. lock ok ?
+ int locked=0;
+ for(auto it : send_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+
+ if(target_count*target_datatype->get_extent()>send_win->size_)
+ return MPI_ERR_ARG;
+
+ XBT_DEBUG("Entering MPI_Get_accumulate from %d", target_rank);
+ //need to be sure ops are correctly ordered, so finish request here ? slow.
+ MPI_Request req;
+ xbt_mutex_acquire(send_win->atomic_mut_);
+ get(result_addr, result_count, result_datatype, target_rank,
+ target_disp, target_count, target_datatype, &req);
+ if (req != MPI_REQUEST_NULL)
+ Request::wait(&req, MPI_STATUS_IGNORE);
+ if(op!=MPI_NO_OP)
+ accumulate(origin_addr, origin_count, origin_datatype, target_rank,
+ target_disp, target_count, target_datatype, op, &req);
+ if (req != MPI_REQUEST_NULL)
+ Request::wait(&req, MPI_STATUS_IGNORE);
+ xbt_mutex_release(send_win->atomic_mut_);
+ return MPI_SUCCESS;
+
+}
+
+int Win::compare_and_swap(void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank,
+ MPI_Aint target_disp){
+ //get sender pointer
+ MPI_Win send_win = connected_wins_[target_rank];
+
+ if(opened_==0){//check that post/start has been done
+ // no fence or start .. lock ok ?
+ int locked=0;
+ for(auto it : send_win->lockers_)
+ if (it == comm_->rank())
+ locked = 1;
+ if(locked != 1)
+ return MPI_ERR_WIN;
+ }
+
+ XBT_DEBUG("Entering MPI_Compare_and_swap with %d", target_rank);
+ MPI_Request req;
+ xbt_mutex_acquire(send_win->atomic_mut_);
+ get(result_addr, 1, datatype, target_rank,
+ target_disp, 1, datatype, &req);
+ if (req != MPI_REQUEST_NULL)
+ Request::wait(&req, MPI_STATUS_IGNORE);
+ if(! memcmp (result_addr, compare_addr, datatype->get_extent() )){
+ put(origin_addr, 1, datatype, target_rank,
+ target_disp, 1, datatype);
+ }
+ xbt_mutex_release(send_win->atomic_mut_);
+ return MPI_SUCCESS;
+}
+
int Win::start(MPI_Group group, int assert){
/* From MPI forum advices
The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
int Win::wait(){
//naive, blocking implementation.
XBT_DEBUG("Entering MPI_Win_Wait");
- int i=0,j=0;
- int size = group_->size();
+ int i = 0;
+ int j = 0;
+ int size = group_->size();
MPI_Request* reqs = xbt_new0(MPI_Request, size);
while(j!=size){
int Win::lock(int lock_type, int rank, int assert){
MPI_Win target_win = connected_wins_[rank];
- int finished = finish_comms();
- XBT_DEBUG("Win_lock - Finished %d RMA calls", finished);
-
- //window already locked, we have to wait
- if (lock_type == MPI_LOCK_EXCLUSIVE)
+ if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED)|| target_win->mode_ == MPI_LOCK_EXCLUSIVE){
xbt_mutex_acquire(target_win->lock_mut_);
+ target_win->mode_+= lock_type;//add the lock_type to differentiate case when we are switching from EXCLUSIVE to SHARED (no release needed in the unlock)
+ if(lock_type == MPI_LOCK_SHARED){//the window used to be exclusive, it's now shared.
+ xbt_mutex_release(target_win->lock_mut_);
+ }
+ } else if(!(target_win->mode_==MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
+ target_win->mode_+= lock_type; // don't set to exclusive if it's already shared
- xbt_mutex_acquire(target_win->mut_);
target_win->lockers_.push_back(comm_->rank());
- xbt_mutex_release(target_win->mut_);
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_lock %d - Finished %d RMA calls", rank, finished);
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_lock target %d - Finished %d RMA calls", rank, finished);
return MPI_SUCCESS;
}
+int Win::lock_all(int assert){
+ int i=0;
+ int retval = MPI_SUCCESS;
+ for (i=0; i<comm_->size();i++){
+ int ret = this->lock(MPI_LOCK_SHARED, i, assert);
+ if(ret != MPI_SUCCESS)
+ retval = ret;
+ }
+ return retval;
+}
+
int Win::unlock(int rank){
MPI_Win target_win = connected_wins_[rank];
+ int target_mode = target_win->mode_;
+ target_win->mode_= 0;
+ target_win->lockers_.remove(comm_->rank());
+ if (target_mode==MPI_LOCK_EXCLUSIVE){
+ xbt_mutex_release(target_win->lock_mut_);
+ }
- int finished = finish_comms();
- XBT_DEBUG("Win_unlock - Finished %d RMA calls", finished);
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_unlock %d - Finished %d RMA calls", rank, finished);
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_unlock target %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
- xbt_mutex_acquire(target_win->mut_);
- target_win->lockers_.remove(comm_->rank());
- xbt_mutex_release(target_win->mut_);
+int Win::unlock_all(){
+ int i=0;
+ int retval = MPI_SUCCESS;
+ for (i=0; i<comm_->size();i++){
+ int ret = this->unlock(i);
+ if(ret != MPI_SUCCESS)
+ retval = ret;
+ }
+ return retval;
+}
+
+int Win::flush(int rank){
+ MPI_Win target_win = connected_wins_[rank];
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush on local %d - Finished %d RMA calls", rank_, finished);
+ finished = target_win->finish_comms(rank_);
+ XBT_DEBUG("Win_flush on remote %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+
+int Win::flush_local(int rank){
+ int finished = finish_comms(rank);
+ XBT_DEBUG("Win_flush_local for rank %d - Finished %d RMA calls", rank, finished);
+ return MPI_SUCCESS;
+}
+
+int Win::flush_all(){
+ int i=0;
+ int finished = 0;
+ finished = finish_comms();
+ XBT_DEBUG("Win_flush_all on local - Finished %d RMA calls", finished);
+ for (i=0; i<comm_->size();i++){
+ finished = connected_wins_[i]->finish_comms(rank_);
+ XBT_DEBUG("Win_flush_all on %d - Finished %d RMA calls", i, finished);
+ }
+ return MPI_SUCCESS;
+}
- xbt_mutex_try_acquire(target_win->lock_mut_);
- xbt_mutex_release(target_win->lock_mut_);
+int Win::flush_local_all(){
+ int finished = finish_comms();
+ XBT_DEBUG("Win_flush_local_all - Finished %d RMA calls", finished);
return MPI_SUCCESS;
}
int Win::finish_comms(){
+ xbt_mutex_acquire(mut_);
//Finish own requests
std::vector<MPI_Request> *reqqs = requests_;
int size = static_cast<int>(reqqs->size());
if (size > 0) {
- xbt_mutex_acquire(mut_);
- // start all requests that have been prepared by another process
- for (const auto& req : *reqqs) {
- if (req && (req->flags() & PREPARED))
- req->start();
- }
-
MPI_Request* treqs = &(*reqqs)[0];
Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
reqqs->clear();
- xbt_mutex_release(mut_);
}
+ xbt_mutex_release(mut_);
+ return size;
+}
+int Win::finish_comms(int rank){
+ xbt_mutex_acquire(mut_);
+ //Finish own requests
+ std::vector<MPI_Request> *reqqs = requests_;
+ int size = static_cast<int>(reqqs->size());
+ if (size > 0) {
+ size = 0;
+ std::vector<MPI_Request>* myreqqs = new std::vector<MPI_Request>();
+ std::vector<MPI_Request>::iterator iter = reqqs->begin();
+ while (iter != reqqs->end()){
+ if(((*iter)!=MPI_REQUEST_NULL) && (((*iter)->src() == rank) || ((*iter)->dst() == rank))){
+ myreqqs->push_back(*iter);
+ iter = reqqs->erase(iter);
+ size++;
+ } else {
+ ++iter;
+ }
+ }
+ if(size >0){
+ MPI_Request* treqs = &(*myreqqs)[0];
+ Request::waitall(size, treqs, MPI_STATUSES_IGNORE);
+ myreqqs->clear();
+ delete myreqqs;
+ }
+ }
+ xbt_mutex_release(mut_);
return size;
}
-/* Copyright (c) 2010, 2013-2015. The SimGrid Team.
+/* Copyright (c) 2010, 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
MPI_Group group_;
int count_; //for ordering the accs
xbt_mutex_t lock_mut_;
+ xbt_mutex_t atomic_mut_;
std::list<int> lockers_;
int rank_; // to identify owner for barriers in MPI_COMM_WORLD
+ int mode_; // exclusive or shared lock
+ int allocated_;
+ int dynamic_;
public:
static std::unordered_map<int, smpi_key_elem> keyvals_;
static int keyval_id_;
- Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm);
+ Win(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, int allocated = 0, int dynamic = 0);
+ Win(MPI_Info info, MPI_Comm comm) : Win(MPI_BOTTOM, 0, 1, info, comm, 0, 1) {};
~Win();
+ int attach (void *base, MPI_Aint size);
+ int detach (void *base);
void get_name( char* name, int* length);
void get_group( MPI_Group* group);
void set_name( char* name);
int rank();
+ int dynamic();
int start(MPI_Group group, int assert);
int post(MPI_Group group, int assert);
int complete();
+ MPI_Info info();
+ void set_info( MPI_Info info);
int wait();
MPI_Aint size();
void* base();
int disp_unit();
int fence(int assert);
int put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype);
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request=nullptr);
int get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype);
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Request* request=nullptr);
int accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
- MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op);
+ MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request=nullptr);
+ int get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
+ int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
+ MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request=nullptr);
+ int compare_and_swap(void *origin_addr, void *compare_addr,
+ void *result_addr, MPI_Datatype datatype, int target_rank,
+ MPI_Aint target_disp);
static Win* f2c(int id);
+
int lock(int lock_type, int rank, int assert);
int unlock(int rank);
+ int lock_all(int assert);
+ int unlock_all();
+ int flush(int rank);
+ int flush_local(int rank);
+ int flush_all();
+ int flush_local_all();
int finish_comms();
+ int finish_comms(int rank);
};
#! /bin/sh
-# Copyright (c) 2007-2015. The SimGrid Team.
+# Copyright (c) 2007-2017. The SimGrid Team.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify it
list_set CFLAGS
list_set LINKARGS
if [ "@WIN32@" != "1" ]; then
- list_add CFLAGS "-Dmain=smpi_simulated_main_"
- list_add LINKARGS "-lsimgrid"
+ # list_add CFLAGS "-Dmain=smpi_simulated_main_"
+ list_add CFLAGS "-fpic"
+ list_add LINKARGS "-shared" "-lsimgrid" "-Wl,-z,defs"
else
list_add CFLAGS "-include" "@includedir@/smpi/smpi_main.h"
list_add LINKARGS "@libdir@\libsimgrid.dll"
#! /bin/sh
-# Copyright (c) 2014-2015. The SimGrid Team.
+# Copyright (c) 2014-2017. The SimGrid Team.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify it
list_set CXXFLAGS
list_set LINKARGS
if [ "@WIN32@" != "1" ]; then
- list_add CXXFLAGS "-Dmain=smpi_simulated_main_"
- list_add LINKARGS "-lsimgrid"
+ # list_add CXXFLAGS "-Dmain=smpi_simulated_main_"
+ list_add CXXFLAGS "-fpic"
+ list_add LINKARGS "-shared" "-lsimgrid"
else
list_add CXXFLAGS "-include" "@includedir@/smpi/smpi_main.h"
list_add LINKARGS "@libdir@\libsimgrid.dll"
#! /bin/sh
-# Copyright (c) 2012-2015. The SimGrid Team.
+# Copyright (c) 2012-2017. The SimGrid Team.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify it
@SMPITOOLS_SH@
-list_set FFLAGS "-ff2c" "-fno-second-underscore"
-list_set LINKARGS "-lsimgrid" "-lm" "-lgfortran"
+list_set FFLAGS "-fpic" "-ff2c" "-fno-second-underscore"
+list_set LINKARGS "-shared" "-lsimgrid" "-lm" "-lgfortran"
list_set TMPFILES
main_name=main
#! /bin/sh
-# Copyright (c) 2012-2015. The SimGrid Team.
+# Copyright (c) 2012-2017. The SimGrid Team.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify it
@SMPITOOLS_SH@
-list_set FFLAGS "-ff2c" "-fno-second-underscore"
-list_set LINKARGS "-lsimgrid" "-lm" "-lgfortran"
+list_set FFLAGS "-fpic" "-ff2c" "-fno-second-underscore"
+list_set LINKARGS "-shared" "-lsimgrid" "-lm" "-lgfortran"
list_set TMPFILES
main_name=main
NETWORK_LATENCY="${DEFAULT_NETWORK_LATENCY}"
SPEED="${DEFAULT_SPEED}"
-PRIVATIZE="--cfg=smpi/privatize-global-variables:yes"
+PRIVATIZE="--cfg=smpi/privatization:@HAVE_PRIVATIZATION@"
SIMOPTS="--cfg=surf/precision:1e-9 --cfg=network/model:SMPI --cfg=network/TCP-gamma:4194304"
;;
"-keep-temps")
KEEP="true"
+ SIMOPTS="$SIMOPTS --cfg=smpi/keep-temps:yes"
shift 1
;;
"-wrapper")
exit
fi
-if [ -n "$WRAPPER" ]; then
- EXEC="$WRAPPER $1"
-else
- EXEC="$1"
-fi
+EXEC="$1"
shift
# steel --cfg and --logs options
export SMPI_GLOBAL_SIZE=${NUMPROCS}
if [ -n "${KEEP}" ] ; then
- echo ${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PRIVATIZE} ${PLATFORMTMP} ${APPLICATIONTMP}
+ echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} kept."
fi
# * The FD 3 is used to temporarily store FD 1. This is because the shell connects FD 1 to /dev/null when the command
# is launched in the background: this can be overriden in bash but not in standard bourne shell.
exec 3<&0
-${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} <&3 3>&- &
+${WRAPPER} "@SMPIMAIN@" ${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP} <&3 3>&- &
pid=$!
exec 3>&-
wait $pid
#
if [ ${status} -ne 0 ] ; then
if [ -z ${KEEP} ]; then
- echo ${EXEC} ${TRACEOPTIONS} ${SIMOPTS} ${PRIVATIZE} ${PLATFORMTMP} ${APPLICATIONTMP}
+ echo ${EXEC} ${PRIVATIZE} ${TRACEOPTIONS} ${SIMOPTS} ${PLATFORMTMP} ${APPLICATIONTMP}
if [ ${HOSTFILETMP} = 1 ] ; then
echo "Generated hostfile ${HOSTFILE} kept."
fi
#!/bin/sh
-# Copyright (c) 2013-2014. The SimGrid Team.
+# Copyright (c) 2013-2017. The SimGrid Team.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify it
Action* HostImpl::open(const char* fullpath)
{
-
simgrid::surf::Storage* st = nullptr;
size_t longest_prefix_length = 0;
std::string path;
simgrid::surf::Storage* st = findStorageOnMountList(fd->mount);
/* Check if the file is on this storage */
- if (!xbt_dict_get_or_null(st->content_, fd->name)) {
+ if (st->content_->find(fd->name) == st->content_->end()) {
XBT_WARN("File %s is not on disk %s. Impossible to unlink", fd->name, st->cname());
return -1;
} else {
st->usedSize_ -= fd->size;
// Remove the file from storage
- xbt_dict_remove(st->content_, fd->name);
+ sg_size_t* psize = st->content_->at(fd->name);
+ delete psize;
+ st->content_->erase(fd->name);
xbt_free(fd->name);
xbt_free(fd->mount);
int HostImpl::fileSeek(surf_file_t fd, sg_offset_t offset, int origin)
{
-
switch (origin) {
case SEEK_SET:
fd->current_position = offset;
{
/* Check if the new full path is on the same mount point */
if (!strncmp((const char*)fd->mount, fullpath, strlen(fd->mount))) {
- sg_size_t* psize = (sg_size_t*)xbt_dict_get_or_null(findStorageOnMountList(fd->mount)->content_, fd->name);
- if (psize) { // src file exists
- sg_size_t* new_psize = xbt_new(sg_size_t, 1);
+ std::map<std::string, sg_size_t*>* content = findStorageOnMountList(fd->mount)->content_;
+ if (content->find(fd->name) != content->end()) { // src file exists
+ sg_size_t* psize = content->at(std::string(fd->name));
+ sg_size_t* new_psize = new sg_size_t;
*new_psize = *psize;
- xbt_dict_remove(findStorageOnMountList(fd->mount)->content_, fd->name);
+ delete psize;
+ content->erase(fd->name);
std::string path = std::string(fullpath).substr(strlen(fd->mount), strlen(fullpath));
- xbt_dict_set(findStorageOnMountList(fd->mount)->content_, path.c_str(), new_psize, nullptr);
+ content->insert({path.c_str(), new_psize});
XBT_DEBUG("Move file from %s to %s, size '%llu'", fd->name, fullpath, *psize);
return 0;
} else {
-/* Copyright (c) 2004-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#ifndef SURF_CPU_INTERFACE_HPP_
#define SURF_CPU_INTERFACE_HPP_
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/surf/maxmin_private.hpp"
/***********
-/* Copyright (c) 2010, 2012-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/instr/instr_private.h"
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "src/kernel/routing/NetZoneImpl.hpp"
#include "src/surf/network_interface.hpp"
#include "src/surf/xml/platf_private.hpp"
void lmm_print(lmm_system_t sys)
{
- void *_cnst, *_elem, *_var;
+ void* _cnst;
+ void* _elem;
+ void* _var;
lmm_constraint_t cnst = nullptr;
- lmm_element_t elem = nullptr;
- lmm_variable_t var = nullptr;
- xbt_swag_t cnst_list = nullptr;
- xbt_swag_t var_list = nullptr;
- xbt_swag_t elem_list = nullptr;
- xbt_strbuff_t buf = xbt_strbuff_new();
+ lmm_element_t elem = nullptr;
+ lmm_variable_t var = nullptr;
+ xbt_swag_t cnst_list = nullptr;
+ xbt_swag_t var_list = nullptr;
+ xbt_swag_t elem_list = nullptr;
+ std::string buf = std::string("MAX-MIN ( ");
double sum = 0.0;
/* Printing Objective */
var_list = &(sys->variable_set);
- xbt_strbuff_append(buf, "MAX-MIN ( ");
xbt_swag_foreach(_var, var_list) {
var = (lmm_variable_t)_var;
- xbt_strbuff_printf(buf, "'%d'(%f) ", var->id_int, var->weight);
+ buf = buf + "'" + std::to_string(var->id_int) + "'(" + std::to_string(var->weight) + ") ";
}
- xbt_strbuff_append(buf, ")");
- XBT_DEBUG("%20s", buf->data);
- xbt_strbuff_clear(buf);
+ buf += ")";
+ XBT_DEBUG("%20s", buf.c_str());
+ buf.clear();
XBT_DEBUG("Constraints");
/* Printing Constraints */
sum = 0.0;
//Show the enabled variables
elem_list = &(cnst->enabled_element_set);
- xbt_strbuff_append(buf, "\t");
- xbt_strbuff_printf(buf, "%s(", (cnst->sharing_policy)?"":"max");
+ buf += "\t";
+ buf += ((cnst->sharing_policy) ? "(" : "max(");
xbt_swag_foreach(_elem, elem_list) {
elem = (lmm_element_t)_elem;
- xbt_strbuff_printf(buf, "%f.'%d'(%f) %s ", elem->value,
- elem->variable->id_int, elem->variable->value,(cnst->sharing_policy)?"+":",");
+ buf = buf + std::to_string(elem->value) + ".'" + std::to_string(elem->variable->id_int) + "'(" +
+ std::to_string(elem->variable->value) + ")" + ((cnst->sharing_policy) ? " + " : " , ");
if(cnst->sharing_policy)
sum += elem->value * elem->variable->value;
else
elem_list = &(cnst->disabled_element_set);
xbt_swag_foreach(_elem, elem_list) {
elem = (lmm_element_t)_elem;
- xbt_strbuff_printf(buf, "%f.'%d'(%f) %s ", elem->value,
- elem->variable->id_int, elem->variable->value,(cnst->sharing_policy)?"+":",");
+ buf = buf + std::to_string(elem->value) + ".'" + std::to_string(elem->variable->id_int) + "'(" +
+ std::to_string(elem->variable->value) + ")" + ((cnst->sharing_policy) ? " + " : " , ");
if(cnst->sharing_policy)
sum += elem->value * elem->variable->value;
else
sum = MAX(sum,elem->value * elem->variable->value);
}
- xbt_strbuff_printf(buf, "0) <= %f ('%d')", cnst->bound, cnst->id_int);
+ buf = buf + "0) <= " + std::to_string(cnst->bound) + " ('" + std::to_string(cnst->id_int) + "')";
if (!cnst->sharing_policy) {
- xbt_strbuff_printf(buf, " [MAX-Constraint]");
+ buf += " [MAX-Constraint]";
}
- XBT_DEBUG("%s", buf->data);
- xbt_strbuff_clear(buf);
+ XBT_DEBUG("%s", buf.c_str());
+ buf.clear();
xbt_assert(!double_positive(sum - cnst->bound, cnst->bound*sg_maxmin_precision),
"Incorrect value (%f is not smaller than %f): %g", sum, cnst->bound, sum - cnst->bound);
//if(double_positive(sum - cnst->bound, cnst->bound*sg_maxmin_precision))
XBT_DEBUG("'%d'(%f) : %f", var->id_int, var->weight, var->value);
}
}
-
- xbt_strbuff_free(buf);
}
void lmm_solve(lmm_system_t sys)
-/* Copyright (c) 2013-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2013-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "maxmin_private.hpp"
#include "network_cm02.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/sg_config.h"
#include "src/instr/instr_private.h" // TRACE_is_enabled(). FIXME: remove by subscribing tracing to the surf signals
maxminSystem_->solve_fun = specificSolveFun;
}
-
-NetworkCm02Model::~NetworkCm02Model() {}
-
LinkImpl* NetworkCm02Model::createLink(const char* name, double bandwidth, double latency,
e_surf_link_sharing_policy_t policy)
{
public:
NetworkCm02Model();
explicit NetworkCm02Model(void (*solve_fun)(lmm_system_t self));
- virtual ~NetworkCm02Model();
+ virtual ~NetworkCm02Model() = default;
LinkImpl* createLink(const char* name, double bandwidth, double latency,
e_surf_link_sharing_policy_t policy) override;
void updateActionsStateLazy(double now, double delta) override;
* @param model The NetworkModel associated to this NetworkAction
* @param cost The cost of this NetworkAction in [TODO]
* @param failed [description]
- * @param var The lmm variable associated to this Action if it is part of a
- * LMM component
+ * @param var The lmm variable associated to this Action if it is part of a LMM component
*/
NetworkAction(simgrid::surf::Model* model, double cost, bool failed, lmm_variable_t var)
: simgrid::surf::Action(model, cost, failed, var){};
-/* Copyright (c) 2007-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "ns3/core-module.h"
#include "ns3/node.h"
-#include "ns3/ns3_interface.h"
#include "ns3/ns3_simulator.h"
#include "network_ns3.hpp"
-#include "simgrid/sg_config.h"
#include "src/instr/instr_private.h" // TRACE_is_enabled(). FIXME: remove by subscribing tracing to the surf signals
#include "src/kernel/routing/NetPoint.hpp"
-#include "src/surf/HostImpl.hpp"
-#include "src/surf/surf_private.h"
+#include "simgrid/s4u/Engine.hpp"
#include "simgrid/s4u/NetZone.hpp"
-#include "simgrid/s4u/engine.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(ns3, surf, "Logging specific to the SURF network NS3 module");
#ifndef NS3_INTERFACE_H
#define NS3_INTERFACE_H
-#include <simgrid/s4u/host.hpp>
+#include "simgrid/s4u/Host.hpp"
namespace simgrid {
namespace surf {
-/* Copyright (c) 2010, 2012-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/plugins/vm/VirtualMachineImpl.hpp"
#include "src/surf/cpu_interface.hpp"
+#include "simgrid/s4u/Engine.hpp"
+
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
-#include <simgrid/s4u/engine.hpp>
#include <string>
#include <utility>
#include <vector>
double energy_this_step = instantaneous_consumption * (finish_time - start_time);
+ //TODO Trace: Trace energy_this_step from start_time to finish_time in host->name()
+
this->total_energy = previous_energy + energy_this_step;
this->last_updated = finish_time;
this->pstate = host->pstate();
{
if (dynamic_cast<simgrid::s4u::VirtualMachine*>(&host)) // Ignore virtual machines
return;
+
+ //TODO Trace: set to zero the energy variable associated to host->name()
+
host.extension_set(new HostEnergy(&host));
}
{
for (simgrid::surf::Cpu* cpu : action->cpus()) {
simgrid::s4u::Host* host = cpu->getHost();
- if (host == nullptr)
- continue;
+ if (host != nullptr) {
- // If it's a VM, take the corresponding PM
- simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(host);
- if (vm) // If it's a VM, take the corresponding PM
- host = vm->pimpl_vm_->getPm();
+ // If it's a VM, take the corresponding PM
+ simgrid::s4u::VirtualMachine* vm = dynamic_cast<simgrid::s4u::VirtualMachine*>(host);
+ if (vm) // If it's a VM, take the corresponding PM
+ host = vm->pimpl_vm_->getPm();
- // Get the host_energy extension for the relevant host
- HostEnergy* host_energy = host->extension<HostEnergy>();
+ // Get the host_energy extension for the relevant host
+ HostEnergy* host_energy = host->extension<HostEnergy>();
- if (host_energy->last_updated < surf_get_clock())
- host_energy->update();
+ if (host_energy->last_updated < surf_get_clock())
+ host_energy->update();
+ }
}
}
-/* Copyright (c) 2010, 2012-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "src/plugins/vm/VirtualMachineImpl.hpp"
#include "src/surf/cpu_interface.hpp"
+#include "simgrid/s4u/Engine.hpp"
+
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
-#include <simgrid/s4u/engine.hpp>
#include <string>
#include <utility>
#include <vector>
double getCurrentLoad();
double getComputedFlops();
+ double getAverageLoad();
void update();
void reset();
private:
simgrid::s4u::Host* host = nullptr;
double last_updated = 0;
+ double last_reset = 0;
double current_flops = 0;
double computed_flops = 0;
};
simgrid::xbt::Extension<simgrid::s4u::Host, HostLoad> HostLoad::EXTENSION_ID;
HostLoad::HostLoad(simgrid::s4u::Host* ptr)
- : host(ptr), last_updated(surf_get_clock()), current_flops(lmm_constraint_get_usage(host->pimpl_cpu->constraint()))
+ : host(ptr)
+ , last_updated(surf_get_clock())
+ , last_reset(surf_get_clock())
+ , current_flops(lmm_constraint_get_usage(host->pimpl_cpu->constraint()))
{
}
return current_flops / (host->speed() * host->coreCount());
}
+double HostLoad::getAverageLoad()
+{
+ return getComputedFlops() / (host->speed() * host->coreCount() * (surf_get_clock() - last_reset));
+}
+
double HostLoad::getComputedFlops()
{
update();
void HostLoad::reset()
{
last_updated = surf_get_clock();
+ last_reset = surf_get_clock();
computed_flops = 0;
}
}
for (simgrid::surf::Cpu* cpu : action->cpus()) {
simgrid::s4u::Host* host = cpu->getHost();
- if (host == nullptr)
- continue;
-
- // Get the host_load extension for the relevant host
- HostLoad* host_load = host->extension<HostLoad>();
- host_load->update();
+ if (host != nullptr) {
+ // Get the host_load extension for the relevant host
+ HostLoad* host_load = host->extension<HostLoad>();
+ host_load->update();
+ }
}
}
surf_cpu_model_pm = new CpuL07Model(this,maxminSystem_);
}
-HostL07Model::~HostL07Model() = default;
+HostL07Model::~HostL07Model()
+{
+ lmm_system_free(maxminSystem_);
+ maxminSystem_ = nullptr;
+ delete surf_network_model;
+ delete surf_cpu_model_pm;
+}
CpuL07Model::CpuL07Model(HostL07Model *hmodel,lmm_system_t sys)
: CpuModel()
, hostModel_(hmodel)
- {
- maxminSystem_ = sys;
- }
-CpuL07Model::~CpuL07Model() {
- lmm_system_free(maxminSystem_);
+{
+ maxminSystem_ = sys;
+}
+
+CpuL07Model::~CpuL07Model()
+{
maxminSystem_ = nullptr;
}
+
NetworkL07Model::NetworkL07Model(HostL07Model *hmodel, lmm_system_t sys)
: NetworkModel()
, hostModel_(hmodel)
- {
- maxminSystem_ = sys;
- loopback_ = createLink("__loopback__", 498000000, 0.000015, SURF_LINK_FATPIPE);
- }
-NetworkL07Model::~NetworkL07Model()
{
- maxminSystem_ = nullptr; // Avoid multi-free
+ maxminSystem_ = sys;
+ loopback_ = createLink("__loopback__", 498000000, 0.000015, SURF_LINK_FATPIPE);
}
+NetworkL07Model::~NetworkL07Model()
+{
+ maxminSystem_ = nullptr;
+}
double HostL07Model::nextOccuringEvent(double now)
{
} else {
action->latency_ = 0.0;
}
- if ((action->latency_ == 0.0) && (action->isSuspended() == 0)) {
+ if ((action->latency_ <= 0.0) && (action->isSuspended() == 0)) {
action->updateBound();
lmm_update_variable_weight(maxminSystem_, action->getVariable(), 1.0);
}
action, action->getRemains(), lmm_variable_getvalue(action->getVariable()) * delta);
action->updateRemains(lmm_variable_getvalue(action->getVariable()) * delta);
- if (action->getMaxDuration() != NO_MAX_DURATION)
+ if (action->getMaxDuration() > NO_MAX_DURATION)
action->updateMaxDuration(delta);
XBT_DEBUG("Action (%p) : remains (%g).", action, action->getRemains());
*/
if (((action->getRemains() <= 0) && (lmm_get_variable_weight(action->getVariable()) > 0)) ||
- ((action->getMaxDuration() != NO_MAX_DURATION) && (action->getMaxDuration() <= 0))) {
+ ((action->getMaxDuration() > NO_MAX_DURATION) && (action->getMaxDuration() <= 0))) {
action->finish();
action->setState(Action::State::done);
} else {
double latency = 0.0;
this->hostList_->reserve(host_nb);
- for (int i = 0; i<host_nb; i++)
+ for (int i = 0; i < host_nb; i++) {
this->hostList_->push_back(host_list[i]);
+ if (flops_amount[i] > 0)
+ nb_used_host++;
+ }
/* Compute the number of affected resources... */
if(bytes_amount != nullptr) {
nb_link = affected_links.size();
}
- for (int i = 0; i < host_nb; i++)
- if (flops_amount[i] > 0)
- nb_used_host++;
-
XBT_DEBUG("Creating a parallel task (%p) with %d hosts and %d unique links.", this, host_nb, nb_link);
this->latency_ = latency;
action->updateBound();
}
}
+LinkL07::~LinkL07() = default;
/**********
* Action *
}
double lat_bound = sg_tcp_gamma / (2.0 * lat_current);
XBT_DEBUG("action (%p) : lat_bound = %g", this, lat_bound);
- if ((latency_ == 0.0) && (suspended_ == 0)) {
+ if ((latency_ <= 0.0) && (suspended_ == 0)) {
if (rate_ < 0)
lmm_update_variable_bound(getModel()->getMaxminSystem(), getVariable(), lat_bound);
else
class CpuL07 : public Cpu {
public:
CpuL07(CpuL07Model *model, simgrid::s4u::Host *host, std::vector<double> * speedPerPstate, int core);
- ~CpuL07();
+ ~CpuL07() override;
bool isUsed() override;
void apply_event(tmgr_trace_iterator_t event, double value) override;
Action *execution_start(double size) override;
public:
LinkL07(NetworkL07Model* model, const char* name, double bandwidth, double latency,
e_surf_link_sharing_policy_t policy);
- ~LinkL07(){ };
+ ~LinkL07() override;
bool isUsed() override;
void apply_event(tmgr_trace_iterator_t event, double value) override;
void setBandwidth(double value) override;
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/engine.hpp"
+#include "simgrid/s4u/Engine.hpp"
#include "src/kernel/EngineImpl.hpp"
#include "src/simix/smx_private.h"
}
}
+// FIXME: The following duplicates the content of s4u::Host
+namespace simgrid {
+namespace s4u {
+extern std::map<std::string, simgrid::s4u::Host*> host_list;
+}
+}
+
static int surf_parse_models_setup_already_called = 0;
+std::map<std::string, storage_type_t> storage_types;
/** The current AS in the parsing */
static simgrid::kernel::routing::NetZoneImpl* current_routing = nullptr;
}
/** Module management function: creates all internal data structures */
-void sg_platf_init() {
+void sg_platf_init()
+{ /* Do nothing: just for symmetry of user code */
}
/** Module management function: frees all internal data structures */
int rankId=0;
-
// What an inventive way of initializing the AS that I have as ancestor :-(
s_sg_platf_AS_cbarg_t AS;
AS.id = cluster->id;
xbt_dict_foreach(cluster->properties,cursor,key,data) {
xbt_dict_set(host.properties, key, xbt_strdup(data), nullptr);
}
- xbt_dict_free(&cluster->properties);
}
host.speed_per_pstate = cluster->speeds;
link.latency = 0;
link.policy = SURF_LINK_SHARED;
sg_platf_new_link(&link);
- linkUp = linkDown = simgrid::surf::LinkImpl::byName(tmp_link);
+ linkDown = simgrid::surf::LinkImpl::byName(tmp_link);
+ linkUp = linkDown;
free(tmp_link);
current_as->privateLinks_.insert(
{rankId * current_as->linkCountPerNode_ + current_as->hasLoopback_, {linkUp, linkDown}});
//call the cluster function that adds the others links
if (cluster->topology == SURF_CLUSTER_FAT_TREE) {
static_cast<FatTreeZone*>(current_as)->addProcessingNode(i);
- }
- else {
+ } else {
current_as->create_links_for_node(cluster, i, rankId,
rankId*current_as->linkCountPerNode_ + current_as->hasLoopback_ + current_as->hasLimiter_ );
}
xbt_free(host_id);
rankId++;
}
+ xbt_dict_free(&cluster->properties);
// Add a router.
XBT_DEBUG(" ");
xbt_assert(!xbt_lib_get_or_null(storage_lib, storage->id,ROUTING_STORAGE_LEVEL),
"Refusing to add a second storage named \"%s\"", storage->id);
- void* stype = xbt_lib_get_or_null(storage_type_lib, storage->type_id,ROUTING_STORAGE_TYPE_LEVEL);
- xbt_assert(stype,"No storage type '%s'", storage->type_id);
+ xbt_assert(storage_types.find(storage->type_id) != storage_types.end(), "No storage type '%s'", storage->type_id);
+ storage_type_t stype = storage_types.at(storage->type_id);
- XBT_DEBUG("ROUTING Create a storage name '%s' with type_id '%s' and content '%s'",
- storage->id,
- storage->type_id,
- storage->content);
+ XBT_DEBUG("ROUTING Create a storage name '%s' with type_id '%s' and content '%s'", storage->id, storage->type_id,
+ storage->content);
xbt_lib_set(storage_lib, storage->id, ROUTING_STORAGE_LEVEL, (void *) xbt_strdup(storage->type_id));
// if storage content is not specified use the content of storage_type if any
- if(!strcmp(storage->content,"") && strcmp(((storage_type_t) stype)->content,"")){
- storage->content = ((storage_type_t) stype)->content;
- storage->content_type = ((storage_type_t) stype)->content_type;
- XBT_DEBUG("For disk '%s' content is empty, inherit the content (of type %s) from storage type '%s' ",
- storage->id,((storage_type_t) stype)->content_type,
- ((storage_type_t) stype)->type_id);
+ if (!strcmp(storage->content, "") && strcmp(stype->content, "")) {
+ storage->content = stype->content;
+ storage->content_type = stype->content_type;
+ XBT_DEBUG("For disk '%s' content is empty, inherit the content (of type %s) from storage type '%s' ", storage->id,
+ stype->content_type, stype->type_id);
}
XBT_DEBUG("SURF storage create resource\n\t\tid '%s'\n\t\ttype '%s' "
- "\n\t\tmodel '%s' \n\t\tcontent '%s'\n\t\tcontent_type '%s' "
- "\n\t\tproperties '%p''\n",
- storage->id,
- ((storage_type_t) stype)->model,
- ((storage_type_t) stype)->type_id,
- storage->content,
- storage->content_type,
- storage->properties);
-
- auto s = surf_storage_model->createStorage(storage->id, ((storage_type_t)stype)->type_id, storage->content,
- storage->content_type, storage->attach);
+ "\n\t\tmodel '%s' \n\t\tcontent '%s'\n\t\tcontent_type '%s' "
+ "\n\t\tproperties '%p''\n",
+ storage->id, stype->model, stype->type_id, storage->content, storage->content_type, storage->properties);
+
+ auto s = surf_storage_model->createStorage(storage->id, stype->type_id, storage->content, storage->content_type,
+ storage->attach);
if (storage->properties) {
xbt_dict_cursor_t cursor = nullptr;
xbt_dict_free(&storage->properties);
}
}
-void sg_platf_new_storage_type(sg_platf_storage_type_cbarg_t storage_type){
- xbt_assert(!xbt_lib_get_or_null(storage_type_lib, storage_type->id,ROUTING_STORAGE_TYPE_LEVEL),
- "Reading a storage type, processing unit \"%s\" already exists", storage_type->id);
+void sg_platf_new_storage_type(sg_platf_storage_type_cbarg_t storage_type)
+{
+ xbt_assert(storage_types.find(storage_type->id) == storage_types.end(),
+ "Reading a storage type, processing unit \"%s\" already exists", storage_type->id);
storage_type_t stype = xbt_new0(s_storage_type_t, 1);
stype->model = xbt_strdup(storage_type->model);
stype->size = storage_type->size;
stype->model_properties = storage_type->model_properties;
- XBT_DEBUG("ROUTING Create a storage type id '%s' with model '%s', "
- "content '%s', and content_type '%s'",
- stype->type_id,
- stype->model,
- storage_type->content,
- storage_type->content_type);
-
- xbt_lib_set(storage_type_lib,
- stype->type_id,
- ROUTING_STORAGE_TYPE_LEVEL,
- (void *) stype);
+ XBT_DEBUG("ROUTING Create a storage type id '%s' with model '%s', content '%s', and content_type '%s'",
+ stype->type_id, stype->model, storage_type->content, storage_type->content_type);
+
+ storage_types.insert({std::string(stype->type_id), stype});
}
void sg_platf_new_mount(sg_platf_mount_cbarg_t mount){
sg_host_t host = sg_host_by_name(process->host);
if (!host) {
// The requested host does not exist. Do a nice message to the user
- char *tmp = bprintf("Cannot create process '%s': host '%s' does not exist\nExisting hosts: '",process->function, process->host);
- xbt_strbuff_t msg = xbt_strbuff_new_from(tmp);
- free(tmp);
- xbt_dynar_t all_hosts = xbt_dynar_sort_strings(sg_hosts_as_dynar());
- simgrid::s4u::Host* host;
- unsigned int cursor;
- xbt_dynar_foreach(all_hosts,cursor, host) {
- xbt_strbuff_append(msg, host->cname());
- xbt_strbuff_append(msg,"', '");
- if (msg->used > 1024) {
- msg->data[msg->used-3]='\0';
- msg->used -= 3;
-
- xbt_strbuff_append(msg," ...(list truncated)......");// That will be shortened by 3 chars when existing the loop
+ std::string msg = std::string("Cannot create process '") + process->function + "': host '" + process->host +
+ "' does not exist\nExisting hosts: '";
+ for (auto kv : simgrid::s4u::host_list) {
+ simgrid::s4u::Host* host = kv.second;
+ msg += host->name();
+ msg += "', '";
+ if (msg.length() > 1024) {
+ msg.pop_back(); // remove trailing quote
+ msg += "...(list truncated)......";
break;
}
}
- msg->data[msg->used-3]='\0';
- xbt_die("%s", msg->data);
+ xbt_die("%s", msg.c_str());
}
simgrid::simix::ActorCodeFactory& factory = SIMIX_get_actor_code_factory(process->function);
xbt_assert(factory, "Function '%s' unknown", process->function);
_sg_cfg_init_status = 2; /* HACK: direct access to the global controlling the level of configuration to prevent
* any further config now that we created some real content */
-
/* search the routing model */
simgrid::kernel::routing::NetZoneImpl* new_as = nullptr;
switch(AS->routing){
-/* Copyright (c) 2013-2015. The SimGrid Team.
+/* Copyright (c) 2013-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include "storage_interface.hpp"
#include "surf_private.h"
#include "xbt/file.h" /* xbt_getline */
+#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <fstream>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_storage, surf, "Logging specific to the SURF storage module");
-xbt_lib_t file_lib;
-int MSG_FILE_LEVEL = -1; // Msg file level
-
xbt_lib_t storage_lib;
int SIMIX_STORAGE_LEVEL = -1; // Simix storage level
int MSG_STORAGE_LEVEL = -1; // Msg storage level
int ROUTING_STORAGE_LEVEL = -1; // Routing for storage level
int SURF_STORAGE_LEVEL = -1;
-xbt_lib_t storage_type_lib;
-int ROUTING_STORAGE_TYPE_LEVEL = -1; //Routing for storage_type level
simgrid::surf::StorageModel *surf_storage_model = nullptr;
namespace simgrid {
, writeActions_(std::vector<StorageAction*>())
{
content_ = parseContent(content_name);
- attach_ = xbt_strdup(attach);
+ attach_ = xbt_strdup(attach);
turnOn();
XBT_DEBUG("Create resource with Bconnection '%f' Bread '%f' Bwrite '%f' and Size '%llu'", bconnection, bread, bwrite, size);
constraintRead_ = lmm_constraint_new(maxminSystem, this, bread);
Storage::~Storage(){
storageDestructedCallbacks(this);
- xbt_dict_free(&content_);
+ if (content_ != nullptr) {
+ for (auto entry : *content_)
+ delete entry.second;
+ delete content_;
+ }
free(typeId_);
free(contentType_);
free(attach_);
}
-xbt_dict_t Storage::parseContent(const char *filename)
+std::map<std::string, sg_size_t*>* Storage::parseContent(const char* filename)
{
usedSize_ = 0;
if ((!filename) || (strcmp(filename, "") == 0))
return nullptr;
- xbt_dict_t parse_content = xbt_dict_new_homogeneous(xbt_free_f);
-
- FILE *file = surf_fopen(filename, "r");
- xbt_assert(file, "Cannot open file '%s' (path=%s)", filename, (boost::join(surf_path, ":")).c_str());
+ std::map<std::string, sg_size_t*>* parse_content = new std::map<std::string, sg_size_t*>();
- char *line = nullptr;
- size_t len = 0;
- ssize_t read;
- char path[1024];
- sg_size_t size;
+ std::ifstream* fs = surf_ifsopen(filename);
- while ((read = xbt_getline(&line, &len, file)) != -1) {
- if (read){
- xbt_assert(sscanf(line,"%s %llu", path, &size) == 2, "Parse error in %s: %s",filename,line);
+ std::string line;
+ std::vector<std::string> tokens;
+ do {
+ std::getline(*fs, line);
+ boost::trim(line);
+ if (line.length() > 0) {
+ boost::split(tokens, line, boost::is_any_of(" \t"), boost::token_compress_on);
+ xbt_assert(tokens.size() == 2, "Parse error in %s: %s", filename, line.c_str());
+ sg_size_t size = std::stoull(tokens.at(1));
usedSize_ += size;
- sg_size_t *psize = xbt_new(sg_size_t, 1);
+ sg_size_t* psize = new sg_size_t;
*psize = size;
- xbt_dict_set(parse_content,path,psize,nullptr);
+ parse_content->insert({tokens.front(), psize});
}
- }
- free(line);
- fclose(file);
+ } while (!fs->eof());
+ delete fs;
return parse_content;
}
}
}
-xbt_dict_t Storage::getContent()
+std::map<std::string, sg_size_t*>* Storage::getContent()
{
/* For the moment this action has no cost, but in the future we could take in account access latency of the disk */
-
- xbt_dict_t content_dict = xbt_dict_new_homogeneous(nullptr);
- xbt_dict_cursor_t cursor = nullptr;
- char *file;
- sg_size_t *psize;
-
- xbt_dict_foreach(content_, cursor, file, psize){
- xbt_dict_set(content_dict,file,psize,nullptr);
- }
- return content_dict;
-}
-
-sg_size_t Storage::getSize(){
- return size_;
+ return content_;
}
sg_size_t Storage::getFreeSize(){
-/* Copyright (c) 2004-2015. The SimGrid Team.
+/* Copyright (c) 2004-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
#include <xbt/base.h>
#include <xbt/signal.hpp>
-#include "surf_interface.hpp"
#include "src/surf/PropertyHolder.hpp"
+#include "surf_interface.hpp"
+#include <map>
#ifndef STORAGE_INTERFACE_HPP_
#define STORAGE_INTERFACE_HPP_
* Classes *
***********/
-class StorageModel;
-class Storage;
class StorageAction;
/*************
public simgrid::surf::PropertyHolder {
public:
- /**
- * @brief Storage constructor
- *
- * @param model StorageModel associated to this Storage
- * @param name The name of the Storage
- * @param props Dictionary of properties associated to this Storage
- */
+ /** @brief Storage constructor */
Storage(Model* model, const char* name, lmm_system_t maxminSystem, double bread, double bwrite, double bconnection,
const char* type_id, const char* content_name, const char* content_type, sg_size_t size, const char* attach);
void turnOn() override;
void turnOff() override;
- xbt_dict_t content_;
+ std::map<std::string, sg_size_t*>* content_;
char* contentType_;
sg_size_t size_;
sg_size_t usedSize_;
*
* @return A xbt_dict_t with path as keys and size in bytes as values
*/
- virtual xbt_dict_t getContent();
-
- /**
- * @brief Get the size in bytes of the current Storage
- *
- * @return The size in bytes of the current Storage
- */
- virtual sg_size_t getSize();
+ virtual std::map<std::string, sg_size_t*>* getContent();
/**
* @brief Get the available size in bytes of the current Storage
*/
virtual sg_size_t getUsedSize();
-
- xbt_dict_t parseContent(const char *filename);
+ std::map<std::string, sg_size_t*>* parseContent(const char* filename);
std::vector<StorageAction*> writeActions_;
char *content_type;
char *type_id;
xbt_dict_t properties;
- xbt_dict_t model_properties;
+ std::map<std::string, std::string>* model_properties;
sg_size_t size;
-} s_storage_type_t, *storage_type_t;
+} s_storage_type_t;
+typedef s_storage_type_t* storage_type_t;
typedef struct s_mount {
void *storage;
char *name;
-} s_mount_t, *mount_t;
+} s_mount_t;
+typedef s_mount_t* mount_t;
typedef struct surf_file {
char *name;
-/* Copyright (c) 2013-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2013-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "storage_n11.hpp"
-#include "simgrid/s4u/engine.hpp"
+#include "simgrid/s4u/Engine.hpp"
#include "src/kernel/routing/NetPoint.hpp"
-#include "surf_private.h"
#include <math.h> /*ceil*/
XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(surf_storage);
/*************
* CallBacks *
*************/
-
-static inline void routing_storage_type_free(void *r)
-{
- storage_type_t stype = (storage_type_t) r;
- free(stype->model);
- free(stype->type_id);
- free(stype->content);
- free(stype->content_type);
- xbt_dict_free(&(stype->properties));
- xbt_dict_free(&(stype->model_properties));
- free(stype);
-}
+extern std::map<std::string, storage_type_t> storage_types;
static void check_disk_attachment()
{
instr_routing_define_callbacks();
ROUTING_STORAGE_LEVEL = xbt_lib_add_level(storage_lib, xbt_free_f);
- ROUTING_STORAGE_TYPE_LEVEL = xbt_lib_add_level(storage_type_lib, routing_storage_type_free);
SURF_STORAGE_LEVEL = xbt_lib_add_level(storage_lib, [](void *self) {
delete static_cast<simgrid::surf::Storage*>(self);
});
xbt_assert(!surf_storage_resource_priv(surf_storage_resource_by_name(id)),
"Storage '%s' declared several times in the platform file", id);
- storage_type_t storage_type = (storage_type_t) xbt_lib_get_or_null(storage_type_lib, type_id,ROUTING_STORAGE_TYPE_LEVEL);
+ storage_type_t storage_type = storage_types.at(type_id);
- double Bread = surf_parse_get_bandwidth((char*)xbt_dict_get(storage_type->model_properties, "Bread"),
- "property Bread, storage",type_id);
- double Bwrite = surf_parse_get_bandwidth((char*)xbt_dict_get(storage_type->model_properties, "Bwrite"),
- "property Bwrite, storage",type_id);
- double Bconnection = surf_parse_get_bandwidth((char*)xbt_dict_get(storage_type->model_properties, "Bconnection"),
- "property Bconnection, storage",type_id);
+ double Bread =
+ surf_parse_get_bandwidth(storage_type->model_properties->at("Bread").c_str(), "property Bread, storage", type_id);
+ double Bwrite = surf_parse_get_bandwidth(storage_type->model_properties->at("Bwrite").c_str(),
+ "property Bwrite, storage", type_id);
+ double Bconnection = surf_parse_get_bandwidth(storage_type->model_properties->at("Bconnection").c_str(),
+ "property Bconnection, storage", type_id);
Storage* storage = new StorageN11(this, id, maxminSystem_, Bread, Bwrite, Bconnection, type_id, (char*)content_name,
content_type, storage_type->size, (char*)attach);
// which becomes the new file size
action->file_->size = action->file_->current_position;
- sg_size_t *psize = xbt_new(sg_size_t,1);
- *psize = action->file_->size;
- xbt_dict_t content_dict = action->storage_->content_;
- xbt_dict_set(content_dict, action->file_->name, psize, nullptr);
+ sg_size_t* psize = new sg_size_t;
+ *psize = action->file_->size;
+ std::map<std::string, sg_size_t*>* content_dict = action->storage_->content_;
+ auto entry = content_dict->find(action->file_->name);
+ delete entry->second;
+ entry->second = psize;
}
action->updateRemains(lmm_variable_getvalue(action->getVariable()) * delta);
XBT_DEBUG("\tOpen file '%s'",path);
sg_size_t size;
- sg_size_t* psize = (sg_size_t*)xbt_dict_get_or_null(content_, path);
+ sg_size_t* psize = nullptr;
// if file does not exist create an empty file
- if(psize)
- size = *psize;
+ if (content_->find(path) != content_->end())
+ size = *(content_->at(path));
else {
- psize = xbt_new(sg_size_t,1);
- size = 0;
+ psize = new sg_size_t;
+ size = 0;
*psize = size;
- xbt_dict_set(content_, path, psize, nullptr);
+ content_->insert({path, psize});
XBT_DEBUG("File '%s' was not found, file created.",path);
}
surf_file_t file = xbt_new0(s_surf_file_t,1);
-/* Copyright (c) 2013-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2013-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/engine.hpp"
+#include "simgrid/s4u/Engine.hpp"
#include "src/instr/instr_private.h"
#include "src/plugins/vm/VirtualMachineImpl.hpp"
return host->pimpl_->fileMove(fd, fullpath);
}
-xbt_dict_t surf_storage_get_content(surf_resource_t resource){
- return static_cast<simgrid::surf::Storage*>(surf_storage_resource_priv(resource))->getContent();
-}
-
sg_size_t surf_storage_get_size(surf_resource_t resource){
- return static_cast<simgrid::surf::Storage*>(surf_storage_resource_priv(resource))->getSize();
+ return static_cast<simgrid::surf::Storage*>(surf_storage_resource_priv(resource))->size_;
}
sg_size_t surf_storage_get_free_size(surf_resource_t resource){
-/* Copyright (c) 2004-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "surf_interface.hpp"
-#include "cpu_interface.hpp"
#include "mc/mc.h"
-#include "network_interface.hpp"
-#include "simgrid/s4u/engine.hpp"
+#include "simgrid/s4u/Engine.hpp"
#include "simgrid/sg_config.h"
#include "src/instr/instr_private.h" // TRACE_is_enabled(). FIXME: remove by subscribing tracing to the surf signals
-#include "src/internal_config.h"
#include "src/kernel/routing/NetPoint.hpp"
-#include "src/simix/smx_host_private.h"
#include "src/surf/HostImpl.hpp"
-#include "surf_private.h"
+
+#include <fstream>
#include <vector>
XBT_LOG_NEW_CATEGORY(surf, "All SURF categories");
std::vector<std::string> surf_path;
std::vector<simgrid::s4u::Host*> host_that_restart;
xbt_dict_t watched_hosts_lib;
+extern std::map<std::string, storage_type_t> storage_types;
namespace simgrid {
namespace surf {
# define FILE_DELIM "/" /* FIXME: move to better location */
#endif
+std::ifstream* surf_ifsopen(const char* name)
+{
+ std::ifstream* fs = new std::ifstream();
+ xbt_assert(name);
+ if (__surf_is_absolute_file_path(name)) { /* don't mess with absolute file names */
+ fs->open(name, std::ifstream::in);
+ }
+
+ /* search relative files in the path */
+ for (auto path_elm : surf_path) {
+ std::string buff = path_elm + FILE_DELIM + name;
+ fs->open(buff.c_str(), std::ifstream::in);
+
+ if (!fs->fail()) {
+ XBT_DEBUG("Found file at %s", buff.c_str());
+ return fs;
+ }
+ }
+
+ return fs;
+}
FILE *surf_fopen(const char *name, const char *mode)
{
char *buff;
USER_HOST_LEVEL = simgrid::s4u::Host::extension_create(nullptr);
storage_lib = xbt_lib_new();
- storage_type_lib = xbt_lib_new();
- file_lib = xbt_lib_new();
watched_hosts_lib = xbt_dict_new_homogeneous(nullptr);
XBT_DEBUG("Add SURF levels");
if (!future_evt_set)
future_evt_set = new simgrid::trace_mgr::future_evt_set();
- TRACE_add_start_function(TRACE_surf_alloc);
- TRACE_add_end_function(TRACE_surf_release);
+ TRACE_surf_alloc();
+ simgrid::surf::surfExitCallbacks.connect(TRACE_surf_release);
sg_config_init(argc, argv);
sg_host_exit();
xbt_lib_free(&storage_lib);
sg_link_exit();
- xbt_lib_free(&storage_type_lib);
- xbt_lib_free(&file_lib);
xbt_dict_free(&watched_hosts_lib);
+ for (auto e : storage_types) {
+ storage_type_t stype = e.second;
+ free(stype->model);
+ free(stype->type_id);
+ free(stype->content);
+ free(stype->content_type);
+ xbt_dict_free(&(stype->properties));
+ delete stype->model_properties;
+ free(stype);
+ }
for (auto model : *all_existing_models)
delete model;
while(!modifiedSet_->empty()) {
Action *action = &(modifiedSet_->front());
modifiedSet_->pop_front();
- int max_dur_flag = 0;
+ bool max_dur_flag = false;
if (action->getStateSet() != runningActionSet_)
continue;
min = now + time_to_completion; // when the task will complete if nothing changes
}
- if ((action->getMaxDuration() != NO_MAX_DURATION) &&
+ if ((action->getMaxDuration() > NO_MAX_DURATION) &&
(min == -1 || action->getStartTime() + action->getMaxDuration() < min)) {
// when the task will complete anyway because of the deadline if any
min = action->getStartTime() + action->getMaxDuration();
- max_dur_flag = 1;
+ max_dur_flag = true;
}
if (min != -1) {
action->heapUpdate(actionHeap_, min, max_dur_flag ? MAX_DURATION : NORMAL);
- XBT_DEBUG("Insert at heap action(%p) min %f now %f", action, min,
- now);
- } else DIE_IMPOSSIBLE;
+ XBT_DEBUG("Insert at heap action(%p) min %f now %f", action, min, now);
+ } else
+ DIE_IMPOSSIBLE;
}
//hereafter must have already the min value for this resource model
if (variable_)
lmm_update_variable_bound(getModel()->getMaxminSystem(), variable_, bound);
- if (getModel()->getUpdateMechanism() == UM_LAZY && getLastUpdate()!=surf_get_clock())
+ if (getModel()->getUpdateMechanism() == UM_LAZY && getLastUpdate() != surf_get_clock())
heapRemove(getModel()->getActionHeap());
XBT_OUT();
}
double Action::getFinishTime()
{
/* keep the function behavior, some models (cpu_ti) change the finish time before the action end */
- return remains_ == 0 ? finishTime_ : -1;
+ return remains_ <= 0 ? finishTime_ : -1;
}
void Action::setData(void* data)
/* Generic functions common to all models */
XBT_PRIVATE FILE *surf_fopen(const char *name, const char *mode);
+XBT_PRIVATE std::ifstream* surf_ifsopen(const char* name);
/* The __surf_is_absolute_file_path() returns 1 if
* file_path is a absolute file path, in the other
#include "xbt/log.h"
#include "xbt/str.h"
-#include "src/surf/trace_mgr.hpp"
#include "src/surf/surf_interface.hpp"
+#include "src/surf/trace_mgr.hpp"
#include "surf_private.h"
#include "xbt/RngStream.h"
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/split.hpp>
+#include <fstream>
#include <math.h>
+#include <sstream>
#include <unordered_map>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_trace, surf, "Surf trace management");
-static std::unordered_map<const char *, simgrid::trace_mgr::trace*> trace_list;
+static std::unordered_map<const char*, simgrid::trace_mgr::trace*> trace_list;
simgrid::trace_mgr::trace::trace()=default;
simgrid::trace_mgr::trace::~trace()=default;
xbt_heap_free(p_heap);
}
-tmgr_trace_t tmgr_trace_new_from_string(const char *name, const char *input, double periodicity)
+tmgr_trace_t tmgr_trace_new_from_string(const char* name, std::string input, double periodicity)
{
int linecount = 0;
tmgr_event_t last_event = nullptr;
if (sscanf(val.c_str(), "PERIODICITY " "%lg" "\n", &periodicity) == 1)
continue;
- xbt_assert(sscanf(val.c_str(), "%lg" " " "%lg" "\n", &event.delta, &event.value) == 2,
- "%s:%d: Syntax error in trace\n%s", name, linecount, input);
+ xbt_assert(sscanf(val.c_str(), "%lg"
+ " "
+ "%lg"
+ "\n",
+ &event.delta, &event.value) == 2,
+ "%s:%d: Syntax error in trace\n%s", name, linecount, input.c_str());
if (last_event) {
xbt_assert(last_event->delta <= event.delta,
- "%s:%d: Invalid trace: Events must be sorted, but time %g > time %g.\n%s",
- name, linecount, last_event->delta, event.delta, input);
+ "%s:%d: Invalid trace: Events must be sorted, but time %g > time %g.\n%s", name, linecount,
+ last_event->delta, event.delta, input.c_str());
last_event->delta = event.delta - last_event->delta;
} else {
xbt_assert(filename && filename[0], "Cannot parse a trace from the null or empty filename");
xbt_assert(trace_list.find(filename) == trace_list.end(), "Refusing to define trace %s twice", filename);
- FILE *f = surf_fopen(filename, "r");
- xbt_assert(f != nullptr, "Cannot open file '%s' (path=%s)", filename, (boost::join(surf_path, ":")).c_str());
+ std::ifstream* f = surf_ifsopen(filename);
+ xbt_assert(!f->fail(), "Cannot open file '%s' (path=%s)", filename, (boost::join(surf_path, ":")).c_str());
+
+ std::stringstream buffer;
+ buffer << f->rdbuf();
+ tmgr_trace_t trace = tmgr_trace_new_from_string(filename, buffer.str(), 0.);
- char *tstr = xbt_str_from_file(f);
- fclose(f);
- tmgr_trace_t trace = tmgr_trace_new_from_string(filename, tstr, 0.);
- xbt_free(tstr);
+ delete f;
return trace;
}
XBT_PUBLIC(void) tmgr_finalize(void);
-XBT_PUBLIC(tmgr_trace_t) tmgr_trace_new_from_file(const char *filename);
-XBT_PUBLIC(tmgr_trace_t) tmgr_trace_new_from_string(const char *id, const char *input, double periodicity);
+XBT_PUBLIC(tmgr_trace_t) tmgr_trace_new_from_file(const char* filename);
+XBT_PUBLIC(tmgr_trace_t) tmgr_trace_new_from_string(const char* id, std::string input, double periodicity);
SG_END_DECL()
#include "simgrid/host.h"
#include "src/surf/xml/platf.hpp"
-#include <vector>
+#include <map>
#include <string>
+#include <vector>
SG_BEGIN_DECL()
#include "src/surf/xml/simgrid_dtd.h"
const char* content;
const char* content_type;
xbt_dict_t properties;
- xbt_dict_t model_properties;
+ std::map<std::string, std::string>* model_properties;
sg_size_t size;
} s_sg_platf_storage_type_cbarg_t, *sg_platf_storage_type_cbarg_t;
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <errno.h>
-#include <math.h>
-#include <stdarg.h> /* va_arg */
-
-#include "simgrid/link.h"
-#include "simgrid/s4u/engine.hpp"
+#include "simgrid/s4u/Engine.hpp"
#include "simgrid/sg_config.h"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/surf/network_interface.hpp"
-#include "src/surf/surf_private.h"
-#include "xbt/dict.h"
#include "xbt/file.h"
-#include "xbt/log.h"
-#include "xbt/misc.h"
-#include "xbt/str.h"
#include "src/surf/xml/platf_private.hpp"
#include <boost/algorithm/string.hpp>
/* The default current property receiver. Setup in the corresponding opening callbacks. */
xbt_dict_t current_property_set = nullptr;
-xbt_dict_t current_model_property_set = nullptr;
+std::map<std::string, std::string>* current_model_property_set = nullptr;
int AS_TAG = 0; // Whether we just opened an AS tag (to see what to do with the properties)
/* dictionary of random generator data */
XBT_DEBUG("Set AS property %s -> %s", A_surfxml_prop_id, A_surfxml_prop_value);
simgrid::s4u::NetZone* netzone = simgrid::s4u::Engine::instance()->netzoneByNameOrNull(A_surfxml_AS_id);
- netzone->setProperty(A_surfxml_prop_id, xbt_strdup(A_surfxml_prop_value));
+ netzone->setProperty(A_surfxml_prop_id, A_surfxml_prop_value);
}
else{
if (!current_property_set)
void STag_surfxml_model___prop(){
if (!current_model_property_set)
- current_model_property_set = xbt_dict_new_homogeneous(xbt_free_f);
+ current_model_property_set = new std::map<std::string, std::string>();
- xbt_dict_set(current_model_property_set, A_surfxml_model___prop_id, xbt_strdup(A_surfxml_model___prop_value), nullptr);
+ current_model_property_set->insert(
+ {std::string(A_surfxml_model___prop_id), std::string(A_surfxml_model___prop_value)});
}
void ETag_surfxml_prop(){/* Nothing to do */}
}
void xbt_automaton_exp_label_display(xbt_automaton_exp_label_t label){
+ printf("(");
switch(label->type){
- case 0 :
- printf("(");
- xbt_automaton_exp_label_display(label->u.or_and.left_exp);
- printf(" || ");
- xbt_automaton_exp_label_display(label->u.or_and.right_exp);
- printf(")");
- break;
- case 1 :
- printf("(");
- xbt_automaton_exp_label_display(label->u.or_and.left_exp);
- printf(" && ");
- xbt_automaton_exp_label_display(label->u.or_and.right_exp);
- printf(")");
- break;
- case 2 :
- printf("(!");
- xbt_automaton_exp_label_display(label->u.exp_not);
- printf(")");
- break;
- case 3 :
- printf("(%s)",label->u.predicat);
- break;
- case 4 :
- printf("(1)");
- break;
+ case 0:
+ xbt_automaton_exp_label_display(label->u.or_and.left_exp);
+ printf(" || ");
+ xbt_automaton_exp_label_display(label->u.or_and.right_exp);
+ break;
+ case 1:
+ xbt_automaton_exp_label_display(label->u.or_and.left_exp);
+ printf(" && ");
+ xbt_automaton_exp_label_display(label->u.or_and.right_exp);
+ break;
+ case 2:
+ printf("!");
+ xbt_automaton_exp_label_display(label->u.exp_not);
+ break;
+ case 3:
+ printf("%s", label->u.predicat);
+ break;
+ case 4:
+ printf("1");
+ break;
+ default:
+ break;
}
+ printf(")");
}
xbt_automaton_state_t xbt_automaton_get_current_state(xbt_automaton_t a){
}
int xbt_automaton_propositional_symbols_compare_value(xbt_dynar_t s1, xbt_dynar_t s2){
- int *iptr1, *iptr2;
- unsigned int cursor;
unsigned int nb_elem = xbt_dynar_length(s1);
- for(cursor=0;cursor<nb_elem;cursor++){
- iptr1 = xbt_dynar_get_ptr(s1, cursor);
- iptr2 = xbt_dynar_get_ptr(s2, cursor);
+ for (unsigned int cursor = 0; cursor < nb_elem; cursor++) {
+ int* iptr1 = xbt_dynar_get_ptr(s1, cursor);
+ int* iptr2 = xbt_dynar_get_ptr(s2, cursor);
if(*iptr1 != *iptr2)
return 1;
}
namespace simgrid {
namespace config {
-missing_key_error::~missing_key_error() {}
+missing_key_error::~missing_key_error() = default;
class Config;
/* dict - a generic dictionary, variation over hash table */
-/* Copyright (c) 2004-2015. The SimGrid Team.
+/* Copyright (c) 2004-2017. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-
#include <string.h>
#include <stdio.h>
*/
void xbt_dict_free(xbt_dict_t * dict)
{
- int i;
- xbt_dictelm_t current, previous;
+ xbt_dictelm_t current;
+ xbt_dictelm_t previous;
int table_size;
xbt_dictelm_t *table;
table = (*dict)->table;
/* Warning: the size of the table is 'table_size+1'...
* This is because table_size is used as a binary mask in xbt_dict_rehash */
- for (i = 0; (*dict)->count && i <= table_size; i++) {
+ for (int i = 0; (*dict)->count && i <= table_size; i++) {
current = table[i];
while (current != nullptr) {
previous = current;
/** Returns the amount of elements in the dict */
unsigned int xbt_dict_size(xbt_dict_t dict)
{
- return (dict ? (unsigned int) dict->count : (unsigned int) 0);
+ return (dict != nullptr ? static_cast<unsigned int>(dict->count) : static_cast<unsigned int>(0));
}
/* Expend the size of the dict */
{
unsigned int hash_code = xbt_str_hash_ext(key, key_len);
- xbt_dictelm_t current, previous = nullptr;
+ xbt_dictelm_t current;
+ xbt_dictelm_t previous = nullptr;
+ xbt_assert(!free_ctn, "Cannot set an individual free function in homogeneous dicts.");
XBT_CDEBUG(xbt_dict, "ADD %.*s hash = %u, size = %d, & = %u", key_len, key, hash_code,
dict->table_size, hash_code & dict->table_size);
current = dict->table[hash_code & dict->table_size];
if (current == nullptr) {
/* this key doesn't exist yet */
- current = xbt_dictelm_new(dict, key, key_len, hash_code, data, free_ctn);
+ current = xbt_dictelm_new(key, key_len, hash_code, data);
dict->count++;
if (previous == nullptr) {
dict->table[hash_code & dict->table_size] = current;
if (current == nullptr)
THROWF(not_found_error, 0, "key %.*s not found", key_len, key);
-
- if (previous != nullptr) {
- previous->next = current->next;
- } else {
- dict->table[hash_code & dict->table_size] = current->next;
+ else {
+ if (previous != nullptr) {
+ previous->next = current->next;
+ } else {
+ dict->table[hash_code & dict->table_size] = current->next;
+ }
}
if (!dict->table[hash_code & dict->table_size])
}
/* Report current sizes */
- if (count == 0)
- continue;
- if (size == 0)
- continue;
- printf("%uelm x %u cells; ", count, size);
+ if (count != 0 && size != 0)
+ printf("%uelm x %u cells; ", count, size);
}
printf("\n");
xbt_dynar_free(&sizes);
if (dict_elm_mallocator == nullptr)
dict_elm_mallocator = xbt_mallocator_new(256, dict_elm_mallocator_new_f, dict_elm_mallocator_free_f,
dict_elm_mallocator_reset_f);
- if (dict_het_elm_mallocator == nullptr)
- dict_het_elm_mallocator = xbt_mallocator_new(256, dict_het_elm_mallocator_new_f, dict_het_elm_mallocator_free_f,
- dict_het_elm_mallocator_reset_f);
}
/**
if (dict_elm_mallocator != nullptr) {
xbt_mallocator_free(dict_elm_mallocator);
dict_elm_mallocator = nullptr;
- xbt_mallocator_free(dict_het_elm_mallocator);
- dict_het_elm_mallocator = nullptr;
}
if (all_sizes) {
unsigned int count;
int total_count = 0;
printf("Overall stats:");
xbt_dynar_foreach(all_sizes, count, size) {
- if (count == 0)
- continue;
- if (size == 0)
- continue;
- printf("%uelm x %d cells; ", count, size);
- avg += count * size;
- total_count += size;
+ if (count != 0 && size != 0) {
+ printf("%uelm x %d cells; ", count, size);
+ avg += count * size;
+ total_count += size;
+ }
}
printf("; %f elm per cell\n", avg / (double) total_count);
}
/* dict - a generic dictionary, variation over hash table */
-/* Copyright (c) 2004-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_dict_elm, xbt_dict, "Dictionaries internals");
xbt_mallocator_t dict_elm_mallocator = NULL;
-xbt_mallocator_t dict_het_elm_mallocator = NULL;
-xbt_dictelm_t xbt_dictelm_new(xbt_dict_t dict, const char *key, int key_len, unsigned int hash_code, void *content,
- void_f_pvoid_t free_f)
+xbt_dictelm_t xbt_dictelm_new(const char* key, int key_len, unsigned int hash_code, void* content)
{
- xbt_dictelm_t element;
-
- if (dict->homogeneous) {
- xbt_assert(!free_f, "Cannot set an individual free function in homogeneous dicts.");
- element = xbt_mallocator_get(dict_elm_mallocator);
- } else {
- xbt_het_dictelm_t het_element = xbt_mallocator_get(dict_het_elm_mallocator);
- het_element->free_f = free_f;
- element = &het_element->element;
- }
+ xbt_dictelm_t element = xbt_mallocator_get(dict_elm_mallocator);
element->key = xbt_new(char, key_len + 1);
memcpy(element->key, key, key_len);
element->key[key_len] = '\0';
char *key = element->key;
void *content = element->content;
void_f_pvoid_t free_f;
- if (dict->homogeneous) {
- free_f = dict->free_f;
- xbt_mallocator_release(dict_elm_mallocator, element);
- } else {
- xbt_het_dictelm_t het_element = (xbt_het_dictelm_t)element;
- free_f = het_element->free_f;
- xbt_mallocator_release(dict_het_elm_mallocator, het_element);
- }
+ free_f = dict->free_f;
+ xbt_mallocator_release(dict_elm_mallocator, element);
xbt_free(key);
if (free_f && content)
void xbt_dictelm_set_data(xbt_dict_t dict, xbt_dictelm_t element, void *data, void_f_pvoid_t free_ctn)
{
void_f_pvoid_t free_f;
- if (dict->homogeneous) {
- free_f = dict->free_f;
- xbt_assert(!free_ctn, "Cannot set an individual free function in homogeneous dicts.");
- } else {
- xbt_het_dictelm_t het_element = (xbt_het_dictelm_t)element;
- free_f = het_element->free_f;
- het_element->free_f = free_ctn;
- }
+ free_f = dict->free_f;
+ xbt_assert(!free_ctn, "Cannot set an individual free function in homogeneous dicts.");
if (free_f && element->content)
free_f(element->content);
{
return xbt_new(s_xbt_dictelm_t, 1);
}
-
-void *dict_het_elm_mallocator_new_f(void)
-{
- return xbt_new(s_xbt_het_dictelm_t, 1);
-}
#define MAX_FILL_PERCENT 80
-typedef struct s_xbt_het_dictelm {
- s_xbt_dictelm_t element;
- void_f_pvoid_t free_f;
-} s_xbt_het_dictelm_t, *xbt_het_dictelm_t;
-
typedef struct s_xbt_dict {
void_f_pvoid_t free_f;
xbt_dictelm_t *table;
#define dict_elm_mallocator_free_f xbt_free_f
#define dict_elm_mallocator_reset_f ((void_f_pvoid_t)NULL)
-extern XBT_PRIVATE xbt_mallocator_t dict_het_elm_mallocator;
-extern XBT_PRIVATE void * dict_het_elm_mallocator_new_f(void);
-#define dict_het_elm_mallocator_free_f xbt_free_f
-#define dict_het_elm_mallocator_reset_f ((void_f_pvoid_t)NULL)
-
/*####[ Function prototypes ]################################################*/
-XBT_PRIVATE xbt_dictelm_t xbt_dictelm_new(xbt_dict_t dict, const char *key, int key_len,
- unsigned int hash_code, void *content, void_f_pvoid_t free_f);
+XBT_PRIVATE xbt_dictelm_t xbt_dictelm_new(const char* key, int key_len, unsigned int hash_code, void* content);
XBT_PRIVATE void xbt_dictelm_free(xbt_dict_t dict, xbt_dictelm_t element);
XBT_PRIVATE void xbt_dictelm_set_data(xbt_dict_t dict, xbt_dictelm_t element, void *data, void_f_pvoid_t free_ctn);
static inline void _sanity_check_idx(int idx)
{
- xbt_assert(idx >= 0, "dynar idx(=%d) < 0", (int) (idx));
+ xbt_assert(idx >= 0, "dynar idx(=%d) < 0", idx);
}
static inline void _check_inbound_idx(xbt_dynar_t dynar, int idx)
{
- if (idx < 0 || idx >= (int)dynar->used) {
+ if (idx < 0 || idx >= static_cast<int>(dynar->used)) {
THROWF(bound_error, idx, "dynar is not that long. You asked %d, but it's only %lu long",
- (int) (idx), (unsigned long) dynar->used);
+ idx, static_cast<unsigned long>(dynar->used));
}
}
unsigned long offset;
unsigned long cur;
- if (!n) return;
+ if (!n)
+ return;
_sanity_check_dynar(dynar);
_check_inbound_idx(dynar, idx);
*/
extern "C" void xbt_dynar_cursor_rm(xbt_dynar_t dynar, unsigned int* const cursor)
{
- xbt_dynar_remove_at(dynar, (*cursor)--, nullptr);
+ xbt_dynar_remove_at(dynar, *cursor, nullptr);
+ *cursor -= 1;
}
/** @brief Sorts a dynar according to the function <tt>compar_fn</tt>
++i;
} else {
if (colori == 0) {
- elm = _xbt_dynar_elm(dynar, ++p);
+ ++p;
+ elm = _xbt_dynar_elm(dynar, p);
++i;
} else { /* colori == 2 */
- elm = _xbt_dynar_elm(dynar, --q);
+ --q;
+ elm = _xbt_dynar_elm(dynar, q);
}
if (elm != elmi) {
memcpy(tmp, elm, elmsize);
{
int i ;
int size;
- if((!d1) && (!d2)) return 0;
- if((!d1) || (!d2))
- {
+ if((!d1) && (!d2))
+ return 0;
+ if((!d1) || (!d2)) {
XBT_DEBUG("nullptr dynar d1=%p d2=%p",d1,d2);
xbt_dynar_free(&d2);
return 1;
s1 = xbt_strdup(buf);
xbt_dynar_push(d, &s1);
}
- for (int cpt = 0; cpt < NB_ELEM; cpt++) {
- snprintf(buf,1023, "%d", cpt);
- s1 = xbt_strdup(buf);
- xbt_dynar_replace(d, cpt, &s1);
- }
- for (int cpt = 0; cpt < NB_ELEM; cpt++) {
- snprintf(buf,1023, "%d", cpt);
- s1 = xbt_strdup(buf);
- xbt_dynar_replace(d, cpt, &s1);
- }
- for (int cpt = 0; cpt < NB_ELEM; cpt++) {
- snprintf(buf,1023, "%d", cpt);
- s1 = xbt_strdup(buf);
- xbt_dynar_replace(d, cpt, &s1);
+ for (int i = 0 ; i < 3 ; i++) {
+ for (int cpt = 0; cpt < NB_ELEM; cpt++) {
+ snprintf(buf,1023, "%d", cpt);
+ s1 = xbt_strdup(buf);
+ xbt_dynar_replace(d, cpt, &s1);
+ }
}
for (int cpt = 0; cpt < NB_ELEM; cpt++) {
snprintf(buf,1023, "%d", cpt);
*/
void xbt_heap_push(xbt_heap_t H, void *content, double key)
{
- int count = ++(H->count);
+ H->count += 1;
+ int count = H->count;
int size = H->size;
xbt_heap_item_t item;
typedef struct xbt_heap_item {
void *content;
double key;
-} s_xbt_heap_item_t, *xbt_heap_item_t;
+} s_xbt_heap_item_t;
+typedef s_xbt_heap_item_t* xbt_heap_item_t;
typedef struct xbt_heap {
int size;
e_xbt_log_priority_t thresh;
int additivity;
xbt_log_appender_t appender;
-} s_xbt_log_setting_t, *xbt_log_setting_t;
+} s_xbt_log_setting_t;
+
+typedef s_xbt_log_setting_t* xbt_log_setting_t;
static xbt_dynar_t xbt_log_settings = NULL;
control_string += strspn(control_string, " ");
const char *name = control_string;
- control_string += strcspn(control_string, ".= ");
+ control_string += strcspn(control_string, ".:= ");
const char *dot = control_string;
control_string += strcspn(control_string, ":= ");
const char *eq = control_string;
- if(*dot != '.' && (*eq == '=' || *eq == ':'))
- xbt_die ("Invalid control string '%s'", orig_control_string);
+ xbt_assert(*dot == '.' || (*eq != '=' && *eq != ':'), "Invalid control string '%s'", orig_control_string);
if (!strncmp(dot + 1, "threshold", (size_t) (eq - dot - 1))) {
int i;
} else {
char buff[512];
snprintf(buff, MIN(512, eq - dot), "%s", dot + 1);
- THROWF(arg_error, 0, "Unknown setting of the log category: '%s'", buff);
+ xbt_die("Unknown setting of the log category: '%s'", buff);
}
set->catname = (char *) xbt_malloc(dot - name + 1);
#else /* damn, no sem_init(). Reimplement it */
xbt_os_mutex_acquire(next_sem_ID_lock);
- res->name = bprintf("/%d", ++next_sem_ID);
+ res->name = bprintf("/sg-%d", ++next_sem_ID);
xbt_os_mutex_release(next_sem_ID_lock);
+ sem_unlink(res->name);
res->ps = sem_open(res->name, O_CREAT, 0644, value);
if ((res->ps == (sem_t *) SEM_FAILED) && (errno == ENAMETOOLONG)) {
/* Old darwins only allow 13 chars. Did you create *that* amount of semaphores? */
res->name[13] = '\0';
+ sem_unlink(res->name);
res->ps = sem_open(res->name, O_CREAT, 0644, value);
}
if (res->ps == (sem_t *) SEM_FAILED)
*/
struct s_xbt_os_timer {
#if HAVE_POSIX_GETTIME
- struct timespec start, stop, elapse;
+ struct timespec start;
+ struct timespec stop;
+ struct timespec elapse;
#elif HAVE_GETTIMEOFDAY || defined(_WIN32)
- struct timeval start, stop, elapse;
+ struct timeval start;
+ struct timeval stop;
+ struct timeval elapse;
#else
- unsigned long int start, stop, elapse;
+ unsigned long int start;
+ unsigned long int stop;
+ unsigned long int elapse;
#endif
};
-size_t xbt_os_timer_size(void){
+size_t xbt_os_timer_size(void)
+{
return sizeof(struct s_xbt_os_timer);
}
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "src/internal_config.h"
#include "xbt/ex.hpp"
#include "xbt/log.h"
#include "xbt/replay.hpp"
-#include "xbt/str.h"
-#include "xbt/sysdep.h"
#include <boost/algorithm/string.hpp>
-#include <ctype.h>
-#include <errno.h>
-#include <wchar.h>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(replay,xbt,"Replay trace reader");
static void read_and_trim_line(std::ifstream* fs, std::string* line)
{
- std::getline(*fs, *line);
- boost::trim(*line);
+ do {
+ std::getline(*fs, *line);
+ boost::trim(*line);
+ } while (!fs->eof() && (line->length() == 0 || line->front() == '#'));
XBT_DEBUG("got from trace: %s", line->c_str());
}
std::string line;
public:
- char* filename_;
- int linenum = 0;
-
explicit ReplayReader(const char* filename)
{
- filename_ = xbt_strdup(filename);
- fs = new std::ifstream(filename, std::ifstream::in);
+ fs = new std::ifstream(filename, std::ifstream::in);
}
~ReplayReader()
{
- free(filename_);
delete fs;
}
bool get(ReplayAction* action);
bool ReplayReader::get(ReplayAction* action)
{
read_and_trim_line(fs, &line);
- linenum++;
- if (line.length() > 0 && line.find("#") == std::string::npos) {
- boost::split(*action, line, boost::is_any_of(" \t"), boost::token_compress_on);
- return !fs->eof();
- } else {
- if (fs->eof())
- return false;
- else
- return this->get(action);
- }
+ boost::split(*action, line, boost::is_any_of(" \t"), boost::token_compress_on);
+ return !fs->eof();
}
static ReplayAction* get_action(char* name)
if (action_queues.find(std::string(name)) != action_queues.end())
myqueue = action_queues.at(std::string(name));
if (myqueue == nullptr || myqueue->empty()) { // Nothing stored for me. Read the file further
- if (action_fs == nullptr) { // File closed now. There's nothing more to read. I'm out of here
- goto todo_done;
- }
// Read lines until I reach something for me (which breaks in loop body) or end of file reached
- while (!action_fs->eof()) {
+ while (true) {
std::string action_line;
read_and_trim_line(action_fs, &action_line);
- if (action_line.length() > 0 && action_line.find("#") == std::string::npos) {
- /* we cannot split in place here because we parse&store several lines for the colleagues... */
- action = new ReplayAction();
- boost::split(*action, action_line, boost::is_any_of(" \t"), boost::token_compress_on);
-
- // if it's for me, I'm done
- std::string evtname = action->front();
- if (evtname.compare(name) == 0) {
- return action;
- } else {
- // Else, I have to store it for the relevant colleague
- std::queue<ReplayAction*>* otherqueue = nullptr;
- if (action_queues.find(evtname) != action_queues.end())
- otherqueue = action_queues.at(evtname);
- if (otherqueue == nullptr) { // Damn. Create the queue of that guy
- otherqueue = new std::queue<ReplayAction*>();
- action_queues.insert({evtname, otherqueue});
- }
- otherqueue->push(action);
+ if (action_fs->eof())
+ break;
+ /* we cannot split in place here because we parse&store several lines for the colleagues... */
+ action = new ReplayAction();
+ boost::split(*action, action_line, boost::is_any_of(" \t"), boost::token_compress_on);
+
+ // if it's for me, I'm done
+ std::string evtname = action->front();
+ if (evtname.compare(name) == 0) {
+ return action;
+ } else {
+ // Else, I have to store it for the relevant colleague
+ std::queue<ReplayAction*>* otherqueue = nullptr;
+ if (action_queues.find(evtname) != action_queues.end())
+ otherqueue = action_queues.at(evtname);
+ else { // Damn. Create the queue of that guy
+ otherqueue = new std::queue<ReplayAction*>();
+ action_queues.insert({evtname, otherqueue});
}
+ otherqueue->push(action);
}
}
// end of file reached while searching in vain for more work
myqueue->pop();
return action;
}
-
-// All my actions in the file are done and either I or a colleague closed the file. Let's cleanup before leaving.
-todo_done:
- if (myqueue != nullptr) {
- delete myqueue;
- action_queues.erase(std::string(name));
- }
return nullptr;
}
simgrid::xbt::handle_action(evt);
delete evt;
}
+ if (action_queues.find(std::string(argv[0])) != action_queues.end()) {
+ std::queue<ReplayAction*>* myqueue = action_queues.at(std::string(argv[0]));
+ delete myqueue;
+ action_queues.erase(std::string(argv[0]));
+ }
} else { // Should have got my trace file in argument
simgrid::xbt::ReplayAction* evt = new simgrid::xbt::ReplayAction();
xbt_assert(argc >= 2, "No '%s' agent function provided, no simulation-wide trace file provided, "
argv[0]);
simgrid::xbt::ReplayReader* reader = new simgrid::xbt::ReplayReader(argv[1]);
while (reader->get(evt)) {
- if (evt->at(0).compare(argv[0]) == 0) {
+ if (evt->front().compare(argv[0]) == 0) {
simgrid::xbt::handle_action(evt);
} else {
- XBT_WARN("%s:%d: Ignore trace element not for me", reader->filename_, reader->linenum);
+ XBT_WARN("Ignore trace element not for me");
}
evt->clear();
}
{
simgrid::xbt::action_funs.insert({std::string(action_name), function});
}
+
+/**
+ * \ingroup XBT_replay
+ * \brief Get the function that was previously registered to handle a kind of action
+ *
+ * This can be useful if you want to override and extend an existing action.
+ */
+action_fun xbt_replay_action_get(const char* action_name)
+{
+ return simgrid::xbt::action_funs.at(std::string(action_name));
+}
return res;
}
-/** @brief creates a new string containing what can be read on a fd */
-char *xbt_str_from_file(FILE * file)
-{
- xbt_strbuff_t buff = xbt_strbuff_new();
- char *res;
- char bread[1024];
- memset(bread, 0, 1024);
-
- while (!feof(file)) {
- int got = fread(bread, 1, 1023, file);
- bread[got] = '\0';
- xbt_strbuff_append(buff, bread);
- }
-
- res = buff->data;
- xbt_strbuff_free_container(buff);
- return res;
-}
-
/** @brief Parse an integer out of a string, or raise an error
*
* The @a str is passed as argument to your @a error_msg, as follows:
# C examples
-foreach(x concurrent_rw get_sender host_on_off host_on_off_recv host_on_off_processes host_on_off_wait listen_async pid
- storage_client_server trace_integration)
+foreach(x get_sender host_on_off host_on_off_recv host_on_off_processes trace_integration)
add_executable (${x} ${x}/${x}.c)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
endforeach()
# CPP examples
-foreach(x process task_destroy_cancel)
+foreach(x task_destroy_cancel)
add_executable (${x} ${x}/${x}.cpp)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
${CMAKE_CURRENT_SOURCE_DIR}/trace_integration/test-hbp1-c1s1-c3s2.xml
${CMAKE_CURRENT_SOURCE_DIR}/trace_integration/test-hbp2.5-hbp1.5.xml PARENT_SCOPE)
-foreach(x concurrent_rw get_sender host_on_off host_on_off_processes host_on_off_recv host_on_off_wait listen_async pid
- process storage_client_server task_destroy_cancel trace_integration)
+foreach(x get_sender host_on_off host_on_off_processes host_on_off_recv task_destroy_cancel trace_integration)
ADD_TESH_FACTORIES(tesh-msg-${x} "thread;boost;ucontext;raw" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/msg/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x}/${x}.tesh)
endforeach()
+++ /dev/null
-/* Copyright (c) 2008-2010, 2012-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-#include <unistd.h>
-
-#define FILENAME1 "/home/doc/simgrid/examples/platforms/g5k.xml"
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(storage,"Messages specific for this simulation");
-
-static int host(int argc, char *argv[])
-{
- char name[2048];
- int id = MSG_process_self_PID();
- snprintf(name,2048,"%s%i", FILENAME1, id);
- msg_file_t file = MSG_file_open(name, NULL);
- XBT_INFO("process %d is writing!", id);
- MSG_file_write(file, 3000000);
- XBT_INFO("process %d goes to sleep for %d seconds", id, id);
- MSG_process_sleep(id);
- XBT_INFO("process %d is writing again!", id);
- MSG_file_write(file, 3000000);
- XBT_INFO("process %d goes to sleep for %d seconds", id, 6 - id);
- MSG_process_sleep(6-id);
- XBT_INFO("process %d is reading!", id);
- MSG_file_seek(file, 0, SEEK_SET);
- MSG_file_read(file, 3000000);
- XBT_INFO("process %d goes to sleep for %d seconds", id, id);
- MSG_process_sleep(id);
- XBT_INFO("process %d is reading again!", id);
- MSG_file_seek(file, 0, SEEK_SET);
- MSG_file_read(file, 3000000);
-
- XBT_INFO("process %d => Size of %s: %llu", id, MSG_file_get_name(file), MSG_file_get_size(file));
- MSG_file_close(file);
-
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- MSG_init(&argc, argv);
- MSG_create_environment(argv[1]);
-
- MSG_function_register("host", host);
- for(int i = 0 ; i < 5; i++){
- MSG_process_create("bob", host, NULL, MSG_host_by_name(xbt_strdup("bob")));
- }
-
- int res = MSG_main();
- XBT_INFO("Simulation time %g", MSG_get_clock());
-
- return res != MSG_OK;
-}
+++ /dev/null
-$ ./concurrent_rw$EXEEXT ${srcdir:=.}/../../../examples/platforms/storage/storage.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:bob@bob) process 1 is writing!
-> [ 0.000000] (2:bob@bob) process 2 is writing!
-> [ 0.000000] (3:bob@bob) process 3 is writing!
-> [ 0.000000] (4:bob@bob) process 4 is writing!
-> [ 0.000000] (5:bob@bob) process 5 is writing!
-> [ 0.500000] (1:bob@bob) process 1 goes to sleep for 1 seconds
-> [ 0.500000] (2:bob@bob) process 2 goes to sleep for 2 seconds
-> [ 0.500000] (3:bob@bob) process 3 goes to sleep for 3 seconds
-> [ 0.500000] (4:bob@bob) process 4 goes to sleep for 4 seconds
-> [ 0.500000] (5:bob@bob) process 5 goes to sleep for 5 seconds
-> [ 1.500000] (1:bob@bob) process 1 is writing again!
-> [ 1.600000] (1:bob@bob) process 1 goes to sleep for 5 seconds
-> [ 2.500000] (2:bob@bob) process 2 is writing again!
-> [ 2.600000] (2:bob@bob) process 2 goes to sleep for 4 seconds
-> [ 3.500000] (3:bob@bob) process 3 is writing again!
-> [ 3.600000] (3:bob@bob) process 3 goes to sleep for 3 seconds
-> [ 4.500000] (4:bob@bob) process 4 is writing again!
-> [ 4.600000] (4:bob@bob) process 4 goes to sleep for 2 seconds
-> [ 5.500000] (5:bob@bob) process 5 is writing again!
-> [ 5.600000] (5:bob@bob) process 5 goes to sleep for 1 seconds
-> [ 6.600000] (4:bob@bob) process 4 is reading!
-> [ 6.600000] (5:bob@bob) process 5 is reading!
-> [ 6.600000] (1:bob@bob) process 1 is reading!
-> [ 6.600000] (2:bob@bob) process 2 is reading!
-> [ 6.600000] (3:bob@bob) process 3 is reading!
-> [ 6.750000] (4:bob@bob) process 4 goes to sleep for 4 seconds
-> [ 6.750000] (5:bob@bob) process 5 goes to sleep for 5 seconds
-> [ 6.750000] (1:bob@bob) process 1 goes to sleep for 1 seconds
-> [ 6.750000] (2:bob@bob) process 2 goes to sleep for 2 seconds
-> [ 6.750000] (3:bob@bob) process 3 goes to sleep for 3 seconds
-> [ 7.750000] (1:bob@bob) process 1 is reading again!
-> [ 7.780000] (1:bob@bob) process 1 => Size of /home/doc/simgrid/examples/platforms/g5k.xml1: 6000000
-> [ 8.750000] (2:bob@bob) process 2 is reading again!
-> [ 8.780000] (2:bob@bob) process 2 => Size of /home/doc/simgrid/examples/platforms/g5k.xml2: 6000000
-> [ 9.750000] (3:bob@bob) process 3 is reading again!
-> [ 9.780000] (3:bob@bob) process 3 => Size of /home/doc/simgrid/examples/platforms/g5k.xml3: 6000000
-> [ 10.750000] (4:bob@bob) process 4 is reading again!
-> [ 10.780000] (4:bob@bob) process 4 => Size of /home/doc/simgrid/examples/platforms/g5k.xml4: 6000000
-> [ 11.750000] (5:bob@bob) process 5 is reading again!
-> [ 11.780000] (5:bob@bob) process 5 => Size of /home/doc/simgrid/examples/platforms/g5k.xml5: 6000000
-> [ 11.780000] (0:maestro@) Simulation time 11.78
MSG_host_off(jupiter);
if (comm) {
+ MSG_task_destroy(task);
MSG_comm_wait(comm, -1);
MSG_comm_destroy(comm);
}
+++ /dev/null
-/* Copyright (c) 2010-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h" /* Yeah! If you want to use msg, you need to include simgrid/msg.h */
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-
-static int master(int argc, char *argv[])
-{
- msg_host_t jupiter = MSG_host_by_name("Jupiter");
- XBT_INFO("Master waiting");
- MSG_process_sleep(1);
-
- XBT_INFO("Turning off the slave host");
- MSG_host_off(jupiter);
- XBT_INFO("Master has finished");
-
- return 0;
-}
-
-static int slave(int argc, char *argv[])
-{
- XBT_INFO("Slave waiting");
- // TODO, This should really be MSG_HOST_FAILURE
- MSG_process_sleep(5);
- XBT_ERROR("Slave should be off already.");
- return 1;
-}
-
-int main(int argc, char *argv[])
-{
- msg_error_t res;
-
- MSG_init(&argc, argv);
- xbt_assert(argc == 2, "Usage: %s platform_file\n\tExample: %s msg_platform.xml\n", argv[0], argv[0]);
-
- MSG_create_environment(argv[1]);
-
- MSG_process_create("master", master, NULL, MSG_get_host_by_name("Tremblay"));
- MSG_process_create("slave", slave, NULL, MSG_get_host_by_name("Jupiter"));
-
- res = MSG_main();
-
- XBT_INFO("Simulation time %g", MSG_get_clock());
-
- return res != MSG_OK;
-}
+++ /dev/null
-$ ./host_on_off_wait ${srcdir:=.}/../../../examples/platforms/small_platform.xml
-> [Tremblay:master:(1) 0.000000] [msg_test/INFO] Master waiting
-> [Jupiter:slave:(2) 0.000000] [msg_test/INFO] Slave waiting
-> [Tremblay:master:(1) 1.000000] [msg_test/INFO] Turning off the slave host
-> [Tremblay:master:(1) 1.000000] [msg_test/INFO] Master has finished
-> [1.000000] [msg_test/INFO] Simulation time 1
+++ /dev/null
-/* Bug report: https://github.com/simgrid/simgrid/issues/40
- *
- * Task.listen used to be on async mailboxes as it always returned false.
- * This occures in Java and C, but is only tested here in C.
- */
-
-#include "simgrid/msg.h"
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-
-static int server(int argc, char *argv[])
-{
- msg_task_t task = MSG_task_create("a", 0, 0, (char*)"Some data");
- MSG_task_isend(task, "mailbox");
-
- xbt_assert(MSG_task_listen("mailbox")); // True (1)
- XBT_INFO("Task listen works on regular mailboxes");
- task = NULL;
- MSG_task_receive(&task, "mailbox");
- xbt_assert(!strcmp("Some data", MSG_task_get_data(task)), "Data received: %s", (char*)MSG_task_get_data(task));
- MSG_task_destroy(task);
- XBT_INFO("Data successfully received from regular mailbox");
-
- MSG_mailbox_set_async("mailbox2");
- task = MSG_task_create("b", 0, 0, (char*)"More data");
- MSG_task_isend(task, "mailbox2");
-
- xbt_assert(MSG_task_listen("mailbox2")); // used to break.
- XBT_INFO("Task listen works on asynchronous mailboxes");
- task = NULL;
- MSG_task_receive(&task, "mailbox2");
- xbt_assert(!strcmp("More data", MSG_task_get_data(task)));
- MSG_task_destroy(task);
- XBT_INFO("Data successfully received from asynchronous mailbox");
-
- return 0;
-}
-
-int main(int argc, char *argv[])
-{
- MSG_init(&argc, argv);
- xbt_assert(argc==2);
- MSG_create_environment(argv[1]);
- MSG_process_create("test", server, NULL, MSG_host_by_name("Tremblay"));
- MSG_main();
-
- return 0;
-}
+++ /dev/null
-$ ./listen_async ${srcdir:=.}/../../../examples/platforms/small_platform.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:test@Tremblay) Task listen works on regular mailboxes
-> [ 0.000195] (1:test@Tremblay) Data successfully received from regular mailbox
-> [ 0.000195] (1:test@Tremblay) Task listen works on asynchronous mailboxes
-> [ 0.000195] (1:test@Tremblay) Data successfully received from asynchronous mailbox
-> [ 0.000195] (0:maestro@) Variable 3 still in system when freing it: this may be a bug
+++ /dev/null
-/* Copyright (c) 2009-2010, 2013-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-
-const char* mailbox = "mailbox";
-#define task_comp_size 1000
-#define task_comm_size 100000
-
-static int my_onexit(smx_process_exit_status_t status, int *pid){
- XBT_INFO("Process \"%d\" killed.", *pid);
- return 0;
-}
-
-static int sendpid(int argc, char *argv[])
-{
- int pid = MSG_process_self_PID();
- MSG_process_on_exit((int_f_pvoid_pvoid_t)my_onexit, &pid);
- msg_task_t task = MSG_task_create("pid", task_comp_size, task_comm_size, &pid);
- XBT_INFO("Sending pid of \"%d\".", pid);
- MSG_task_send(task, mailbox);
- XBT_INFO("Send of pid \"%d\" done.", pid);
- MSG_process_suspend(MSG_process_self());
- return 0;
-}
-
-static int killall(int argc, char *argv[]){
- msg_task_t task = NULL;
-
- for (int i=0; i<3;i++) {
- MSG_task_receive(&(task), mailbox);
- int pid = *(int*)MSG_task_get_data(task);
- MSG_task_destroy(task);
- XBT_INFO("Killing process \"%d\".", pid);
- MSG_process_kill(MSG_process_from_PID(pid));
- task = NULL;
- }
- return 0;
-}
-
-int main(int argc, char* argv[])
-{
- MSG_init(&argc, argv);
-
- xbt_assert(argc >= 2, "Usage: pid platform pid_to_kill");
-
- MSG_process_killall(atoi(argv[2]));
-
- MSG_create_environment(argv[1]);
- MSG_process_create("sendpid", sendpid, NULL, MSG_get_host_by_name("Tremblay"));
- MSG_process_create("sendpid", sendpid, NULL, MSG_get_host_by_name("Tremblay"));
- MSG_process_create("sendpid", sendpid, NULL, MSG_get_host_by_name("Tremblay"));
- MSG_process_create("killall", killall, NULL, MSG_get_host_by_name("Tremblay"));
-
- return MSG_main() != MSG_OK;
-}
+++ /dev/null
-$ ./pid ${srcdir:=.}/../../../examples/platforms/small_platform.xml 0 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:sendpid@Tremblay) Sending pid of "1".
-> [ 0.000000] (2:sendpid@Tremblay) Sending pid of "2".
-> [ 0.000000] (3:sendpid@Tremblay) Sending pid of "3".
-> [ 0.000402] (4:killall@Tremblay) Killing process "1".
-> [ 0.000402] (1:sendpid@Tremblay) Send of pid "1" done.
-> [ 0.000402] (1:sendpid@Tremblay) Process "1" killed.
-> [ 0.000804] (2:sendpid@Tremblay) Send of pid "2" done.
-> [ 0.000804] (4:killall@Tremblay) Killing process "2".
-> [ 0.000804] (2:sendpid@Tremblay) Process "2" killed.
-> [ 0.001206] (3:sendpid@Tremblay) Send of pid "3" done.
-> [ 0.001206] (4:killall@Tremblay) Killing process "3".
-> [ 0.001206] (3:sendpid@Tremblay) Process "3" killed.
-
-$ ./pid ${srcdir:=.}/../../../examples/platforms/small_platform.xml 2 "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (2:sendpid@Tremblay) Sending pid of "2".
-> [ 0.000000] (3:sendpid@Tremblay) Sending pid of "3".
-> [ 0.000000] (4:sendpid@Tremblay) Sending pid of "4".
-> [ 0.000402] (5:killall@Tremblay) Killing process "2".
-> [ 0.000402] (2:sendpid@Tremblay) Send of pid "2" done.
-> [ 0.000402] (2:sendpid@Tremblay) Process "2" killed.
-> [ 0.000804] (3:sendpid@Tremblay) Send of pid "3" done.
-> [ 0.000804] (5:killall@Tremblay) Killing process "3".
-> [ 0.000804] (3:sendpid@Tremblay) Process "3" killed.
-> [ 0.001206] (4:sendpid@Tremblay) Send of pid "4" done.
-> [ 0.001206] (5:killall@Tremblay) Killing process "4".
-> [ 0.001206] (4:sendpid@Tremblay) Process "4" killed.
+++ /dev/null
-/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
-
-static int slave(int argc, char* argv[])
-{
- MSG_process_sleep(.5);
- XBT_INFO("Slave started (PID:%d, PPID:%d)", MSG_process_self_PID(), MSG_process_self_PPID());
- while (1) {
- XBT_INFO("Plop i am %ssuspended", (MSG_process_is_suspended(MSG_process_self())) ? "" : "not ");
- MSG_process_sleep(1);
- }
- XBT_INFO("I'm done. See you!");
- return 0;
-}
-
-static int master(int argc, char* argv[])
-{
- MSG_process_sleep(1);
- xbt_dynar_t process_list = xbt_dynar_new(sizeof(msg_process_t), nullptr);
- MSG_host_get_process_list(MSG_host_self(), process_list);
-
- msg_process_t process = NULL;
- unsigned int cursor;
- xbt_dynar_foreach (process_list, cursor, process) {
- XBT_INFO("Process(pid=%d, ppid=%d, name=%s)", MSG_process_get_PID(process), MSG_process_get_PPID(process),
- MSG_process_get_name(process));
- if (MSG_process_self_PID() != MSG_process_get_PID(process))
- MSG_process_kill(process);
- }
- xbt_dynar_free(&process_list);
-
- process = MSG_process_create("slave from master", slave, NULL, MSG_host_self());
- MSG_process_sleep(2);
-
- XBT_INFO("Suspend Process(pid=%d)", MSG_process_get_PID(process));
- MSG_process_suspend(process);
-
- XBT_INFO("Process(pid=%d) is %ssuspended", MSG_process_get_PID(process),
- (MSG_process_is_suspended(process)) ? "" : "not ");
- MSG_process_sleep(2);
-
- XBT_INFO("Resume Process(pid=%d)", MSG_process_get_PID(process));
- MSG_process_resume(process);
-
- XBT_INFO("Process(pid=%d) is %ssuspended", MSG_process_get_PID(process),
- (MSG_process_is_suspended(process)) ? "" : "not ");
- MSG_process_sleep(2);
- MSG_process_kill(process);
-
- XBT_INFO("Goodbye now!");
- return 0;
-}
-
-int main(int argc, char* argv[])
-{
- MSG_init(&argc, argv);
- xbt_assert(argc == 2, "Usage: %s platform_file\n\t Example: %s msg_platform.xml\n", argv[0], argv[0]);
-
- MSG_create_environment(argv[1]);
-
- MSG_process_create("master", master, NULL, MSG_get_host_by_name("Tremblay"));
- MSG_process_create("slave", slave, NULL, MSG_get_host_by_name("Tremblay"));
-
- msg_error_t res = MSG_main();
-
- XBT_INFO("Simulation time %g", MSG_get_clock());
-
- return res != MSG_OK;
-}
+++ /dev/null
-
-$ ./process ${srcdir:=.}/../../../examples/platforms/small_platform.xml
-> [Tremblay:slave:(2) 0.500000] [msg_test/INFO] Slave started (PID:2, PPID:0)
-> [Tremblay:slave:(2) 0.500000] [msg_test/INFO] Plop i am not suspended
-> [Tremblay:master:(1) 1.000000] [msg_test/INFO] Process(pid=1, ppid=0, name=master)
-> [Tremblay:master:(1) 1.000000] [msg_test/INFO] Process(pid=2, ppid=0, name=slave)
-> [Tremblay:slave from master:(3) 1.500000] [msg_test/INFO] Slave started (PID:3, PPID:1)
-> [Tremblay:slave from master:(3) 1.500000] [msg_test/INFO] Plop i am not suspended
-> [Tremblay:slave from master:(3) 2.500000] [msg_test/INFO] Plop i am not suspended
-> [Tremblay:master:(1) 3.000000] [msg_test/INFO] Suspend Process(pid=3)
-> [Tremblay:master:(1) 3.000000] [msg_test/INFO] Process(pid=3) is suspended
-> [Tremblay:master:(1) 5.000000] [msg_test/INFO] Resume Process(pid=3)
-> [Tremblay:master:(1) 5.000000] [msg_test/INFO] Process(pid=3) is not suspended
-> [Tremblay:slave from master:(3) 5.000000] [msg_test/INFO] Plop i am not suspended
-> [Tremblay:slave from master:(3) 6.000000] [msg_test/INFO] Plop i am not suspended
-> [Tremblay:master:(1) 7.000000] [msg_test/INFO] Goodbye now!
-> [7.000000] [msg_test/INFO] Simulation time 7
+++ /dev/null
-/* Copyright (c) 2013-2015. The SimGrid Team.
- * All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-#include "simgrid/msg.h"
-
-XBT_LOG_NEW_DEFAULT_CATEGORY(storage,"Messages specific for this simulation");
-
-static void display_storage_properties(msg_storage_t storage){
- xbt_dict_cursor_t cursor = NULL;
- char *key;
- char *data;
- xbt_dict_t props = MSG_storage_get_properties(storage);
- if (xbt_dict_length(props) > 0){
- XBT_INFO("\tProperties of mounted storage: %s", MSG_storage_get_name(storage));
- xbt_dict_foreach(props, cursor, key, data)
- XBT_INFO("\t\t'%s' -> '%s'", key, data);
- }else{
- XBT_INFO("\tNo property attached.");
- }
-}
-
-static sg_size_t write_local_file(const char *dest, sg_size_t file_size)
-{
- msg_file_t file = MSG_file_open(dest, NULL);
- sg_size_t written = MSG_file_write(file, file_size);
- XBT_INFO("%llu bytes on %llu bytes have been written by %s on /sd1",written, file_size,
- MSG_host_get_name(MSG_host_self()));
- MSG_file_close(file);
- return written;
-}
-
-static sg_size_t read_local_file(const char *src)
-{
- msg_file_t file = MSG_file_open(src, NULL);
- sg_size_t file_size = MSG_file_get_size(file);
-
- sg_size_t read = MSG_file_read(file, file_size);
- XBT_INFO("%s has read %llu on %s",MSG_host_get_name(MSG_host_self()),read,src);
- MSG_file_close(file);
-
- return read;
-}
-
-// Read src file on local disk and send a put message to remote host (size of message = size of src file)
-static int hsm_put(const char *remote_host, const char *src, const char *dest){
- // Read local src file, and return the size that was actually read
- sg_size_t read_size = read_local_file(src);
-
- // Send file
- XBT_INFO("%s sends %llu to %s",MSG_host_get_name(MSG_host_self()),read_size,remote_host);
- msg_task_t to_execute = MSG_task_create((const char*)"hsm_put", 0, (double) read_size, (void*)dest);
- MSG_task_send(to_execute, remote_host);
- MSG_process_sleep(.4);
- return 1;
-}
-
-static void display_storage_content(msg_storage_t storage){
- XBT_INFO("Print the content of the storage element: %s",MSG_storage_get_name(storage));
- xbt_dict_cursor_t cursor = NULL;
- char *file;
- sg_size_t *psize;
- xbt_dict_t content = MSG_storage_get_content(storage);
- if (content){
- xbt_dict_foreach(content, cursor, file, psize)
- XBT_INFO("\t%s size: %llu bytes", file, *psize);
- } else {
- XBT_INFO("\tNo content.");
- }
- xbt_dict_free(&content);
-}
-
-static void dump_storage_by_name(char *name){
- XBT_INFO("*** Dump a storage element ***");
- msg_storage_t storage = MSG_storage_get_by_name(name);
-
- if(storage){
- display_storage_content(storage);
- }
- else{
- XBT_INFO("Unable to retrieve storage element by its name: %s.", name);
- }
-}
-
-static void get_set_storage_data(const char *storage_name){
- XBT_INFO("*** GET/SET DATA for storage element: %s ***",storage_name);
- msg_storage_t storage = MSG_storage_get_by_name(storage_name);
- char *data = MSG_storage_get_data(storage);
- XBT_INFO("Get data: '%s'", data);
-
- MSG_storage_set_data(storage, xbt_strdup("Some data"));
- data = MSG_storage_get_data(storage);
- XBT_INFO("\tSet and get data: '%s'", data);
- xbt_free(data);
-}
-
-static void dump_platform_storages(void){
- unsigned int cursor;
- xbt_dynar_t storages = MSG_storages_as_dynar();
- msg_storage_t storage;
- xbt_dynar_foreach(storages, cursor, storage){
- XBT_INFO("Storage %s is attached to %s", MSG_storage_get_name(storage), MSG_storage_get_host(storage));
- MSG_storage_set_property_value(storage, "other usage", xbt_strdup("gpfs"));
- }
- xbt_dynar_free(&storages);
-}
-
-static void storage_info(msg_host_t host)
-{
- const char* host_name = MSG_host_get_name(host);
- XBT_INFO("*** Storage info on %s ***", host_name);
-
- xbt_dict_cursor_t cursor = NULL;
- char* mount_name;
- char* storage_name;
- msg_storage_t storage;
-
- xbt_dict_t storage_list = MSG_host_get_mounted_storage_list(MSG_host_self());
-
- xbt_dict_foreach(storage_list,cursor,mount_name,storage_name){
- XBT_INFO("\tStorage name: %s, mount name: %s", storage_name, mount_name);
-
- storage = MSG_storage_get_by_name(storage_name);
-
- sg_size_t free_size = MSG_storage_get_free_size(storage);
- sg_size_t used_size = MSG_storage_get_used_size(storage);
-
- XBT_INFO("\t\tFree size: %llu bytes", free_size);
- XBT_INFO("\t\tUsed size: %llu bytes", used_size);
-
- display_storage_properties(storage);
- dump_storage_by_name(storage_name);
- }
- xbt_dict_free(&storage_list);
-}
-
-static int client(int argc, char *argv[])
-{
- hsm_put("alice","/home/doc/simgrid/examples/msg/icomms/small_platform.xml","c:\\Windows\\toto.cxx");
- hsm_put("alice","/home/doc/simgrid/examples/msg/parallel_task/test_ptask_deployment.xml","c:\\Windows\\titi.xml");
- hsm_put("alice","/home/doc/simgrid/examples/msg/alias/masterslave_forwarder_with_alias.c","c:\\Windows\\tata.c");
-
- msg_task_t finalize = MSG_task_create("finalize", 0, 0, NULL);
- MSG_task_send(finalize, "alice");
-
- get_set_storage_data("Disk1");
-
- return 1;
-}
-
-static int server(int argc, char *argv[])
-{
- msg_task_t to_execute = NULL;
- XBT_ATTRIB_UNUSED int res;
-
- storage_info(MSG_host_self());
-
- XBT_INFO("Server waiting for transfers ...");
- while(1){
- res = MSG_task_receive(&(to_execute), MSG_host_get_name(MSG_host_self()));
- xbt_assert(res == MSG_OK, "MSG_task_get failed");
-
- const char *task_name;
- task_name = MSG_task_get_name(to_execute);
-
- if (!strcmp(task_name, "finalize")) { // Shutdown ...
- MSG_task_destroy(to_execute);
- break;
- } else if(!strcmp(task_name,"hsm_put")){// Receive file to save
- // Write file on local disk
- char *dest = MSG_task_get_data(to_execute);
- sg_size_t size_to_write = (sg_size_t)MSG_task_get_bytes_amount(to_execute);
- write_local_file(dest, size_to_write);
- }
-
- MSG_task_destroy(to_execute);
- to_execute = NULL;
- }
-
- storage_info(MSG_host_self());
- dump_platform_storages();
- return 1;
-}
-
-int main(int argc, char *argv[])
-{
- MSG_init(&argc, argv);
-
- /* Check the arguments */
- xbt_assert(argc == 2,"Usage: %s platform_file\n", argv[0]);
-
- MSG_create_environment(argv[1]);
-
- MSG_process_create("server", server, NULL, MSG_get_host_by_name("alice"));
- MSG_process_create("client", client, NULL, MSG_get_host_by_name("bob"));
-
- msg_error_t res = MSG_main();
- XBT_INFO("Simulated time: %g", MSG_get_clock());
-
- return res != MSG_OK;
-}
+++ /dev/null
-$ ./storage_client_server$EXEEXT ${srcdir:=.}/../../../examples/platforms/storage/storage.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:server@alice) *** Storage info on alice ***
-> [ 0.000000] (1:server@alice) Storage name: Disk2, mount name: c:
-> [ 0.000000] (1:server@alice) Free size: 534479374867 bytes
-> [ 0.000000] (1:server@alice) Used size: 2391537133 bytes
-> [ 0.000000] (1:server@alice) No property attached.
-> [ 0.000000] (1:server@alice) *** Dump a storage element ***
-> [ 0.000000] (1:server@alice) Print the content of the storage element: Disk2
-> [ 0.000000] (1:server@alice) \Windows\win.ini size: 92 bytes
-> [ 0.000000] (1:server@alice) \Windows\mib.bin size: 43131 bytes
-> [ 0.000000] (1:server@alice) \Windows\DtcInstall.log size: 1955 bytes
-> [ 0.000000] (1:server@alice) \Windows\vmgcoinstall.log size: 1585 bytes
-> [ 0.000000] (1:server@alice) \Windows\Starter.xml size: 31537 bytes
-> [ 0.000000] (1:server@alice) \Windows\_isusr32.dll size: 180320 bytes
-> [ 0.000000] (1:server@alice) \Windows\winhlp32.exe size: 10752 bytes
-> [ 0.000000] (1:server@alice) \Windows\setuperr.log size: 0 bytes
-> [ 0.000000] (1:server@alice) \Windows\system.ini size: 219 bytes
-> [ 0.000000] (1:server@alice) \Windows\hapint.exe size: 382056 bytes
-> [ 0.000000] (1:server@alice) \Windows\Professional.xml size: 31881 bytes
-> [ 0.000000] (1:server@alice) \Windows\regedit.exe size: 159232 bytes
-> [ 0.000000] (1:server@alice) \Windows\setupact.log size: 101663 bytes
-> [ 0.000000] (1:server@alice) \Windows\WindowsUpdate.log size: 1518934 bytes
-> [ 0.000000] (1:server@alice) \Windows\explorer.exe size: 2380944 bytes
-> [ 0.000000] (1:server@alice) \Windows\DirectX.log size: 10486 bytes
-> [ 0.000000] (1:server@alice) \Windows\WMSysPr9.prx size: 316640 bytes
-> [ 0.000000] (1:server@alice) \Windows\PFRO.log size: 6770 bytes
-> [ 0.000000] (1:server@alice) \Windows\csup.txt size: 12 bytes
-> [ 0.000000] (1:server@alice) \Windows\WLXPGSS.SCR size: 322048 bytes
-> [ 0.000000] (1:server@alice) \Windows\avastSS.scr size: 41664 bytes
-> [ 0.000000] (1:server@alice) \Windows\font1.sii size: 4907 bytes
-> [ 0.000000] (1:server@alice) \Windows\write.exe size: 10752 bytes
-> [ 0.000000] (1:server@alice) \Windows\font2.sii size: 8698 bytes
-> [ 0.000000] (1:server@alice) \Windows\CoreSingleLanguage.xml size: 31497 bytes
-> [ 0.000000] (1:server@alice) \Windows\dchcfg64.exe size: 335464 bytes
-> [ 0.000000] (1:server@alice) \Windows\notepad.exe size: 243712 bytes
-> [ 0.000000] (1:server@alice) \Windows\HelpPane.exe size: 883712 bytes
-> [ 0.000000] (1:server@alice) \Windows\hh.exe size: 17408 bytes
-> [ 0.000000] (1:server@alice) \Windows\DPINST.LOG size: 18944 bytes
-> [ 0.000000] (1:server@alice) \Windows\bfsvc.exe size: 75264 bytes
-> [ 0.000000] (1:server@alice) \Windows\splwow64.exe size: 126464 bytes
-> [ 0.000000] (1:server@alice) \Windows\MEMORY.DMP size: 2384027342 bytes
-> [ 0.000000] (1:server@alice) \Windows\dcmdev64.exe size: 93288 bytes
-> [ 0.000000] (1:server@alice) \Windows\twain_32.dll size: 50176 bytes
-> [ 0.000000] (1:server@alice) \Windows\bootstat.dat size: 67584 bytes
-> [ 0.000000] (1:server@alice) Server waiting for transfers ...
-> [ 0.000010] (2:client@bob) bob has read 972 on /home/doc/simgrid/examples/msg/icomms/small_platform.xml
-> [ 0.000010] (2:client@bob) bob sends 972 to alice
-> [ 0.001986] (1:server@alice) 972 bytes on 972 bytes have been written by alice on /sd1
-> [ 0.401976] (2:client@bob) bob has read 654 on /home/doc/simgrid/examples/msg/parallel_task/test_ptask_deployment.xml
-> [ 0.401976] (2:client@bob) bob sends 654 to alice
-> [ 0.403944] (1:server@alice) 654 bytes on 654 bytes have been written by alice on /sd1
-> [ 0.803996] (2:client@bob) bob has read 6217 on /home/doc/simgrid/examples/msg/alias/masterslave_forwarder_with_alias.c
-> [ 0.803996] (2:client@bob) bob sends 6217 to alice
-> [ 0.806104] (1:server@alice) 6217 bytes on 6217 bytes have been written by alice on /sd1
-> [ 1.207952] (1:server@alice) *** Storage info on alice ***
-> [ 1.207952] (2:client@bob) *** GET/SET DATA for storage element: Disk1 ***
-> [ 1.207952] (2:client@bob) Get data: '(null)'
-> [ 1.207952] (2:client@bob) Set and get data: 'Some data'
-> [ 1.207952] (1:server@alice) Storage name: Disk2, mount name: c:
-> [ 1.207952] (1:server@alice) Free size: 534479367024 bytes
-> [ 1.207952] (1:server@alice) Used size: 2391544976 bytes
-> [ 1.207952] (1:server@alice) No property attached.
-> [ 1.207952] (1:server@alice) *** Dump a storage element ***
-> [ 1.207952] (1:server@alice) Print the content of the storage element: Disk2
-> [ 1.207952] (1:server@alice) \Windows\titi.xml size: 654 bytes
-> [ 1.207952] (1:server@alice) \Windows\win.ini size: 92 bytes
-> [ 1.207952] (1:server@alice) \Windows\mib.bin size: 43131 bytes
-> [ 1.207952] (1:server@alice) \Windows\DtcInstall.log size: 1955 bytes
-> [ 1.207952] (1:server@alice) \Windows\vmgcoinstall.log size: 1585 bytes
-> [ 1.207952] (1:server@alice) \Windows\Starter.xml size: 31537 bytes
-> [ 1.207952] (1:server@alice) \Windows\_isusr32.dll size: 180320 bytes
-> [ 1.207952] (1:server@alice) \Windows\winhlp32.exe size: 10752 bytes
-> [ 1.207952] (1:server@alice) \Windows\setuperr.log size: 0 bytes
-> [ 1.207952] (1:server@alice) \Windows\system.ini size: 219 bytes
-> [ 1.207952] (1:server@alice) \Windows\hapint.exe size: 382056 bytes
-> [ 1.207952] (1:server@alice) \Windows\Professional.xml size: 31881 bytes
-> [ 1.207952] (1:server@alice) \Windows\regedit.exe size: 159232 bytes
-> [ 1.207952] (1:server@alice) \Windows\setupact.log size: 101663 bytes
-> [ 1.207952] (1:server@alice) \Windows\WindowsUpdate.log size: 1518934 bytes
-> [ 1.207952] (1:server@alice) \Windows\explorer.exe size: 2380944 bytes
-> [ 1.207952] (1:server@alice) \Windows\DirectX.log size: 10486 bytes
-> [ 1.207952] (1:server@alice) \Windows\WMSysPr9.prx size: 316640 bytes
-> [ 1.207952] (1:server@alice) \Windows\PFRO.log size: 6770 bytes
-> [ 1.207952] (1:server@alice) \Windows\toto.cxx size: 972 bytes
-> [ 1.207952] (1:server@alice) \Windows\csup.txt size: 12 bytes
-> [ 1.207952] (1:server@alice) \Windows\WLXPGSS.SCR size: 322048 bytes
-> [ 1.207952] (1:server@alice) \Windows\avastSS.scr size: 41664 bytes
-> [ 1.207952] (1:server@alice) \Windows\font1.sii size: 4907 bytes
-> [ 1.207952] (1:server@alice) \Windows\write.exe size: 10752 bytes
-> [ 1.207952] (1:server@alice) \Windows\font2.sii size: 8698 bytes
-> [ 1.207952] (1:server@alice) \Windows\CoreSingleLanguage.xml size: 31497 bytes
-> [ 1.207952] (1:server@alice) \Windows\dchcfg64.exe size: 335464 bytes
-> [ 1.207952] (1:server@alice) \Windows\notepad.exe size: 243712 bytes
-> [ 1.207952] (1:server@alice) \Windows\tata.c size: 6217 bytes
-> [ 1.207952] (1:server@alice) \Windows\HelpPane.exe size: 883712 bytes
-> [ 1.207952] (1:server@alice) \Windows\hh.exe size: 17408 bytes
-> [ 1.207952] (1:server@alice) \Windows\DPINST.LOG size: 18944 bytes
-> [ 1.207952] (1:server@alice) \Windows\bfsvc.exe size: 75264 bytes
-> [ 1.207952] (1:server@alice) \Windows\splwow64.exe size: 126464 bytes
-> [ 1.207952] (1:server@alice) \Windows\MEMORY.DMP size: 2384027342 bytes
-> [ 1.207952] (1:server@alice) \Windows\dcmdev64.exe size: 93288 bytes
-> [ 1.207952] (1:server@alice) \Windows\twain_32.dll size: 50176 bytes
-> [ 1.207952] (1:server@alice) \Windows\bootstat.dat size: 67584 bytes
-> [ 1.207952] (1:server@alice) Storage Disk1 is attached to bob
-> [ 1.207952] (1:server@alice) Storage Disk2 is attached to alice
-> [ 1.207952] (1:server@alice) Storage Disk3 is attached to carl
-> [ 1.207952] (1:server@alice) Storage Disk4 is attached to denise
-> [ 1.207952] (0:maestro@) Simulated time: 1.20795
--- /dev/null
+foreach(x actor concurrent_rw host_on_off_wait listen_async pid storage_client_server)
+ add_executable (${x} ${x}/${x}.cpp)
+ target_link_libraries(${x} simgrid)
+ set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
+
+ set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
+ set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.cpp)
+endforeach()
+
+set(teshsuite_src ${teshsuite_src} PARENT_SCOPE)
+set(tesh_files ${tesh_files} PARENT_SCOPE)
+set(xml_files ${xml_files} PARENT_SCOPE)
+
+foreach(x actor concurrent_rw host_on_off_wait listen_async pid storage_client_server)
+ ADD_TESH_FACTORIES(tesh-s4u-${x} "thread;boost;ucontext;raw" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/s4u/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x}/${x}.tesh)
+endforeach()
--- /dev/null
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u example");
+
+static void worker()
+{
+ simgrid::s4u::this_actor::sleep_for(.5);
+ XBT_INFO("Worker started (PID:%lu, PPID:%lu)", simgrid::s4u::this_actor::pid(), simgrid::s4u::this_actor::ppid());
+ while (1) {
+ XBT_INFO("Plop i am %ssuspended", simgrid::s4u::this_actor::isSuspended() ? "" : "not ");
+ simgrid::s4u::this_actor::sleep_for(1);
+ }
+ XBT_INFO("I'm done. See you!");
+}
+
+static void master()
+{
+ simgrid::s4u::this_actor::sleep_for(1);
+ std::vector<simgrid::s4u::ActorPtr>* actor_list = new std::vector<simgrid::s4u::ActorPtr>();
+ simgrid::s4u::this_actor::host()->actorList(actor_list);
+
+ for (auto actor : *actor_list) {
+ XBT_INFO("Actor (pid=%lu, ppid=%lu, name=%s)", actor->pid(), actor->ppid(), actor->name().c_str());
+ if (simgrid::s4u::this_actor::pid() != actor->pid())
+ actor->kill();
+ }
+
+ simgrid::s4u::ActorPtr actor =
+ simgrid::s4u::Actor::createActor("worker from master", simgrid::s4u::this_actor::host(), worker);
+ simgrid::s4u::this_actor::sleep_for(2);
+
+ XBT_INFO("Suspend Actor (pid=%lu)", actor->pid());
+ actor->suspend();
+
+ XBT_INFO("Actor (pid=%lu) is %ssuspended", actor->pid(), actor->isSuspended() ? "" : "not ");
+ simgrid::s4u::this_actor::sleep_for(2);
+
+ XBT_INFO("Resume Actor (pid=%lu)", actor->pid());
+ actor->resume();
+
+ XBT_INFO("Actor (pid=%lu) is %ssuspended", actor->pid(), actor->isSuspended() ? "" : "not ");
+ simgrid::s4u::this_actor::sleep_for(2);
+ actor->kill();
+
+ delete actor_list;
+ XBT_INFO("Goodbye now!");
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ e->loadPlatform(argv[1]);
+
+ simgrid::s4u::Actor::createActor("master", simgrid::s4u::Host::by_name("Tremblay"), master);
+ simgrid::s4u::Actor::createActor("worker", simgrid::s4u::Host::by_name("Tremblay"), worker);
+
+ e->run();
+ XBT_INFO("Simulation time %g", e->getClock());
+
+ return 0;
+}
--- /dev/null
+$ ./actor ${srcdir:=.}/../../../examples/platforms/small_platform.xml
+> [Tremblay:worker:(0) 0.500000] [s4u_test/INFO] Worker started (PID:2, PPID:0)
+> [Tremblay:worker:(0) 0.500000] [s4u_test/INFO] Plop i am not suspended
+> [Tremblay:master:(0) 1.000000] [s4u_test/INFO] Actor (pid=1, ppid=0, name=master)
+> [Tremblay:master:(0) 1.000000] [s4u_test/INFO] Actor (pid=2, ppid=0, name=worker)
+> [Tremblay:worker from master:(0) 1.500000] [s4u_test/INFO] Worker started (PID:3, PPID:1)
+> [Tremblay:worker from master:(0) 1.500000] [s4u_test/INFO] Plop i am not suspended
+> [Tremblay:worker from master:(0) 2.500000] [s4u_test/INFO] Plop i am not suspended
+> [Tremblay:master:(0) 3.000000] [s4u_test/INFO] Suspend Actor (pid=3)
+> [Tremblay:master:(0) 3.000000] [s4u_test/INFO] Actor (pid=3) is suspended
+> [Tremblay:master:(0) 5.000000] [s4u_test/INFO] Resume Actor (pid=3)
+> [Tremblay:master:(0) 5.000000] [s4u_test/INFO] Actor (pid=3) is not suspended
+> [Tremblay:worker from master:(0) 5.000000] [s4u_test/INFO] Plop i am not suspended
+> [Tremblay:worker from master:(0) 6.000000] [s4u_test/INFO] Plop i am not suspended
+> [Tremblay:master:(0) 7.000000] [s4u_test/INFO] Goodbye now!
+> [7.000000] [s4u_test/INFO] Simulation time 7
--- /dev/null
+/* Copyright (c) 2008-2010, 2012-2015, 2017. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include <unistd.h>
+
+#define FILENAME1 "/home/doc/simgrid/examples/platforms/g5k.xml"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u test");
+
+static void host()
+{
+ char name[2048];
+ int id = simgrid::s4u::this_actor::pid();
+ snprintf(name, 2048, "%s%i", FILENAME1, id);
+ simgrid::s4u::File* file = new simgrid::s4u::File(name, NULL);
+ XBT_INFO("process %d is writing!", id);
+ file->write(3000000);
+ XBT_INFO("process %d goes to sleep for %d seconds", id, id);
+ simgrid::s4u::this_actor::sleep_for(id);
+ XBT_INFO("process %d is writing again!", id);
+ file->write(3000000);
+ XBT_INFO("process %d goes to sleep for %d seconds", id, 6 - id);
+ simgrid::s4u::this_actor::sleep_for(6 - id);
+ XBT_INFO("process %d is reading!", id);
+ file->seek(0);
+ file->read(3000000);
+ XBT_INFO("process %d goes to sleep for %d seconds", id, id);
+ simgrid::s4u::this_actor::sleep_for(id);
+ XBT_INFO("process %d is reading again!", id);
+ file->seek(0);
+ file->read(3000000);
+
+ XBT_INFO("process %d => Size of %s: %llu", id, name, file->size());
+ // Close the file
+ delete file;
+}
+
+int main(int argc, char** argv)
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ e->loadPlatform(argv[1]);
+
+ for (int i = 0; i < 5; i++)
+ simgrid::s4u::Actor::createActor("host", simgrid::s4u::Host::by_name("bob"), host);
+
+ e->run();
+ XBT_INFO("Simulation time %g", e->getClock());
+
+ return 0;
+}
--- /dev/null
+$ ./concurrent_rw$EXEEXT ${srcdir:=.}/../../../examples/platforms/storage/storage.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (host@bob) process 1 is writing!
+> [ 0.000000] (host@bob) process 2 is writing!
+> [ 0.000000] (host@bob) process 3 is writing!
+> [ 0.000000] (host@bob) process 4 is writing!
+> [ 0.000000] (host@bob) process 5 is writing!
+> [ 0.500000] (host@bob) process 1 goes to sleep for 1 seconds
+> [ 0.500000] (host@bob) process 2 goes to sleep for 2 seconds
+> [ 0.500000] (host@bob) process 3 goes to sleep for 3 seconds
+> [ 0.500000] (host@bob) process 4 goes to sleep for 4 seconds
+> [ 0.500000] (host@bob) process 5 goes to sleep for 5 seconds
+> [ 1.500000] (host@bob) process 1 is writing again!
+> [ 1.600000] (host@bob) process 1 goes to sleep for 5 seconds
+> [ 2.500000] (host@bob) process 2 is writing again!
+> [ 2.600000] (host@bob) process 2 goes to sleep for 4 seconds
+> [ 3.500000] (host@bob) process 3 is writing again!
+> [ 3.600000] (host@bob) process 3 goes to sleep for 3 seconds
+> [ 4.500000] (host@bob) process 4 is writing again!
+> [ 4.600000] (host@bob) process 4 goes to sleep for 2 seconds
+> [ 5.500000] (host@bob) process 5 is writing again!
+> [ 5.600000] (host@bob) process 5 goes to sleep for 1 seconds
+> [ 6.600000] (host@bob) process 4 is reading!
+> [ 6.600000] (host@bob) process 5 is reading!
+> [ 6.600000] (host@bob) process 1 is reading!
+> [ 6.600000] (host@bob) process 2 is reading!
+> [ 6.600000] (host@bob) process 3 is reading!
+> [ 6.750000] (host@bob) process 4 goes to sleep for 4 seconds
+> [ 6.750000] (host@bob) process 5 goes to sleep for 5 seconds
+> [ 6.750000] (host@bob) process 1 goes to sleep for 1 seconds
+> [ 6.750000] (host@bob) process 2 goes to sleep for 2 seconds
+> [ 6.750000] (host@bob) process 3 goes to sleep for 3 seconds
+> [ 7.750000] (host@bob) process 1 is reading again!
+> [ 7.780000] (host@bob) process 1 => Size of /home/doc/simgrid/examples/platforms/g5k.xml1: 6000000
+> [ 8.750000] (host@bob) process 2 is reading again!
+> [ 8.780000] (host@bob) process 2 => Size of /home/doc/simgrid/examples/platforms/g5k.xml2: 6000000
+> [ 9.750000] (host@bob) process 3 is reading again!
+> [ 9.780000] (host@bob) process 3 => Size of /home/doc/simgrid/examples/platforms/g5k.xml3: 6000000
+> [ 10.750000] (host@bob) process 4 is reading again!
+> [ 10.780000] (host@bob) process 4 => Size of /home/doc/simgrid/examples/platforms/g5k.xml4: 6000000
+> [ 11.750000] (host@bob) process 5 is reading again!
+> [ 11.780000] (host@bob) process 5 => Size of /home/doc/simgrid/examples/platforms/g5k.xml5: 6000000
+> [ 11.780000] (maestro@) Simulation time 11.78
--- /dev/null
+/* Copyright (c) 2010-2015. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u example");
+
+static void master()
+{
+ simgrid::s4u::Host* jupiter = simgrid::s4u::Host::by_name("Jupiter");
+ XBT_INFO("Master waiting");
+ simgrid::s4u::this_actor::sleep_for(1);
+
+ XBT_INFO("Turning off the worker host");
+ jupiter->turnOff();
+ XBT_INFO("Master has finished");
+}
+
+static void worker()
+{
+ XBT_INFO("Worker waiting");
+  // TODO: This should really be MSG_HOST_FAILURE
+ simgrid::s4u::this_actor::sleep_for(5);
+ XBT_ERROR("Worker should be off already.");
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ e->loadPlatform(argv[1]);
+
+ simgrid::s4u::Actor::createActor("master", simgrid::s4u::Host::by_name("Tremblay"), master);
+ simgrid::s4u::Actor::createActor("worker", simgrid::s4u::Host::by_name("Jupiter"), worker);
+
+ e->run();
+ XBT_INFO("Simulation time %g", e->getClock());
+ return 0;
+}
--- /dev/null
+$ ./host_on_off_wait ${srcdir:=.}/../../../examples/platforms/small_platform.xml
+> [Tremblay:master:(0) 0.000000] [s4u_test/INFO] Master waiting
+> [Jupiter:worker:(0) 0.000000] [s4u_test/INFO] Worker waiting
+> [Tremblay:master:(0) 1.000000] [s4u_test/INFO] Turning off the worker host
+> [Tremblay:master:(0) 1.000000] [s4u_test/INFO] Master has finished
+> [1.000000] [s4u_test/INFO] Simulation time 1
--- /dev/null
+/* Bug report: https://github.com/simgrid/simgrid/issues/40
+ *
+ * Task.listen used to be broken on async mailboxes, as it always returned false.
+ * This occurs in Java and C, but is only tested here in C.
+ */
+
+#include "simgrid/s4u.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u example");
+
+static void server()
+{
+ simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName("mailbox");
+
+ simgrid::s4u::this_actor::isend(mailbox, xbt_strdup("Some data"), 0);
+
+ xbt_assert(mailbox->listen()); // True (1)
+ XBT_INFO("Task listen works on regular mailboxes");
+ char* res = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox));
+
+ xbt_assert(!strcmp("Some data", res), "Data received: %s", res);
+ XBT_INFO("Data successfully received from regular mailbox");
+ xbt_free(res);
+
+ simgrid::s4u::MailboxPtr mailbox2 = simgrid::s4u::Mailbox::byName("mailbox2");
+ mailbox2->setReceiver(simgrid::s4u::Actor::self());
+
+ simgrid::s4u::this_actor::isend(mailbox2, xbt_strdup("More data"), 0);
+
+ xbt_assert(mailbox2->listen()); // used to break.
+ XBT_INFO("Task listen works on asynchronous mailboxes");
+
+ res = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox2));
+ xbt_assert(!strcmp("More data", res));
+ xbt_free(res);
+
+ XBT_INFO("Data successfully received from asynchronous mailbox");
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ e->loadPlatform(argv[1]);
+
+ simgrid::s4u::Actor::createActor("test", simgrid::s4u::Host::by_name("Tremblay"), server);
+
+ e->run();
+ return 0;
+}
--- /dev/null
+$ ./listen_async ${srcdir:=.}/../../../examples/platforms/small_platform.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (test@Tremblay) Task listen works on regular mailboxes
+> [ 0.000195] (test@Tremblay) Data successfully received from regular mailbox
+> [ 0.000195] (test@Tremblay) Task listen works on asynchronous mailboxes
+> [ 0.000195] (test@Tremblay) Data successfully received from asynchronous mailbox
+> [ 0.000195] (maestro@) Variable 3 still in system when freing it: this may be a bug
--- /dev/null
+/* Copyright (c) 2009-2010, 2013-2015. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this msg example");
+
+static int my_onexit(smx_process_exit_status_t status, int* pid)
+{
+ XBT_INFO("Process \"%d\" killed.", *pid);
+ return 0;
+}
+
+static void sendpid()
+{
+ simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName("mailbox");
+ int pid = simgrid::s4u::this_actor::pid();
+ double comm_size = 100000;
+ simgrid::s4u::this_actor::onExit((int_f_pvoid_pvoid_t)my_onexit, &pid);
+
+ XBT_INFO("Sending pid of \"%d\".", pid);
+ simgrid::s4u::this_actor::send(mailbox, &pid, comm_size);
+ XBT_INFO("Send of pid \"%d\" done.", pid);
+
+ simgrid::s4u::this_actor::suspend();
+}
+
+static void killall()
+{
+ simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName("mailbox");
+ for (int i = 0; i < 3; i++) {
+ int* pid = static_cast<int*>(simgrid::s4u::this_actor::recv(mailbox));
+ XBT_INFO("Killing process \"%d\".", *pid);
+ simgrid::s4u::Actor::byPid(*pid)->kill();
+ }
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ e->loadPlatform(argv[1]);
+
+ if (argc > 2)
+ simgrid::s4u::Actor::killAll(atoi(argv[2]));
+ else
+ simgrid::s4u::Actor::killAll();
+
+ simgrid::s4u::Actor::createActor("sendpid", simgrid::s4u::Host::by_name("Tremblay"), sendpid);
+ simgrid::s4u::Actor::createActor("sendpid", simgrid::s4u::Host::by_name("Tremblay"), sendpid);
+ simgrid::s4u::Actor::createActor("sendpid", simgrid::s4u::Host::by_name("Tremblay"), sendpid);
+ simgrid::s4u::Actor::createActor("killall", simgrid::s4u::Host::by_name("Tremblay"), killall);
+
+ e->run();
+
+ return 0;
+}
--- /dev/null
+$ ./pid ${srcdir:=.}/../../../examples/platforms/small_platform.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (sendpid@Tremblay) Sending pid of "1".
+> [ 0.000000] (sendpid@Tremblay) Sending pid of "2".
+> [ 0.000000] (sendpid@Tremblay) Sending pid of "3".
+> [ 0.000402] (killall@Tremblay) Killing process "1".
+> [ 0.000402] (sendpid@Tremblay) Send of pid "1" done.
+> [ 0.000402] (sendpid@Tremblay) Process "1" killed.
+> [ 0.000804] (sendpid@Tremblay) Send of pid "2" done.
+> [ 0.000804] (killall@Tremblay) Killing process "2".
+> [ 0.000804] (sendpid@Tremblay) Process "2" killed.
+> [ 0.001206] (sendpid@Tremblay) Send of pid "3" done.
+> [ 0.001206] (killall@Tremblay) Killing process "3".
+> [ 0.001206] (sendpid@Tremblay) Process "3" killed.
+
+$ ./pid ${srcdir:=.}/../../../examples/platforms/small_platform.xml 2 "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (sendpid@Tremblay) Sending pid of "2".
+> [ 0.000000] (sendpid@Tremblay) Sending pid of "3".
+> [ 0.000000] (sendpid@Tremblay) Sending pid of "4".
+> [ 0.000402] (killall@Tremblay) Killing process "2".
+> [ 0.000402] (sendpid@Tremblay) Send of pid "2" done.
+> [ 0.000402] (sendpid@Tremblay) Process "2" killed.
+> [ 0.000804] (sendpid@Tremblay) Send of pid "3" done.
+> [ 0.000804] (killall@Tremblay) Killing process "3".
+> [ 0.000804] (sendpid@Tremblay) Process "3" killed.
+> [ 0.001206] (sendpid@Tremblay) Send of pid "4" done.
+> [ 0.001206] (killall@Tremblay) Killing process "4".
+> [ 0.001206] (sendpid@Tremblay) Process "4" killed.
--- /dev/null
+/* Copyright (c) 2013-2015, 2017. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "simgrid/s4u.hpp"
+#include <string>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(storage, "Messages specific for this simulation");
+
+static void display_storage_properties(simgrid::s4u::Storage* storage)
+{
+ xbt_dict_cursor_t cursor = NULL;
+ char* key;
+ char* data;
+ xbt_dict_t props = storage->properties();
+ if (xbt_dict_length(props) > 0) {
+ XBT_INFO("\tProperties of mounted storage: %s", storage->name());
+ xbt_dict_foreach (props, cursor, key, data)
+ XBT_INFO("\t\t'%s' -> '%s'", key, data);
+ } else {
+ XBT_INFO("\tNo property attached.");
+ }
+}
+
+static sg_size_t write_local_file(const char* dest, sg_size_t file_size)
+{
+ simgrid::s4u::File* file = new simgrid::s4u::File(dest, nullptr);
+ sg_size_t written = file->write(file_size);
+ XBT_INFO("%llu bytes on %llu bytes have been written by %s on /sd1", written, file_size,
+ simgrid::s4u::Actor::self()->name().c_str());
+ delete file;
+ return written;
+}
+
+static sg_size_t read_local_file(const char* src)
+{
+ simgrid::s4u::File* file = new simgrid::s4u::File(src, nullptr);
+ sg_size_t file_size = file->size();
+ sg_size_t read = file->read(file_size);
+
+ XBT_INFO("%s has read %llu on %s", simgrid::s4u::Actor::self()->name().c_str(), read, src);
+ delete file;
+
+ return read;
+}
+
+// Read src file on local disk and send a put message to remote host (size of message = size of src file)
+static void hsm_put(const char* remote_host, const char* src, const char* dest)
+{
+ // Read local src file, and return the size that was actually read
+ sg_size_t read_size = read_local_file(src);
+
+ // Send file
+ XBT_INFO("%s sends %llu to %s", simgrid::s4u::this_actor::name().c_str(), read_size, remote_host);
+ char* payload = bprintf("%s %llu", dest, read_size);
+ simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(remote_host);
+ simgrid::s4u::this_actor::send(mailbox, payload, static_cast<double>(read_size));
+ simgrid::s4u::this_actor::sleep_for(.4);
+}
+
+static void display_storage_content(simgrid::s4u::Storage* storage)
+{
+ XBT_INFO("Print the content of the storage element: %s", storage->name());
+ std::map<std::string, sg_size_t*>* content = storage->content();
+ if (!content->empty()) {
+ for (auto entry : *content)
+ XBT_INFO("\t%s size: %llu bytes", entry.first.c_str(), *entry.second);
+ } else {
+ XBT_INFO("\tNo content.");
+ }
+}
+
+static void dump_storage_by_name(char* name)
+{
+ XBT_INFO("*** Dump a storage element ***");
+ simgrid::s4u::Storage& storage = simgrid::s4u::Storage::byName(name);
+ display_storage_content(&storage);
+}
+
+static void get_set_storage_data(const char* storage_name)
+{
+ XBT_INFO("*** GET/SET DATA for storage element: %s ***", storage_name);
+ simgrid::s4u::Storage& storage = simgrid::s4u::Storage::byName(storage_name);
+
+ char* data = static_cast<char*>(storage.userdata());
+ XBT_INFO("Get data: '%s'", data);
+ storage.setUserdata(xbt_strdup("Some data"));
+ data = static_cast<char*>(storage.userdata());
+ XBT_INFO("\tSet and get data: '%s'", data);
+ xbt_free(data);
+}
+
+static void dump_platform_storages()
+{
+ std::unordered_map<std::string, simgrid::s4u::Storage*>* storages = simgrid::s4u::Storage().allStorages();
+
+ for (auto storage : *storages) {
+ XBT_INFO("Storage %s is attached to %s", storage.first.c_str(), storage.second->host());
+ storage.second->setProperty("other usage", xbt_strdup("gpfs"));
+ }
+ // Expected output in tesh file that's missing for now
+ //> [ 1.207952] (server@alice) Storage Disk3 is attached to carl
+ //> [ 1.207952] (server@alice) Storage Disk4 is attached to denise
+}
+
+static void storage_info(simgrid::s4u::Host* host)
+{
+ XBT_INFO("*** Storage info on %s ***", host->cname());
+ xbt_dict_cursor_t cursor = NULL;
+ char* mount_name;
+ char* storage_name;
+
+ xbt_dict_t storage_list = host->mountedStoragesAsDict();
+ xbt_dict_foreach (storage_list, cursor, mount_name, storage_name) {
+ XBT_INFO("\tStorage name: %s, mount name: %s", storage_name, mount_name);
+ simgrid::s4u::Storage& storage = simgrid::s4u::Storage::byName(storage_name);
+
+ sg_size_t free_size = storage.sizeFree();
+ sg_size_t used_size = storage.sizeUsed();
+
+ XBT_INFO("\t\tFree size: %llu bytes", free_size);
+ XBT_INFO("\t\tUsed size: %llu bytes", used_size);
+
+ display_storage_properties(&storage);
+ dump_storage_by_name(storage_name);
+ }
+ xbt_dict_free(&storage_list);
+}
+
+static void client()
+{
+ hsm_put("alice", "/home/doc/simgrid/examples/msg/icomms/small_platform.xml", "c:\\Windows\\toto.cxx");
+ hsm_put("alice", "/home/doc/simgrid/examples/msg/parallel_task/test_ptask_deployment.xml", "c:\\Windows\\titi.xml");
+ hsm_put("alice", "/home/doc/simgrid/examples/msg/alias/masterslave_forwarder_with_alias.c", "c:\\Windows\\tata.c");
+
+ simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName("alice");
+ simgrid::s4u::this_actor::send(mailbox, xbt_strdup("finalize"), 0);
+
+ get_set_storage_data("Disk1");
+}
+
+static void server()
+{
+ storage_info(simgrid::s4u::this_actor::host());
+ simgrid::s4u::MailboxPtr mailbox = simgrid::s4u::Mailbox::byName(simgrid::s4u::this_actor::host()->cname());
+
+ XBT_INFO("Server waiting for transfers ...");
+ while (1) {
+ char* msg = static_cast<char*>(simgrid::s4u::this_actor::recv(mailbox));
+ if (!strcmp(msg, "finalize")) { // Shutdown ...
+ xbt_free(msg);
+ break;
+ } else { // Receive file to save
+ char* saveptr;
+ char* dest = strtok_r(msg, " ", &saveptr);
+ sg_size_t size_to_write = std::stoull(strtok_r(nullptr, " ", &saveptr));
+ write_local_file(dest, size_to_write);
+ xbt_free(dest);
+ }
+ }
+
+ storage_info(simgrid::s4u::this_actor::host());
+ dump_platform_storages();
+}
+
+int main(int argc, char* argv[])
+{
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ xbt_assert(argc == 2, "Usage: %s platform_file\n", argv[0]);
+ e->loadPlatform(argv[1]);
+
+ simgrid::s4u::Actor::createActor("server", simgrid::s4u::Host::by_name("alice"), server);
+ simgrid::s4u::Actor::createActor("client", simgrid::s4u::Host::by_name("bob"), client);
+
+ e->run();
+
+ XBT_INFO("Simulated time: %g", e->getClock());
+
+ return 0;
+}
--- /dev/null
+$ ./storage_client_server$EXEEXT ${srcdir:=.}/../../../examples/platforms/storage/storage.xml "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n"
+> [ 0.000000] (server@alice) *** Storage info on alice ***
+> [ 0.000000] (server@alice) Storage name: Disk2, mount name: c:
+> [ 0.000000] (server@alice) Free size: 534479374867 bytes
+> [ 0.000000] (server@alice) Used size: 2391537133 bytes
+> [ 0.000000] (server@alice) No property attached.
+> [ 0.000000] (server@alice) *** Dump a storage element ***
+> [ 0.000000] (server@alice) Print the content of the storage element: Disk2
+> [ 0.000000] (server@alice) \Windows\CoreSingleLanguage.xml size: 31497 bytes
+> [ 0.000000] (server@alice) \Windows\DPINST.LOG size: 18944 bytes
+> [ 0.000000] (server@alice) \Windows\DirectX.log size: 10486 bytes
+> [ 0.000000] (server@alice) \Windows\DtcInstall.log size: 1955 bytes
+> [ 0.000000] (server@alice) \Windows\HelpPane.exe size: 883712 bytes
+> [ 0.000000] (server@alice) \Windows\MEMORY.DMP size: 2384027342 bytes
+> [ 0.000000] (server@alice) \Windows\PFRO.log size: 6770 bytes
+> [ 0.000000] (server@alice) \Windows\Professional.xml size: 31881 bytes
+> [ 0.000000] (server@alice) \Windows\Starter.xml size: 31537 bytes
+> [ 0.000000] (server@alice) \Windows\WLXPGSS.SCR size: 322048 bytes
+> [ 0.000000] (server@alice) \Windows\WMSysPr9.prx size: 316640 bytes
+> [ 0.000000] (server@alice) \Windows\WindowsUpdate.log size: 1518934 bytes
+> [ 0.000000] (server@alice) \Windows\_isusr32.dll size: 180320 bytes
+> [ 0.000000] (server@alice) \Windows\avastSS.scr size: 41664 bytes
+> [ 0.000000] (server@alice) \Windows\bfsvc.exe size: 75264 bytes
+> [ 0.000000] (server@alice) \Windows\bootstat.dat size: 67584 bytes
+> [ 0.000000] (server@alice) \Windows\csup.txt size: 12 bytes
+> [ 0.000000] (server@alice) \Windows\dchcfg64.exe size: 335464 bytes
+> [ 0.000000] (server@alice) \Windows\dcmdev64.exe size: 93288 bytes
+> [ 0.000000] (server@alice) \Windows\explorer.exe size: 2380944 bytes
+> [ 0.000000] (server@alice) \Windows\font1.sii size: 4907 bytes
+> [ 0.000000] (server@alice) \Windows\font2.sii size: 8698 bytes
+> [ 0.000000] (server@alice) \Windows\hapint.exe size: 382056 bytes
+> [ 0.000000] (server@alice) \Windows\hh.exe size: 17408 bytes
+> [ 0.000000] (server@alice) \Windows\mib.bin size: 43131 bytes
+> [ 0.000000] (server@alice) \Windows\notepad.exe size: 243712 bytes
+> [ 0.000000] (server@alice) \Windows\regedit.exe size: 159232 bytes
+> [ 0.000000] (server@alice) \Windows\setupact.log size: 101663 bytes
+> [ 0.000000] (server@alice) \Windows\setuperr.log size: 0 bytes
+> [ 0.000000] (server@alice) \Windows\splwow64.exe size: 126464 bytes
+> [ 0.000000] (server@alice) \Windows\system.ini size: 219 bytes
+> [ 0.000000] (server@alice) \Windows\twain_32.dll size: 50176 bytes
+> [ 0.000000] (server@alice) \Windows\vmgcoinstall.log size: 1585 bytes
+> [ 0.000000] (server@alice) \Windows\win.ini size: 92 bytes
+> [ 0.000000] (server@alice) \Windows\winhlp32.exe size: 10752 bytes
+> [ 0.000000] (server@alice) \Windows\write.exe size: 10752 bytes
+> [ 0.000000] (server@alice) Server waiting for transfers ...
+> [ 0.000010] (client@bob) client has read 972 on /home/doc/simgrid/examples/msg/icomms/small_platform.xml
+> [ 0.000010] (client@bob) client sends 972 to alice
+> [ 0.001986] (server@alice) 972 bytes on 972 bytes have been written by server on /sd1
+> [ 0.401976] (client@bob) client has read 654 on /home/doc/simgrid/examples/msg/parallel_task/test_ptask_deployment.xml
+> [ 0.401976] (client@bob) client sends 654 to alice
+> [ 0.403944] (server@alice) 654 bytes on 654 bytes have been written by server on /sd1
+> [ 0.803996] (client@bob) client has read 6217 on /home/doc/simgrid/examples/msg/alias/masterslave_forwarder_with_alias.c
+> [ 0.803996] (client@bob) client sends 6217 to alice
+> [ 0.806104] (server@alice) 6217 bytes on 6217 bytes have been written by server on /sd1
+> [ 1.207952] (server@alice) *** Storage info on alice ***
+> [ 1.207952] (client@bob) *** GET/SET DATA for storage element: Disk1 ***
+> [ 1.207952] (client@bob) Get data: '(null)'
+> [ 1.207952] (client@bob) Set and get data: 'Some data'
+> [ 1.207952] (server@alice) Storage name: Disk2, mount name: c:
+> [ 1.207952] (server@alice) Free size: 534479367024 bytes
+> [ 1.207952] (server@alice) Used size: 2391544976 bytes
+> [ 1.207952] (server@alice) No property attached.
+> [ 1.207952] (server@alice) *** Dump a storage element ***
+> [ 1.207952] (server@alice) Print the content of the storage element: Disk2
+> [ 1.207952] (server@alice) \Windows\CoreSingleLanguage.xml size: 31497 bytes
+> [ 1.207952] (server@alice) \Windows\DPINST.LOG size: 18944 bytes
+> [ 1.207952] (server@alice) \Windows\DirectX.log size: 10486 bytes
+> [ 1.207952] (server@alice) \Windows\DtcInstall.log size: 1955 bytes
+> [ 1.207952] (server@alice) \Windows\HelpPane.exe size: 883712 bytes
+> [ 1.207952] (server@alice) \Windows\MEMORY.DMP size: 2384027342 bytes
+> [ 1.207952] (server@alice) \Windows\PFRO.log size: 6770 bytes
+> [ 1.207952] (server@alice) \Windows\Professional.xml size: 31881 bytes
+> [ 1.207952] (server@alice) \Windows\Starter.xml size: 31537 bytes
+> [ 1.207952] (server@alice) \Windows\WLXPGSS.SCR size: 322048 bytes
+> [ 1.207952] (server@alice) \Windows\WMSysPr9.prx size: 316640 bytes
+> [ 1.207952] (server@alice) \Windows\WindowsUpdate.log size: 1518934 bytes
+> [ 1.207952] (server@alice) \Windows\_isusr32.dll size: 180320 bytes
+> [ 1.207952] (server@alice) \Windows\avastSS.scr size: 41664 bytes
+> [ 1.207952] (server@alice) \Windows\bfsvc.exe size: 75264 bytes
+> [ 1.207952] (server@alice) \Windows\bootstat.dat size: 67584 bytes
+> [ 1.207952] (server@alice) \Windows\csup.txt size: 12 bytes
+> [ 1.207952] (server@alice) \Windows\dchcfg64.exe size: 335464 bytes
+> [ 1.207952] (server@alice) \Windows\dcmdev64.exe size: 93288 bytes
+> [ 1.207952] (server@alice) \Windows\explorer.exe size: 2380944 bytes
+> [ 1.207952] (server@alice) \Windows\font1.sii size: 4907 bytes
+> [ 1.207952] (server@alice) \Windows\font2.sii size: 8698 bytes
+> [ 1.207952] (server@alice) \Windows\hapint.exe size: 382056 bytes
+> [ 1.207952] (server@alice) \Windows\hh.exe size: 17408 bytes
+> [ 1.207952] (server@alice) \Windows\mib.bin size: 43131 bytes
+> [ 1.207952] (server@alice) \Windows\notepad.exe size: 243712 bytes
+> [ 1.207952] (server@alice) \Windows\regedit.exe size: 159232 bytes
+> [ 1.207952] (server@alice) \Windows\setupact.log size: 101663 bytes
+> [ 1.207952] (server@alice) \Windows\setuperr.log size: 0 bytes
+> [ 1.207952] (server@alice) \Windows\splwow64.exe size: 126464 bytes
+> [ 1.207952] (server@alice) \Windows\system.ini size: 219 bytes
+> [ 1.207952] (server@alice) \Windows\tata.c size: 6217 bytes
+> [ 1.207952] (server@alice) \Windows\titi.xml size: 654 bytes
+> [ 1.207952] (server@alice) \Windows\toto.cxx size: 972 bytes
+> [ 1.207952] (server@alice) \Windows\twain_32.dll size: 50176 bytes
+> [ 1.207952] (server@alice) \Windows\vmgcoinstall.log size: 1585 bytes
+> [ 1.207952] (server@alice) \Windows\win.ini size: 92 bytes
+> [ 1.207952] (server@alice) \Windows\winhlp32.exe size: 10752 bytes
+> [ 1.207952] (server@alice) \Windows\write.exe size: 10752 bytes
+> [ 1.207952] (server@alice) Storage Disk1 is attached to bob
+> [ 1.207952] (server@alice) Storage Disk2 is attached to alice
+> [ 1.207952] (maestro@) Simulated time: 1.20795
XBT_INFO("Scheduling DAX...");
scheduleDAX(dax);
XBT_INFO("DAX scheduled");
- xbt_dynar_t changed_tasks = SD_simulate(-1);
- xbt_dynar_free(&changed_tasks);
+ SD_simulate(-1);
XBT_INFO("Simulation done.");
{
double comm_cost[] = { 0.0, 0.0, 0.0, 0.0 };
double comp_cost[] = { 1.0 };
- xbt_dynar_t ret;
+ xbt_dynar_t ret = xbt_dynar_new(sizeof(SD_task_t), NULL);
SD_init(&argc, argv);
SD_create_environment(argv[1]);
SD_task_schedule(taskB, 1, hosts, comp_cost, comm_cost, -1.0);
xbt_free(hosts);
- ret = SD_simulate(-1.0);
+ SD_simulate_with_update(-1.0, ret);
xbt_assert(xbt_dynar_length(ret) == 2, "I was expecting the completion of 2 tasks, but I got %lu instead",
xbt_dynar_length(ret));
SD_task_destroy(taskA);
SD_task_destroy(taskB);
+ xbt_dynar_free(&ret);
XBT_INFO("Simulation time: %f", SD_get_clock());
-/* Copyright (c) 2008-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2008-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-
-#include <xbt/dict.h>
-#include <xbt/lib.h>
-#include <xbt/log.h>
-#include <xbt/sysdep.h>
#include <xbt/xbt_os_time.h>
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
-#include <simgrid/simdag.h>
+#include "simgrid/simdag.h"
#include "src/kernel/routing/NetPoint.hpp"
#include "src/surf/network_interface.hpp"
-/* Copyright (c) 2008-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2008-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "simgrid/s4u/engine.hpp"
-#include "simgrid/s4u/host.hpp"
+#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/s4u/Host.hpp"
#include "simgrid/simdag.h"
#include "src/kernel/routing/NetPoint.hpp"
#include "surf/surf_routing.h"
include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
- type-hvector type-indexed type-struct type-vector bug-17132 timers)
+ type-hvector type-indexed type-struct type-vector bug-17132 timers )
add_executable (${x} ${x}/${x}.c)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
endforeach()
if(NOT WIN32)
- add_executable (macro-shared macro-shared/macro-shared.c)
- target_link_libraries(macro-shared simgrid)
- set_target_properties(macro-shared PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/macro-shared)
+ foreach(x macro-shared macro-partial-shared macro-partial-shared-communication )
+ add_executable (${x} ${x}/${x}.c)
+ target_link_libraries(${x} simgrid)
+ set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
+
+ set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
+ set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.c)
+ endforeach()
endif()
endif()
-set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/macro-shared/macro-shared.c PARENT_SCOPE)
+set (teshsuite_src ${teshsuite_src} PARENT_SCOPE)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/coll-allreduce/coll-allreduce-large.tesh
${CMAKE_CURRENT_SOURCE_DIR}/coll-allreduce/coll-allreduce-automatic.tesh
${CMAKE_CURRENT_SOURCE_DIR}/coll-alltoall/clusters.tesh
- ${CMAKE_CURRENT_SOURCE_DIR}/macro-shared/macro-shared.tesh
${CMAKE_CURRENT_SOURCE_DIR}/pt2pt-pingpong/broken_hostfiles.tesh
${CMAKE_CURRENT_SOURCE_DIR}/pt2pt-pingpong/TI_output.tesh PARENT_SCOPE)
set(bin_files ${bin_files} ${CMAKE_CURRENT_SOURCE_DIR}/hostfile
if(enable_smpi)
if(NOT WIN32)
ADD_TESH_FACTORIES(tesh-smpi-macro-shared "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/macro-shared --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/macro-shared macro-shared.tesh)
+ ADD_TESH_FACTORIES(tesh-smpi-macro-partial-shared "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/macro-partial-shared --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/macro-partial-shared macro-partial-shared.tesh)
+ ADD_TESH_FACTORIES(tesh-smpi-macro-partial-shared-communication "thread;ucontext;raw;boost" --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/macro-partial-shared-communication --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/macro-partial-shared-communication macro-partial-shared-communication.tesh)
endif()
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
-$ ${bindir:=.}/../../../bin/smpirun -np 16 -platform ../../../examples/platforms/small_platform.xml -hostfile ../hostfile ${bindir:=.}/bug-17132 --cfg=smpi/simulate-computation:no --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -np 16 -platform ../../../examples/platforms/small_platform.xml -hostfile ../hostfile ${bindir:=.}/bug-17132 --cfg=smpi/simulate-computation:no --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> You requested to use 16 processes, but there is only 5 processes in your hostfile...
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
-> Walltime = 0.468274
\ No newline at end of file
+> Walltime = 0.468274
int main(int argc, char *argv[])
{
- int rank, size;
- int i;
- int *sb;
- int *rb;
+ int rank;
+ int size;
int status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int count = 2;
- sb = (int *) xbt_malloc(count * sizeof(int));
- rb = (int *) xbt_malloc(count * size * sizeof(int));
+ int* sb = (int *) xbt_malloc(count * sizeof(int));
+ int* rb = (int *) xbt_malloc(count * size * sizeof(int));
- for (i = 0; i < count; ++i)
+ for (int i = 0; i < count; ++i)
sb[i] = rank * count + i;
- for (i = 0; i < count * size; ++i)
+ for (int i = 0; i < count * size; ++i)
rb[i] = 0;
printf("[%d] sndbuf=[", rank);
- for (i = 0; i < count; i++)
+ for (int i = 0; i < count; i++)
printf("%d ", sb[i]);
printf("]\n");
status = MPI_Allgather(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
printf("[%d] rcvbuf=[", rank);
- for (i = 0; i < count * size; i++)
+ for (int i = 0; i < count * size; i++)
printf("%d ", rb[i]);
printf("]\n");
fflush(stdout);
}
}
- free(sb);
- free(rb);
+ xbt_free(sb);
+ xbt_free(rb);
MPI_Finalize();
return (EXIT_SUCCESS);
}
! output sort
p Test allgather
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
int main(int argc, char *argv[])
{
- int i,rank, size;
- int *sb, *rb;
- int *recv_counts, *recv_disps;
- int recv_sb_size;
+ int i;
+ int rank;
+ int size;
int status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
- recv_counts = (int *) xbt_malloc(size * sizeof(int));
- recv_disps = (int *) xbt_malloc(size * sizeof(int));
+ int* recv_counts = (int *) xbt_malloc(size * sizeof(int));
+ int* recv_disps = (int *) xbt_malloc(size * sizeof(int));
- recv_sb_size = 0;
+ int recv_sb_size = 0;
for (i = 0; i < size; i++) {
recv_counts[i] = i + 1;
recv_disps[i] = recv_sb_size;
recv_sb_size += i + 1;
}
- sb = (int *) xbt_malloc(recv_counts[rank] * sizeof(int));
- rb = (int *) xbt_malloc(recv_sb_size * sizeof(int));
+ int* sb = (int *) xbt_malloc(recv_counts[rank] * sizeof(int));
+ int* rb = (int *) xbt_malloc(recv_sb_size * sizeof(int));
for (i = 0; i < recv_counts[rank]; ++i)
sb[i] = recv_disps[rank] + i;
fflush(stdout);
}
}
- free(sb);
- free(rb);
- free(recv_counts);
- free(recv_disps);
+ xbt_free(sb);
+ xbt_free(rb);
+ xbt_free(recv_counts);
+ xbt_free(recv_disps);
MPI_Finalize();
return (EXIT_SUCCESS);
}
! output sort
p Test allgatherv
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
p Test allreduce
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! timeout 20
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
int main(int argc, char *argv[])
{
- int rank, size;
+ int rank;
+ int size;
int i;
- int *sb;
- int *rb;
int status;
int mult=1;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
- if (maxlen>1)mult=size;
- sb = (int *) xbt_malloc(size *maxlen * sizeof(int));
- rb = (int *) xbt_malloc(size *maxlen * sizeof(int));
+ if (maxlen > 1)
+ mult = size;
+ int* sb = (int *) xbt_malloc(size *maxlen * sizeof(int));
+ int* rb = (int *) xbt_malloc(size *maxlen * sizeof(int));
for (i = 0; i < size *maxlen; ++i) {
sb[i] = rank*size + i;
fflush(stdout);
}
}
- free(sb);
- free(rb);
+ xbt_free(sb);
+ xbt_free(rb);
MPI_Finalize();
return (EXIT_SUCCESS);
}
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test classic - backbone
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test separate clusters
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/two_clusters.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/two_clusters.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test torus
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test fat tree
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test fat tree IB
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
! output sort
p Test Dragonfly
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> node-0.acme.org
> [rank 1] -> node-1.acme.org
> [rank 2] -> node-2.acme.org
int main(int argc, char *argv[])
{
- int rank, size;
+ int rank;
+ int size;
int i;
- int *sb;
- int *rb;
int status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
- sb = (int *) xbt_malloc(size * sizeof(int) * 2);
- rb = (int *) xbt_malloc(size * sizeof(int) * 2);
+ int* sb = (int *) xbt_malloc(size * sizeof(int) * 2);
+ int* rb = (int *) xbt_malloc(size * sizeof(int) * 2);
for (i = 0; i < size; ++i) {
sb[i] = rank*size + i;
fflush(stdout);
}
}
- free(sb);
- free(rb);
+ xbt_free(sb);
+ xbt_free(rb);
MPI_Finalize();
return (EXIT_SUCCESS);
}
! output sort
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
static void print_buffer_int(void *buf, int len, char *msg, int rank)
{
- int tmp, *v;
+ int* v;
printf("[%d] %s (#%d): ", rank, msg, len);
- for (tmp = 0; tmp < len; tmp++) {
+ for (int tmp = 0; tmp < len; tmp++) {
v = buf;
printf("[%d]", v[tmp]);
}
int main(int argc, char **argv)
{
MPI_Comm comm;
- int *sbuf, *rbuf;
- int i,rank, size;
+ int i;
+ int rank;
+ int size;
int *sendcounts, *recvcounts, *rdispls, *sdispls;
MPI_Init(&argc, &argv);
/* Create the buffer */
MPI_Comm_size(comm, &size);
MPI_Comm_rank(comm, &rank);
- sbuf = (int *) xbt_malloc(size * size * sizeof(int));
- rbuf = (int *) xbt_malloc(size * size * sizeof(int));
+ int* sbuf = (int *) xbt_malloc(size * size * sizeof(int));
+ int* rbuf = (int *) xbt_malloc(size * size * sizeof(int));
/* Load up the buffers */
for (i = 0; i < size * size; i++) {
! output sort
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
int main(int argc, char **argv)
{
- int size, rank;
+ int size;
+ int rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
! output sort
p Test barrier
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> ... Barrier ....
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
int main(int argc, char **argv)
{
- int i, size, rank;
+ int i;
+ int size;
+ int rank;
int count = 2048;
MPI_Init(&argc, &argv);
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
int main(int argc, char *argv[])
{
- int i, rank, size;
- int *sb, *rb;
+ int rank;
+ int size;
int status;
int root = 0;
MPI_Comm_size(MPI_COMM_WORLD, &size);
int count = 2;
- sb = (int *) xbt_malloc(count * sizeof(int));
- rb = (int *) xbt_malloc(count * size * sizeof(int));
+ int* sb = (int *) xbt_malloc(count * sizeof(int));
+ int* rb = (int *) xbt_malloc(count * size * sizeof(int));
- for (i = 0; i < count; ++i)
+ for (int i = 0; i < count; ++i)
sb[i] = rank * count + i;
- for (i = 0; i < count * size; ++i)
+ for (int i = 0; i < count * size; ++i)
rb[i] = 0;
printf("[%d] sndbuf=[", rank);
- for (i = 0; i < count; i++)
+ for (int i = 0; i < count; i++)
printf("%d ", sb[i]);
printf("]\n");
if (rank == root) {
printf("[%d] rcvbuf=[", rank);
- for (i = 0; i < count * size; i++)
+ for (int i = 0; i < count * size; i++)
printf("%d ", rb[i]);
printf("]\n");
fflush(stdout);
}
}
- free(sb);
- free(rb);
+ xbt_free(sb);
+ xbt_free(rb);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return (EXIT_SUCCESS);
! timeout 30
p Test all to all
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
int main( int argc, char **argv )
{
- int err = 0, toterr;
- int *sendbuf, *recvbuf, *recvcounts;
- int size, rank, i, sumval;
+ int err = 0;
+ int toterr;
+ int size;
+ int rank;
+ int i;
MPI_Comm comm;
MPI_Init( &argc, &argv );
MPI_Comm_size( comm, &size );
MPI_Comm_rank( comm, &rank );
- sendbuf = (int *) malloc( size * sizeof(int) );
+ int* sendbuf = (int *) malloc( size * sizeof(int) );
for (i=0; i<size; i++)
sendbuf[i] = rank + i;
- recvcounts = (int *)malloc( size * sizeof(int) );
- recvbuf = (int *)malloc( size * sizeof(int) );
+ int* recvcounts = (int*) malloc (size * sizeof(int));
+ int* recvbuf = (int*) malloc (size * sizeof(int));
for (i=0; i<size; i++)
recvcounts[i] = 1;
MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
- sumval = size * rank + ((size - 1) * size)/2;
+ int sumval = size * rank + ((size - 1) * size)/2;
/* recvbuf should be size * (rank + i) */
if (recvbuf[0] != sumval) {
err++;
! output sort
p Test reduce_scatter
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> No Errors
> [rank 0] -> Tremblay
> [rank 10] -> Fafard
int main(int argc, char *argv[])
{
- int rank, size;
+ int rank;
+ int size;
int i;
- unsigned long long *sb;
- unsigned long long *rb;
int status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
- sb = (unsigned long long *) xbt_malloc(size * sizeof(unsigned long long));
- rb = (unsigned long long *) xbt_malloc(size * sizeof(unsigned long long));
+ unsigned long long* sb = (unsigned long long *) xbt_malloc(size * sizeof(unsigned long long));
+ unsigned long long* rb = (unsigned long long *) xbt_malloc(size * sizeof(unsigned long long));
for (i = 0; i < size; ++i) {
sb[i] = rank*size + i;
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
> [0] second sndbuf=[0 ]
> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
int main(int argc, char **argv)
{
- int size, rank;
+ int size;
+ int rank;
int success = 1;
int retval;
int sendcount = 1; // one double to each process
! output sort
p Test scatter
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
> [0] ok.
> [10] ok.
> [11] ok.
foreach (test ${umpire_tests_passing})
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! timeout 30")
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! output display" APPEND)
- write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
+ write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
endforeach()
foreach (test ${umpire_tests_deadlock} ${umpire_tests_problematic} )
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! timeout 30" )
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! expect return 3" APPEND)
write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "! output display" APPEND)
- write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
+ write_file(${CMAKE_CURRENT_BINARY_DIR}/${test}.tesh "\$ \${bindir:=.}/../../../../smpi_script/bin/smpirun -wrapper \"\${bindir:=.}/../../../../bin/simgrid-mc\" -hostfile ../../hostfile -platform ../../../../examples/platforms/small_platform.xml --log=xbt_cfg.thresh:warning -np 3 --cfg=smpi/host-speed:1e9 --cfg=smpi/coll_selector:mpich \${bindir:=.}/${test} --log=smpi_coll.thresh:error" APPEND)
endforeach()
endif()
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* Functional test for SMPI_PARTIAL_SHARED_MALLOC: even ranks fill their
+ * private blocks and send the buffer to their odd successor, which then
+ * checks the received content block by block (requires an even comm size). */
+
+#include <stdio.h>
+#include <mpi.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <assert.h>
+#include <string.h> /* memset */
+
+// Set the elements between buf[start] and buf[stop-1] to (i+value)%256
+static void set(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ for(size_t i = start; i < stop; i++) {
+ buf[i] = (i+value)%256;
+ }
+}
+
+// Return the number of times that an element is equal to (i+value)%256 between buf[start] and buf[stop-1].
+static int count_all(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ size_t occ = 0;
+ for(size_t i = start ; i < stop ; i++) {
+ if(buf[i] == (i+value)%256) {
+ occ ++;
+ }
+ }
+ return occ;
+}
+
+// Return true iff the values from buf[start] to buf[stop-1] are all equal to (i+value)%256.
+static int check_all(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ size_t occ = count_all(buf, start, stop, value);
+ return occ == stop-start;
+}
+
+// Return true iff "enough" elements are equal to (i+value)%256 between buf[start] and buf[stop-1].
+static int check_enough(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ int page_size = 0x1000;
+ size_t size = stop-start;
+ if(size <= 2*page_size) // we are not sure to have a whole page that is shared
+ return 1;
+ size_t occ = count_all(buf, start, stop, value);
+ return occ >= size - 2*page_size;
+}
+
+int main(int argc, char *argv[])
+{
+ MPI_Init(&argc, &argv);
+ int rank;
+ int size;
+ size_t mem_size = 0x1000000;
+ size_t shared_blocks[] = {
+ 0, 0x123456,
+ 0x130000, 0x130001,
+ 0x345678, 0x345789,
+ 0x444444, 0x555555,
+ 0x555556, 0x560000,
+ 0x800000, 0x1000000
+ };
+ int nb_blocks = (sizeof(shared_blocks)/sizeof(size_t))/2;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ //Let's Allocate a shared memory buffer
+ assert(size%2 == 0);
+ uint8_t *buf;
+ buf = SMPI_PARTIAL_SHARED_MALLOC(mem_size, shared_blocks, nb_blocks);
+ memset(buf, rank, mem_size);
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ // Even processes write their rank in private blocks
+ if(rank%2 == 0) {
+ for(int i = 0; i < nb_blocks-1; i++) {
+ size_t start = shared_blocks[2*i+1];
+ size_t stop = shared_blocks[2*i+2];
+ set(buf, start, stop, rank);
+ }
+ }
+ // Then, even processes send their buffer to their successor
+ if(rank%2 == 0) {
+ MPI_Send(buf, mem_size, MPI_UINT8_T, rank+1, 0, MPI_COMM_WORLD);
+ }
+ else {
+ MPI_Recv(buf, mem_size, MPI_UINT8_T, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ }
+
+
+ // Odd processes verify that they successfully received the message
+ if(rank%2 == 1) {
+ for(int i = 0; i < nb_blocks-1; i++) {
+ size_t start = shared_blocks[2*i+1];
+ size_t stop = shared_blocks[2*i+2];
+ int comm = check_all(buf, start, stop, rank-1);
+ printf("[%d] The result of the (normal) communication check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, comm);
+ }
+ memset(buf, rank, mem_size);
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ // Then, even processes send a sub-part of their buffer their successor
+ // Note that the last block should not be copied entirely
+ if(rank%2 == 0) {
+ MPI_Send(buf+0x10000, mem_size-0xa00000, MPI_UINT8_T, rank+1, 0, MPI_COMM_WORLD);
+ }
+ else {
+ MPI_Recv(buf+0x10000, mem_size-0xa00000, MPI_UINT8_T, rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ }
+
+
+ // Odd processes verify that they successfully received the message
+ if(rank%2 == 1) {
+ for(int i = 0; i < nb_blocks-1; i++) {
+ size_t start = shared_blocks[2*i+1];
+ size_t stop = shared_blocks[2*i+2];
+ int comm = check_all(buf, start, stop, rank-1);
+ printf("[%d] The result of the (shifted) communication check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, comm);
+ }
+ }
+
+ SMPI_SHARED_FREE(buf);
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+p Test partial shared communication
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+! timeout 5
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-partial-shared-communication --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+> [3] The result of the (normal) communication check for block (0x123456, 0x130000) is: 1
+> [3] The result of the (normal) communication check for block (0x130001, 0x345678) is: 1
+> [3] The result of the (normal) communication check for block (0x345789, 0x444444) is: 1
+> [3] The result of the (normal) communication check for block (0x555555, 0x555556) is: 1
+> [3] The result of the (normal) communication check for block (0x560000, 0x800000) is: 1
+> [1] The result of the (normal) communication check for block (0x123456, 0x130000) is: 1
+> [1] The result of the (normal) communication check for block (0x130001, 0x345678) is: 1
+> [1] The result of the (normal) communication check for block (0x345789, 0x444444) is: 1
+> [1] The result of the (normal) communication check for block (0x555555, 0x555556) is: 1
+> [1] The result of the (normal) communication check for block (0x560000, 0x800000) is: 1
+> [3] The result of the (shifted) communication check for block (0x123456, 0x130000) is: 1
+> [3] The result of the (shifted) communication check for block (0x130001, 0x345678) is: 1
+> [3] The result of the (shifted) communication check for block (0x345789, 0x444444) is: 1
+> [3] The result of the (shifted) communication check for block (0x555555, 0x555556) is: 1
+> [3] The result of the (shifted) communication check for block (0x560000, 0x800000) is: 0
+> [1] The result of the (shifted) communication check for block (0x123456, 0x130000) is: 1
+> [1] The result of the (shifted) communication check for block (0x130001, 0x345678) is: 1
+> [1] The result of the (shifted) communication check for block (0x345789, 0x444444) is: 1
+> [1] The result of the (shifted) communication check for block (0x555555, 0x555556) is: 1
+> [1] The result of the (shifted) communication check for block (0x560000, 0x800000) is: 0
--- /dev/null
+/* Copyright (c) 2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* Functional test for SMPI_PARTIAL_SHARED_MALLOC: rank 0 writes into the
+ * shared blocks of a partially-shared buffer; every rank then checks that
+ * the shared blocks saw (enough of) the write and the private blocks did not. */
+
+#include <stdio.h>
+#include <mpi.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+// Set the elements between buf[start] and buf[stop-1] to (i+value)%256
+static void set(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ for(size_t i = start; i < stop; i++) {
+ buf[i] = (i+value)%256;
+ }
+}
+
+// Return the number of times that an element is equal to (i+value)%256 between buf[start] and buf[stop-1].
+static int count_all(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ size_t occ = 0;
+ for(size_t i = start ; i < stop ; i++) {
+ if(buf[i] == (i+value)%256) {
+ occ ++;
+ }
+ }
+ return occ;
+}
+
+// Return true iff the values from buf[start] to buf[stop-1] are all equal to (i+value)%256.
+static int check_all(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ size_t occ = count_all(buf, start, stop, value);
+ return occ == stop-start;
+}
+
+// Return true iff "enough" elements are equal to (i+value)%256 between buf[start] and buf[stop-1].
+// "Enough" tolerates up to two pages of slack at the block edges, since sharing
+// happens at page granularity and block boundaries need not be page-aligned.
+static int check_enough(uint8_t *buf, size_t start, size_t stop, uint8_t value) {
+ int page_size = 0x1000;
+ size_t size = stop-start;
+ if(size <= 2*page_size) // we are not sure to have a whole page that is shared
+ return 1;
+ size_t occ = count_all(buf, start, stop, value);
+ return occ >= size - 2*page_size;
+}
+
+int main(int argc, char *argv[])
+{
+ MPI_Init(&argc, &argv);
+ int rank;
+ int size;
+ size_t mem_size = 0x1000000;
+ // Pairs of (start, stop) offsets: each [start, stop) range is shared between
+ // ranks; the gaps between consecutive pairs are private to each rank.
+ size_t shared_blocks[] = {
+ 0, 0x123456,
+ 0x130000, 0x130001,
+ 0x345678, 0x345789,
+ 0x444444, 0x555555,
+ 0x555556, 0x560000,
+ 0x800000, 0x1000000
+ };
+ int nb_blocks = (sizeof(shared_blocks)/sizeof(size_t))/2;
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ //Let's Allocate a shared memory buffer
+ uint8_t *buf;
+ buf = SMPI_PARTIAL_SHARED_MALLOC(mem_size, shared_blocks, nb_blocks);
+ set(buf, 0, mem_size, 0);
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ // Process 0 write in shared blocks
+ if(rank == 0) {
+ for(int i = 0; i < nb_blocks; i++) {
+ size_t start = shared_blocks[2*i];
+ size_t stop = shared_blocks[2*i+1];
+ set(buf, start, stop, 42);
+ }
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ // All processes check that their shared blocks have been written (at least partially)
+ for(int i = 0; i < nb_blocks; i++) {
+ size_t start = shared_blocks[2*i];
+ size_t stop = shared_blocks[2*i+1];
+ int is_shared = check_enough(buf, start, stop, 42);
+ printf("[%d] The result of the shared check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, is_shared);
+ }
+
+
+ // Check the private blocks
+ // Private ranges must still hold the initial per-rank pattern (value 0).
+ MPI_Barrier(MPI_COMM_WORLD);
+ for(int i = 0; i < nb_blocks-1; i++) {
+ size_t start = shared_blocks[2*i+1];
+ size_t stop = shared_blocks[2*i+2];
+ int is_private = check_all(buf, start, stop, 0);
+ printf("[%d] The result of the private check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, is_private);
+ }
+
+ SMPI_SHARED_FREE(buf);
+
+ MPI_Finalize();
+ return 0;
+}
--- /dev/null
+p Test partial shared memory
+! setenv LD_LIBRARY_PATH=../../lib
+! output sort
+! timeout 5
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-partial-shared --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
+> [0] The result of the shared check for block (0x0, 0x123456) is: 1
+> [0] The result of the shared check for block (0x130000, 0x130001) is: 1
+> [0] The result of the shared check for block (0x345678, 0x345789) is: 1
+> [0] The result of the shared check for block (0x444444, 0x555555) is: 1
+> [0] The result of the shared check for block (0x555556, 0x560000) is: 1
+> [0] The result of the shared check for block (0x800000, 0x1000000) is: 1
+> [3] The result of the shared check for block (0x0, 0x123456) is: 1
+> [3] The result of the shared check for block (0x130000, 0x130001) is: 1
+> [3] The result of the shared check for block (0x345678, 0x345789) is: 1
+> [3] The result of the shared check for block (0x444444, 0x555555) is: 1
+> [3] The result of the shared check for block (0x555556, 0x560000) is: 1
+> [3] The result of the shared check for block (0x800000, 0x1000000) is: 1
+> [1] The result of the shared check for block (0x0, 0x123456) is: 1
+> [1] The result of the shared check for block (0x130000, 0x130001) is: 1
+> [1] The result of the shared check for block (0x345678, 0x345789) is: 1
+> [1] The result of the shared check for block (0x444444, 0x555555) is: 1
+> [1] The result of the shared check for block (0x555556, 0x560000) is: 1
+> [1] The result of the shared check for block (0x800000, 0x1000000) is: 1
+> [2] The result of the shared check for block (0x0, 0x123456) is: 1
+> [2] The result of the shared check for block (0x130000, 0x130001) is: 1
+> [2] The result of the shared check for block (0x345678, 0x345789) is: 1
+> [2] The result of the shared check for block (0x444444, 0x555555) is: 1
+> [2] The result of the shared check for block (0x555556, 0x560000) is: 1
+> [2] The result of the shared check for block (0x800000, 0x1000000) is: 1
+> [0] The result of the private check for block (0x123456, 0x130000) is: 1
+> [0] The result of the private check for block (0x130001, 0x345678) is: 1
+> [0] The result of the private check for block (0x345789, 0x444444) is: 1
+> [0] The result of the private check for block (0x555555, 0x555556) is: 1
+> [0] The result of the private check for block (0x560000, 0x800000) is: 1
+> [3] The result of the private check for block (0x123456, 0x130000) is: 1
+> [3] The result of the private check for block (0x130001, 0x345678) is: 1
+> [3] The result of the private check for block (0x345789, 0x444444) is: 1
+> [3] The result of the private check for block (0x555555, 0x555556) is: 1
+> [3] The result of the private check for block (0x560000, 0x800000) is: 1
+> [1] The result of the private check for block (0x123456, 0x130000) is: 1
+> [1] The result of the private check for block (0x130001, 0x345678) is: 1
+> [1] The result of the private check for block (0x345789, 0x444444) is: 1
+> [1] The result of the private check for block (0x555555, 0x555556) is: 1
+> [1] The result of the private check for block (0x560000, 0x800000) is: 1
+> [2] The result of the private check for block (0x123456, 0x130000) is: 1
+> [2] The result of the private check for block (0x130001, 0x345678) is: 1
+> [2] The result of the private check for block (0x345789, 0x444444) is: 1
+> [2] The result of the private check for block (0x555555, 0x555556) is: 1
+> [2] The result of the private check for block (0x560000, 0x800000) is: 1
p Test compute and bench
! output sort
! timeout 45
-$ ${bindir:=.}/../../../bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform_with_routers.xml -np 3 --log=root.thres:warning ${bindir:=.}/macro-sample quiet --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform_with_routers.xml -np 3 --log=root.thres:warning ${bindir:=.}/macro-sample quiet --log=smpi_kernel.thres:warning
> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
> (0) Run the first computation. It's globally benched, and I want no more than 4 benchmarks (thres<0)
! setenv LD_LIBRARY_PATH=../../lib
! output sort
! timeout 5
-$ ${bindir:=.}/../../../bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-shared --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/macro-shared --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [0] After change, the value in the shared buffer is: 16053117601147974045
> [0] The value in the shared buffer is: 4
> [1] After change, the value in the shared buffer is: 16053117601147974045
# add_executable(opband opband.c)
# add_executable(opbor opbor.c)
# add_executable(opbxor opbxor.c)
-# add_executable(op_commutative op_commutative.c)
+ add_executable(op_commutative op_commutative.c)
# add_executable(opland opland.c)
# add_executable(oplor oplor.c)
# add_executable(oplxor oplxor.c)
# target_link_libraries(opband simgrid mtest_c)
# target_link_libraries(opbor simgrid mtest_c)
# target_link_libraries(opbxor simgrid mtest_c)
-# target_link_libraries(op_commutative simgrid mtest_c)
+ target_link_libraries(op_commutative simgrid mtest_c)
# target_link_libraries(opland simgrid mtest_c)
# target_link_libraries(oplor simgrid mtest_c)
# target_link_libraries(oplxor simgrid mtest_c)
set_target_properties(bcast_min_datatypes PROPERTIES COMPILE_FLAGS "-DBCAST_MIN_DATATYPES_ONLY" LINK_FLAGS "-DBCAST_MIN_DATATYPES_ONLY")
set_target_properties(bcast_comm_world PROPERTIES COMPILE_FLAGS "-DBCAST_COMM_WORLD_ONLY" LINK_FLAGS "-DBCAST_COMM_WORLD_ONLY")
+ # These tests take 5 to 15 seconds to run, so we don't want to run them several times.
+ # But at the same time, we'd like to check if they work for all factories and all privatization algorithm
+ # Thus the current matrix
+
+ # Test default selector: THREAD if available; RAW if not (with mmap privatization or none)
if(HAVE_THREAD_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-coll-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-coll-thread ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-thread PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
else()
if(HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-coll-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-coll-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
endif()
+
+ # Test OMPI selector: CONTEXT if available; RAW if not (with mmap privatization or none)
if(HAVE_UCONTEXT_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-coll-ompi-ucontext ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:ucontext -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION} -execarg=--cfg=smpi/bcast:binomial_tree)
+ ADD_TEST(test-smpi-mpich3-coll-ompi-ucontext ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:ucontext -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION} -execarg=--cfg=smpi/bcast:binomial_tree)
SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-ompi-ucontext PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
else()
if(HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-coll-ompi-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION} -execarg=--cfg=smpi/bcast:binomial_tree)
+ ADD_TEST(test-smpi-mpich3-coll-ompi-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION} -execarg=--cfg=smpi/bcast:binomial_tree)
SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-ompi-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
endif()
+
+ # Test MPICH selector: dlopen privatization and PTHREAD if exists (without priv and with raw if not)
+ if(HAVE_PRIVATIZATION AND HAVE_THREAD_CONTEXTS)
+ ADD_TEST(test-smpi-mpich3-coll-mpich-thread-dlopen ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:mpich -execarg=--cfg=smpi/privatization:dlopen)
+ SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-mpich-thread-dlopen PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ else()
+ if(HAVE_RAW_CONTEXTS)
+ ADD_TEST(test-smpi-mpich3-coll-mpich-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:mpich -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
+ SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-mpich-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ endif()
+ endif()
+
+ # Test MVAPICH2 selector: dlopen privatization and ucontext contexts if available (otherwise raw contexts, without dlopen)
+ if(HAVE_PRIVATIZATION AND HAVE_UCONTEXT_CONTEXTS)
+ ADD_TEST(test-smpi-mpich3-coll-mvapich2-ucontext-dlopen ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:ucontext -execarg=--cfg=smpi/coll_selector:mvapich2 -execarg=--cfg=smpi/privatization:dlopen)
+ SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-mvapich2-ucontext-dlopen PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ else()
+ if(HAVE_RAW_CONTEXTS)
+ ADD_TEST(test-smpi-mpich3-coll-mvapich2-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:mvapich2 -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
+ SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-mvapich2-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ endif()
+ endif()
+
+ # Test IMPI selector: always raw contexts, with dlopen privatization if available
if(HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-coll-mpich-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:mpich -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
- ADD_TEST(test-smpi-mpich3-coll-mvapich2-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:mvapich2 -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
- ADD_TEST(test-smpi-mpich3-coll-impi-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:impi -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
- SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-mpich-raw test-smpi-mpich3-coll-mpich-raw test-smpi-mpich3-coll-mvapich2-raw test-smpi-mpich3-coll-impi-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ if(HAVE_PRIVATIZATION)
+ ADD_TEST(test-smpi-mpich3-coll-impi-raw-dlopen ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:impi -execarg=--cfg=smpi/privatization:dlopen)
+ SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-impi-raw-dlopen PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ else()
+ ADD_TEST(test-smpi-mpich3-coll-impi-raw-nopriv ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:impi -execarg=--cfg=smpi/privatization:no)
+ SET_TESTS_PROPERTIES(test-smpi-mpich3-coll-impi-raw-nopriv PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+ endif()
endif()
endif()
reduce_mpich 5
reduce_mpich 10
reduce_local 2 mpiversion=2.2
-#op_commutative 2
+op_commutative 2
red3 10
red4 10
alltoall1 8
endif()
if (enable_smpi_MPICH3_testsuite AND HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-datatype-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/datatype ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-datatype-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/datatype ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/datatype -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-datatype-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
endif()
if (enable_smpi_MPICH3_testsuite AND HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-info-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/info ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/info -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-info-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/info ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/info -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-info-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
- foreach(file anyall bottom eagerdt inactivereq isendself isendirecv isendselfprobe issendselfcancel cancelanysrc pingping probenull
+ foreach(file anyall bottom eagerdt huge_anysrc huge_underflow inactivereq isendself isendirecv isendselfprobe issendselfcancel cancelanysrc pingping probenull
dtype_send probe-unexp sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull many_isend manylmt recv_any)
# not compiled files: big_count_status bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending mprobe
# cancelrecv greq1 icsend large_message pscancel rcancel rqfreeb rqstatus scancel2 scancel sendself scancel_unmatch
endif()
if (enable_smpi_MPICH3_testsuite AND HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/pt2pt ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-pt2pt-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/pt2pt ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/pt2pt -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-pt2pt-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
-foreach(file anyall bottom eagerdt inactivereq isendself isendirecv isendselfprobe issendselfcancel pingping probenull
+foreach(file anyall bottom eagerdt huge_anysrc huge_underflow inactivereq isendself isendirecv isendselfprobe issendselfcancel pingping probenull
probe-unexp sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull
big_count_status bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending
cancelrecv cancelanysrc dtype_send greq1 icsend large_message pscancel rcancel rqfreeb rqstatus scancel2 scancel sendself many_isend manylmt mprobe recv_any scancel_unmatch
--- /dev/null
+/*
+ * (C) 2017 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ *
+ * Portions of this code were written by Intel Corporation.
+ * Copyright (C) 2011-2017 Intel Corporation. Intel provides this material
+ * to Argonne National Laboratory subject to Software Grant and Corporate
+ * Contributor License Agreement dated February 8, 2012.
+ *
+ * This program checks if MPICH can correctly handle many outstanding large
+ * message transfers which use wildcard receives.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <mpi.h>
+
+#define N_TRY 16
+#define BLKSIZE (10*1024*1024)
+
+int main(int argc, char *argv[])
+{
+ int size, rank;
+ int dest;
+ int i;
+ char *buff;
+ MPI_Request reqs[N_TRY];
+
+ MPI_Init(&argc, &argv);
+
+ buff = malloc(N_TRY * BLKSIZE);
+ memset(buff, -1, N_TRY * BLKSIZE);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ dest = size - 1;
+
+ if (rank == 0) {
+ for (i = 0; i < N_TRY; i++)
+ MPI_Isend(buff + BLKSIZE*i, BLKSIZE, MPI_BYTE, dest, 0, MPI_COMM_WORLD, &reqs[i]);
+ MPI_Waitall(N_TRY, reqs, MPI_STATUSES_IGNORE);
+ }
+ else if (rank == dest) {
+ for (i = 0; i < N_TRY; i++)
+ MPI_Irecv(buff + BLKSIZE*i, BLKSIZE, MPI_BYTE, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &reqs[i]);
+ MPI_Waitall(N_TRY, reqs, MPI_STATUSES_IGNORE);
+ }
+
+ free(buff);
+
+ if (rank == 0)
+ puts(" No Errors");
+
+ MPI_Finalize();
+
+ return 0;
+}
--- /dev/null
+
+/*
+ * (C) 2017 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ *
+ * Portions of this code were written by Intel Corporation.
+ * Copyright (C) 2011-2017 Intel Corporation. Intel provides this material
+ * to Argonne National Laboratory subject to Software Grant and Corporate
+ * Contributor License Agreement dated February 8, 2012.
+ *
+ * This program checks if MPICH can correctly handle a huge message receive
+ * when the sender underflows by sending a much smaller message
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <mpi.h>
+
+#define HUGE_SIZE (10*1024*1024)
+
+int main(int argc, char *argv[])
+{
+ int size, rank;
+ int dest;
+ char *buff;
+
+ MPI_Init(&argc, &argv);
+
+ buff = malloc(HUGE_SIZE);
+ buff[0] = 0;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+ dest = size - 1;
+
+ /* Try testing underflow to make sure things work if we try to send 1 byte
+ * when receiving a huge message */
+ if (rank == 0) {
+ MPI_Send(buff, 1, MPI_BYTE, dest, 0, MPI_COMM_WORLD);
+ } else if (rank == dest) {
+ MPI_Recv(buff, HUGE_SIZE, MPI_BYTE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+ }
+
+ free(buff);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (rank == 0)
+ puts(" No Errors");
+
+ MPI_Finalize();
+
+ return 0;
+}
big_count_status 1 mpiversion=3.0
many_isend 3
manylmt 2
+huge_anysrc 2
+huge_underflow 2
#Needs MPI_Irsend
#dtype_send 2
recv_any 2
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
foreach(file accfence1 accfence2_am accfence2 accpscw1 allocmem epochtest getfence1 getgroup manyrma3 nullpscw lockcontention lockopts contig_displ
- putfence1 putfidx putpscw1 test1_am test1 test1_dt test2_am test2 test3 test4 test5_am test5 transpose1 transpose2 lockcontention2
- transpose3 transpose4 transpose5 transpose6 transpose7 window_creation selfrma locknull
- at_complete acc-pairtype manyget large-small-acc
- lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed
- strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget)
- # not compiled files: acc-loc adlb_mimic1 badrma compare_and_swap contention_put
- # contention_putget contig_displ fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
- # fetch_and_op fkeyvalwin flush get_acc_local get_accumulate linked_list_bench_lock_all
- # linked_list_bench_lock_excl linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
- # manyrma2 mcs-mutex mixedsync mutex_bench lockcontention3
- # pscw_ordering put_base put_bottom req_example reqops rmanull rmazero rma-contig selfrma
- # strided_acc_subarray strided_getacc_indexed strided_getacc_indexed_shared
- # win_dynamic_acc win_flavors win_info win_shared win_shared_noncontig win_shared_noncontig_put
- # win_large_shm win_zero wintest get-struct atomic_rmw_fop atomic_rmw_gacc rget-unlock atomic_get atomic_rmw_cas
- # win_shared_zerobyte aint derived-acc-flush_local large-acc-flush_local lockall_dt lockall_dt_flushall
- # lockall_dt_flushlocalall lockall_dt_flushlocal lock_contention_dt lock_dt lock_dt_flush lock_dt_flushlocal
- #racc_local_comp rput_local_comp win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
+ putfence1 putfidx putpscw1 test1_am test1 test1_dt test2_am test2 test3 test3_am test4 test4_am test5_am test5 transpose1 transpose2 lockcontention2
+ transpose3 transpose4 transpose5 transpose6 transpose7 window_creation selfrma locknull win_info
+ at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc fetch_and_op flush req_example rmanull rmazero badrma
+ lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed strided_getacc_indexed compare_and_swap
+ lockall_dt lockall_dt_flushall lock_dt_flush lockall_dt_flush lockall_dt_flushlocalall lockall_dt_flushlocal lock_dt_flushlocal
+ strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget
+ adlb_mimic1 lock_contention_dt acc-loc get_acc_local get_accumulate put_base put_bottom
+ linked_list_bench_lock_all linked_list_bench_lock_excl manyrma2 pscw_ordering rma-contig get-struct
+ rput_local_comp racc_local_comp)
+ # fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
+ # linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
+ # mcs-mutex mixedsync mutex_bench lockcontention3 reqops
+ # strided_getacc_indexed_shared rget-unlock
+ # win_flavors win_shared win_shared_noncontig win_shared_noncontig_put
+ # win_large_shm win_zero wintest atomic_rmw_fop atomic_rmw_gacc atomic_get atomic_rmw_cas
+ # win_shared_zerobyte aint derived-acc-flush_local large-acc-flush_local
+ # win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
# wrma_flush_get
add_executable(${file} ${file}.c)
target_link_libraries(${file} simgrid mtest_c)
set_target_properties(${file} PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS}")
endforeach()
+
+ add_executable(transpose3_shm transpose3.c)
+ target_link_libraries(transpose3_shm simgrid mtest_c)
+ set_target_properties(transpose3_shm PROPERTIES COMPILE_FLAGS "${MPICH_FLAGS} -DUSE_WIN_ALLOCATE")
endif()
if (enable_smpi_MPICH3_testsuite AND HAVE_RAW_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-rma-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/rma ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/rma -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-rma-raw ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/rma ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/rma -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-rma-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
endif()
#define SIZE 100
-MPI_Win win;
-int win_buf[SIZE], origin_buf[SIZE], result_buf[SIZE];
int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
- MPI_Datatype result_type, int target_count, MPI_Datatype target_type)
+ MPI_Datatype result_type, int target_count, MPI_Datatype target_type, MPI_Win win, int* win_buf, int* origin_buf, int* result_buf);
+int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
+ MPI_Datatype result_type, int target_count, MPI_Datatype target_type, MPI_Win win, int* win_buf, int* origin_buf, int* result_buf)
{
int errs = 0, ret, origin_type_size, result_type_size;
int main(int argc, char *argv[])
{
+ MPI_Win win;
+ int win_buf[SIZE], origin_buf[SIZE], result_buf[SIZE];
int rank, nprocs, i, j, k;
int errs = 0;
MPI_Datatype types[4];
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
for (k = 0; k < 4; k++)
- do_test(0, types[i], 0, types[j], 0, types[k]);
+ do_test(0, types[i], 0, types[j], 0, types[k], win, win_buf, origin_buf, result_buf);
/* single zero-size datatype, but non-zero count */
for (i = 1; i < 4; i++) {
for (j = 1; j < 4; j++) {
- do_test(1, types[0], 0, types[i], 0, types[j]);
- do_test(0, types[i], 1, types[0], 0, types[j]);
- do_test(0, types[i], 0, types[j], 1, types[0]);
+ do_test(1, types[0], 0, types[i], 0, types[j], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 1, types[0], 0, types[j], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 0, types[j], 1, types[0], win, win_buf, origin_buf, result_buf);
}
}
/* two zero-size datatypes, but non-zero count */
for (i = 1; i < 4; i++) {
- do_test(1, types[0], 1, types[0], 0, types[i]);
- do_test(1, types[0], 0, types[i], 1, types[0]);
- do_test(0, types[i], 1, types[0], 1, types[0]);
+ do_test(1, types[0], 1, types[0], 0, types[i], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 0, types[i], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 1, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
- do_test(1, types[0], 2, types[0], 0, types[i]);
- do_test(2, types[0], 1, types[0], 0, types[i]);
+ do_test(1, types[0], 2, types[0], 0, types[i], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 1, types[0], 0, types[i], win, win_buf, origin_buf, result_buf);
- do_test(1, types[0], 0, types[i], 2, types[0]);
- do_test(2, types[0], 0, types[i], 1, types[0]);
+ do_test(1, types[0], 0, types[i], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 0, types[i], 1, types[0], win, win_buf, origin_buf, result_buf);
- do_test(0, types[i], 1, types[0], 2, types[0]);
- do_test(0, types[i], 2, types[0], 1, types[0]);
+ do_test(0, types[i], 1, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(0, types[i], 2, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
}
/* three zero-size datatypes, but non-zero count */
- do_test(1, types[0], 1, types[0], 1, types[0]);
- do_test(1, types[0], 1, types[0], 2, types[0]);
- do_test(1, types[0], 2, types[0], 1, types[0]);
- do_test(1, types[0], 2, types[0], 2, types[0]);
- do_test(2, types[0], 1, types[0], 1, types[0]);
- do_test(2, types[0], 1, types[0], 2, types[0]);
- do_test(2, types[0], 2, types[0], 1, types[0]);
+ do_test(1, types[0], 1, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 1, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 2, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(1, types[0], 2, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 1, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 1, types[0], 2, types[0], win, win_buf, origin_buf, result_buf);
+ do_test(2, types[0], 2, types[0], 1, types[0], win, win_buf, origin_buf, result_buf);
}
MPI_Win_fence(0, win);
#define MAXELEMS 6400
#define COUNT 1000
-static int me, nproc;
static const int verbose = 0;
int test_put(void);
int test_put(void)
{
+ int me, nproc;
+ MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+ MPI_Comm_rank(MPI_COMM_WORLD, &me);
+
+ assert(COUNT <= MAXELEMS);
+
+ if (me == 0 && verbose) {
+ printf("Test starting on %d processes\n", nproc);
+ fflush(stdout);
+ }
+
MPI_Win dst_win;
double *dst_buf;
double src_buf[MAXELEMS];
int errs = 0;
MTest_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &nproc);
- MPI_Comm_rank(MPI_COMM_WORLD, &me);
-
- assert(COUNT <= MAXELEMS);
-
- if (me == 0 && verbose) {
- printf("Test starting on %d processes\n", nproc);
- fflush(stdout);
- }
errs = test_put();
#define MAXELEMS 6400
#define COUNT 1000
-static int me, nproc;
+
static const int verbose = 0;
void test_put(void);
void test_put(void)
{
+ int me, nproc;
+ MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+ MPI_Comm_rank(MPI_COMM_WORLD, &me);
MPI_Win dst_win;
double *dst_buf;
double src_buf[MAXELEMS];
int main(int argc, char *argv[])
{
MPI_Init(&argc, &argv);
+ int me, nproc;
MPI_Comm_size(MPI_COMM_WORLD, &nproc);
MPI_Comm_rank(MPI_COMM_WORLD, &me);
#endif
#define CMP(x, y) ((x - ((TYPE_C) (y))) > 1.0e-9)
-
+void reset_vars(TYPE_C * val_ptr, TYPE_C * res_ptr, MPI_Win win);
void reset_vars(TYPE_C * val_ptr, TYPE_C * res_ptr, MPI_Win win)
{
int i, rank, nproc;
for (i = 0; i < NTIMES; i++) {
Get_nextval(win, val_array, get_type, rank, nprocs, counter_vals + i);
- /* printf("Rank %d, counter %d\n", rank, value); */
+ /* printf("Rank %d, counter %d\n", rank, localvalue); */
}
MPI_Win_free(&win);
int errors = 0;
const int NITER = 1000;
-const int acc_val = 3;
+
int main(int argc, char **argv)
{
int rank, nproc;
int out_val, i, counter = 0;
MPI_Win win;
-
+ int acc_val = 3;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
#define MIN_NPROBE 1
#define ELEM_PER_ROW 16
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) ((X < Y) ? (X) : (Y))
+#define MYMAX(X,Y) ((X > Y) ? (X) : (Y))
/* Linked list pointer */
typedef struct {
static const int verbose = 0;
static const int print_perf = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &procid);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(procid, llist_win);
+ head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
(void *) next_tail_ptr.disp);
tail_ptr = next_tail_ptr;
- pollint = MAX(MIN_NPROBE, pollint / 2);
+ pollint = MYMAX(MIN_NPROBE, pollint / 2);
}
else {
for (j = 0; j < pollint; j++)
MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag,
MPI_STATUS_IGNORE);
- pollint = MIN(MAX_NPROBE, pollint * 2);
+ pollint = MYMIN(MAX_NPROBE, pollint * 2);
}
}
} while (!success);
#include <unistd.h>
#endif
-#define NUM_ELEMS 1000
+#define NUM_ELEMS 100
#define MAX_NPROBE nproc
#define MIN_NPROBE 1
#define ELEM_PER_ROW 16
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) ((X < Y) ? (X) : (Y))
+#define MYMAX(X,Y) ((X > Y) ? (X) : (Y))
/* Linked list pointer */
typedef struct {
static const int verbose = 0;
static const int print_perf = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
double time;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(procid, llist_win);
+ head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
(void *) tail_ptr.disp);
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, tail_ptr.rank, 0, llist_win);
-#if USE_ACC
+#ifdef USE_ACC
MPI_Accumulate(&new_elem_ptr, sizeof(llist_ptr_t), MPI_BYTE, tail_ptr.rank,
(MPI_Aint) & (((llist_elem_t *) tail_ptr.disp)->next),
sizeof(llist_ptr_t), MPI_BYTE, MPI_REPLACE, llist_win);
llist_ptr_t next_tail_ptr;
MPI_Win_lock(MPI_LOCK_EXCLUSIVE, tail_ptr.rank, 0, llist_win);
-#if USE_ACC
+#ifdef USE_ACC
MPI_Get_accumulate(NULL, 0, MPI_DATATYPE_NULL, &next_tail_ptr,
sizeof(llist_ptr_t), MPI_BYTE, tail_ptr.rank,
(MPI_Aint) & (((llist_elem_t *) tail_ptr.disp)->next),
printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
(void *) next_tail_ptr.disp);
tail_ptr = next_tail_ptr;
- pollint = MAX(MIN_NPROBE, pollint / 2);
+ pollint = MYMAX(MIN_NPROBE, pollint / 2);
}
else {
for (j = 0; j < pollint; j++)
MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag,
MPI_STATUS_IGNORE);
- pollint = MIN(MAX_NPROBE, pollint * 2);
+ pollint = MYMIN(MAX_NPROBE, pollint * 2);
}
}
} while (!success);
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
case 6: /* a few small puts (like strided put, but 1 word at a time) */
/* FIXME: The conditional and increment are reversed below. This looks
* like a bug, and currently prevents the following test from running. */
- for (j = 0; j++; j < veccount) {
+ for (j = 0; j < veccount; j++) {
if (buf[j * stride] != PUT_VAL + j) {
errs++;
printf("case %d: value is %d should be %d\n", i, buf[j * stride], PUT_VAL + j);
case 7: /* a few small accumulates (like strided acc, but 1 word at a time) */
/* FIXME: The conditional and increment are reversed below. This looks
* like a bug, and currently prevents the following test from running. */
- for (j = 0; j++; j < veccount) {
+ for (j = 0; j < veccount; j++) {
if (buf[j * stride] != ACC_VAL + j + OFFSET_2 + j * stride) {
errs++;
printf("case %d: value is %d should be %d\n", i,
#include <stdlib.h>
#include <string.h>
-#define MAX_COUNT 65536*4/16
-#define MAX_RMA_SIZE 2 /* 16 in manyrma performance test */
+#define MAX_COUNT 512
+#define MAX_RMA_SIZE 1 /* 16 in manyrma performance test */
#define MAX_RUNS 8
#define MAX_ITER_TIME 5.0 /* seconds */
#else
#ifdef USE_WIN_ALLOC_SHM
MPI_Info_create(&hdl->win_info);
- MPI_Info_set(hdl->win_info, "alloc_shm", "true");
+ MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"true");
#else
MPI_Info_create(&hdl->win_info);
- MPI_Info_set(hdl->win_info, "alloc_shm", "false");
+ MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"false");
#endif
MPI_Win_allocate(2 * sizeof(int), sizeof(int), hdl->win_info, hdl->comm,
&hdl->base, &hdl->window);
/* Use a global variable to inhibit compiler optimizations in the compute
* function. */
double junk = 0.0;
-
+void compute(int step, double *data);
void compute(int step, double *data)
{
int i;
MPI_Info_create(&win_info);
#ifdef USE_WIN_ALLOC_SHM
- MPI_Info_set(win_info, "alloc_shm", "true");
+ MPI_Info_set(win_info, (char*)"alloc_shm", (char*)"true");
#else
- MPI_Info_set(win_info, "alloc_shm", "false");
+ MPI_Info_set(win_info, (char*)"alloc_shm", (char*)"false");
#endif
MPI_Win_allocate(NSTEPS * N * sizeof(double), sizeof(double), win_info,
#include <string.h>
#include <mpi.h>
-#define MAX_DATA_SIZE (1024*128*16)
-#define MAX_NUM_ITERATIONS (8192*4)
+#define MAX_DATA_SIZE (1024)
+#define MAX_NUM_ITERATIONS (1024)
#define MIN_NUM_ITERATIONS 8
#define NUM_WARMUP_ITER 1
const int verbose = 0;
static int rank;
-void run_test(int lock_mode, int lock_assert)
+static void run_test(int lock_mode, int lock_assert)
{
int nproc, test_iter, target_rank, data_size;
- int *buf, *win_buf;
+ char *buf, *win_buf;
MPI_Win win;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
t_acc = MPI_Wtime();
MPI_Win_lock(lock_mode, target_rank, lock_assert, win);
- MPI_Accumulate(buf, data_size / sizeof(int), MPI_INT, target_rank,
- 0, data_size / sizeof(int), MPI_INT, MPI_SUM, win);
+ MPI_Accumulate(buf, data_size, MPI_BYTE, target_rank,
+ 0, data_size, MPI_BYTE, MPI_SUM, win);
MPI_Win_unlock(target_rank, win);
}
t_acc = (MPI_Wtime() - t_acc) / num_iter;
#define XDIM 1024
#define YDIM 1024
-#define ITERATIONS 10
+#define ITERATIONS 3
int main(int argc, char **argv)
{
putfidx 4
getfence1 4
accfence1 4
-#Needs lock, unlock
-#adlb_mimic1 3
+adlb_mimic1 3
accfence2 4
putpscw1 4
accpscw1 4
transpose1 2
transpose2 2
transpose3 2
-#Needs MPI_Win_allocate
-#transpose3_shm 2
+transpose3_shm 2
transpose5 2
transpose6 1
transpose7 2
test3 2
test4 2
test5 2
-#Needs lock, unlock
lockcontention 3
lockcontention2 4
lockcontention2 8
#Buggy one.
#lockcontention3 8
lockopts 2
-#needs get_accumulate
-#lock_dt 2
-#lock_dt_flush 2
-#lock_dt_flushlocal 2
-#lockall_dt 4 timeLimit=240
-#lockall_dt_flush 4 timeLimit=240
-#lockall_dt_flushall 4 timeLimit=240
-#lockall_dt_flushlocal 4 timeLimit=240
-#lockall_dt_flushlocalall 4 timeLimit=240
-#lock_contention_dt 4 timeLimit=240
+lock_dt 2
+lock_dt_flush 2
+lock_dt_flushlocal 2
+lockall_dt 4 timeLimit=240
+lockall_dt_flush 4 timeLimit=240
+lockall_dt_flushall 4 timeLimit=240
+lockall_dt_flushlocal 4 timeLimit=240
+lockall_dt_flushlocalall 4 timeLimit=240
+lock_contention_dt 4 timeLimit=240
transpose4 2
#fetchandadd 7
#fetchandadd_tree 7
contig_displ 1
test1_am 2
test2_am 2
-#test3_am 2
-#test4_am 2
+test3_am 2
+test4_am 2
test5_am 2
#fetchandadd_am 7
#fetchandadd_tree_am 7
#mixedsync 4
epochtest 3
locknull 2
-#Needs MPI_Rput, rget, racumulate, MPI_Fetch_and_op, MPI_Compare_and_swap
-#rmanull 2
-#rmazero 2
+rmanull 2
+rmazero 2
strided_acc_indexed 2
strided_acc_onelock 2
#needs MPI_Type_create_subarray
strided_get_indexed 2
strided_putget_indexed 4
#strided_putget_indexed_shared 4 mpiversion=3.0
-#strided_getacc_indexed 4 mpiversion=3.0
+strided_getacc_indexed 4
#strided_getacc_indexed_shared 4 mpiversion=3.0
window_creation 2
contention_put 4
contention_putget 4
-#put_base 2
-#put_bottom 2
+put_base 2
+put_bottom 2
#win_flavors 4 mpiversion=3.0
#win_flavors 3 mpiversion=3.0
-#manyrma2 2 timeLimit=500
+manyrma2 2 timeLimit=500
manyrma3 2
#win_shared 4 mpiversion=3.0
#win_shared_create_allocshm 4 mpiversion=3.0
#win_shared_noncontig 4 mpiversion=3.0
#win_shared_noncontig_put 4 mpiversion=3.0
#win_zero 4 mpiversion=3.0
-#win_dynamic_acc 4 mpiversion=3.0
-#get_acc_local 1 mpiversion=3.0
+win_dynamic_acc 4
+get_acc_local 1
+#Issues with concurrent updates.
#linked_list 4 mpiversion=3.0
#linked_list_fop 4 mpiversion=3.0
-#compare_and_swap 4 mpiversion=3.0
+compare_and_swap 4
+fetch_and_op 4
#fetch_and_op_char 4 mpiversion=3.0
#fetch_and_op_short 4 mpiversion=3.0
#fetch_and_op_int 4 mpiversion=3.0
#fetch_and_op_long_double 4 mpiversion=3.0
#get_accumulate_double 4 mpiversion=3.0
#get_accumulate_double_derived 4 mpiversion=3.0
-#get_accumulate_int 4 mpiversion=3.0
+get_accumulate 4
#get_accumulate_int_derived 4 mpiversion=3.0
#get_accumulate_long 4 mpiversion=3.0
#get_accumulate_long_derived 4 mpiversion=3.0
#get_accumulate_short 4 mpiversion=3.0
#get_accumulate_short_derived 4 mpiversion=3.0
-#flush 4 mpiversion=3.0
-#reqops 4 mpiversion=3.0
-#req_example 4 mpiversion=3.0
-#win_info 4 mpiversion=3.0
+flush 4
+#reqops 4
+req_example 4
+rput_local_comp 2 mpiversion=3.0
+racc_local_comp 2 mpiversion=3.0
+win_info 4
+#Issues with concurrent updates.
#linked_list_lockall 4 mpiversion=3.0
-#pscw_ordering 4 mpiversion=3.0
-#linked_list_bench_lock_all 4 mpiversion=3.0
-#linked_list_bench_lock_excl 4 mpiversion=3.0
+pscw_ordering 4
+linked_list_bench_lock_all 4
+linked_list_bench_lock_excl 4 mpiversion=3.0
#linked_list_bench_lock_shr 4 mpiversion=3.0
#linked_list_bench_lock_shr_nocheck 4 mpiversion=3.0
#mutex_bench_shm 4 mpiversion=3.0
#mutex_bench_shm_ordered 4 mpiversion=3.0
-#rma-contig 2 mpiversion=3.0 timeLimit=720
-#badrma 2 mpiversion=3.0
-#acc-loc 4
+rma-contig 2 timeLimit=720
+badrma 2
+acc-loc 4
#fence_shm 2 mpiversion=3.0
#mutex_bench 4 mpiversion=3.0
#mutex_bench_shared 4 mpiversion=3.0
win_shared_zerobyte 4 mpiversion=3.0
win_shared_put_flush_get 4 mpiversion=3.0
-#get-struct 2
+get-struct 2
at_complete 2
#atomic_rmw_fop 3
#atomic_rmw_cas 3
#gacc_flush_get 3 mpiversion=3.0
#fop_flush_get 3 mpiversion=3.0
#cas_flush_get 3 mpiversion=3.0
-#rget-unlock 2 mpiversion=3.0
+#We still have an issue here: unlock should complete outstanding R* (request-based) calls, but doing so currently causes issues.
+#rget-unlock 2
#overlap_wins_put 3
#overlap_wins_acc 3
#overlap_wins_gacc 3
/* Test#1: setting a valid key at window-create time */
MPI_Info_create(&info_in);
- MPI_Info_set(info_in, "no_locks", "true");
+ MPI_Info_set(info_in, (char *)"no_locks", (char *)"true");
MPI_Win_allocate(sizeof(int), sizeof(int), info_in, MPI_COMM_WORLD, &base, &win);
MPI_Win_get_info(win, &info_out);
- MPI_Info_get(info_out, "no_locks", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"no_locks", MPI_MAX_INFO_VAL, buf, &flag);
if (!flag || strncmp(buf, "true", strlen("true")) != 0) {
if (!flag)
printf("%d: no_locks is not defined\n", rank);
/* Test#2: setting and getting invalid key */
MPI_Info_create(&info_in);
- MPI_Info_set(info_in, invalid_key, "true");
+ MPI_Info_set(info_in, invalid_key, (char *)"true");
MPI_Win_set_info(win, info_in);
MPI_Win_get_info(win, &info_out);
MPI_Info_get(info_out, invalid_key, MPI_MAX_INFO_VAL, buf, &flag);
-#ifndef USE_STRICT_MPI
+#ifdef USE_STRICT_MPI
/* Check if our invalid key was ignored. Note, this check's MPICH's
* behavior, but this behavior may not be required for a standard
* conforming MPI implementation. */
/* Test#3: setting info key "no_lock" to false and getting the key */
MPI_Info_create(&info_in);
- MPI_Info_set(info_in, "no_locks", "false");
+ MPI_Info_set(info_in, (char *)"no_locks", (char *)"false");
MPI_Win_set_info(win, info_in);
MPI_Win_get_info(win, &info_out);
- MPI_Info_get(info_out, "no_locks", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"no_locks", MPI_MAX_INFO_VAL, buf, &flag);
if (!flag || strncmp(buf, "false", strlen("false")) != 0) {
if (!flag)
printf("%d: no_locks is not defined\n", rank);
/* Test#4: setting info key "no_lock" to true and getting the key */
MPI_Info_create(&info_in);
- MPI_Info_set(info_in, "no_locks", "true");
+ MPI_Info_set(info_in, (char *)"no_locks", (char *)"true");
MPI_Win_set_info(win, info_in);
MPI_Win_get_info(win, &info_out);
- MPI_Info_get(info_out, "no_locks", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"no_locks", MPI_MAX_INFO_VAL, buf, &flag);
if (!flag || strncmp(buf, "true", strlen("true")) != 0) {
if (!flag)
printf("%d: no_locks is not defined\n", rank);
MPI_Win_get_info(win, &info_out);
- MPI_Info_get(info_out, "accumulate_ordering", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"accumulate_ordering", MPI_MAX_INFO_VAL, buf, &flag);
if (flag && VERBOSE)
printf("%d: accumulate_ordering = %s\n", rank, buf);
- MPI_Info_get(info_out, "accumulate_ops", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"accumulate_ops", MPI_MAX_INFO_VAL, buf, &flag);
if (flag && VERBOSE)
printf("%d: accumulate_ops = %s\n", rank, buf);
- MPI_Info_get(info_out, "same_size", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"same_size", MPI_MAX_INFO_VAL, buf, &flag);
if (flag && VERBOSE)
printf("%d: same_size = %s\n", rank, buf);
- MPI_Info_get(info_out, "alloc_shm", MPI_MAX_INFO_VAL, buf, &flag);
+ MPI_Info_get(info_out, (char *)"alloc_shm", MPI_MAX_INFO_VAL, buf, &flag);
if (flag && VERBOSE)
printf("%d: alloc_shm = %s\n", rank, buf);
# skip empty lines
if ($programname eq "") { next; }
- #if privatization is disabled, and if the test needs it, ignore it
+ # if privatization is disabled, and if the test needs it, ignore it
if ($needs_privatization == 1 &&
$enabled_privatization != 1) {
SkippedTest($programname, $np, $workdir, "requires SMPI privatization");
p Test dsend
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [Jupiter:1:(2) 0.000000] [dsend/INFO] rank 1: data exchanged
> [Tremblay:0:(1) 0.005890] [dsend/INFO] rank 0: data exchanged
> [rank 0] -> Tremblay
p process 2 will finish at 0.5+2*4 (time before first send) + 2*(1+0.5*4) (recv+irecv) + 0.005890 (network time, same as before) = 14.505890s
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --cfg=smpi/or:0:1:0.5 --cfg=smpi/os:0:0.5:2 --cfg=smpi/ois:0:1:0.1
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/ois' to '0:1:0.1'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/or' to '0:1:0.5'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/os' to '0:0.5:2'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/pt2pt-dsend -q --log=smpi_kernel.thres:warning --cfg=smpi/or:0:1:0.5 --cfg=smpi/os:0:0.5:2 --cfg=smpi/ois:0:1:0.1 --log=xbt_cfg.thres:warning
> [Jupiter:1:(2) 9.900000] [dsend/INFO] rank 1: data exchanged
> [Tremblay:0:(1) 14.505890] [dsend/INFO] rank 0: data exchanged
> [rank 0] -> Tremblay
p generate a trace with pingpong, and replay itself, then check that output trace of the second run is the same as in the first (once sorted)
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
>
>
>
> == pivot=0 : pingpong [0] <--> [1]
> == pivot=1 : pingpong [1] <--> [2]
> == pivot=2 : pingpong [2] <--> [3]
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_simgrid.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/format' to 'TI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/computing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'out_in_ti.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
> [0] About to send 1st message '99' to process [1]
> [0] Received reply message '100' from process [1]
> [1] About to send 1st message '100' to process [2]
> [rank 2] -> Fafard
> [rank 3] -> Ginette
-$ ${bindir:=.}/../../../bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=smpi/simulate-computation:no -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
> [rank 3] -> Ginette
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_simgrid.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/format' to 'TI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/computing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'out_ti.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
> [Jupiter:1:(2) 0.016798] [smpi_replay/INFO] Simulation time 0.016798
! output sort
p Same test, but only using one output file for all processes
p generate a trace with pingpong, and replay itself, then check that output trace of the second run is the same as in the first (once sorted)
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -trace-ti --cfg=tracing/filename:out_in_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
>
>
>
> == pivot=0 : pingpong [0] <--> [1]
> == pivot=1 : pingpong [1] <--> [2]
> == pivot=2 : pingpong [2] <--> [3]
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_simgrid.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/format' to 'TI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/computing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'out_in_ti.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/format/ti-one-file' to 'yes'
> [0] About to send 1st message '99' to process [1]
> [0] Received reply message '100' from process [1]
> [1] About to send 1st message '100' to process [2]
> [rank 2] -> Fafard
> [rank 3] -> Ginette
-$ ${bindir:=.}/../../../bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -ext smpi_replay --log=replay.:critical -trace-ti --cfg=tracing/filename:out_ti.txt --cfg=tracing/smpi/format/ti-one-file:yes -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/../../../examples/smpi/replay/smpi_replay ./out_in_ti.txt --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
> [rank 3] -> Ginette
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'smpi_simgrid.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/format' to 'TI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/computing' to 'yes'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/filename' to 'out_ti.txt'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'tracing/smpi/format/ti-one-file' to 'yes'
> [Jupiter:1:(2) 0.016798] [smpi_replay/INFO] Simulation time 0.016798
$ rm -rf ./out_ti.txt_files
! setenv LD_LIBRARY_PATH=../../lib
! expect return 1
-$ ${bindir:=.}/../../../bin/smpirun -hostfile ../hostfile_empty -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile ../hostfile_empty -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
> [smpirun] ** error: the hostfile '../hostfile_empty' is empty. Aborting.
! expect return 1
-$ ${bindir:=.}/../../../bin/smpirun -hostfile hostfile-does-not-exist.txt -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -hostfile hostfile-does-not-exist.txt -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
> [smpirun] ** error: the file 'hostfile-does-not-exist.txt' does not exist. Aborting.
p Test pingpong
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 ${bindir:=.}/pt2pt-pingpong -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> *** Ping-pong test (MPI_Send/MPI_Recv) ***
>
>
> == pivot=0 : pingpong [0] <--> [1]
> == pivot=1 : pingpong [1] <--> [2]
> == pivot=2 : pingpong [2] <--> [3]
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
> [0] About to send 1st message '99' to process [1]
> [0] Received reply message '100' from process [1]
> [1] About to send 1st message '100' to process [2]
p Test timers
! setenv LD_LIBRARY_PATH=../../lib
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_kernel.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100000
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 1 ${bindir:=.}/timers -q --log=smpi_kernel.thres:warning --cfg=smpi/simulate-computation:no --cfg=smpi/host-speed:100000 --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/simulate-computation' to 'no'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'smpi/host-speed' to '100000'
p Test hvector
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-hvector -q --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-hvector -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> rank= 0, a[0][0]=0.000000
p Test indexed
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-indexed -q --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-indexed -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> buffer[0] = 0
p Test struct
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-struct -q --log=smpi_kernel.thres:warning
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-struct -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
> Process 0 got -2 (-2?) and 8.000000 (8.0?), tab (should be all 0): 0 0 0 0 0 0
> Process 1 got -2 (-2?) and 8.000000 (8.0?), tab (should be all 0): 0 0 0 0 0 0
p Test vector
! setenv LD_LIBRARY_PATH=../../lib
! output sort
-$ ${bindir:=.}/../../../bin/smpirun -map -hostfile ../hostfile -platform ${srcdir:=.}/../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-vector -q --log=smpi_kernel.thres:warning
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'surf/precision' to '1e-9'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'SMPI'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/TCP-gamma' to '4194304'
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ${srcdir:=.}/../../../examples/platforms/small_platform.xml -np 2 ${bindir:=.}/type-vector -q --log=smpi_kernel.thres:warning --log=xbt_cfg.thres:warning
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> rank= 0, a[0][0]=0.000000
xbt_free(A);
}
-int main(int argc, char **argv)
+int main()
{
- surf_init(&argc, argv);
-
XBT_INFO("***** Test 1 (Max-Min)");
test1(MAXMIN);
XBT_INFO("***** Test 1 (Lagrange - Vegas)");
XBT_INFO("***** Test 3 (Lagrange - Reno)");
test3(LAGRANGE_RENO);
- surf_exit();
return 0;
}
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "src/include/surf/surf.h"
#include "surf/maxmin.h"
#include "xbt/module.h"
#include "xbt/xbt_os_time.h"
//Otherwise, just set it to a constant value (and set rate_no_limit to 1.0):
//nb_elem=200
- surf_init(&argc, argv);
-
for(int i=0;i<testcount;i++){
seedx=i+1;
fprintf(stderr, "Starting %i: (%i)\n",i,myrand()%1000);
testcount,nb_cnst, nb_var, nb_elem, (1<<pw_base_limit), (1<<pw_base_limit)+(1<<pw_max_limit), max_share);
if(mode==3)
fprintf(stderr, "Execution time: %g +- %g microseconds \n",mean_date, stdev_date);
-
- surf_exit();
+
return 0;
}
! expect return 0
! output sort
$ $SG_TEST_EXENV ${bindir:=.}/maxmin_bench medium 5 test
-> [0.000000]: [surf_kernel/DEBUG] Add SURF levels
-> [0.000000]: [surf_kernel/DEBUG] Create all Libs
> [0.000000]: [surf_maxmin/DEBUG] Setting selective_update_active flag to 1
> [0.000000]: [surf_maxmin/DEBUG] Active constraints : 100
> [0.000000]: [surf_maxmin/DEBUG] Constraint '98' usage: 13.060939 remaining: 3.166833 concurrency: 7<=8<=10
> Starting 4: (35)
> Starting to solve(210)
> 5x One shot execution time for a total of 100 constraints, 100 variables with 24 active constraint each, concurrency in [8,72] and max concurrency share 2
-
! expect return 0
! output sort
$ $SG_TEST_EXENV ${bindir:=.}/maxmin_bench small 10 test
-> [0.000000]: [surf_kernel/DEBUG] Add SURF levels
-> [0.000000]: [surf_kernel/DEBUG] Create all Libs
> [0.000000]: [surf_maxmin/DEBUG] Setting selective_update_active flag to 1
> [0.000000]: [surf_maxmin/DEBUG] Active constraints : 10
> [0.000000]: [surf_maxmin/DEBUG] Constraint '9' usage: 4.703796 remaining: 7.082917 concurrency: 2<=2<=-1
> Starting 9: (70)
> Starting to solve(117)
> 10x One shot execution time for a total of 10 constraints, 10 variables with 4 active constraint each, concurrency in [2,6] and max concurrency share 2
-
/* A few basic tests for the surf library */
-/* Copyright (c) 2004-2015. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "surf/surf.h"
#include "simgrid/host.h"
-#include "simgrid/s4u/host.hpp"
-#include "simgrid/sg_config.h"
#include "src/surf/cpu_interface.hpp"
#include "src/surf/network_interface.hpp"
-#include "src/surf/surf_interface.hpp"
-#include <stdio.h>
-#include "xbt/log.h"
XBT_LOG_NEW_DEFAULT_CATEGORY(surf_test, "Messages specific for surf example");
static const char *string_action(simgrid::surf::Action::State state)
/* A few basic tests for the surf library */
-/* Copyright (c) 2004-2015. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2004-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "surf/surf.h"
#include "simgrid/host.h"
-#include "simgrid/s4u/host.hpp"
-#include "simgrid/sg_config.h"
#include "src/surf/cpu_interface.hpp"
#include "src/surf/network_interface.hpp"
-#include "src/surf/surf_interface.hpp"
-#include "xbt/log.h"
XBT_LOG_NEW_DEFAULT_CATEGORY(surf_test, "Messages specific for surf example");
int main(int argc, char **argv)
endif()
if(NOT enable_memcheck)
- set(DART_TESTING_TIMEOUT "300") #TIMEOUT FOR EACH TEST
+ set(DART_TESTING_TIMEOUT "500") #TIMEOUT FOR EACH TEST
else()
set(DART_TESTING_TIMEOUT "3000") #TIMEOUT FOR EACH TEST
endif()
src/smpi/instr_smpi.cpp
src/smpi/smpi_bench.cpp
src/smpi/smpi_memory.cpp
+ src/smpi/smpi_shared.cpp
src/smpi/smpi_static_variables.cpp
src/smpi/smpi_coll.cpp
src/smpi/smpi_coll.hpp
src/smpi/smpi_f2c.hpp
src/smpi/smpi_group.cpp
src/smpi/smpi_group.hpp
+ src/smpi/SmpiHost.cpp
+ src/smpi/SmpiHost.hpp
src/smpi/smpi_mpi.cpp
src/smpi/smpi_datatype.cpp
src/smpi/smpi_datatype.hpp
src/smpi/smpi_keyvals.hpp
src/smpi/smpi_datatype_derived.cpp
src/smpi/smpi_datatype_derived.hpp
+ src/smpi/smpi_main.c
src/smpi/smpi_op.cpp
src/smpi/smpi_op.hpp
src/smpi/smpi_process.cpp
include/simgrid/s4u/forward.hpp
include/simgrid/s4u/Activity.hpp
include/simgrid/s4u/Actor.hpp
- include/simgrid/s4u/comm.hpp
- include/simgrid/s4u/conditionVariable.hpp
- include/simgrid/s4u/engine.hpp
- include/simgrid/s4u/file.hpp
- include/simgrid/s4u/host.hpp
+ include/simgrid/s4u/Comm.hpp
+ include/simgrid/s4u/ConditionVariable.hpp
+ include/simgrid/s4u/Engine.hpp
+ include/simgrid/s4u/File.hpp
+ include/simgrid/s4u/Host.hpp
include/simgrid/s4u/Link.hpp
include/simgrid/s4u/Mailbox.hpp
include/simgrid/s4u/Mutex.hpp
include/simgrid/s4u/NetZone.hpp
- include/simgrid/s4u/storage.hpp
+ include/simgrid/s4u/Storage.hpp
include/simgrid/s4u/VirtualMachine.hpp
include/simgrid/s4u.hpp
include/simgrid/plugins/energy.h
include/smpi/smpi_extended_traces.h
include/smpi/smpi_extended_traces_fortran.h
include/smpi/forward.hpp
+ include/smpi/smpi_shared_malloc.hpp
include/surf/surf_routing.h
include/xbt.h
include/xbt/RngStream.h
teshsuite/java/CMakeLists.txt
teshsuite/mc/CMakeLists.txt
teshsuite/msg/CMakeLists.txt
+ teshsuite/s4u/CMakeLists.txt
teshsuite/simdag/CMakeLists.txt
teshsuite/simix/CMakeLists.txt
teshsuite/smpi/CMakeLists.txt
${CMAKE_BINARY_DIR}/bin/smpicc
${CMAKE_BINARY_DIR}/bin/smpicxx
${CMAKE_BINARY_DIR}/bin/smpirun
+ ${CMAKE_BINARY_DIR}/bin/smpimain
DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
if(SMPI_FORTRAN)
install(PROGRAMS
endif()
endif()
+if(enable_model-checking)
+ install(
+ PROGRAMS ${CMAKE_BINARY_DIR}/bin/simgrid-mc
+ DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
+endif()
+
install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/tesh DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/graphicator DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
COMMAND ${CMAKE_COMMAND} -E remove ${PROJECT_NAME}-${release_version}.tar.gz
COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_NAME}-${release_version}
COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_NAME}-${release_version}/doc/html/
- COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_HOME_DIRECTORY}/doc/html/ ${PROJECT_NAME}-${release_version}/doc/html/)
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_HOME_DIRECTORY}/doc/html/ ${PROJECT_NAME}-${release_version}/doc/html/
+ COMMAND rm -f `grep -rl " Reference" ${PROJECT_NAME}-${release_version}/doc/html/` # Doxygen, go away
+ COMMAND rm -f `grep -rl "Member List" ${PROJECT_NAME}-${release_version}/doc/html/` # Doxygen, you're getting annoying
+ )
add_dependencies(dist-dir maintainer_files)
set(dirs_in_tarball "")
endif()
endif()
-if(HAVE_MC AND HAVE_GNU_LD)
+if(HAVE_MC AND HAVE_GNU_LD AND NOT ${DL_LIBRARY} STREQUAL "")
SET(SIMGRID_DEP "${SIMGRID_DEP} ${DL_LIBRARY}")
endif()
# Compute the dependencies of SMPI
##################################
+
+if(enable_smpi)
+ if(NOT ${DL_LIBRARY} STREQUAL "")
+ set(SIMGRID_DEP "${SIMGRID_DEP} ${DL_LIBRARY}") # for privatization
+ endif()
+ add_executable(smpimain src/smpi/smpi_main.c)
+ target_link_libraries(smpimain simgrid)
+ set_target_properties(smpimain
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+endif()
+
if(enable_smpi AND APPLE)
set(SIMGRID_DEP "${SIMGRID_DEP} -Wl,-U -Wl,_smpi_simulated_main")
endif()
ENDIF()
IF(enable_smpi_MPICH3_testsuite AND SMPI_FORTRAN AND HAVE_THREAD_CONTEXTS)
- ADD_TEST(test-smpi-mpich3-thread-f77 ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f77/ ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f77/ -tests=testlist -privatization=${HAVE_PRIVATIZATION} -execarg=--cfg=contexts/stack_size:8000 -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION})
+ ADD_TEST(test-smpi-mpich3-thread-f77 ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f77/ ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f77/ -tests=testlist -privatization=${HAVE_PRIVATIZATION} -execarg=--cfg=contexts/stack_size:8000 -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION})
SET_TESTS_PROPERTIES(test-smpi-mpich3-thread-f77 PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
- ADD_TEST(test-smpi-mpich3-thread-f90 ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f90/ ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f90/ -tests=testlist -privatization=${HAVE_PRIVATIZATION} -execarg=--cfg=smpi/privatize-global-variables:${HAVE_PRIVATIZATION} -execarg=--cfg=contexts/factory:thread)
+ ADD_TEST(test-smpi-mpich3-thread-f90 ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f90/ ${PERL_EXECUTABLE} ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests ${TESH_OPTION} -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f90/ -tests=testlist -privatization=${HAVE_PRIVATIZATION} -execarg=--cfg=smpi/privatization:${HAVE_PRIVATIZATION} -execarg=--cfg=contexts/factory:thread)
SET_TESTS_PROPERTIES(test-smpi-mpich3-thread-f90 PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
ENDIF()
#cmakedefine01 HAVE_PRIVATIZATION
/* We have PAPI to fine-grain trace execution time */
#cmakedefine01 HAVE_PAPI
+/* We have sendfile to efficiently copy files for dl-open privatization */
+#cmakedefine01 HAVE_SENDFILE
/* Other function checks */
/* Function backtrace */
- Makefile
- doc/html/.*
++ BuildSimGrid.sh
+ \.classpath
+ \.cproject
+ \.project