teshsuite/smpi/type-struct/type-struct
teshsuite/smpi/type-vector/type-vector
teshsuite/s4u/actor/actor
+teshsuite/s4u/comm-pt2pt/comm-pt2pt
teshsuite/s4u/concurrent_rw/concurrent_rw
teshsuite/s4u/host_on_off_wait/host_on_off_wait
teshsuite/s4u/listen_async/listen_async
XML platforms: Switch to platform v4.1 format.
* This is (mainly) a backward compatible change: v4 are valid v4.1 files
- <zone> can be used as a synonym for the now deprecated <as>
- - <zoneZoute> an be used as a synonym for the now deprecated <asroute>
+ - <zoneRoute> can be used as a synonym for the now deprecated <asroute>
- <bypassZoneRoute> can be used as a synonym for the now deprecated <bypassAsRoute>
- <actor> can be used as a synonym for the now deprecated <process>
- state_file and avail_file periodicity is now easier to express
* Histogram entry for each measured block
* Each entry is guarded inside xbt dictionary which is read from the file */
typedef struct xbt_hist {
- int n;
- int counts;
- double mean;
- double *breaks;
- double *percentage;
- char* block_id;
+ int n;
+ int counts;
+ double mean;
+ double* breaks;
+ double* percentage;
+ char* block_id;
} xbt_hist_t;
extern RngStream get_randgen(void);
/* Initializing xbt dictionary for SMPI version, reading xbt_hist_t entries line by line */
static inline void xbt_inject_init(char *inputfile)
{
- xbt_dict_t mydict = get_dict();
- FILE* fpInput = fopen(inputfile, "r");
- if (fpInput == NULL)
- printf("Error while opening the inputfile");
- fseek(fpInput, 0, 0);
-
- char line[200];
- char *key;
- int i;
- xbt_hist_t* data;
-
- if (fgets(line, 200, fpInput) == NULL)
- printf("Error input file is empty!");//Skipping first row
- while (fgets(line, 200, fpInput) != NULL)
- {
- key = strtok(line, "\t");
-
- data = xbt_dict_get_or_null(mydict, key);
- if (data)
- printf("Error, data with that block_id already exists!");
-
- data = (xbt_hist_t *) xbt_new(xbt_hist_t, 1);
-
- data->block_id = key;
- data->counts = atoi(strtok(NULL, "\t"));
- data->mean = atof(strtok(NULL, "\t"));
- data->n = atoi(strtok(NULL, "\t"));
-
- data->breaks = (double*) malloc(sizeof(double) * data->n);
- data->percentage = (double*) malloc(sizeof(double) * (data->n - 1));
- for (i = 0; i < data->n; i++)
- data->breaks[i] = atof(strtok(NULL, "\t"));
- for (i = 0; i < (data->n - 1); i++)
- data->percentage[i] = atof(strtok(NULL, "\t"));
-
- xbt_dict_set(mydict, key, data, NULL);
- }
+ xbt_dict_t mydict = get_dict();
+  FILE* fpInput = fopen(inputfile, "r");
+  if (fpInput == NULL) {
+    printf("Error while opening the inputfile");
+    return;
+  }
+  fseek(fpInput, 0, 0);
+
+ char line[200];
+ char* key;
+
+ if (fgets(line, 200, fpInput) == NULL)
+ printf("Error input file is empty!"); // Skipping first row
+ while (fgets(line, 200, fpInput) != NULL) {
+ key = strtok(line, "\t");
+
+ xbt_hist_t* data = xbt_dict_get_or_null(mydict, key);
+ if (data)
+ printf("Error, data with that block_id already exists!");
+
+ data = (xbt_hist_t*)xbt_new(xbt_hist_t, 1);
+
+    data->block_id = xbt_strdup(key); /* key points into the stack buffer 'line'; keep a copy */
+ data->counts = atoi(strtok(NULL, "\t"));
+ data->mean = atof(strtok(NULL, "\t"));
+ data->n = atoi(strtok(NULL, "\t"));
+
+ data->breaks = (double*)malloc(sizeof(double) * data->n);
+ data->percentage = (double*)malloc(sizeof(double) * (data->n - 1));
+ for (int i = 0; i < data->n; i++)
+ data->breaks[i] = atof(strtok(NULL, "\t"));
+ for (int i = 0; i < (data->n - 1); i++)
+ data->percentage[i] = atof(strtok(NULL, "\t"));
+
+ xbt_dict_set(mydict, key, data, NULL);
+ }
+  fclose(fpInput);
}
/* Initializing xbt dictionary for StarPU version, reading xbt_hist_t entries line by line */
static inline void inject_init_starpu(char *inputfile, xbt_dict_t *dict, RngStream *rng)
{
- *dict = xbt_dict_new_homogeneous(free);
- *rng = RngStream_CreateStream("Randgen1");
- unsigned long seed[] = {134, 233445, 865, 2634, 424242, 876541};
- RngStream_SetSeed(*rng, seed);
-
- xbt_dict_t mydict = *dict;
- mydict = *dict;
- FILE* fpInput = fopen(inputfile, "r");
- if (fpInput == NULL)
- {
- printf("Error while opening the inputfile");
- return;
- }
-
- fseek(fpInput, 0, 0);
-
- char line[MAX_LINE_INJ];
- char *key;
- int i;
- xbt_hist_t* data;
-
- if (fgets(line, MAX_LINE_INJ, fpInput) == NULL)
- {
- printf("Error input file is empty!");//Skipping first row
- return;
- }
-
-
- while (fgets(line, MAX_LINE_INJ, fpInput) != NULL)
- {
- key = strtok(line, "\t");
-
- data = xbt_dict_get_or_null(mydict, key);
- if (data)
- printf("Error, data with that block_id already exists!");
-
- data = (xbt_hist_t *) xbt_new(xbt_hist_t, 1);
- data->block_id = key;
- data->counts = atoi(strtok(NULL, "\t"));
- data->mean = atof(strtok(NULL, "\t"));
- data->n = atoi(strtok(NULL, "\t"));
- data->breaks = (double*) malloc(sizeof(double) * data->n);
- data->percentage = (double*) malloc(sizeof(double) * (data->n - 1));
-
- for (i = 0; i < data->n; i++)
- data->breaks[i] = atof(strtok(NULL, "\t"));
- for (i = 0; i < (data->n - 1); i++)
- {
- data->percentage[i] = atof(strtok(NULL, "\t"));
- }
-
- xbt_dict_set(mydict, key, data, NULL);
- }
+ *dict = xbt_dict_new_homogeneous(free);
+ *rng = RngStream_CreateStream("Randgen1");
+ unsigned long seed[] = {134, 233445, 865, 2634, 424242, 876541};
+ RngStream_SetSeed(*rng, seed);
+
+ xbt_dict_t mydict = *dict;
+ FILE* fpInput = fopen(inputfile, "r");
+ if (fpInput == NULL) {
+ printf("Error while opening the inputfile");
+ return;
+ }
+
+ fseek(fpInput, 0, 0);
+
+ char line[MAX_LINE_INJ];
+ char* key;
+
+ if (fgets(line, MAX_LINE_INJ, fpInput) == NULL) {
+ printf("Error input file is empty!"); // Skipping first row
+ return;
+ }
+
+ while (fgets(line, MAX_LINE_INJ, fpInput) != NULL) {
+ key = strtok(line, "\t");
+
+ xbt_hist_t* data = xbt_dict_get_or_null(mydict, key);
+ if (data)
+ printf("Error, data with that block_id already exists!");
+
+ data = (xbt_hist_t*)xbt_new(xbt_hist_t, 1);
+    data->block_id = xbt_strdup(key); /* key points into the stack buffer 'line'; keep a copy */
+ data->counts = atoi(strtok(NULL, "\t"));
+ data->mean = atof(strtok(NULL, "\t"));
+ data->n = atoi(strtok(NULL, "\t"));
+ data->breaks = (double*)malloc(sizeof(double) * data->n);
+ data->percentage = (double*)malloc(sizeof(double) * (data->n - 1));
+
+ for (int i = 0; i < data->n; i++)
+ data->breaks[i] = atof(strtok(NULL, "\t"));
+ for (int i = 0; i < (data->n - 1); i++) {
+ data->percentage[i] = atof(strtok(NULL, "\t"));
+ }
+
+ xbt_dict_set(mydict, key, data, NULL);
+ }
+  fclose(fpInput);
}
/* Injecting time */
static inline double xbt_inject_time(char *key)
{
- return xbt_hist_time(key);
- //return xbt_mean_time(key);
+ return xbt_hist_time(key);
+ // return xbt_mean_time(key);
}
/* Injecting mean value */
static inline double xbt_mean_time(char *key)
{
- xbt_dict_t mydict = get_dict();
- xbt_hist_t* data = xbt_dict_get_or_null(mydict, key);
+ xbt_dict_t mydict = get_dict();
+ xbt_hist_t* data = xbt_dict_get_or_null(mydict, key);
- if (!data)
- {
- printf("Warning: element with specified key does not exist (%s)\n",key);
- return 0;
- }
+ if (!data) {
+ printf("Warning: element with specified key does not exist (%s)\n", key);
+ return 0;
+ }
- return data->mean;
+ return data->mean;
}
/* Injecting random value from the histogram */
static inline double xbt_hist_time(char *key)
{
- int i, k = 0;
- double left = 0, right = 1;
- double timer = 0;
- RngStream rng_stream;
- double r, r2;
-
- xbt_dict_t mydict = get_dict();
- xbt_hist_t* data = xbt_dict_get_or_null(mydict, key);
-
- if (!data)
- {
- printf("Warning: element with specified key does not exist (%s)\n",key);
- return 0;
- }
-
- /* Choosing random interval of the histogram */
- rng_stream = get_randgen();
- r = RngStream_RandU01(rng_stream);
- for (i = 0; i < (data->n - 1); i++)
- {
- left += (i == 0) ? 0 : data->percentage[i - 1];
- right += data->percentage[i];
- if (left < r && r <= right)
- k = i;
- }
-
- /* Choosing random value inside the interval of the histogram */
- r2 = RngStream_RandU01(rng_stream);
- timer = data->breaks[k] + r2 * (data->breaks[k + 1] - data->breaks[k]);
-
- return timer;
+ xbt_dict_t mydict = get_dict();
+ xbt_hist_t* data = xbt_dict_get_or_null(mydict, key);
+
+ if (!data) {
+ printf("Warning: element with specified key does not exist (%s)\n", key);
+ return 0;
+ }
+
+ /* Choosing random interval of the histogram */
+ RngStream rng_stream = get_randgen();
+ double r = RngStream_RandU01(rng_stream);
+ int k = 0;
+ double left = 0;
+ double right = 1;
+ for (int i = 0; i < (data->n - 1); i++) {
+ left += (i == 0) ? 0 : data->percentage[i - 1];
+ right += data->percentage[i];
+ if (left < r && r <= right)
+ k = i;
+ }
+
+ /* Choosing random value inside the interval of the histogram */
+ double r2 = RngStream_RandU01(rng_stream);
+ double timer = data->breaks[k] + r2 * (data->breaks[k + 1] - data->breaks[k]);
+
+ return timer;
}
#endif // __INJECT_H__
-/* Copyright (c) 2010-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
for (int i = 0; i < receivers_count; i++) {
char mailbox[80];
- snprintf(mailbox,79, "receiver-%ld", i % receivers_count);
+ snprintf(mailbox, 79, "receiver-%d", i);
msg_task_t task = MSG_task_create("finalize", 0, 0, 0);
msg_comm_t comm = MSG_task_isend(task, mailbox);
- XBT_INFO("Send to receiver-%ld finalize", i % receivers_count);
+ XBT_INFO("Send to receiver-%d finalize", i);
if (sleep_test_time > 0) {
while (MSG_comm_test(comm) == 0) {
MSG_process_sleep(sleep_test_time);
p Testing the mechanism for computing host energy consumption
$ ${bindir:=.}/energy-consumption/energy-consumption$EXEEXT ${srcdir:=.}/../platforms/energy_platform.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:dvfs_test@MyHost1) Energetic profile: 100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0
+> [ 0.000000] (1:dvfs_test@MyHost1) Energetic profile: 100.0:120.0:120.0, 93.0:110.0:110.0, 90.0:105.0:105.0
> [ 0.000000] (1:dvfs_test@MyHost1) Initial peak speed=1E+08 flop/s; Energy dissipated =0E+00 J
> [ 0.000000] (1:dvfs_test@MyHost1) Sleep for 10 seconds
> [ 10.000000] (1:dvfs_test@MyHost1) Done sleeping (duration: 10.00 s). Current peak speed=1E+08; Energy dissipated=1000.00 J
> [ 10.000000] (1:dvfs_test@MyHost1) Run a task of 1E+08 flops
-> [ 11.000000] (1:dvfs_test@MyHost1) Task done (duration: 1.00 s). Current peak speed=1E+08 flop/s; Current consumption: from 120W to 200W depending on load; Energy dissipated=1120 J
+> [ 11.000000] (1:dvfs_test@MyHost1) Task done (duration: 1.00 s). Current peak speed=1E+08 flop/s; Current consumption: from 120W to 120W depending on load; Energy dissipated=1120 J
> [ 11.000000] (1:dvfs_test@MyHost1) ========= Requesting pstate 2 (speed should be of 2E+07 flop/s and is of 2E+07 flop/s)
> [ 11.000000] (1:dvfs_test@MyHost1) Run a task of 1E+08 flops
> [ 16.000000] (1:dvfs_test@MyHost1) Task done (duration: 5.00 s). Current peak speed=2E+07 flop/s; Energy dissipated=1645 J
> [ 30.000000] (0:maestro@) Energy consumption of host MyHost3: 3000.000000 Joules
$ ${bindir:=.}/energy-consumption/energy-consumption$EXEEXT ${srcdir:=.}/../platforms/energy_cluster.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:dvfs_test@MyHost1) Energetic profile: 100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0
+> [ 0.000000] (1:dvfs_test@MyHost1) Energetic profile: 100.0:120.0:120.0, 93.0:110.0:110.0, 90.0:105.0:105.0
> [ 0.000000] (1:dvfs_test@MyHost1) Initial peak speed=1E+08 flop/s; Energy dissipated =0E+00 J
> [ 0.000000] (1:dvfs_test@MyHost1) Sleep for 10 seconds
> [ 10.000000] (1:dvfs_test@MyHost1) Done sleeping (duration: 10.00 s). Current peak speed=1E+08; Energy dissipated=1000.00 J
> [ 10.000000] (1:dvfs_test@MyHost1) Run a task of 1E+08 flops
-> [ 11.000000] (1:dvfs_test@MyHost1) Task done (duration: 1.00 s). Current peak speed=1E+08 flop/s; Current consumption: from 120W to 200W depending on load; Energy dissipated=1120 J
+> [ 11.000000] (1:dvfs_test@MyHost1) Task done (duration: 1.00 s). Current peak speed=1E+08 flop/s; Current consumption: from 120W to 120W depending on load; Energy dissipated=1120 J
> [ 11.000000] (1:dvfs_test@MyHost1) ========= Requesting pstate 2 (speed should be of 2E+07 flop/s and is of 2E+07 flop/s)
> [ 11.000000] (1:dvfs_test@MyHost1) Run a task of 1E+08 flops
> [ 16.000000] (1:dvfs_test@MyHost1) Task done (duration: 5.00 s). Current peak speed=2E+07 flop/s; Energy dissipated=1645 J
! output sort 19
$ ${bindir:=.}/energy-onoff/energy-onoff$EXEEXT ${srcdir:=.}/energy-onoff/platform_onoff.xml "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [ 0.000000] (1:onoff_test@MyHost2) Energetic profile: 95.0:120.0:200.0,93.0:110.0:170.0,90.0:100.0:150.0, 120:120:120,110:110:110
+> [ 0.000000] (1:onoff_test@MyHost2) Energetic profile: 95.0:120.0:120.0,93.0:110.0:110.0,90.0:100.0:100.0, 120:120:120,110:110:110
> [ 0.000000] (1:onoff_test@MyHost2) Initial peak speed=1E+08 flop/s; Energy dissipated =0E+00 J
> [ 0.000000] (1:onoff_test@MyHost2) Sleep for 10 seconds
> [ 10.000000] (1:onoff_test@MyHost2) Done sleeping. Current peak speed=1E+08; Energy dissipated=950.00 J
values that are right for you. -->
<host id="MyHost1" speed="100.0Mf,50.0Mf,20.0Mf, 0.006666667f,0.1429f" pstate="0" >
- <prop id="watt_per_state" value="95.0:120.0:200.0,93.0:110.0:170.0,90.0:100.0:150.0, 120:120:120,110:110:110" />
+ <prop id="watt_per_state" value="95.0:120.0:120.0,93.0:110.0:110.0,90.0:100.0:100.0, 120:120:120,110:110:110" />
<prop id="watt_off" value="10" />
</host>
<host id="MyHost2" speed="100.0Mf" >
- <prop id="watt_per_state" value="100.0:120.0:200.0" />
+ <prop id="watt_per_state" value="100.0:120.0:120.0" />
<prop id="watt_off" value="10" />
</host>
xbt_dynar_foreach(storages, cur, st){
XBT_INFO("Init: %llu MiB used on '%s'", MSG_storage_get_used_size(st)/INMEGA, MSG_storage_get_name(st));
}
- xbt_dynar_free_container(&storages);
+ xbt_dynar_free(&storages);
XBT_INFO("Simulation time %g", MSG_get_clock());
return res != MSG_OK;
/* - Then retrieve this data */
char *data = MSG_file_get_data(file);
XBT_INFO("User data attached to the file: %s", data);
+ xbt_free(data);
MSG_file_close(file);
free(file_name);
<!-- _________
| |
| router |
- ____________|__________|_____________ backbone
+ ____________|__________|_____________ backbone link
| | | | | |
l0| l1| l2| l97| l96 | | l99
| | | ........ | | |
| |
node-0.acme.org node-99.acme.org
+
+ The route from node-0 to node-2 is: l0.UP ; backbone ; l2.DOWN
+
+ The route from node-0 to the outer world begins with: l0.UP ; backbone
-->
<cluster id="acme" prefix="node-" radical="0-99" suffix=".acme.org" speed="1Gf" bw="125MBps" lat="50us"
bb_bw="2.25GBps" bb_lat="500us"/>
--- /dev/null
+<?xml version='1.0'?>
+<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
+<platform version="4.1">
+ <zone id="world" routing="Full">
+ <!-- this platform models 3 clusters, interconnected over the Internet
+ --
+ -- This example intends to be somewhat exhaustive, your platform should probably be simpler.
+ --
+ -- In particular, the clusters are modeled in very different ways,
+ -- but you should pick the way you prefer, and stick to it.
+ -->
+
+ <!-- Here comes the first cluster, the simplest one.
+
+       Every node is connected through a private link to a router
+ (ie a machine that cannot host computations).
+
+ node-0.1core.org --[l0]--
+ \
+ node-1.1core.org --[l1]-- router -- (outer world)
+ ... /
+       node-7.1core.org --[l7]--
+
+
+ So the route from node-0 to node-1 is {l0.UP, l1.DOWN}
+ -->
+ <cluster id="simple" prefix="node-" radical="0-7" suffix=".1core.org" speed="1Gf" bw="125MBps" lat="50us" />
+
+
+ <!-- This second cluster has a backbone link, connecting all private links:
+
+ node-0.2cores.org --[l0]-------+
+ |
+ node-1.2cores.org --[l1]--[backbone]-- router -- (outer world)
+ ... |
+ node-7.2cores.org --[l7]-------+
+
+
+ The route from node-0 to node-1 is: l0.UP ; backbone ; l1.DOWN
+
+ The route from node-0 to the outer world begins with: l0.UP ; backbone
+ -->
+ <cluster id="backboned" prefix="node-" radical="0-7" suffix=".2cores.org"
+ speed="1Gf" core="2"
+ bw="125MBps" lat="50us"
+ bb_bw="2.25GBps" bb_lat="500us"/>
+
+
+ <!-- This cluster has a backbone link, but no links are fullduplex.
+ -- It means that up and down communications compete as if they
+ -- were using exactly the same resource. If you send and receive
+ -- at the same time, then each get half of the bandwidth.
+ --
+ -- Also, the hosts have 4 cores.
+ -->
+ <cluster id="halfduplex" prefix="node-" radical="0-7" suffix=".4cores.org" speed="1Gf" core="4"
+ bw="125MBps" lat="50us" sharing_policy="SHARED"
+ bb_bw="2.25GBps" bb_lat="500us" bb_sharing_policy="SHARED" />
+
+
+ <!-- And now, we create the routes between the clusters, ie inter-zone routes -->
+
+ <!-- We have only one outer link, representing the internet
+ -- Its sharing is FATPIPE, meaning that communications have no impact on each others.
+ -- Any given comm can use the full provided bandwidth.
+ --
+ -- This models the big links constituting the backbone of the internet,
+ -- that users cannot saturate.
+ -- Users' bandwidth is mostly limited by their outgoing connexion,
+ -- not by the network backbone. -->
+
+ <link id="backbone" bandwidth="1.25GBps" latency="500us" sharing_policy="FATPIPE"/>
+
+ <zoneRoute src="simple" dst="backboned"
+ gw_src="node-simple_router.1core.org"
+ gw_dst="node-backboned_router.2cores.org">
+ <link_ctn id="backbone" />
+ </zoneRoute>
+
+ <zoneRoute src="simple" dst="halfduplex"
+ gw_src="node-simple_router.1core.org"
+ gw_dst="node-halfduplex_router.4cores.org">
+ <link_ctn id="backbone" />
+ </zoneRoute>
+
+ <zoneRoute src="backboned" dst="halfduplex"
+ gw_src="node-backboned_router.2cores.org"
+ gw_dst="node-halfduplex_router.4cores.org">
+ <link_ctn id="backbone" />
+ </zoneRoute>
+</zone>
+</platform>
bw="125MBps" lat="50us" bb_bw="2.25GBps" bb_lat="500us">
<!-- List of idle_power:min_power:max_power pairs (in Watts) -->
<!-- The list must contain one speed tupple for each previously defined pstate-->
- <prop id="watt_per_state" value="100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0" />
+ <prop id="watt_per_state" value="100.0:120.0:120.0, 93.0:110.0:110.0, 90.0:105.0:105.0" />
<prop id="watt_off" value="10" />
</cluster>
</platform>
<!-- Multiple pstate processor capacities can be defined as a list of powers specified for a given host -->
<!-- Attribute 'pstate' specifies the initialy selected pstate (here, the lowest pstate corresponds to the highest
processor speed) -->
- <host id="MyHost1" speed="100.0Mf,50.0Mf,20.0Mf" pstate="0" >
+ <host id="MyHost1" speed="100.0Mf,50.0Mf,20.0Mf" pstate="0" core="1" >
<!-- List of min_power:max_power pairs (in Watts) corresponding to the speed consumed when the processor is idle
and when it is fully loaded -->
<!-- The list must contain one speed pair for each previously defined pstate-->
- <prop id="watt_per_state" value="100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0" />
+ <prop id="watt_per_state" value="100.0:120.0:120.0, 93.0:110.0:110.0, 90.0:105.0:105.0" />
<prop id="watt_off" value="10" />
</host>
<host id="MyHost2" speed="100.0Mf,50.0Mf,20.0Mf" pstate="0" >
- <prop id="watt_per_state" value="100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0" />
+ <prop id="watt_per_state" value="100.0:120.0:120.0, 93.0:110.0:110.0, 90.0:105.0:105.0" />
<prop id="watt_off" value="10" />
</host>
<host id="MyHost3" speed="100.0Mf,50.0Mf,20.0Mf" pstate="0" >
- <prop id="watt_per_state" value="100.0:120.0:200.0, 93.0:110.0:170.0, 90.0:105.0:150.0" />
+ <prop id="watt_per_state" value="100.0:120.0:120.0, 93.0:110.0:110.0, 90.0:105.0:105.0" />
<prop id="watt_off" value="10" />
</host>
+++ /dev/null
-<?xml version='1.0'?>
-<!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">
-<platform version="4.1">
-<!-- _________________________________________
- ____|_____ |
- | | |
- | router1 | |
- ____________|__________|_____________ backbone1 |
- | | | | | | | backbone
- l0| l1| l2| l7| l8| |l9 |
- | | | ........ | | | |
- | | |
-node-0.acme.org node-9.acme.org |
- __________________________________________|
- ___|______
- | |
- | router2 |
- ____________|__________|_____________ backbone2
- | | | | | |
- l10|l11|l12| l17| l18 | | l19
- | | | ........ | | |
- | |
-node-10.acme.org node-19.acme.org -->
-
- <zone id="AS0" routing="Full">
- <cluster id="my_cluster_1" prefix="node-" suffix=".acme.org" radical="0-9" speed="1Gf"
- bw="125MBps" lat="50us" bb_bw="2.25GBps" bb_lat="500us" />
-
- <cluster id="my_cluster_2" prefix="node-" suffix=".acme.org" radical="10-19" speed="1Gf"
- bw="125MBps" lat="50us" bb_bw="2.25GBps" bb_lat="500us" />
-
- <link id="backbone" bandwidth="1.25GBps" latency="500us" />
-
- <zoneRoute src="my_cluster_1" dst="my_cluster_2" gw_src="node-my_cluster_1_router.acme.org"
- gw_dst="node-my_cluster_2_router.acme.org">
- <link_ctn id="backbone" />
- </zoneRoute>
- </zone>
-</platform>
# The order differ when executed with gcc's thread sanitizer
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/dag-dotload/dag.dot
+$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/dag-dotload/dag.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
> [0.000000] [sd_task/INFO] Displaying task root
> [0.000000] [sd_task/INFO] 7->end
> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
-> [110.005082] [test/INFO] ------------------- Produce the trace file---------------------------
-> [110.005082] [test/INFO] Producing the trace of the run into dag.trace
+> [110.004162] [test/INFO] ------------------- Produce the trace file---------------------------
+> [110.004162] [test/INFO] Producing the trace of the run into dag.trace
$ cat ${srcdir:=.}/dag-dotload/dag.trace
> [0.000000->0.000000] node-0.acme.org compute 0.000000 flops # root
> [0.000000->10.000000] node-1.acme.org compute 10000000129.452715 flops # 0
-> [10.001680->20.001680] node-10.acme.org compute 10000000131.133657 flops # 1
-> [20.002360->30.002360] node-11.acme.org compute 10000000121.124870 flops # 2
-> [30.003040->40.003041] node-12.acme.org compute 10000000230.608025 flops # 3
-> [40.003041->50.003041] node-13.acme.org compute 10000000004.994019 flops # 4
-> [50.003721->60.003721] node-14.acme.org compute 10000000046.016401 flops # 5
-> [60.003721->70.003721] node-15.acme.org compute 10000000091.598791 flops # 6
-> [70.004401->80.004401] node-16.acme.org compute 10000000040.679438 flops # 7
-> [80.005081->90.005081] node-17.acme.org compute 10000000250.490017 flops # 8
-> [90.005081->100.005081] node-18.acme.org compute 10000000079.267649 flops # 9
-> [10.000000 -> 10.001680] node-1.acme.org -> node-10.acme.org transfer of 10001 bytes # 0->1
-> [20.001680 -> 20.002360] node-10.acme.org -> node-11.acme.org transfer of 10004 bytes # 1->2
-> [30.002360 -> 30.003040] node-11.acme.org -> node-12.acme.org transfer of 10002 bytes # 2->3
-> [50.003041 -> 50.003721] node-13.acme.org -> node-14.acme.org transfer of 10029 bytes # 4->5
-> [70.003721 -> 70.004401] node-15.acme.org -> node-16.acme.org transfer of 10005 bytes # 6->7
-> [80.004401 -> 80.005081] node-16.acme.org -> node-17.acme.org transfer of 10000 bytes # 7->8
-> [80.004401 -> 80.086113] node-16.acme.org -> node-0.acme.org transfer of 10014000 bytes # 7->end
-> [0.000000 -> 0.081712] node-0.acme.org -> node-14.acme.org transfer of 10014000 bytes # root->5
-> [100.005081->110.005082] node-0.acme.org compute 10000000129.452715 flops # end
+> [10.000680->20.000680] node-10.acme.org compute 10000000131.133657 flops # 1
+> [20.001360->30.001360] node-11.acme.org compute 10000000121.124870 flops # 2
+> [30.002040->40.002041] node-12.acme.org compute 10000000230.608025 flops # 3
+> [40.002041->50.002041] node-13.acme.org compute 10000000004.994019 flops # 4
+> [50.002721->60.002721] node-14.acme.org compute 10000000046.016401 flops # 5
+> [60.002721->70.002721] node-15.acme.org compute 10000000091.598791 flops # 6
+> [70.003401->80.003401] node-16.acme.org compute 10000000040.679438 flops # 7
+> [80.004161->90.004161] node-17.acme.org compute 10000000250.490017 flops # 8
+> [90.004161->100.004161] node-18.acme.org compute 10000000079.267649 flops # 9
+> [10.000000 -> 10.000680] node-1.acme.org -> node-10.acme.org transfer of 10001 bytes # 0->1
+> [20.000680 -> 20.001360] node-10.acme.org -> node-11.acme.org transfer of 10004 bytes # 1->2
+> [30.001360 -> 30.002040] node-11.acme.org -> node-12.acme.org transfer of 10002 bytes # 2->3
+> [50.002041 -> 50.002721] node-13.acme.org -> node-14.acme.org transfer of 10029 bytes # 4->5
+> [70.002721 -> 70.003401] node-15.acme.org -> node-16.acme.org transfer of 10005 bytes # 6->7
+> [80.003401 -> 80.004161] node-16.acme.org -> node-17.acme.org transfer of 10000 bytes # 7->8
+> [80.003401 -> 80.084193] node-16.acme.org -> node-0.acme.org transfer of 10014000 bytes # 7->end
+> [0.000000 -> 0.080712] node-0.acme.org -> node-14.acme.org transfer of 10014000 bytes # root->5
+> [100.004161->110.004162] node-0.acme.org compute 10000000129.452715 flops # end
$ rm -f ${srcdir:=.}/dag-dotload/dag.trace ${srcdir:=.}/dot.dot
! expect return 2
-$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/dag-dotload/dag_with_cycle.dot
+$ $SG_TEST_EXENV ${bindir:=.}/dag-dotload/sd_dag-dotload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/dag-dotload/dag_with_cycle.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_daxparse/WARNING] the task root is not marked
> [0.000000] [sd_daxparse/WARNING] the task 1 is in a cycle
p Test the DAX loader on a small DAX instance
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/daxload/smalldax.xml
+$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/daxload/smalldax.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_daxparse/WARNING] Ignore file o1 size redefinition from 1000000 to 304
> [0.000000] [sd_daxparse/WARNING] Ignore file o2 size redefinition from 1000000 to 304
> [0.000000] [sd_task/INFO] 3@task1_o3_end
> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
-> [84.068138] [test/INFO] ------------------- Produce the trace file---------------------------
-> [84.068138] [test/INFO] Producing the trace of the run into smalldax.trace
+> [84.067138] [test/INFO] ------------------- Produce the trace file---------------------------
+> [84.067138] [test/INFO] Producing the trace of the run into smalldax.trace
$ cat ${srcdir:=.}/daxload/smalldax.trace
> [0.000000] node-0.acme.org compute 0.000000 # root
-> [0.015600] node-1.acme.org compute 42000000000.000000 # 1@task1
+> [0.016600] node-1.acme.org compute 42000000000.000000 # 1@task1
> [0.016600] node-10.acme.org compute 42000000000.000000 # 2@task2
> [42.033200] node-11.acme.org compute 42000000000.000000 # 3@task1
> [0.000000] node-0.acme.org send node-10.acme.org 1000000.000000 # root_i2_2@task2
> [0.016600] node-10.acme.org recv node-0.acme.org 1000000.000000 # root_i2_2@task2
-> [42.015600] node-1.acme.org send node-11.acme.org 1000000.000000 # 1@task1_o1_3@task1
+> [42.016600] node-1.acme.org send node-11.acme.org 1000000.000000 # 1@task1_o1_3@task1
> [42.033200] node-11.acme.org recv node-1.acme.org 1000000.000000 # 1@task1_o1_3@task1
> [42.016600] node-10.acme.org send node-11.acme.org 1000000.000000 # 2@task2_o2_3@task1
> [42.033200] node-11.acme.org recv node-10.acme.org 1000000.000000 # 2@task2_o2_3@task1
> [84.033200] node-11.acme.org send node-0.acme.org 4167312.000000 # 3@task1_o3_end
-> [84.068138] node-0.acme.org recv node-11.acme.org 4167312.000000 # 3@task1_o3_end
+> [84.067138] node-0.acme.org recv node-11.acme.org 4167312.000000 # 3@task1_o3_end
> [0.000000] node-0.acme.org send node-1.acme.org 1000000.000000 # root_i1_1@task1
-> [0.015600] node-1.acme.org recv node-0.acme.org 1000000.000000 # root_i1_1@task1
-> [84.068138] node-0.acme.org compute 0.000000 # end
+> [0.016600] node-1.acme.org recv node-0.acme.org 1000000.000000 # root_i1_1@task1
+> [84.067138] node-0.acme.org compute 0.000000 # end
$ cmake -E remove -f ${srcdir:=.}/dax.dot ${srcdir:=.}/daxload/smalldax.trace
p Test the DAX loader with a DAX comprising a cycle.
! expect return 255
-$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/daxload/simple_dax_with_cycle.xml
+$ $SG_TEST_EXENV ${bindir:=.}/daxload/sd_daxload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/daxload/simple_dax_with_cycle.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_daxparse/WARNING] the task root is not marked
> [0.000000] [sd_daxparse/WARNING] the task 1@task1 is in a cycle
# The order differ when executed with gcc's thread sanitizer
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/ptg-dotload/sd_ptg-dotload ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/ptg-dotload/ptg.dot
+$ $SG_TEST_EXENV ${bindir:=.}/ptg-dotload/sd_ptg-dotload ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/ptg-dotload/ptg.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
> [0.000000] [sd_task/INFO] Displaying task root
> [0.000000] [sd_task/INFO] c3->end
> [0.000000] [test/INFO] ------------------- Schedule tasks ---------------------------
> [0.000000] [test/INFO] ------------------- Run the schedule ---------------------------
-> [3.126200] [test/INFO] Makespan: 3.126200
+> [2.931978] [test/INFO] Makespan: 2.931978
p Test the loader of DAG written in the DOT format
! expect return 2
-$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc "--log=sd_dotparse.thres:verbose" ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/schedule-dotload/dag_with_bad_schedule.dot
+$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc "--log=sd_dotparse.thres:verbose" ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/schedule-dotload/dag_with_bad_schedule.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [sd_dotparse/VERBOSE] The schedule is ignored, task 'end' can not be scheduled on -1 hosts
> [0.000000] [sd_dotparse/VERBOSE] The schedule is ignored, task '1' can not be scheduled on 0 hosts
# The order differ when executed with gcc's thread sanitizer
! output sort
-$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc ${srcdir:=.}/../platforms/two_clusters.xml ${srcdir:=.}/schedule-dotload/dag_with_good_schedule.dot
+$ $SG_TEST_EXENV ${bindir:=.}/schedule-dotload/sd_schedule-dotload --log=no_loc ${srcdir:=.}/../platforms/cluster.xml ${srcdir:=.}/schedule-dotload/dag_with_good_schedule.dot
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [0.000000] [test/INFO] ------------------- Display all tasks of the loaded DAG ---------------------------
> [0.000000] [sd_task/INFO] Displaying task root
static double finish_on_at(SD_task_t task, sg_host_t host)
{
double result;
- unsigned int i;
- double data_available = 0.;
- double redist_time = 0;
- double last_data_available;
xbt_dynar_t parents = SD_task_get_parents(task);
if (!xbt_dynar_is_empty(parents)) {
+ unsigned int i;
+ double data_available = 0.;
+ double redist_time = 0;
+ double last_data_available;
/* compute last_data_available */
SD_task_t parent;
last_data_available = -1.0;
# We need to sort this out because the order changes with the sanitizers (at least)
! output sort
-$ $SG_TEST_EXENV ./throttling/sd_throttling ${srcdir:=.}/../platforms/two_clusters.xml
+$ $SG_TEST_EXENV ./throttling/sd_throttling ${srcdir:=.}/../platforms/cluster.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [5.000000] [sd_comm_throttling/INFO] Simulation stopped after 5.0000 seconds
> [5.000000] [sd_comm_throttling/INFO] Task 'Task A' start time: 0.000000, finish time: 5.000000
p Usage test of simdag's typed tasks
! output sort
-$ $SG_TEST_EXENV ./typed_tasks/sd_typed_tasks ${srcdir:=.}/../platforms/two_clusters.xml
+$ $SG_TEST_EXENV ./typed_tasks/sd_typed_tasks ${srcdir:=.}/../platforms/cluster.xml
> [0.000000] [xbt_cfg/INFO] Switching to the L07 model to handle parallel tasks.
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 3' start time: 0.000000, finish time: 0.400000
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 1' start time: 0.000000, finish time: 0.400000
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Seq. comp. 1' start time: 0.000000, finish time: 1.000000
-> [2.080600] [sd_typed_tasks_test/INFO] Task 'MxN redist' start time: 0.400000, finish time: 0.721600
-> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 2' start time: 0.721600, finish time: 0.921600
+> [2.080600] [sd_typed_tasks_test/INFO] Task 'MxN redist' start time: 0.400000, finish time: 0.720600
+> [2.080600] [sd_typed_tasks_test/INFO] Task 'Par. Comp. 2' start time: 0.720600, finish time: 0.920600
> [2.080600] [sd_typed_tasks_test/INFO] Task 'E2E comm.' start time: 1.000000, finish time: 1.080600
> [2.080600] [sd_typed_tasks_test/INFO] Task 'Seq. comp 2.' start time: 1.080600, finish time: 2.080600
namespace kernel {
namespace activity {
class ActivityImpl;
+ using ActivityImplPtr = boost::intrusive_ptr<ActivityImpl>;
XBT_PUBLIC(void) intrusive_ptr_add_ref(ActivityImpl* activity);
XBT_PUBLIC(void) intrusive_ptr_release(ActivityImpl* activity);
+
+ class CommImpl;
+ using CommImplPtr = boost::intrusive_ptr<CommImpl>;
+ class ExecImpl;
+ using ExecImplPtr = boost::intrusive_ptr<ExecImpl>;
+ class IoImpl;
+ using IoImplPtr = boost::intrusive_ptr<IoImpl>;
+ class RawImpl;
+ using RawImplPtr = boost::intrusive_ptr<RawImpl>;
+ class SleepImpl;
+ using SleepImplPtr = boost::intrusive_ptr<SleepImpl>;
}
namespace routing {
class NetPoint;
}
}
namespace simix {
+ class ActorImpl;
+ using ActorImplPtr = boost::intrusive_ptr<ActorImpl>;
class Host;
}
namespace surf {
typedef simgrid::s4u::File s4u_File;
typedef simgrid::s4u::Storage s4u_Storage;
typedef simgrid::s4u::NetZone s4u_NetZone;
-typedef simgrid::kernel::activity::ActivityImpl* smx_activity_t;
+typedef boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> smx_activity_t;
typedef simgrid::kernel::routing::NetPoint routing_NetPoint;
typedef simgrid::surf::Resource surf_Resource;
typedef simgrid::trace_mgr::trace tmgr_Trace;
XBT_PUBLIC(double) sg_host_get_consumed_energy(sg_host_t host);
XBT_PUBLIC(double) sg_host_get_wattmin_at(sg_host_t host, int pstate);
XBT_PUBLIC(double) sg_host_get_wattmax_at(sg_host_t host, int pstate);
+XBT_PUBLIC(double) sg_host_get_current_consumption(sg_host_t host);
#define MSG_host_energy_plugin_init() sg_energy_plugin_init()
#define MSG_host_get_consumed_energy(host) sg_host_get_consumed_energy(host)
#define MSG_host_get_wattmin_at(host,pstate) sg_host_get_wattmin_at(host,pstate)
#define MSG_host_get_wattmax_at(host,pstate) sg_host_get_wattmax_at(host,pstate)
+#define MSG_host_get_current_consumption(host) sg_host_get_current_consumption(host)
SG_END_DECL()
void *getUserData() { return userData_; }
private:
- simgrid::kernel::activity::ActivityImpl *pimpl_ = nullptr;
+ simgrid::kernel::activity::ActivityImplPtr pimpl_ = nullptr;
e_s4u_activity_state_t state_ = inited;
double remains_ = 0;
void *userData_ = nullptr;
virtual ~Comm();
- /*! take a range of s4u::Comm* (last excluded) and return when one of them is finished. The return value is an
+ /*! take a range of s4u::CommPtr (last excluded) and return when one of them is finished. The return value is an
* iterator on the finished Comms. */
template <class I> static I wait_any(I first, I last)
{
// Map to dynar<Synchro*>:
- xbt_dynar_t comms = xbt_dynar_new(sizeof(simgrid::kernel::activity::ActivityImpl*), NULL);
+ xbt_dynar_t comms = xbt_dynar_new(sizeof(simgrid::kernel::activity::ActivityImpl*), [](void*ptr){
+ intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
+ });
for (I iter = first; iter != last; iter++) {
- Comm& comm = **iter;
- if (comm.state_ == inited)
- comm.start();
- xbt_assert(comm.state_ == started);
- xbt_dynar_push_as(comms, simgrid::kernel::activity::ActivityImpl*, comm.pimpl_);
+ CommPtr comm = *iter;
+ if (comm->state_ == inited)
+ comm->start();
+ xbt_assert(comm->state_ == started);
+ simgrid::kernel::activity::ActivityImpl* ptr = comm->pimpl_.get();
+ intrusive_ptr_add_ref(ptr);
+ xbt_dynar_push_as(comms, simgrid::kernel::activity::ActivityImpl*, ptr);
}
// Call the underlying simcall:
int idx = simcall_comm_waitany(comms, -1);
(*res)->state_ = finished;
return res;
}
- /*! Same as wait_any, but with a timeout. If wait_any_for return because of the timeout last is returned.*/
+ /*! Same as wait_any, but with a timeout. If the timeout occurs, parameter last is returned.*/
template <class I> static I wait_any_for(I first, I last, double timeout)
{
// Map to dynar<Synchro*>:
- xbt_dynar_t comms = xbt_dynar_new(sizeof(simgrid::kernel::activity::ActivityImpl*), NULL);
+ xbt_dynar_t comms = xbt_dynar_new(sizeof(simgrid::kernel::activity::ActivityImpl*), [](void*ptr){
+ intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
+ });
for (I iter = first; iter != last; iter++) {
- Comm& comm = **iter;
- if (comm.state_ == inited)
- comm.start();
- xbt_assert(comm.state_ == started);
- xbt_dynar_push_as(comms, simgrid::kernel::activity::ActivityImpl*, comm.pimpl_);
+ CommPtr comm = *iter;
+ if (comm->state_ == inited)
+ comm->start();
+ xbt_assert(comm->state_ == started);
+ simgrid::kernel::activity::ActivityImpl* ptr = comm->pimpl_.get();
+ intrusive_ptr_add_ref(ptr);
+ xbt_dynar_push_as(comms, simgrid::kernel::activity::ActivityImpl*, ptr);
}
// Call the underlying simcall:
int idx = simcall_comm_waitany(comms, timeout);
namespace simgrid {
namespace s4u {
-class Storage;
-
/** @brief A simulated file
*
* Used to simulate the time it takes to access to a file, but does not really store any information.
XBT_PUBLIC(void) SIMIX_process_on_exit_runall(smx_actor_t process);
XBT_PUBLIC(void) SIMIX_process_on_exit(smx_actor_t process, int_f_pvoid_pvoid_t fun, void *data);
+SG_END_DECL()
+
/****************************** Communication *********************************/
XBT_PUBLIC(void) SIMIX_comm_set_copy_data_callback(void (*callback) (smx_activity_t, void*, size_t));
XBT_PUBLIC(void) SIMIX_comm_copy_pointer_callback(smx_activity_t comm, void* buff, size_t buff_size);
XBT_PUBLIC(e_smx_state_t) simcall_execution_wait(smx_activity_t execution);
/**************************** Process simcalls ********************************/
+SG_BEGIN_DECL()
/* Constructor and Destructor */
XBT_PUBLIC(smx_actor_t)
simcall_process_create(const char* name, xbt_main_func_t code, void* data, sg_host_t host, int argc, char** argv,
XBT_PUBLIC(void) simcall_process_join(smx_actor_t process, double timeout);
/* Sleep control */
XBT_PUBLIC(e_smx_state_t) simcall_process_sleep(double duration);
+SG_END_DECL()
/************************** Communication simcalls ****************************/
XBT_PUBLIC(void) simcall_set_category(smx_activity_t synchro, const char *category);
/************************** Synchro simcalls **********************************/
+SG_BEGIN_DECL()
XBT_PUBLIC(smx_mutex_t) simcall_mutex_init();
XBT_PUBLIC(smx_mutex_t) SIMIX_mutex_ref(smx_mutex_t mutex);
XBT_PUBLIC(void) SIMIX_mutex_unref(smx_mutex_t mutex);
XBT_PUBLIC(int) simcall_mc_random(int min, int max);
SG_END_DECL()
+
#endif /* _SIMIX_SIMIX_H */
class ThrowPoint {
public:
ThrowPoint() = default;
- ThrowPoint(const char* file, int line, const char* function) :
- file(file), line(line), function(function) {}
+ explicit ThrowPoint(const char* file, int line, const char* function) : file(file), line(line), function(function) {}
const char* file = nullptr;
int line = 0;
const char* function = nullptr;
return data()[i];
}
// Conversion
- operator std::string() const
- {
- return std::string(this->c_str(), this->size());
- }
+ operator std::string() const { return std::string(this->c_str(), this->size()); }
// Iterators
iterator begin() { return data(); }
*/
XBT_PUBLIC(double) lmm_constraint_get_usage(lmm_constraint_t cnst);
+XBT_PUBLIC(int) lmm_constraint_get_variable_amount(lmm_constraint_t cnst);
+
/**
* @brief Sets the concurrency limit for this constraint
* @param cnst A constraint
#include "src/kernel/activity/ActivityImpl.hpp"
+XBT_LOG_EXTERNAL_CATEGORY(simix_process);
+
namespace simgrid {
namespace kernel {
namespace activity {
ActivityImpl::ActivityImpl() = default;
ActivityImpl::~ActivityImpl() = default;
-void ActivityImpl::ref()
-{
- // Atomic operation! Do not split in two instructions!
- xbt_assert(refcount_ != 0);
- refcount_++;
-}
-
-void ActivityImpl::unref()
-{
- xbt_assert(refcount_ > 0,
- "This activity has a negative refcount! You can only call test() or wait() once per activity.");
- refcount_--;
- if (refcount_ == 0)
- delete this;
-}
-
// boost::intrusive_ptr<Activity> support:
void intrusive_ptr_add_ref(simgrid::kernel::activity::ActivityImpl* activity)
{
- activity->ref();
+ xbt_assert(activity->refcount_ >= 0);
+ activity->refcount_++;
+ XBT_CDEBUG(simix_process, "%p->refcount++ ~> %d", activity, (int)activity->refcount_);
+ if (XBT_LOG_ISENABLED(simix_process, xbt_log_priority_trace))
+ xbt_backtrace_display_current();
}
void intrusive_ptr_release(simgrid::kernel::activity::ActivityImpl* activity)
{
- activity->unref();
+ XBT_CDEBUG(simix_process, "%p->refcount-- ~> %d", activity, ((int)activity->refcount_) - 1);
+ xbt_assert(activity->refcount_ >= 0);
+ activity->refcount_--;
+ if (XBT_LOG_ISENABLED(simix_process, xbt_log_priority_trace))
+ xbt_backtrace_display_current();
+ if (activity->refcount_ <= 0)
+ delete activity;
}
}
}
virtual void resume()=0;
virtual void post() =0; // What to do when a simcall terminates
- /** @brief Increases the refcount */
- void ref();
- /** @brief Reduces the refcount */
- void unref();
-
- // boost::intrusive_ptr<Activity> support:
+ // boost::intrusive_ptr<ActivityImpl> support:
friend void intrusive_ptr_add_ref(ActivityImpl * activity);
friend void intrusive_ptr_release(ActivityImpl * activity);
private:
- std::atomic_int_fast32_t refcount_{1};
+ std::atomic_int_fast32_t refcount_{0};
};
}}} // namespace simgrid::kernel::activity
state = SIMIX_WAITING;
src_data = nullptr;
dst_data = nullptr;
- intrusive_ptr_add_ref(this);
XBT_DEBUG("Create comm activity %p", this);
}
if (state == SIMIX_WAITING) {
mbox->remove(this);
state = SIMIX_CANCELED;
- this->unref();
} else if (not MC_is_active() /* when running the MC there are no surf actions */
&& not MC_record_replay_is_active() && (state == SIMIX_READY || state == SIMIX_RUNNING)) {
/* if there are simcalls associated with the synchro, then answer them */
if (not simcalls.empty()) {
SIMIX_comm_finish(this);
- this->unref();
}
}
if (name)
this->name = name;
this->state = SIMIX_RUNNING;
+ XBT_DEBUG("Create exec %p", this);
}
simgrid::kernel::activity::ExecImpl::~ExecImpl()
surf_exec->unref();
if (timeoutDetector)
timeoutDetector->unref();
+ XBT_DEBUG("Destroy exec %p", this);
}
void simgrid::kernel::activity::ExecImpl::suspend()
{
/** @brief Pushes a communication activity into a mailbox
* @param comm What to add
*/
-void MailboxImpl::push(activity::CommImpl* comm)
+void MailboxImpl::push(activity::CommImplPtr comm)
{
- this->comm_queue.push_back(comm);
comm->mbox = this;
+ this->comm_queue.push_back(std::move(comm));
}
/** @brief Removes a communication activity from a mailbox
*/
void MailboxImpl::remove(smx_activity_t activity)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(activity);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(activity);
+ xbt_assert(comm->mbox == this, "Comm %p is in mailbox %s, not mailbox %s", comm.get(),
+ (comm->mbox ? comm->mbox->name_ : "(null)"), this->name_);
comm->mbox = nullptr;
for (auto it = this->comm_queue.begin(); it != this->comm_queue.end(); it++)
if (*it == comm) {
this->comm_queue.erase(it);
return;
}
- xbt_die("Cannot remove the comm %p that is not part of the mailbox %s", comm, this->name_);
+ xbt_die("Comm %p not found in mailbox %s", comm.get(), this->name_);
}
}
}
static MailboxImpl* byNameOrNull(const char* name);
static MailboxImpl* byNameOrCreate(const char* name);
void setReceiver(s4u::ActorPtr actor);
- void push(activity::CommImpl* comm);
+ void push(activity::CommImplPtr comm);
void remove(smx_activity_t activity);
simgrid::s4u::Mailbox piface_; // Our interface
char* name_;
- boost::intrusive_ptr<simgrid::simix::ActorImpl> permanent_receiver; // process which the mailbox is attached to
+ simgrid::simix::ActorImplPtr permanent_receiver; // process which the mailbox is attached to
boost::circular_buffer_space_optimized<smx_activity_t> comm_queue;
boost::circular_buffer_space_optimized<smx_activity_t> done_comm_queue; // messages already received in the permanent receive mode
};
#include "src/surf/surf_interface.hpp"
#include "src/simix/smx_private.h"
-void simgrid::kernel::activity::Io::suspend()
+void simgrid::kernel::activity::IoImpl::suspend()
{
if (surf_io)
surf_io->suspend();
}
-void simgrid::kernel::activity::Io::resume()
+void simgrid::kernel::activity::IoImpl::resume()
{
if (surf_io)
surf_io->resume();
}
-void simgrid::kernel::activity::Io::post()
+void simgrid::kernel::activity::IoImpl::post()
{
for (smx_simcall_t simcall : simcalls) {
switch (simcall->call) {
namespace kernel {
namespace activity {
- XBT_PUBLIC_CLASS Io : public ActivityImpl {
- public:
- void suspend() override;
- void resume() override;
- void post() override;
-
- sg_host_t host = nullptr;
- surf_action_t surf_io = nullptr;
+XBT_PUBLIC_CLASS IoImpl : public ActivityImpl
+{
+public:
+ void suspend() override;
+ void resume() override;
+ void post() override;
+
+ sg_host_t host = nullptr;
+ surf_action_t surf_io = nullptr;
};
}}} // namespace simgrid::kernel::activity
XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(simix_synchro);
-simgrid::kernel::activity::Raw::~Raw()
+simgrid::kernel::activity::RawImpl::~RawImpl()
{
sleep->unref();
}
-void simgrid::kernel::activity::Raw::suspend()
+void simgrid::kernel::activity::RawImpl::suspend()
{
/* The suspension of raw synchros is delayed to when the process is rescheduled. */
}
-void simgrid::kernel::activity::Raw::resume()
+void simgrid::kernel::activity::RawImpl::resume()
{
/* I cannot resume raw synchros directly. This is delayed to when the process is rescheduled at
* the end of the synchro. */
}
-void simgrid::kernel::activity::Raw::post()
+void simgrid::kernel::activity::RawImpl::post()
{
XBT_IN("(%p)",this);
if (sleep->getState() == simgrid::surf::Action::State::failed)
namespace activity {
/** Used to implement mutexes, semaphores and conditions */
- XBT_PUBLIC_CLASS Raw : public ActivityImpl {
- public:
- ~Raw() override;
- void suspend() override;
- void resume() override;
- void post() override;
-
- surf_action_t sleep = nullptr;
+XBT_PUBLIC_CLASS RawImpl : public ActivityImpl
+{
+public:
+ ~RawImpl() override;
+ void suspend() override;
+ void resume() override;
+ void post() override;
+
+ surf_action_t sleep = nullptr;
};
}}} // namespace simgrid::kernel::activity
public:
constexpr ReadOptions() : value_(0) {}
- constexpr operator bool() const { return value_ != 0; }
+ explicit constexpr operator bool() const { return value_ != 0; }
constexpr bool operator!() const { return value_ == 0; }
constexpr ReadOptions operator|(ReadOptions const& that) const
private:
Process* process_;
public:
- AddressSpace(Process* process) : process_(process) {}
+ explicit AddressSpace(Process* process) : process_(process) {}
virtual ~AddressSpace() = default;
/** The process of this address space
LocationListEntry(DwarfExpression expression, range_type range)
: expression_(std::move(expression)), range_(range)
{}
- LocationListEntry(DwarfExpression expression)
- : expression_(std::move(expression)), range_({0, UINT64_MAX})
- {}
+ LocationListEntry(DwarfExpression expression) : expression_(std::move(expression)), range_({0, UINT64_MAX}) {}
DwarfExpression& expression()
{
void* memory_;
int register_id_;
public:
- Location(void* x) :memory_(x) {}
- Location(int register_id) :
- memory_(nullptr), register_id_(register_id) {}
+ explicit Location(void* x) : memory_(x) {}
+ explicit Location(int register_id) : memory_(nullptr), register_id_(register_id) {}
// Type of location:
bool in_register() const { return memory_ == nullptr; }
bool in_memory() const { return memory_ != nullptr; }
int num = 0; // unique id of that state in the storage of all stored IDs
int original_num = 0; // num field of the VisitedState to which I was declared equal to (used for dot_output)
- VisitedState(unsigned long state_number);
+ explicit VisitedState(unsigned long state_number);
~VisitedState();
};
class Checker {
Session* session_;
public:
- Checker(Session& session);
+ explicit Checker(Session& session);
// No copy:
Checker(Checker const&) = delete;
if (call_type == MC_CALL_TYPE_SEND) {
/* Create comm pattern */
pattern->type = simgrid::mc::PatternCommunicationType::send;
- pattern->comm_addr = simcall_comm_isend__get__result(request);
+ pattern->comm_addr = static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_isend__getraw__result(request));
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_synchro;
mc_model_checker->process().read(temp_synchro,
}
} else if (call_type == MC_CALL_TYPE_RECV) {
pattern->type = simgrid::mc::PatternCommunicationType::receive;
- pattern->comm_addr = simcall_comm_irecv__get__result(request);
+ pattern->comm_addr = static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_irecv__getraw__result(request));
simgrid::smpi::Request mpi_request;
mc_model_checker->process().read(&mpi_request,
class XBT_PRIVATE CommunicationDeterminismChecker : public Checker {
public:
- CommunicationDeterminismChecker(Session& session);
+ explicit CommunicationDeterminismChecker(Session& session);
~CommunicationDeterminismChecker();
void run() override;
RecordTrace getRecordTrace() override;
int depth = 0;
bool exploration_started = false;
- Pair(unsigned long expanded_pairs);
+ explicit Pair(unsigned long expanded_pairs);
~Pair() = default;
Pair(Pair const&) = delete;
{
/* FIXME: check also that src and dst processes are not suspended */
simgrid::kernel::activity::CommImpl* act =
- static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__get__comm(req));
+ static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
#if SIMGRID_HAVE_MC
// Fetch from MCed memory:
case SIMCALL_COMM_WAITANY: {
xbt_dynar_t comms;
simgrid::kernel::activity::CommImpl* act =
- static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__get__comm(req));
+ static_cast<simgrid::kernel::activity::CommImpl*>(&*simcall_comm_wait__get__comm(req));
#if SIMGRID_HAVE_MC
s_xbt_dynar_t comms_buffer;
{
simgrid::mc::RemotePtr<simgrid::kernel::activity::CommImpl> comm_addr = nullptr;
if (call_type == MC_CALL_TYPE_WAIT)
- comm_addr = remote(static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__get__comm(req)));
+ comm_addr = remote(static_cast<simgrid::kernel::activity::CommImpl*>(
+ simgrid::simix::unmarshal<simgrid::kernel::activity::ActivityImpl*>(req->result)));
+
else {
simgrid::kernel::activity::CommImpl* addr;
// comm_addr = REMOTE(xbt_dynar_get_as(simcall_comm_waitany__get__comms(req), value, smx_synchro_t)):
{
switch (r->call ) {
case SIMCALL_COMM_WAIT:
- return static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__get__comm(r));
+ return static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(r));
case SIMCALL_COMM_TEST:
- return static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_test__get__comm(r));
+ return static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_test__getraw__comm(r));
default:
return nullptr;
}
case SIMCALL_COMM_WAIT: {
simgrid::kernel::activity::CommImpl* remote_act =
- static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__get__comm(req));
+ static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(req));
char* p;
if (value == -1) {
type = "WaitTimeout";
case SIMCALL_COMM_TEST: {
simgrid::kernel::activity::CommImpl* remote_act =
- static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_test__get__comm(req));
+ static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_test__getraw__comm(req));
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_synchro;
simgrid::kernel::activity::CommImpl* act;
if (use_remote_comm) {
read_element(mc_model_checker->process(),
&remote_sync, remote(simcall_comm_waitany__get__comms(req)), value,
sizeof(remote_sync));
- char* p = pointer_to_string(remote_sync);
+ char* p = pointer_to_string(&*remote_sync);
args = bprintf("comm=%s (%d of %lu)",
p, value + 1, xbt_dynar_length(&comms));
xbt_free(p);
bool request_is_enabled_by_idx(smx_simcall_t req, unsigned int idx)
{
- smx_activity_t remote_act = nullptr;
+ simgrid::kernel::activity::ActivityImpl* remote_act = nullptr;
switch (req->call) {
case SIMCALL_COMM_WAIT:
/* FIXME: check also that src and dst processes are not suspended */
- remote_act = simcall_comm_wait__get__comm(req);
+ remote_act = simcall_comm_wait__getraw__comm(req);
break;
case SIMCALL_COMM_WAITANY: {
- read_element(
- mc_model_checker->process(), &remote_act,
- remote(simcall_comm_waitany__get__comms(req)),
- idx, sizeof(remote_act));
+ read_element(mc_model_checker->process(), &remote_act, remote(simcall_comm_waitany__getraw__comms(req)), idx,
+ sizeof(remote_act));
}
break;
case SIMCALL_COMM_TESTANY:
- remote_act = mc_model_checker->process().read(remote(
- simcall_comm_testany__get__comms(req) + idx));
+ remote_act = mc_model_checker->process().read(remote(simcall_comm_testany__getraw__comms(req) + idx));
break;
default:
else
label = simgrid::xbt::string_printf("[(%lu)] WaitTimeout", issuer->pid);
} else {
- smx_activity_t remote_act = simcall_comm_wait__get__comm(req);
+ simgrid::kernel::activity::ActivityImpl* remote_act = simcall_comm_wait__getraw__comm(req);
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_comm;
mc_model_checker->process().read(temp_comm,
remote(static_cast<simgrid::kernel::activity::CommImpl*>(remote_act)));
}
case SIMCALL_COMM_TEST: {
- smx_activity_t remote_act = simcall_comm_test__get__comm(req);
+ simgrid::kernel::activity::ActivityImpl* remote_act = simcall_comm_test__getraw__comm(req);
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_comm;
mc_model_checker->process().read(temp_comm, remote(static_cast<simgrid::kernel::activity::CommImpl*>(remote_act)));
simgrid::kernel::activity::CommImpl* comm = temp_comm.getBuffer();
case SIMCALL_COMM_WAIT: {
simgrid::mc::RemotePtr<simgrid::kernel::activity::CommImpl> remote_act =
- remote(static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__get__comm(&actor->simcall)));
+ remote(static_cast<simgrid::kernel::activity::CommImpl*>(simcall_comm_wait__getraw__comm(&actor->simcall)));
simgrid::mc::Remote<simgrid::kernel::activity::CommImpl> temp_act;
mc_model_checker->process().read(temp_act, remote_act);
simgrid::kernel::activity::CommImpl* act = temp_act.getBuffer();
switch (req->call) {
case SIMCALL_COMM_WAITANY: {
state->internal_req.call = SIMCALL_COMM_WAIT;
- smx_activity_t remote_comm;
- read_element(mc_model_checker->process(),
- &remote_comm, remote(simcall_comm_waitany__get__comms(req)),
- state->transition.argument, sizeof(remote_comm));
+ simgrid::kernel::activity::ActivityImpl* remote_comm;
+ read_element(mc_model_checker->process(), &remote_comm, remote(simcall_comm_waitany__getraw__comms(req)),
+ state->transition.argument, sizeof(remote_comm));
mc_model_checker->process().read(state->internal_comm,
remote(static_cast<simgrid::kernel::activity::CommImpl*>(remote_comm)));
simcall_comm_wait__set__comm(&state->internal_req, state->internal_comm.getBuffer());
state->internal_req.call = SIMCALL_COMM_TEST;
if (state->transition.argument > 0) {
- smx_activity_t remote_comm = mc_model_checker->process().read(
- remote(simcall_comm_testany__get__comms(req) + state->transition.argument));
+ simgrid::kernel::activity::ActivityImpl* remote_comm = mc_model_checker->process().read(
+ remote(simcall_comm_testany__getraw__comms(req) + state->transition.argument));
mc_model_checker->process().read(state->internal_comm,
remote(static_cast<simgrid::kernel::activity::CommImpl*>(remote_comm)));
}
break;
case SIMCALL_COMM_WAIT:
- mc_model_checker->process().read_bytes(&state->internal_comm ,
- sizeof(state->internal_comm), remote(simcall_comm_wait__get__comm(req)));
+ mc_model_checker->process().read_bytes(&state->internal_comm, sizeof(state->internal_comm),
+ remote(simcall_comm_wait__getraw__comm(req)));
simcall_comm_wait__set__comm(&state->executed_req, state->internal_comm.getBuffer());
simcall_comm_wait__set__comm(&state->internal_req, state->internal_comm.getBuffer());
break;
case SIMCALL_COMM_TEST:
- mc_model_checker->process().read_bytes(&state->internal_comm,
- sizeof(state->internal_comm), remote(simcall_comm_test__get__comm(req)));
+ mc_model_checker->process().read_bytes(&state->internal_comm, sizeof(state->internal_comm),
+ remote(simcall_comm_test__getraw__comm(req)));
simcall_comm_test__set__comm(&state->executed_req, state->internal_comm.getBuffer());
simcall_comm_test__set__comm(&state->internal_req, state->internal_comm.getBuffer());
break;
struct PatternCommunication {
int num = 0;
- smx_activity_t comm_addr;
+ simgrid::kernel::activity::CommImpl* comm_addr;
PatternCommunicationType type = PatternCommunicationType::send;
unsigned long src_proc = 0;
unsigned long dst_proc = 0;
#include <xbt/ex.hpp>
+#include "src/kernel/activity/ExecImpl.hpp"
#include "src/msg/msg_private.h"
#include "src/simix/smx_private.h" /* MSG_task_listen looks inside the rdv directly. Not clean. */
simdata->setUsed();
if (simdata->host_nb > 0) {
- simdata->compute = static_cast<simgrid::kernel::activity::ExecImpl*>(simcall_execution_parallel_start(
- task->name, simdata->host_nb, simdata->host_list, simdata->flops_parallel_amount,
- simdata->bytes_parallel_amount, 1.0, -1.0, timeout));
- XBT_DEBUG("Parallel execution action created: %p", simdata->compute);
+ simdata->compute =
+ boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(simcall_execution_parallel_start(
+ task->name, simdata->host_nb, simdata->host_list, simdata->flops_parallel_amount,
+ simdata->bytes_parallel_amount, 1.0, -1.0, timeout));
+ XBT_DEBUG("Parallel execution action created: %p", simdata->compute.get());
} else {
- simdata->compute = static_cast<simgrid::kernel::activity::ExecImpl*>(
+ simdata->compute = boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(
simcall_execution_start(task->name, simdata->flops_amount, simdata->priority, simdata->bound));
}
simcall_set_category(simdata->compute, task->category);
/* Send it by calling SIMIX network layer */
smx_activity_t act = simcall_comm_isend(myself->getImpl(), mailbox->getImpl(), t_simdata->bytes_amount, t_simdata->rate,
task, sizeof(void *), match_fun, cleanup, nullptr, match_data,detached);
- t_simdata->comm = static_cast<simgrid::kernel::activity::CommImpl*>(act);
+ t_simdata->comm = boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(act);
msg_comm_t comm = nullptr;
if (not detached) {
if (finished && comm->task_received != nullptr) {
/* I am the receiver */
(*comm->task_received)->simdata->setNotUsed();
- comm->s_comm->unref();
}
}
catch (xbt_ex& e) {
int finished_index = -1;
/* Create the equivalent array with SIMIX objects: */
- std::vector<simgrid::kernel::activity::ActivityImpl*> s_comms;
+ std::vector<simgrid::kernel::activity::ActivityImplPtr> s_comms;
s_comms.reserve(xbt_dynar_length(comms));
msg_comm_t comm;
unsigned int cursor;
if (status == MSG_OK && comm->task_received != nullptr) {
/* I am the receiver */
(*comm->task_received)->simdata->setNotUsed();
- comm->s_comm->unref();
}
}
{
try {
simcall_comm_wait(comm->s_comm, timeout);
- comm->s_comm->unref();
if (comm->task_received != nullptr) {
/* I am the receiver */
int finished_index = -1;
/* create the equivalent dynar with SIMIX objects */
- xbt_dynar_t s_comms = xbt_dynar_new(sizeof(smx_activity_t), nullptr);
+ xbt_dynar_t s_comms = xbt_dynar_new(sizeof(smx_activity_t), [](void*ptr){
+ intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
+ });
msg_comm_t comm;
unsigned int cursor;
xbt_dynar_foreach(comms, cursor, comm) {
- xbt_dynar_push(s_comms, &comm->s_comm);
+ intrusive_ptr_add_ref(comm->s_comm.get());
+ xbt_dynar_push_as(s_comms, simgrid::kernel::activity::ActivityImpl*, comm->s_comm.get());
}
msg_error_t status = MSG_OK;
if (comm->task_received != nullptr) {
/* I am the receiver */
(*comm->task_received)->simdata->setNotUsed();
- comm->s_comm->unref();
}
return finished_index;
*/
void MSG_comm_copy_data_from_SIMIX(smx_activity_t synchro, void* buff, size_t buff_size)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
SIMIX_comm_copy_pointer_callback(comm, buff, buff_size);
t_simdata->rate, task, sizeof(void *), nullptr, nullptr, nullptr, task, 0);
if (TRACE_is_enabled())
simcall_set_category(comm, task->category);
- t_simdata->comm = static_cast<simgrid::kernel::activity::CommImpl*>(comm);
+ t_simdata->comm = boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(comm);
simcall_comm_wait(comm, timeout);
- comm->unref();
}
catch (xbt_ex& e) {
switch (e.category) {
int MSG_task_listen_from(const char *alias)
{
simgrid::s4u::MailboxPtr mbox = simgrid::s4u::Mailbox::byName(alias);
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(mbox->front());
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(mbox->front());
if (not comm)
return -1;
typedef struct simdata_task {
~simdata_task()
{
- if (this->compute)
- this->compute->unref();
-
/* parallel tasks only */
xbt_free(this->host_list);
}
this->isused = false;
}
- simgrid::kernel::activity::ExecImpl* compute = nullptr; /* SIMIX modeling of computation */
- simgrid::kernel::activity::CommImpl* comm = nullptr; /* SIMIX modeling of communication */
+ simgrid::kernel::activity::ExecImplPtr compute = nullptr; /* SIMIX modeling of computation */
+ simgrid::kernel::activity::CommImplPtr comm = nullptr; /* SIMIX modeling of communication */
double bytes_amount = 0.0; /* Data size */
double flops_amount = 0.0; /* Computation size */
msg_process_t sender = nullptr;
*/
double MSG_task_get_remaining_communication(msg_task_t task)
{
- XBT_DEBUG("calling simcall_communication_get_remains(%p)", task->simdata->comm);
+ XBT_DEBUG("calling simcall_communication_get_remains(%p)", task->simdata->comm.get());
return task->simdata->comm->remains();
}
#include "simgrid/s4u/VirtualMachine.hpp"
#include "src/simix/ActorImpl.hpp"
#include "src/surf/HostImpl.hpp"
+#include <algorithm>
+#include <deque>
#ifndef VM_INTERFACE_HPP_
#define VM_INTERFACE_HPP_
e_surf_vm_state_t getState();
void setState(e_surf_vm_state_t state);
static std::deque<s4u::VirtualMachine*> allVms_;
+ int coreAmount() { return coreAmount_; }
bool isMigrating = false;
#include "simgrid/s4u/Mailbox.hpp"
#include "src/kernel/context/Context.hpp"
+#include <sstream>
XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_actor, "S4U actors");
XBT_INFO("pimpl_ is null");
xbt_backtrace_display_current();
}
- if (pimpl_)
- pimpl_->unref();
}
s4u::CommPtr Comm::send_init(s4u::MailboxPtr chan)
matchFunction_, cleanFunction_, copyDataFunction_,
userData_, detached_);
} else if (dstBuff_ != nullptr) { // Receiver side
+ xbt_assert(not detached_, "Receive cannot be detached");
pimpl_ = simcall_comm_irecv(receiver_, mailbox_->getImpl(), dstBuff_, &dstBuffSize_,
matchFunction_, copyDataFunction_,
userData_, rate_);
}
}
state_ = finished;
- if (pimpl_)
- pimpl_->unref();
}
void Comm::wait(double timeout) {
if (state_ == started) {
simcall_comm_wait(pimpl_, timeout);
state_ = finished;
- pimpl_->unref();
return;
}
userData_, timeout, rate_);
}
state_ = finished;
- if (pimpl_)
- pimpl_->unref();
}
void Comm::send_detached(MailboxPtr dest, void* data, int simulatedSize)
void Comm::cancel()
{
- simgrid::kernel::activity::CommImpl* commPimpl = static_cast<simgrid::kernel::activity::CommImpl*>(pimpl_);
+ simgrid::kernel::activity::CommImplPtr commPimpl =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(pimpl_);
commPimpl->cancel();
}
if(simcall_comm_test(pimpl_)){
state_ = finished;
- pimpl_->unref();
return true;
}
return false;
*/
void SIMIX_process_cleanup(smx_actor_t process)
{
- XBT_DEBUG("Cleanup process %s (%p), waiting synchro %p", process->name.c_str(), process, process->waiting_synchro);
+ XBT_DEBUG("Cleanup process %s (%p), waiting synchro %p", process->name.c_str(), process,
+ process->waiting_synchro.get());
process->finished = true;
SIMIX_process_on_exit_runall(process);
xbt_os_mutex_acquire(simix_global->mutex);
/* cancel non-blocking communications */
- smx_activity_t synchro = static_cast<smx_activity_t>(process->comms.front());
+ smx_activity_t synchro = process->comms.front();
while (not process->comms.empty()) {
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
/* make sure no one will finish the comm after this process is destroyed,
* because src_proc or dst_proc would be an invalid pointer */
if (comm->src_proc == process) {
- XBT_DEBUG("Found an unfinished send comm %p (detached = %d), state %d, src = %p, dst = %p",
- comm, comm->detached, (int)comm->state, comm->src_proc, comm->dst_proc);
+ XBT_DEBUG("Found an unfinished send comm %p (detached = %d), state %d, src = %p, dst = %p", comm.get(),
+ comm->detached, (int)comm->state, comm->src_proc, comm->dst_proc);
comm->src_proc = nullptr;
- /* I'm not supposed to destroy a detached comm from the sender side, */
- if (comm->detached)
- XBT_DEBUG("Don't destroy it since it's a detached comm and I'm the sender");
- else
- comm->unref();
} else if (comm->dst_proc == process) {
- XBT_DEBUG("Found an unfinished recv comm %p, state %d, src = %p, dst = %p",
- comm, (int)comm->state, comm->src_proc, comm->dst_proc);
+ XBT_DEBUG("Found an unfinished recv comm %p, state %d, src = %p, dst = %p", comm.get(), (int)comm->state,
+ comm->src_proc, comm->dst_proc);
comm->dst_proc = nullptr;
if (comm->detached && comm->src_proc != nullptr) {
comm->src_proc->comms.remove(comm);
}
} else {
- xbt_die("Communication synchro %p is in my list but I'm not the sender nor the receiver", synchro);
+ xbt_die("Communication synchro %p is in my list but I'm not the sender nor the receiver", synchro.get());
}
process->comms.pop_front();
- synchro = static_cast<smx_activity_t>(process->comms.front());
+ synchro = process->comms.front();
comm->cancel();
}
if (host->extension<simgrid::simix::Host>() == nullptr)
host->extension_set<simgrid::simix::Host>(new simgrid::simix::Host());
- /* Add the process to it's host process list */
+ /* Add the process to its host process list */
xbt_swag_insert(process, host->extension<simgrid::simix::Host>()->process_list);
XBT_DEBUG("Start context '%s'", process->name.c_str());
simix_global->process_list[process->pid] = process;
XBT_DEBUG("Inserting %s(%s) in the to_run list", process->cname(), host->cname());
xbt_dynar_push_as(simix_global->process_to_run, smx_actor_t, process);
+ intrusive_ptr_add_ref(process);
/* Tracing the process creation */
TRACE_msg_process_create(process->cname(), process->pid, process->host);
process->exception = nullptr;
/* destroy the blocking synchro if any */
- if (process->waiting_synchro) {
-
- simgrid::kernel::activity::ExecImpl* exec =
- dynamic_cast<simgrid::kernel::activity::ExecImpl*>(process->waiting_synchro);
- simgrid::kernel::activity::CommImpl* comm =
- dynamic_cast<simgrid::kernel::activity::CommImpl*>(process->waiting_synchro);
- simgrid::kernel::activity::SleepImpl* sleep =
- dynamic_cast<simgrid::kernel::activity::SleepImpl*>(process->waiting_synchro);
- simgrid::kernel::activity::Raw *raw = dynamic_cast<simgrid::kernel::activity::Raw*>(process->waiting_synchro);
- simgrid::kernel::activity::Io *io = dynamic_cast<simgrid::kernel::activity::Io*>(process->waiting_synchro);
+ if (process->waiting_synchro != nullptr) {
+
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::ExecImpl>(process->waiting_synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::CommImpl>(process->waiting_synchro);
+ simgrid::kernel::activity::SleepImplPtr sleep =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::SleepImpl>(process->waiting_synchro);
+ simgrid::kernel::activity::RawImplPtr raw =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::RawImpl>(process->waiting_synchro);
+ simgrid::kernel::activity::IoImplPtr io =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::IoImpl>(process->waiting_synchro);
if (exec != nullptr) {
- exec->unref();
} else if (comm != nullptr) {
process->comms.remove(process->waiting_synchro);
auto i = boost::range::find(process->waiting_synchro->simcalls, &process->simcall);
if (i != process->waiting_synchro->simcalls.end())
process->waiting_synchro->simcalls.remove(&process->simcall);
- comm->unref();
} else if (sleep != nullptr) {
SIMIX_process_sleep_destroy(process->waiting_synchro);
} else if (raw != nullptr) {
SIMIX_synchro_stop_waiting(process, &process->simcall);
- delete process->waiting_synchro;
} else if (io != nullptr) {
SIMIX_io_destroy(process->waiting_synchro);
/* cancel the blocking synchro if any */
if (process->waiting_synchro) {
- simgrid::kernel::activity::ExecImpl* exec =
- dynamic_cast<simgrid::kernel::activity::ExecImpl*>(process->waiting_synchro);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::ExecImpl>(process->waiting_synchro);
if (exec != nullptr) {
SIMIX_execution_cancel(process->waiting_synchro);
}
- simgrid::kernel::activity::CommImpl* comm =
- dynamic_cast<simgrid::kernel::activity::CommImpl*>(process->waiting_synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::CommImpl>(process->waiting_synchro);
if (comm != nullptr) {
process->comms.remove(comm);
comm->cancel();
}
- simgrid::kernel::activity::SleepImpl* sleep =
- dynamic_cast<simgrid::kernel::activity::SleepImpl*>(process->waiting_synchro);
+ simgrid::kernel::activity::SleepImplPtr sleep =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::SleepImpl>(process->waiting_synchro);
if (sleep != nullptr) {
SIMIX_process_sleep_destroy(process->waiting_synchro);
if (not xbt_dynar_member(simix_global->process_to_run, &(process)) && process != SIMIX_process_self()) {
}
}
- simgrid::kernel::activity::Raw *raw = dynamic_cast<simgrid::kernel::activity::Raw*>(process->waiting_synchro);
+ simgrid::kernel::activity::RawImplPtr raw =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::RawImpl>(process->waiting_synchro);
if (raw != nullptr) {
SIMIX_synchro_stop_waiting(process, &process->simcall);
}
- simgrid::kernel::activity::Io *io = dynamic_cast<simgrid::kernel::activity::Io*>(process->waiting_synchro);
+ simgrid::kernel::activity::IoImplPtr io =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::IoImpl>(process->waiting_synchro);
if (io != nullptr) {
SIMIX_io_destroy(process->waiting_synchro);
}
simcall->issuer->waiting_synchro = sync;
}
-static int SIMIX_process_join_finish(smx_process_exit_status_t status, smx_activity_t synchro){
+static int SIMIX_process_join_finish(smx_process_exit_status_t status, void* synchro)
+{
simgrid::kernel::activity::SleepImpl* sleep = static_cast<simgrid::kernel::activity::SleepImpl*>(synchro);
if (sleep->surf_sleep) {
sleep->surf_sleep->unref();
sleep->surf_sleep = nullptr;
}
- sleep->unref();
// intrusive_ptr_release(process); // FIXME: We are leaking here. See comment in SIMIX_process_join()
return 0;
}
smx_activity_t SIMIX_process_join(smx_actor_t issuer, smx_actor_t process, double timeout)
{
smx_activity_t res = SIMIX_process_sleep(issuer, timeout);
- static_cast<simgrid::kernel::activity::ActivityImpl*>(res)->ref();
+ intrusive_ptr_add_ref(res.get());
/* We are leaking the process here, but if we don't take the ref, we get a "use after free".
* The correct solution would be to derivate the type SynchroSleep into a SynchroProcessJoin,
* but the code is not clean enough for now for this.
* The C API should first be properly replaced with the C++ one, which is a fair amount of work.
*/
intrusive_ptr_add_ref(process);
- SIMIX_process_on_exit(process, (int_f_pvoid_pvoid_t)SIMIX_process_join_finish, res);
+ SIMIX_process_on_exit(process, (int_f_pvoid_pvoid_t)SIMIX_process_join_finish, &*res);
return res;
}
simgrid::kernel::activity::SleepImpl* synchro = new simgrid::kernel::activity::SleepImpl();
synchro->host = host;
- synchro->surf_sleep = host->pimpl_cpu->sleep(duration);
+ synchro->surf_sleep = host->pimpl_cpu->sleep(duration);
synchro->surf_sleep->setData(synchro);
XBT_DEBUG("Create sleep synchronization %p", synchro);
void SIMIX_process_sleep_destroy(smx_activity_t synchro)
{
- XBT_DEBUG("Destroy synchro %p", synchro);
- simgrid::kernel::activity::SleepImpl* sleep = static_cast<simgrid::kernel::activity::SleepImpl*>(synchro);
+ XBT_DEBUG("Destroy sleep synchro %p", synchro.get());
+ simgrid::kernel::activity::SleepImplPtr sleep =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::SleepImpl>(synchro);
if (sleep->surf_sleep) {
sleep->surf_sleep->unref();
sleep->surf_sleep = nullptr;
- sleep->unref();
}
}
* \param properties the properties of the process
* \param auto_restart either it is autorestarting or not.
*/
+extern "C"
smx_actor_t simcall_process_create(const char* name, xbt_main_func_t code, void* data, sg_host_t host, int argc,
char** argv, xbt_dict_t properties)
{
/* Refcounting */
private:
- std::atomic_int_fast32_t refcount_{1};
+ std::atomic_int_fast32_t refcount_{0};
+
public:
friend void intrusive_ptr_add_ref(ActorImpl* process)
{
XBT_PRIVATE void SIMIX_process_yield(smx_actor_t self);
XBT_PRIVATE void SIMIX_process_exception_terminate(xbt_ex_t * e);
XBT_PRIVATE void SIMIX_process_change_host(smx_actor_t process, sg_host_t dest);
-XBT_PRIVATE smx_activity_t SIMIX_process_suspend(smx_actor_t process, smx_actor_t issuer);
XBT_PRIVATE void SIMIX_process_resume(smx_actor_t process);
XBT_PRIVATE int SIMIX_process_get_PID(smx_actor_t self);
XBT_PRIVATE void SIMIX_process_set_data(smx_actor_t process, void *data);
XBT_PRIVATE smx_actor_t SIMIX_process_get_by_name(const char* name);
XBT_PRIVATE int SIMIX_process_is_suspended(smx_actor_t process);
XBT_PRIVATE xbt_dict_t SIMIX_process_get_properties(smx_actor_t process);
-XBT_PRIVATE smx_activity_t SIMIX_process_join(smx_actor_t issuer, smx_actor_t process, double timeout);
-XBT_PRIVATE smx_activity_t SIMIX_process_sleep(smx_actor_t process, double duration);
-XBT_PRIVATE void SIMIX_process_sleep_destroy(smx_activity_t synchro);
XBT_PRIVATE void SIMIX_process_auto_restart_set(smx_actor_t process, int auto_restart);
XBT_PRIVATE smx_actor_t SIMIX_process_restart(smx_actor_t process, smx_actor_t issuer);
SG_END_DECL()
+XBT_PRIVATE void SIMIX_process_sleep_destroy(smx_activity_t synchro);
+XBT_PRIVATE smx_activity_t SIMIX_process_suspend(smx_actor_t process, smx_actor_t issuer);
+XBT_PRIVATE smx_activity_t SIMIX_process_join(smx_actor_t issuer, smx_actor_t process, double timeout);
+XBT_PRIVATE smx_activity_t SIMIX_process_sleep(smx_actor_t process, double duration);
+
#endif
void simcall_comm_cancel(smx_activity_t synchro)
{
simgrid::simix::kernelImmediate([synchro] {
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
comm->cancel();
});
}
static inline smx_actor_t simcall_process_kill__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_kill__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_kill__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline int simcall_process_killall__get__reset_pid(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[0]);
}
+static inline int simcall_process_killall__getraw__reset_pid(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[0]);
+}
static inline void simcall_process_killall__set__reset_pid(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[0], arg);
}
static inline smx_actor_t simcall_process_cleanup__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_cleanup__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_cleanup__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_actor_t simcall_process_suspend__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_suspend__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_suspend__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_actor_t simcall_process_resume__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_resume__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_resume__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_actor_t simcall_process_set_host__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_set_host__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_set_host__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline sg_host_t simcall_process_set_host__get__dest(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]);
}
+static inline sg_host_t simcall_process_set_host__getraw__dest(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[1]);
+}
static inline void simcall_process_set_host__set__dest(smx_simcall_t simcall, sg_host_t arg) {
simgrid::simix::marshal<sg_host_t>(simcall->args[1], arg);
}
static inline smx_actor_t simcall_process_is_suspended__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_is_suspended__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_is_suspended__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline int simcall_process_is_suspended__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_process_is_suspended__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_process_is_suspended__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_actor_t simcall_process_join__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_join__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_join__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline double simcall_process_join__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_process_join__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_process_join__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
static inline int simcall_process_join__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_process_join__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_process_join__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline double simcall_process_sleep__get__duration(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[0]);
}
+static inline double simcall_process_sleep__getraw__duration(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[0]);
+}
static inline void simcall_process_sleep__set__duration(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[0], arg);
}
static inline int simcall_process_sleep__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_process_sleep__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_process_sleep__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline const char* simcall_execution_start__get__name(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<const char*>(simcall->args[0]);
}
+static inline const char* simcall_execution_start__getraw__name(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<const char*>(simcall->args[0]);
+}
static inline void simcall_execution_start__set__name(smx_simcall_t simcall, const char* arg) {
simgrid::simix::marshal<const char*>(simcall->args[0], arg);
}
static inline double simcall_execution_start__get__flops_amount(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_execution_start__getraw__flops_amount(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_execution_start__set__flops_amount(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
static inline double simcall_execution_start__get__priority(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[2]);
}
+static inline double simcall_execution_start__getraw__priority(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[2]);
+}
static inline void simcall_execution_start__set__priority(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[2], arg);
}
static inline double simcall_execution_start__get__bound(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[3]);
}
+static inline double simcall_execution_start__getraw__bound(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[3]);
+}
static inline void simcall_execution_start__set__bound(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[3], arg);
}
-static inline smx_activity_t simcall_execution_start__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->result);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_execution_start__get__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result);
}
-static inline void simcall_execution_start__set__result(smx_simcall_t simcall, smx_activity_t result){
- simgrid::simix::marshal<smx_activity_t>(simcall->result, result);
+static inline simgrid::kernel::activity::ActivityImpl* simcall_execution_start__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->result);
+}
+static inline void
+simcall_execution_start__set__result(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> result)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result, result);
}
static inline const char* simcall_execution_parallel_start__get__name(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<const char*>(simcall->args[0]);
}
+static inline const char* simcall_execution_parallel_start__getraw__name(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<const char*>(simcall->args[0]);
+}
static inline void simcall_execution_parallel_start__set__name(smx_simcall_t simcall, const char* arg) {
simgrid::simix::marshal<const char*>(simcall->args[0], arg);
}
static inline int simcall_execution_parallel_start__get__host_nb(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[1]);
}
+static inline int simcall_execution_parallel_start__getraw__host_nb(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[1]);
+}
static inline void simcall_execution_parallel_start__set__host_nb(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[1], arg);
}
static inline sg_host_t* simcall_execution_parallel_start__get__host_list(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t*>(simcall->args[2]);
}
+static inline sg_host_t* simcall_execution_parallel_start__getraw__host_list(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t*>(simcall->args[2]);
+}
static inline void simcall_execution_parallel_start__set__host_list(smx_simcall_t simcall, sg_host_t* arg) {
simgrid::simix::marshal<sg_host_t*>(simcall->args[2], arg);
}
static inline double* simcall_execution_parallel_start__get__flops_amount(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double*>(simcall->args[3]);
}
+static inline double* simcall_execution_parallel_start__getraw__flops_amount(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double*>(simcall->args[3]);
+}
static inline void simcall_execution_parallel_start__set__flops_amount(smx_simcall_t simcall, double* arg) {
simgrid::simix::marshal<double*>(simcall->args[3], arg);
}
static inline double* simcall_execution_parallel_start__get__bytes_amount(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double*>(simcall->args[4]);
}
+static inline double* simcall_execution_parallel_start__getraw__bytes_amount(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double*>(simcall->args[4]);
+}
static inline void simcall_execution_parallel_start__set__bytes_amount(smx_simcall_t simcall, double* arg) {
simgrid::simix::marshal<double*>(simcall->args[4], arg);
}
static inline double simcall_execution_parallel_start__get__amount(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[5]);
}
+static inline double simcall_execution_parallel_start__getraw__amount(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[5]);
+}
static inline void simcall_execution_parallel_start__set__amount(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[5], arg);
}
static inline double simcall_execution_parallel_start__get__rate(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[6]);
}
+static inline double simcall_execution_parallel_start__getraw__rate(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[6]);
+}
static inline void simcall_execution_parallel_start__set__rate(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[6], arg);
}
static inline double simcall_execution_parallel_start__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[7]);
}
+static inline double simcall_execution_parallel_start__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[7]);
+}
static inline void simcall_execution_parallel_start__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[7], arg);
}
-static inline smx_activity_t simcall_execution_parallel_start__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->result);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_execution_parallel_start__get__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result);
}
-static inline void simcall_execution_parallel_start__set__result(smx_simcall_t simcall, smx_activity_t result){
- simgrid::simix::marshal<smx_activity_t>(simcall->result, result);
+static inline simgrid::kernel::activity::ActivityImpl*
+simcall_execution_parallel_start__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->result);
}
-
-static inline smx_activity_t simcall_execution_cancel__get__execution(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
-}
-static inline void simcall_execution_cancel__set__execution(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+static inline void
+simcall_execution_parallel_start__set__result(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> result)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result, result);
}
-static inline smx_activity_t simcall_execution_set_priority__get__execution(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_execution_cancel__get__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
+}
+static inline simgrid::kernel::activity::ActivityImpl*
+simcall_execution_cancel__getraw__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
+}
+static inline void
+simcall_execution_cancel__set__execution(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
-static inline void simcall_execution_set_priority__set__execution(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_execution_set_priority__get__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
+}
+static inline simgrid::kernel::activity::ActivityImpl*
+simcall_execution_set_priority__getraw__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
+}
+static inline void
+simcall_execution_set_priority__set__execution(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
static inline double simcall_execution_set_priority__get__priority(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_execution_set_priority__getraw__priority(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_execution_set_priority__set__priority(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
-static inline smx_activity_t simcall_execution_set_bound__get__execution(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
-}
-static inline void simcall_execution_set_bound__set__execution(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_execution_set_bound__get__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
+}
+static inline simgrid::kernel::activity::ActivityImpl*
+simcall_execution_set_bound__getraw__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
+}
+static inline void
+simcall_execution_set_bound__set__execution(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
static inline double simcall_execution_set_bound__get__bound(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_execution_set_bound__getraw__bound(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_execution_set_bound__set__bound(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
-static inline smx_activity_t simcall_execution_wait__get__execution(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_execution_wait__get__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
}
-static inline void simcall_execution_wait__set__execution(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+static inline simgrid::kernel::activity::ActivityImpl* simcall_execution_wait__getraw__execution(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
+}
+static inline void
+simcall_execution_wait__set__execution(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
static inline int simcall_execution_wait__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_execution_wait__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_execution_wait__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_actor_t simcall_process_on_exit__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_on_exit__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_on_exit__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline int_f_pvoid_pvoid_t simcall_process_on_exit__get__fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int_f_pvoid_pvoid_t>(simcall->args[1]);
}
+static inline int_f_pvoid_pvoid_t simcall_process_on_exit__getraw__fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int_f_pvoid_pvoid_t>(simcall->args[1]);
+}
static inline void simcall_process_on_exit__set__fun(smx_simcall_t simcall, int_f_pvoid_pvoid_t arg) {
simgrid::simix::marshal<int_f_pvoid_pvoid_t>(simcall->args[1], arg);
}
static inline void* simcall_process_on_exit__get__data(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[2]);
}
+static inline void* simcall_process_on_exit__getraw__data(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[2]);
+}
static inline void simcall_process_on_exit__set__data(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[2], arg);
}
static inline smx_actor_t simcall_process_auto_restart_set__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_auto_restart_set__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_auto_restart_set__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline int simcall_process_auto_restart_set__get__auto_restart(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[1]);
}
+static inline int simcall_process_auto_restart_set__getraw__auto_restart(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[1]);
+}
static inline void simcall_process_auto_restart_set__set__auto_restart(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[1], arg);
}
static inline smx_actor_t simcall_process_restart__get__process(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_process_restart__getraw__process(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_process_restart__set__process(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_actor_t simcall_process_restart__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<smx_actor_t>(simcall->result);
}
+static inline smx_actor_t simcall_process_restart__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->result);
+}
static inline void simcall_process_restart__set__result(smx_simcall_t simcall, smx_actor_t result){
simgrid::simix::marshal<smx_actor_t>(simcall->result, result);
}
static inline smx_mailbox_t simcall_comm_iprobe__get__mbox(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[0]);
}
+static inline smx_mailbox_t simcall_comm_iprobe__getraw__mbox(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mailbox_t>(simcall->args[0]);
+}
static inline void simcall_comm_iprobe__set__mbox(smx_simcall_t simcall, smx_mailbox_t arg) {
simgrid::simix::marshal<smx_mailbox_t>(simcall->args[0], arg);
}
static inline int simcall_comm_iprobe__get__type(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[1]);
}
+static inline int simcall_comm_iprobe__getraw__type(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[1]);
+}
static inline void simcall_comm_iprobe__set__type(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[1], arg);
}
static inline int simcall_comm_iprobe__get__src(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[2]);
}
+static inline int simcall_comm_iprobe__getraw__src(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[2]);
+}
static inline void simcall_comm_iprobe__set__src(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[2], arg);
}
static inline int simcall_comm_iprobe__get__tag(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[3]);
}
+static inline int simcall_comm_iprobe__getraw__tag(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[3]);
+}
static inline void simcall_comm_iprobe__set__tag(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[3], arg);
}
static inline simix_match_func_t simcall_comm_iprobe__get__match_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]);
}
+static inline simix_match_func_t simcall_comm_iprobe__getraw__match_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_match_func_t>(simcall->args[4]);
+}
static inline void simcall_comm_iprobe__set__match_fun(smx_simcall_t simcall, simix_match_func_t arg) {
simgrid::simix::marshal<simix_match_func_t>(simcall->args[4], arg);
}
static inline void* simcall_comm_iprobe__get__data(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[5]);
}
+static inline void* simcall_comm_iprobe__getraw__data(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[5]);
+}
static inline void simcall_comm_iprobe__set__data(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[5], arg);
}
-static inline smx_activity_t simcall_comm_iprobe__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->result);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_comm_iprobe__get__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result);
}
-static inline void simcall_comm_iprobe__set__result(smx_simcall_t simcall, smx_activity_t result){
- simgrid::simix::marshal<smx_activity_t>(simcall->result, result);
+static inline simgrid::kernel::activity::ActivityImpl* simcall_comm_iprobe__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->result);
+}
+static inline void
+simcall_comm_iprobe__set__result(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> result)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result, result);
}
static inline smx_actor_t simcall_comm_send__get__sender(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_comm_send__getraw__sender(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_comm_send__set__sender(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_mailbox_t simcall_comm_send__get__mbox(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]);
}
+static inline smx_mailbox_t simcall_comm_send__getraw__mbox(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mailbox_t>(simcall->args[1]);
+}
static inline void simcall_comm_send__set__mbox(smx_simcall_t simcall, smx_mailbox_t arg) {
simgrid::simix::marshal<smx_mailbox_t>(simcall->args[1], arg);
}
static inline double simcall_comm_send__get__task_size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[2]);
}
+static inline double simcall_comm_send__getraw__task_size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[2]);
+}
static inline void simcall_comm_send__set__task_size(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[2], arg);
}
static inline double simcall_comm_send__get__rate(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[3]);
}
+static inline double simcall_comm_send__getraw__rate(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[3]);
+}
static inline void simcall_comm_send__set__rate(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[3], arg);
}
static inline void* simcall_comm_send__get__src_buff(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[4]);
}
+static inline void* simcall_comm_send__getraw__src_buff(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[4]);
+}
static inline void simcall_comm_send__set__src_buff(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[4], arg);
}
static inline size_t simcall_comm_send__get__src_buff_size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<size_t>(simcall->args[5]);
}
+static inline size_t simcall_comm_send__getraw__src_buff_size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<size_t>(simcall->args[5]);
+}
static inline void simcall_comm_send__set__src_buff_size(smx_simcall_t simcall, size_t arg) {
simgrid::simix::marshal<size_t>(simcall->args[5], arg);
}
static inline simix_match_func_t simcall_comm_send__get__match_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[6]);
}
+static inline simix_match_func_t simcall_comm_send__getraw__match_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_match_func_t>(simcall->args[6]);
+}
static inline void simcall_comm_send__set__match_fun(smx_simcall_t simcall, simix_match_func_t arg) {
simgrid::simix::marshal<simix_match_func_t>(simcall->args[6], arg);
}
static inline simix_copy_data_func_t simcall_comm_send__get__copy_data_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[7]);
}
+static inline simix_copy_data_func_t simcall_comm_send__getraw__copy_data_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_copy_data_func_t>(simcall->args[7]);
+}
static inline void simcall_comm_send__set__copy_data_fun(smx_simcall_t simcall, simix_copy_data_func_t arg) {
simgrid::simix::marshal<simix_copy_data_func_t>(simcall->args[7], arg);
}
static inline void* simcall_comm_send__get__data(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[8]);
}
+static inline void* simcall_comm_send__getraw__data(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[8]);
+}
static inline void simcall_comm_send__set__data(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[8], arg);
}
static inline double simcall_comm_send__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[9]);
}
+static inline double simcall_comm_send__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[9]);
+}
static inline void simcall_comm_send__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[9], arg);
}
static inline smx_actor_t simcall_comm_isend__get__sender(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_comm_isend__getraw__sender(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_comm_isend__set__sender(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_mailbox_t simcall_comm_isend__get__mbox(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]);
}
+static inline smx_mailbox_t simcall_comm_isend__getraw__mbox(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mailbox_t>(simcall->args[1]);
+}
static inline void simcall_comm_isend__set__mbox(smx_simcall_t simcall, smx_mailbox_t arg) {
simgrid::simix::marshal<smx_mailbox_t>(simcall->args[1], arg);
}
static inline double simcall_comm_isend__get__task_size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[2]);
}
+static inline double simcall_comm_isend__getraw__task_size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[2]);
+}
static inline void simcall_comm_isend__set__task_size(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[2], arg);
}
static inline double simcall_comm_isend__get__rate(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[3]);
}
+static inline double simcall_comm_isend__getraw__rate(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[3]);
+}
static inline void simcall_comm_isend__set__rate(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[3], arg);
}
static inline void* simcall_comm_isend__get__src_buff(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[4]);
}
+static inline void* simcall_comm_isend__getraw__src_buff(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[4]);
+}
static inline void simcall_comm_isend__set__src_buff(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[4], arg);
}
static inline size_t simcall_comm_isend__get__src_buff_size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<size_t>(simcall->args[5]);
}
+static inline size_t simcall_comm_isend__getraw__src_buff_size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<size_t>(simcall->args[5]);
+}
static inline void simcall_comm_isend__set__src_buff_size(smx_simcall_t simcall, size_t arg) {
simgrid::simix::marshal<size_t>(simcall->args[5], arg);
}
static inline simix_match_func_t simcall_comm_isend__get__match_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[6]);
}
+static inline simix_match_func_t simcall_comm_isend__getraw__match_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_match_func_t>(simcall->args[6]);
+}
static inline void simcall_comm_isend__set__match_fun(smx_simcall_t simcall, simix_match_func_t arg) {
simgrid::simix::marshal<simix_match_func_t>(simcall->args[6], arg);
}
static inline simix_clean_func_t simcall_comm_isend__get__clean_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_clean_func_t>(simcall->args[7]);
}
+static inline simix_clean_func_t simcall_comm_isend__getraw__clean_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_clean_func_t>(simcall->args[7]);
+}
static inline void simcall_comm_isend__set__clean_fun(smx_simcall_t simcall, simix_clean_func_t arg) {
simgrid::simix::marshal<simix_clean_func_t>(simcall->args[7], arg);
}
static inline simix_copy_data_func_t simcall_comm_isend__get__copy_data_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[8]);
}
+static inline simix_copy_data_func_t simcall_comm_isend__getraw__copy_data_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_copy_data_func_t>(simcall->args[8]);
+}
static inline void simcall_comm_isend__set__copy_data_fun(smx_simcall_t simcall, simix_copy_data_func_t arg) {
simgrid::simix::marshal<simix_copy_data_func_t>(simcall->args[8], arg);
}
static inline void* simcall_comm_isend__get__data(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[9]);
}
+static inline void* simcall_comm_isend__getraw__data(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[9]);
+}
static inline void simcall_comm_isend__set__data(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[9], arg);
}
static inline int simcall_comm_isend__get__detached(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[10]);
}
+static inline int simcall_comm_isend__getraw__detached(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[10]);
+}
static inline void simcall_comm_isend__set__detached(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[10], arg);
}
-static inline smx_activity_t simcall_comm_isend__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->result);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_comm_isend__get__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result);
}
-static inline void simcall_comm_isend__set__result(smx_simcall_t simcall, smx_activity_t result){
- simgrid::simix::marshal<smx_activity_t>(simcall->result, result);
+static inline simgrid::kernel::activity::ActivityImpl* simcall_comm_isend__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->result);
+}
+static inline void simcall_comm_isend__set__result(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> result)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result, result);
}
static inline smx_actor_t simcall_comm_recv__get__receiver(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_comm_recv__getraw__receiver(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_comm_recv__set__receiver(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_mailbox_t simcall_comm_recv__get__mbox(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]);
}
+static inline smx_mailbox_t simcall_comm_recv__getraw__mbox(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mailbox_t>(simcall->args[1]);
+}
static inline void simcall_comm_recv__set__mbox(smx_simcall_t simcall, smx_mailbox_t arg) {
simgrid::simix::marshal<smx_mailbox_t>(simcall->args[1], arg);
}
static inline void* simcall_comm_recv__get__dst_buff(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[2]);
}
+static inline void* simcall_comm_recv__getraw__dst_buff(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[2]);
+}
static inline void simcall_comm_recv__set__dst_buff(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[2], arg);
}
static inline size_t* simcall_comm_recv__get__dst_buff_size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<size_t*>(simcall->args[3]);
}
+static inline size_t* simcall_comm_recv__getraw__dst_buff_size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<size_t*>(simcall->args[3]);
+}
static inline void simcall_comm_recv__set__dst_buff_size(smx_simcall_t simcall, size_t* arg) {
simgrid::simix::marshal<size_t*>(simcall->args[3], arg);
}
static inline simix_match_func_t simcall_comm_recv__get__match_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]);
}
+static inline simix_match_func_t simcall_comm_recv__getraw__match_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_match_func_t>(simcall->args[4]);
+}
static inline void simcall_comm_recv__set__match_fun(smx_simcall_t simcall, simix_match_func_t arg) {
simgrid::simix::marshal<simix_match_func_t>(simcall->args[4], arg);
}
static inline simix_copy_data_func_t simcall_comm_recv__get__copy_data_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[5]);
}
+static inline simix_copy_data_func_t simcall_comm_recv__getraw__copy_data_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_copy_data_func_t>(simcall->args[5]);
+}
static inline void simcall_comm_recv__set__copy_data_fun(smx_simcall_t simcall, simix_copy_data_func_t arg) {
simgrid::simix::marshal<simix_copy_data_func_t>(simcall->args[5], arg);
}
static inline void* simcall_comm_recv__get__data(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[6]);
}
+static inline void* simcall_comm_recv__getraw__data(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[6]);
+}
static inline void simcall_comm_recv__set__data(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[6], arg);
}
static inline double simcall_comm_recv__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[7]);
}
+static inline double simcall_comm_recv__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[7]);
+}
static inline void simcall_comm_recv__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[7], arg);
}
static inline double simcall_comm_recv__get__rate(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[8]);
}
+static inline double simcall_comm_recv__getraw__rate(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[8]);
+}
static inline void simcall_comm_recv__set__rate(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[8], arg);
}
static inline smx_actor_t simcall_comm_irecv__get__receiver(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]);
}
+static inline smx_actor_t simcall_comm_irecv__getraw__receiver(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_actor_t>(simcall->args[0]);
+}
static inline void simcall_comm_irecv__set__receiver(smx_simcall_t simcall, smx_actor_t arg) {
simgrid::simix::marshal<smx_actor_t>(simcall->args[0], arg);
}
static inline smx_mailbox_t simcall_comm_irecv__get__mbox(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]);
}
+static inline smx_mailbox_t simcall_comm_irecv__getraw__mbox(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mailbox_t>(simcall->args[1]);
+}
static inline void simcall_comm_irecv__set__mbox(smx_simcall_t simcall, smx_mailbox_t arg) {
simgrid::simix::marshal<smx_mailbox_t>(simcall->args[1], arg);
}
static inline void* simcall_comm_irecv__get__dst_buff(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[2]);
}
+static inline void* simcall_comm_irecv__getraw__dst_buff(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[2]);
+}
static inline void simcall_comm_irecv__set__dst_buff(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[2], arg);
}
static inline size_t* simcall_comm_irecv__get__dst_buff_size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<size_t*>(simcall->args[3]);
}
+static inline size_t* simcall_comm_irecv__getraw__dst_buff_size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<size_t*>(simcall->args[3]);
+}
static inline void simcall_comm_irecv__set__dst_buff_size(smx_simcall_t simcall, size_t* arg) {
simgrid::simix::marshal<size_t*>(simcall->args[3], arg);
}
static inline simix_match_func_t simcall_comm_irecv__get__match_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]);
}
+static inline simix_match_func_t simcall_comm_irecv__getraw__match_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_match_func_t>(simcall->args[4]);
+}
static inline void simcall_comm_irecv__set__match_fun(smx_simcall_t simcall, simix_match_func_t arg) {
simgrid::simix::marshal<simix_match_func_t>(simcall->args[4], arg);
}
static inline simix_copy_data_func_t simcall_comm_irecv__get__copy_data_fun(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[5]);
}
+static inline simix_copy_data_func_t simcall_comm_irecv__getraw__copy_data_fun(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simix_copy_data_func_t>(simcall->args[5]);
+}
static inline void simcall_comm_irecv__set__copy_data_fun(smx_simcall_t simcall, simix_copy_data_func_t arg) {
simgrid::simix::marshal<simix_copy_data_func_t>(simcall->args[5], arg);
}
static inline void* simcall_comm_irecv__get__data(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<void*>(simcall->args[6]);
}
+static inline void* simcall_comm_irecv__getraw__data(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<void*>(simcall->args[6]);
+}
static inline void simcall_comm_irecv__set__data(smx_simcall_t simcall, void* arg) {
simgrid::simix::marshal<void*>(simcall->args[6], arg);
}
static inline double simcall_comm_irecv__get__rate(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[7]);
}
+static inline double simcall_comm_irecv__getraw__rate(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[7]);
+}
static inline void simcall_comm_irecv__set__rate(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[7], arg);
}
-static inline smx_activity_t simcall_comm_irecv__get__result(smx_simcall_t simcall){
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->result);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_comm_irecv__get__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result);
}
-static inline void simcall_comm_irecv__set__result(smx_simcall_t simcall, smx_activity_t result){
- simgrid::simix::marshal<smx_activity_t>(simcall->result, result);
+static inline simgrid::kernel::activity::ActivityImpl* simcall_comm_irecv__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->result);
+}
+static inline void simcall_comm_irecv__set__result(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> result)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->result, result);
}
static inline xbt_dynar_t simcall_comm_waitany__get__comms(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<xbt_dynar_t>(simcall->args[0]);
}
+static inline xbt_dynar_t simcall_comm_waitany__getraw__comms(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<xbt_dynar_t>(simcall->args[0]);
+}
static inline void simcall_comm_waitany__set__comms(smx_simcall_t simcall, xbt_dynar_t arg) {
simgrid::simix::marshal<xbt_dynar_t>(simcall->args[0], arg);
}
static inline double simcall_comm_waitany__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_comm_waitany__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_comm_waitany__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
static inline int simcall_comm_waitany__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_comm_waitany__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_comm_waitany__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
-static inline smx_activity_t simcall_comm_wait__get__comm(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_comm_wait__get__comm(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
+}
+static inline simgrid::kernel::activity::ActivityImpl* simcall_comm_wait__getraw__comm(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
}
-static inline void simcall_comm_wait__set__comm(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+static inline void simcall_comm_wait__set__comm(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
static inline double simcall_comm_wait__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_comm_wait__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_comm_wait__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
-static inline smx_activity_t simcall_comm_test__get__comm(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_comm_test__get__comm(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
+}
+static inline simgrid::kernel::activity::ActivityImpl* simcall_comm_test__getraw__comm(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
}
-static inline void simcall_comm_test__set__comm(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+static inline void simcall_comm_test__set__comm(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
static inline int simcall_comm_test__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_comm_test__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_comm_test__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
-static inline smx_activity_t* simcall_comm_testany__get__comms(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t*>(simcall->args[0]);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>*
+simcall_comm_testany__get__comms(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>*>(simcall->args[0]);
+}
+static inline simgrid::kernel::activity::ActivityImpl** simcall_comm_testany__getraw__comms(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl**>(simcall->args[0]);
}
-static inline void simcall_comm_testany__set__comms(smx_simcall_t simcall, smx_activity_t* arg) {
- simgrid::simix::marshal<smx_activity_t*>(simcall->args[0], arg);
+static inline void simcall_comm_testany__set__comms(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>* arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>*>(simcall->args[0], arg);
}
static inline size_t simcall_comm_testany__get__count(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<size_t>(simcall->args[1]);
}
+static inline size_t simcall_comm_testany__getraw__count(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<size_t>(simcall->args[1]);
+}
static inline void simcall_comm_testany__set__count(smx_simcall_t simcall, size_t arg) {
simgrid::simix::marshal<size_t>(simcall->args[1], arg);
}
static inline int simcall_comm_testany__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_comm_testany__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_comm_testany__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_mutex_t simcall_mutex_init__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<smx_mutex_t>(simcall->result);
}
+static inline smx_mutex_t simcall_mutex_init__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mutex_t>(simcall->result);
+}
static inline void simcall_mutex_init__set__result(smx_simcall_t simcall, smx_mutex_t result){
simgrid::simix::marshal<smx_mutex_t>(simcall->result, result);
}
static inline smx_mutex_t simcall_mutex_lock__get__mutex(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mutex_t>(simcall->args[0]);
}
+static inline smx_mutex_t simcall_mutex_lock__getraw__mutex(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mutex_t>(simcall->args[0]);
+}
static inline void simcall_mutex_lock__set__mutex(smx_simcall_t simcall, smx_mutex_t arg) {
simgrid::simix::marshal<smx_mutex_t>(simcall->args[0], arg);
}
static inline smx_mutex_t simcall_mutex_trylock__get__mutex(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mutex_t>(simcall->args[0]);
}
+static inline smx_mutex_t simcall_mutex_trylock__getraw__mutex(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mutex_t>(simcall->args[0]);
+}
static inline void simcall_mutex_trylock__set__mutex(smx_simcall_t simcall, smx_mutex_t arg) {
simgrid::simix::marshal<smx_mutex_t>(simcall->args[0], arg);
}
static inline int simcall_mutex_trylock__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_mutex_trylock__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_mutex_trylock__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_mutex_t simcall_mutex_unlock__get__mutex(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mutex_t>(simcall->args[0]);
}
+static inline smx_mutex_t simcall_mutex_unlock__getraw__mutex(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mutex_t>(simcall->args[0]);
+}
static inline void simcall_mutex_unlock__set__mutex(smx_simcall_t simcall, smx_mutex_t arg) {
simgrid::simix::marshal<smx_mutex_t>(simcall->args[0], arg);
}
static inline smx_cond_t simcall_cond_init__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<smx_cond_t>(simcall->result);
}
+static inline smx_cond_t simcall_cond_init__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_cond_t>(simcall->result);
+}
static inline void simcall_cond_init__set__result(smx_simcall_t simcall, smx_cond_t result){
simgrid::simix::marshal<smx_cond_t>(simcall->result, result);
}
static inline smx_cond_t simcall_cond_signal__get__cond(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_cond_t>(simcall->args[0]);
}
+static inline smx_cond_t simcall_cond_signal__getraw__cond(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_cond_t>(simcall->args[0]);
+}
static inline void simcall_cond_signal__set__cond(smx_simcall_t simcall, smx_cond_t arg) {
simgrid::simix::marshal<smx_cond_t>(simcall->args[0], arg);
}
static inline smx_cond_t simcall_cond_wait__get__cond(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_cond_t>(simcall->args[0]);
}
+static inline smx_cond_t simcall_cond_wait__getraw__cond(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_cond_t>(simcall->args[0]);
+}
static inline void simcall_cond_wait__set__cond(smx_simcall_t simcall, smx_cond_t arg) {
simgrid::simix::marshal<smx_cond_t>(simcall->args[0], arg);
}
static inline smx_mutex_t simcall_cond_wait__get__mutex(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mutex_t>(simcall->args[1]);
}
+static inline smx_mutex_t simcall_cond_wait__getraw__mutex(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mutex_t>(simcall->args[1]);
+}
static inline void simcall_cond_wait__set__mutex(smx_simcall_t simcall, smx_mutex_t arg) {
simgrid::simix::marshal<smx_mutex_t>(simcall->args[1], arg);
}
static inline smx_cond_t simcall_cond_wait_timeout__get__cond(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_cond_t>(simcall->args[0]);
}
+static inline smx_cond_t simcall_cond_wait_timeout__getraw__cond(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_cond_t>(simcall->args[0]);
+}
static inline void simcall_cond_wait_timeout__set__cond(smx_simcall_t simcall, smx_cond_t arg) {
simgrid::simix::marshal<smx_cond_t>(simcall->args[0], arg);
}
static inline smx_mutex_t simcall_cond_wait_timeout__get__mutex(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_mutex_t>(simcall->args[1]);
}
+static inline smx_mutex_t simcall_cond_wait_timeout__getraw__mutex(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_mutex_t>(simcall->args[1]);
+}
static inline void simcall_cond_wait_timeout__set__mutex(smx_simcall_t simcall, smx_mutex_t arg) {
simgrid::simix::marshal<smx_mutex_t>(simcall->args[1], arg);
}
static inline double simcall_cond_wait_timeout__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[2]);
}
+static inline double simcall_cond_wait_timeout__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[2]);
+}
static inline void simcall_cond_wait_timeout__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[2], arg);
}
static inline smx_cond_t simcall_cond_broadcast__get__cond(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_cond_t>(simcall->args[0]);
}
+static inline smx_cond_t simcall_cond_broadcast__getraw__cond(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_cond_t>(simcall->args[0]);
+}
static inline void simcall_cond_broadcast__set__cond(smx_simcall_t simcall, smx_cond_t arg) {
simgrid::simix::marshal<smx_cond_t>(simcall->args[0], arg);
}
static inline unsigned int simcall_sem_init__get__capacity(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<unsigned int>(simcall->args[0]);
}
+static inline unsigned int simcall_sem_init__getraw__capacity(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<unsigned int>(simcall->args[0]);
+}
static inline void simcall_sem_init__set__capacity(smx_simcall_t simcall, unsigned int arg) {
simgrid::simix::marshal<unsigned int>(simcall->args[0], arg);
}
static inline smx_sem_t simcall_sem_init__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<smx_sem_t>(simcall->result);
}
+static inline smx_sem_t simcall_sem_init__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_sem_t>(simcall->result);
+}
static inline void simcall_sem_init__set__result(smx_simcall_t simcall, smx_sem_t result){
simgrid::simix::marshal<smx_sem_t>(simcall->result, result);
}
static inline smx_sem_t simcall_sem_release__get__sem(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_sem_t>(simcall->args[0]);
}
+static inline smx_sem_t simcall_sem_release__getraw__sem(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_sem_t>(simcall->args[0]);
+}
static inline void simcall_sem_release__set__sem(smx_simcall_t simcall, smx_sem_t arg) {
simgrid::simix::marshal<smx_sem_t>(simcall->args[0], arg);
}
static inline smx_sem_t simcall_sem_would_block__get__sem(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_sem_t>(simcall->args[0]);
}
+static inline smx_sem_t simcall_sem_would_block__getraw__sem(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_sem_t>(simcall->args[0]);
+}
static inline void simcall_sem_would_block__set__sem(smx_simcall_t simcall, smx_sem_t arg) {
simgrid::simix::marshal<smx_sem_t>(simcall->args[0], arg);
}
static inline int simcall_sem_would_block__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_sem_would_block__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_sem_would_block__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_sem_t simcall_sem_acquire__get__sem(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_sem_t>(simcall->args[0]);
}
+static inline smx_sem_t simcall_sem_acquire__getraw__sem(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_sem_t>(simcall->args[0]);
+}
static inline void simcall_sem_acquire__set__sem(smx_simcall_t simcall, smx_sem_t arg) {
simgrid::simix::marshal<smx_sem_t>(simcall->args[0], arg);
}
static inline smx_sem_t simcall_sem_acquire_timeout__get__sem(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_sem_t>(simcall->args[0]);
}
+static inline smx_sem_t simcall_sem_acquire_timeout__getraw__sem(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_sem_t>(simcall->args[0]);
+}
static inline void simcall_sem_acquire_timeout__set__sem(smx_simcall_t simcall, smx_sem_t arg) {
simgrid::simix::marshal<smx_sem_t>(simcall->args[0], arg);
}
static inline double simcall_sem_acquire_timeout__get__timeout(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<double>(simcall->args[1]);
}
+static inline double simcall_sem_acquire_timeout__getraw__timeout(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<double>(simcall->args[1]);
+}
static inline void simcall_sem_acquire_timeout__set__timeout(smx_simcall_t simcall, double arg) {
simgrid::simix::marshal<double>(simcall->args[1], arg);
}
static inline smx_sem_t simcall_sem_get_capacity__get__sem(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_sem_t>(simcall->args[0]);
}
+static inline smx_sem_t simcall_sem_get_capacity__getraw__sem(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_sem_t>(simcall->args[0]);
+}
static inline void simcall_sem_get_capacity__set__sem(smx_simcall_t simcall, smx_sem_t arg) {
simgrid::simix::marshal<smx_sem_t>(simcall->args[0], arg);
}
static inline int simcall_sem_get_capacity__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_sem_get_capacity__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_sem_get_capacity__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_file_t simcall_file_read__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_read__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_read__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_size_t simcall_file_read__get__size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_size_t>(simcall->args[1]);
}
+static inline sg_size_t simcall_file_read__getraw__size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_size_t>(simcall->args[1]);
+}
static inline void simcall_file_read__set__size(smx_simcall_t simcall, sg_size_t arg) {
simgrid::simix::marshal<sg_size_t>(simcall->args[1], arg);
}
static inline sg_host_t simcall_file_read__get__host(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t>(simcall->args[2]);
}
+static inline sg_host_t simcall_file_read__getraw__host(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[2]);
+}
static inline void simcall_file_read__set__host(smx_simcall_t simcall, sg_host_t arg) {
simgrid::simix::marshal<sg_host_t>(simcall->args[2], arg);
}
static inline sg_size_t simcall_file_read__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<sg_size_t>(simcall->result);
}
+static inline sg_size_t simcall_file_read__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_size_t>(simcall->result);
+}
static inline void simcall_file_read__set__result(smx_simcall_t simcall, sg_size_t result){
simgrid::simix::marshal<sg_size_t>(simcall->result, result);
}
static inline smx_file_t simcall_file_write__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_write__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_write__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_size_t simcall_file_write__get__size(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_size_t>(simcall->args[1]);
}
+static inline sg_size_t simcall_file_write__getraw__size(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_size_t>(simcall->args[1]);
+}
static inline void simcall_file_write__set__size(smx_simcall_t simcall, sg_size_t arg) {
simgrid::simix::marshal<sg_size_t>(simcall->args[1], arg);
}
static inline sg_host_t simcall_file_write__get__host(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t>(simcall->args[2]);
}
+static inline sg_host_t simcall_file_write__getraw__host(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[2]);
+}
static inline void simcall_file_write__set__host(smx_simcall_t simcall, sg_host_t arg) {
simgrid::simix::marshal<sg_host_t>(simcall->args[2], arg);
}
static inline sg_size_t simcall_file_write__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<sg_size_t>(simcall->result);
}
+static inline sg_size_t simcall_file_write__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_size_t>(simcall->result);
+}
static inline void simcall_file_write__set__result(smx_simcall_t simcall, sg_size_t result){
simgrid::simix::marshal<sg_size_t>(simcall->result, result);
}
static inline const char* simcall_file_open__get__fullpath(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<const char*>(simcall->args[0]);
}
+static inline const char* simcall_file_open__getraw__fullpath(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<const char*>(simcall->args[0]);
+}
static inline void simcall_file_open__set__fullpath(smx_simcall_t simcall, const char* arg) {
simgrid::simix::marshal<const char*>(simcall->args[0], arg);
}
static inline sg_host_t simcall_file_open__get__host(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]);
}
+static inline sg_host_t simcall_file_open__getraw__host(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[1]);
+}
static inline void simcall_file_open__set__host(smx_simcall_t simcall, sg_host_t arg) {
simgrid::simix::marshal<sg_host_t>(simcall->args[1], arg);
}
static inline smx_file_t simcall_file_open__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<smx_file_t>(simcall->result);
}
+static inline smx_file_t simcall_file_open__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->result);
+}
static inline void simcall_file_open__set__result(smx_simcall_t simcall, smx_file_t result){
simgrid::simix::marshal<smx_file_t>(simcall->result, result);
}
static inline smx_file_t simcall_file_close__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_close__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_close__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_host_t simcall_file_close__get__host(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]);
}
+static inline sg_host_t simcall_file_close__getraw__host(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[1]);
+}
static inline void simcall_file_close__set__host(smx_simcall_t simcall, sg_host_t arg) {
simgrid::simix::marshal<sg_host_t>(simcall->args[1], arg);
}
static inline int simcall_file_close__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_file_close__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_file_close__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_file_t simcall_file_unlink__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_unlink__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_unlink__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_host_t simcall_file_unlink__get__host(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_host_t>(simcall->args[1]);
}
+static inline sg_host_t simcall_file_unlink__getraw__host(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_host_t>(simcall->args[1]);
+}
static inline void simcall_file_unlink__set__host(smx_simcall_t simcall, sg_host_t arg) {
simgrid::simix::marshal<sg_host_t>(simcall->args[1], arg);
}
static inline int simcall_file_unlink__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_file_unlink__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_file_unlink__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_file_t simcall_file_get_size__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_get_size__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_get_size__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_size_t simcall_file_get_size__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<sg_size_t>(simcall->result);
}
+static inline sg_size_t simcall_file_get_size__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_size_t>(simcall->result);
+}
static inline void simcall_file_get_size__set__result(smx_simcall_t simcall, sg_size_t result){
simgrid::simix::marshal<sg_size_t>(simcall->result, result);
}
static inline smx_file_t simcall_file_tell__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_tell__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_tell__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_size_t simcall_file_tell__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<sg_size_t>(simcall->result);
}
+static inline sg_size_t simcall_file_tell__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_size_t>(simcall->result);
+}
static inline void simcall_file_tell__set__result(smx_simcall_t simcall, sg_size_t result){
simgrid::simix::marshal<sg_size_t>(simcall->result, result);
}
static inline smx_file_t simcall_file_seek__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_seek__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_seek__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline sg_offset_t simcall_file_seek__get__offset(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<sg_offset_t>(simcall->args[1]);
}
+static inline sg_offset_t simcall_file_seek__getraw__offset(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<sg_offset_t>(simcall->args[1]);
+}
static inline void simcall_file_seek__set__offset(smx_simcall_t simcall, sg_offset_t arg) {
simgrid::simix::marshal<sg_offset_t>(simcall->args[1], arg);
}
static inline int simcall_file_seek__get__origin(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[2]);
}
+static inline int simcall_file_seek__getraw__origin(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[2]);
+}
static inline void simcall_file_seek__set__origin(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[2], arg);
}
static inline int simcall_file_seek__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_file_seek__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_file_seek__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline smx_file_t simcall_file_get_info__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_get_info__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_get_info__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline xbt_dynar_t simcall_file_get_info__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<xbt_dynar_t>(simcall->result);
}
+static inline xbt_dynar_t simcall_file_get_info__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<xbt_dynar_t>(simcall->result);
+}
static inline void simcall_file_get_info__set__result(smx_simcall_t simcall, xbt_dynar_t result){
simgrid::simix::marshal<xbt_dynar_t>(simcall->result, result);
}
static inline smx_file_t simcall_file_move__get__fd(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<smx_file_t>(simcall->args[0]);
}
+static inline smx_file_t simcall_file_move__getraw__fd(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<smx_file_t>(simcall->args[0]);
+}
static inline void simcall_file_move__set__fd(smx_simcall_t simcall, smx_file_t arg) {
simgrid::simix::marshal<smx_file_t>(simcall->args[0], arg);
}
static inline const char* simcall_file_move__get__fullpath(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<const char*>(simcall->args[1]);
}
+static inline const char* simcall_file_move__getraw__fullpath(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<const char*>(simcall->args[1]);
+}
static inline void simcall_file_move__set__fullpath(smx_simcall_t simcall, const char* arg) {
simgrid::simix::marshal<const char*>(simcall->args[1], arg);
}
static inline int simcall_file_move__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_file_move__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_file_move__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
static inline int simcall_mc_random__get__min(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[0]);
}
+static inline int simcall_mc_random__getraw__min(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[0]);
+}
static inline void simcall_mc_random__set__min(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[0], arg);
}
static inline int simcall_mc_random__get__max(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<int>(simcall->args[1]);
}
+static inline int simcall_mc_random__getraw__max(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->args[1]);
+}
static inline void simcall_mc_random__set__max(smx_simcall_t simcall, int arg) {
simgrid::simix::marshal<int>(simcall->args[1], arg);
}
static inline int simcall_mc_random__get__result(smx_simcall_t simcall){
return simgrid::simix::unmarshal<int>(simcall->result);
}
+static inline int simcall_mc_random__getraw__result(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<int>(simcall->result);
+}
static inline void simcall_mc_random__set__result(smx_simcall_t simcall, int result){
simgrid::simix::marshal<int>(simcall->result, result);
}
-static inline smx_activity_t simcall_set_category__get__synchro(smx_simcall_t simcall) {
- return simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]);
+static inline boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_set_category__get__synchro(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]);
}
-static inline void simcall_set_category__set__synchro(smx_simcall_t simcall, smx_activity_t arg) {
- simgrid::simix::marshal<smx_activity_t>(simcall->args[0], arg);
+static inline simgrid::kernel::activity::ActivityImpl* simcall_set_category__getraw__synchro(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<simgrid::kernel::activity::ActivityImpl*>(simcall->args[0]);
+}
+static inline void simcall_set_category__set__synchro(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> arg)
+{
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0], arg);
}
static inline const char* simcall_set_category__get__category(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<const char*>(simcall->args[1]);
}
+static inline const char* simcall_set_category__getraw__category(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<const char*>(simcall->args[1]);
+}
static inline void simcall_set_category__set__category(smx_simcall_t simcall, const char* arg) {
simgrid::simix::marshal<const char*>(simcall->args[1], arg);
}
static inline std::function<void()> const* simcall_run_kernel__get__code(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<std::function<void()> const*>(simcall->args[0]);
}
+static inline std::function<void()> const* simcall_run_kernel__getraw__code(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<std::function<void()> const*>(simcall->args[0]);
+}
static inline void simcall_run_kernel__set__code(smx_simcall_t simcall, std::function<void()> const* arg) {
simgrid::simix::marshal<std::function<void()> const*>(simcall->args[0], arg);
}
static inline std::function<void()> const* simcall_run_blocking__get__code(smx_simcall_t simcall) {
return simgrid::simix::unmarshal<std::function<void()> const*>(simcall->args[0]);
}
+static inline std::function<void()> const* simcall_run_blocking__getraw__code(smx_simcall_t simcall)
+{
+ return simgrid::simix::unmarshal_raw<std::function<void()> const*>(simcall->args[0]);
+}
static inline void simcall_run_blocking__set__code(smx_simcall_t simcall, std::function<void()> const* arg) {
simgrid::simix::marshal<std::function<void()> const*>(simcall->args[0], arg);
}
XBT_PRIVATE void simcall_HANDLER_process_set_host(smx_simcall_t simcall, smx_actor_t process, sg_host_t dest);
XBT_PRIVATE void simcall_HANDLER_process_join(smx_simcall_t simcall, smx_actor_t process, double timeout);
XBT_PRIVATE void simcall_HANDLER_process_sleep(smx_simcall_t simcall, double duration);
-XBT_PRIVATE smx_activity_t simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, double priority, double bound);
-XBT_PRIVATE void simcall_HANDLER_execution_wait(smx_simcall_t simcall, smx_activity_t execution);
+XBT_PRIVATE boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_HANDLER_execution_start(smx_simcall_t simcall, const char* name, double flops_amount, double priority,
+ double bound);
+XBT_PRIVATE void
+simcall_HANDLER_execution_wait(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution);
XBT_PRIVATE smx_actor_t simcall_HANDLER_process_restart(smx_simcall_t simcall, smx_actor_t process);
-XBT_PRIVATE smx_activity_t simcall_HANDLER_comm_iprobe(smx_simcall_t simcall, smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data);
+XBT_PRIVATE boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_HANDLER_comm_iprobe(smx_simcall_t simcall, smx_mailbox_t mbox, int type, int src, int tag,
+ simix_match_func_t match_fun, void* data);
XBT_PRIVATE void simcall_HANDLER_comm_send(smx_simcall_t simcall, smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout);
-XBT_PRIVATE smx_activity_t simcall_HANDLER_comm_isend(smx_simcall_t simcall, smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data, int detached);
+XBT_PRIVATE boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_HANDLER_comm_isend(smx_simcall_t simcall, smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate,
+ void* src_buff, size_t src_buff_size, simix_match_func_t match_fun,
+ simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data,
+ int detached);
XBT_PRIVATE void simcall_HANDLER_comm_recv(smx_simcall_t simcall, smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout, double rate);
-XBT_PRIVATE smx_activity_t simcall_HANDLER_comm_irecv(smx_simcall_t simcall, smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate);
+XBT_PRIVATE boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+simcall_HANDLER_comm_irecv(smx_simcall_t simcall, smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff,
+ size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun,
+ void* data, double rate);
XBT_PRIVATE void simcall_HANDLER_comm_waitany(smx_simcall_t simcall, xbt_dynar_t comms, double timeout);
-XBT_PRIVATE void simcall_HANDLER_comm_wait(smx_simcall_t simcall, smx_activity_t comm, double timeout);
-XBT_PRIVATE void simcall_HANDLER_comm_test(smx_simcall_t simcall, smx_activity_t comm);
-XBT_PRIVATE void simcall_HANDLER_comm_testany(smx_simcall_t simcall, smx_activity_t* comms, size_t count);
+XBT_PRIVATE void simcall_HANDLER_comm_wait(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm,
+ double timeout);
+XBT_PRIVATE void simcall_HANDLER_comm_test(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm);
+XBT_PRIVATE void simcall_HANDLER_comm_testany(smx_simcall_t simcall,
+ boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>* comms,
+ size_t count);
XBT_PRIVATE smx_mutex_t simcall_HANDLER_mutex_init(smx_simcall_t simcall);
XBT_PRIVATE void simcall_HANDLER_mutex_lock(smx_simcall_t simcall, smx_mutex_t mutex);
XBT_PRIVATE int simcall_HANDLER_mutex_trylock(smx_simcall_t simcall, smx_mutex_t mutex);
return simcall<int, double>(SIMCALL_PROCESS_SLEEP, duration);
}
-inline static smx_activity_t simcall_BODY_execution_start(const char* name, double flops_amount, double priority, double bound) {
+ inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+ simcall_BODY_execution_start(const char* name, double flops_amount, double priority, double bound)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_execution_start(&SIMIX_process_self()->simcall, name, flops_amount, priority, bound);
- return simcall<smx_activity_t, const char*, double, double, double>(SIMCALL_EXECUTION_START, name, flops_amount, priority, bound);
+ return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, const char*, double, double, double>(
+ SIMCALL_EXECUTION_START, name, flops_amount, priority, bound);
}
-inline static smx_activity_t simcall_BODY_execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate, double timeout) {
+ inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+ simcall_BODY_execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount,
+ double* bytes_amount, double amount, double rate, double timeout)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) SIMIX_execution_parallel_start(name, host_nb, host_list, flops_amount, bytes_amount, amount, rate, timeout);
- return simcall<smx_activity_t, const char*, int, sg_host_t*, double*, double*, double, double, double>(SIMCALL_EXECUTION_PARALLEL_START, name, host_nb, host_list, flops_amount, bytes_amount, amount, rate, timeout);
+ return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, const char*, int, sg_host_t*, double*,
+ double*, double, double, double>(SIMCALL_EXECUTION_PARALLEL_START, name, host_nb, host_list,
+ flops_amount, bytes_amount, amount, rate, timeout);
}
-inline static void simcall_BODY_execution_cancel(smx_activity_t execution) {
+ inline static void
+ simcall_BODY_execution_cancel(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) SIMIX_execution_cancel(execution);
- return simcall<void, smx_activity_t>(SIMCALL_EXECUTION_CANCEL, execution);
+ return simcall<void, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(SIMCALL_EXECUTION_CANCEL,
+ execution);
}
-inline static void simcall_BODY_execution_set_priority(smx_activity_t execution, double priority) {
+ inline static void
+ simcall_BODY_execution_set_priority(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution,
+ double priority)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) SIMIX_execution_set_priority(execution, priority);
- return simcall<void, smx_activity_t, double>(SIMCALL_EXECUTION_SET_PRIORITY, execution, priority);
+ return simcall<void, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, double>(
+ SIMCALL_EXECUTION_SET_PRIORITY, execution, priority);
}
-inline static void simcall_BODY_execution_set_bound(smx_activity_t execution, double bound) {
+ inline static void
+ simcall_BODY_execution_set_bound(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution,
+ double bound)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) SIMIX_execution_set_bound(execution, bound);
- return simcall<void, smx_activity_t, double>(SIMCALL_EXECUTION_SET_BOUND, execution, bound);
+ return simcall<void, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, double>(
+ SIMCALL_EXECUTION_SET_BOUND, execution, bound);
}
-inline static int simcall_BODY_execution_wait(smx_activity_t execution) {
+ inline static int simcall_BODY_execution_wait(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_execution_wait(&SIMIX_process_self()->simcall, execution);
- return simcall<int, smx_activity_t>(SIMCALL_EXECUTION_WAIT, execution);
+ return simcall<int, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(SIMCALL_EXECUTION_WAIT,
+ execution);
}
inline static void simcall_BODY_process_on_exit(smx_actor_t process, int_f_pvoid_pvoid_t fun, void* data) {
return simcall<smx_actor_t, smx_actor_t>(SIMCALL_PROCESS_RESTART, process);
}
-inline static smx_activity_t simcall_BODY_comm_iprobe(smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data) {
+ inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+ simcall_BODY_comm_iprobe(smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_comm_iprobe(&SIMIX_process_self()->simcall, mbox, type, src, tag, match_fun, data);
- return simcall<smx_activity_t, smx_mailbox_t, int, int, int, simix_match_func_t, void*>(SIMCALL_COMM_IPROBE, mbox, type, src, tag, match_fun, data);
+ return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, smx_mailbox_t, int, int, int,
+ simix_match_func_t, void*>(SIMCALL_COMM_IPROBE, mbox, type, src, tag, match_fun, data);
}
inline static void simcall_BODY_comm_send(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout) {
return simcall<void, smx_actor_t, smx_mailbox_t, double, double, void*, size_t, simix_match_func_t, simix_copy_data_func_t, void*, double>(SIMCALL_COMM_SEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, copy_data_fun, data, timeout);
}
-inline static smx_activity_t simcall_BODY_comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data, int detached) {
+ inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+ simcall_BODY_comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff,
+ size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun,
+ simix_copy_data_func_t copy_data_fun, void* data, int detached)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_comm_isend(&SIMIX_process_self()->simcall, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun, data, detached);
- return simcall<smx_activity_t, smx_actor_t, smx_mailbox_t, double, double, void*, size_t, simix_match_func_t, simix_clean_func_t, simix_copy_data_func_t, void*, int>(SIMCALL_COMM_ISEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun, data, detached);
+ return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, smx_actor_t, smx_mailbox_t, double,
+ double, void*, size_t, simix_match_func_t, simix_clean_func_t, simix_copy_data_func_t, void*, int>(
+ SIMCALL_COMM_ISEND, sender, mbox, task_size, rate, src_buff, src_buff_size, match_fun, clean_fun, copy_data_fun,
+ data, detached);
}
inline static void simcall_BODY_comm_recv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout, double rate) {
return simcall<void, smx_actor_t, smx_mailbox_t, void*, size_t*, simix_match_func_t, simix_copy_data_func_t, void*, double, double>(SIMCALL_COMM_RECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, timeout, rate);
}
-inline static smx_activity_t simcall_BODY_comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate) {
+ inline static boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>
+ simcall_BODY_comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size,
+ simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_comm_irecv(&SIMIX_process_self()->simcall, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate);
- return simcall<smx_activity_t, smx_actor_t, smx_mailbox_t, void*, size_t*, simix_match_func_t, simix_copy_data_func_t, void*, double>(SIMCALL_COMM_IRECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate);
+ return simcall<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, smx_actor_t, smx_mailbox_t, void*,
+ size_t*, simix_match_func_t, simix_copy_data_func_t, void*, double>(
+ SIMCALL_COMM_IRECV, receiver, mbox, dst_buff, dst_buff_size, match_fun, copy_data_fun, data, rate);
}
inline static int simcall_BODY_comm_waitany(xbt_dynar_t comms, double timeout) {
return simcall<int, xbt_dynar_t, double>(SIMCALL_COMM_WAITANY, comms, timeout);
}
-inline static void simcall_BODY_comm_wait(smx_activity_t comm, double timeout) {
+ inline static void simcall_BODY_comm_wait(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm,
+ double timeout)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_comm_wait(&SIMIX_process_self()->simcall, comm, timeout);
- return simcall<void, smx_activity_t, double>(SIMCALL_COMM_WAIT, comm, timeout);
+ return simcall<void, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, double>(SIMCALL_COMM_WAIT, comm,
+ timeout);
}
-inline static int simcall_BODY_comm_test(smx_activity_t comm) {
+ inline static int simcall_BODY_comm_test(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_comm_test(&SIMIX_process_self()->simcall, comm);
- return simcall<int, smx_activity_t>(SIMCALL_COMM_TEST, comm);
+ return simcall<int, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(SIMCALL_COMM_TEST, comm);
}
-inline static int simcall_BODY_comm_testany(smx_activity_t* comms, size_t count) {
+ inline static int simcall_BODY_comm_testany(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>* comms,
+ size_t count)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) simcall_HANDLER_comm_testany(&SIMIX_process_self()->simcall, comms, count);
- return simcall<int, smx_activity_t*, size_t>(SIMCALL_COMM_TESTANY, comms, count);
+ return simcall<int, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>*, size_t>(SIMCALL_COMM_TESTANY,
+ comms, count);
}
inline static smx_mutex_t simcall_BODY_mutex_init() {
return simcall<int, int, int>(SIMCALL_MC_RANDOM, min, max);
}
-inline static void simcall_BODY_set_category(smx_activity_t synchro, const char* category) {
+ inline static void simcall_BODY_set_category(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> synchro,
+ const char* category)
+ {
/* Go to that function to follow the code flow through the simcall barrier */
if (0) SIMIX_set_category(synchro, category);
- return simcall<void, smx_activity_t, const char*>(SIMCALL_SET_CATEGORY, synchro, category);
+ return simcall<void, boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>, const char*>(
+ SIMCALL_SET_CATEGORY, synchro, category);
}
inline static void simcall_BODY_run_kernel(std::function<void()> const* code) {
break;
case SIMCALL_EXECUTION_START:
- simgrid::simix::marshal<smx_activity_t>(simcall->result, simcall_HANDLER_execution_start(simcall, simgrid::simix::unmarshal<const char*>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]), simgrid::simix::unmarshal<double>(simcall->args[2]), simgrid::simix::unmarshal<double>(simcall->args[3])));
- SIMIX_simcall_answer(simcall);
- break;
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(
+ simcall->result,
+ simcall_HANDLER_execution_start(simcall, simgrid::simix::unmarshal<const char*>(simcall->args[0]),
+ simgrid::simix::unmarshal<double>(simcall->args[1]),
+ simgrid::simix::unmarshal<double>(simcall->args[2]),
+ simgrid::simix::unmarshal<double>(simcall->args[3])));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_EXECUTION_PARALLEL_START:
- simgrid::simix::marshal<smx_activity_t>(simcall->result, SIMIX_execution_parallel_start(simgrid::simix::unmarshal<const char*>(simcall->args[0]), simgrid::simix::unmarshal<int>(simcall->args[1]), simgrid::simix::unmarshal<sg_host_t*>(simcall->args[2]), simgrid::simix::unmarshal<double*>(simcall->args[3]), simgrid::simix::unmarshal<double*>(simcall->args[4]), simgrid::simix::unmarshal<double>(simcall->args[5]), simgrid::simix::unmarshal<double>(simcall->args[6]), simgrid::simix::unmarshal<double>(simcall->args[7])));
- SIMIX_simcall_answer(simcall);
- break;
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(
+ simcall->result,
+ SIMIX_execution_parallel_start(
+ simgrid::simix::unmarshal<const char*>(simcall->args[0]), simgrid::simix::unmarshal<int>(simcall->args[1]),
+ simgrid::simix::unmarshal<sg_host_t*>(simcall->args[2]), simgrid::simix::unmarshal<double*>(simcall->args[3]),
+ simgrid::simix::unmarshal<double*>(simcall->args[4]), simgrid::simix::unmarshal<double>(simcall->args[5]),
+ simgrid::simix::unmarshal<double>(simcall->args[6]), simgrid::simix::unmarshal<double>(simcall->args[7])));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_EXECUTION_CANCEL:
- SIMIX_execution_cancel(simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]));
- SIMIX_simcall_answer(simcall);
- break;
+ SIMIX_execution_cancel(
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_EXECUTION_SET_PRIORITY:
- SIMIX_execution_set_priority(simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]));
- SIMIX_simcall_answer(simcall);
- break;
+ SIMIX_execution_set_priority(
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]),
+ simgrid::simix::unmarshal<double>(simcall->args[1]));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_EXECUTION_SET_BOUND:
- SIMIX_execution_set_bound(simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]));
- SIMIX_simcall_answer(simcall);
- break;
+ SIMIX_execution_set_bound(
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]),
+ simgrid::simix::unmarshal<double>(simcall->args[1]));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_EXECUTION_WAIT:
- simcall_HANDLER_execution_wait(simcall, simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]));
- break;
+ simcall_HANDLER_execution_wait(
+ simcall,
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]));
+ break;
case SIMCALL_PROCESS_ON_EXIT:
SIMIX_process_on_exit(simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]), simgrid::simix::unmarshal<int_f_pvoid_pvoid_t>(simcall->args[1]), simgrid::simix::unmarshal<void*>(simcall->args[2]));
break;
case SIMCALL_COMM_IPROBE:
- simgrid::simix::marshal<smx_activity_t>(simcall->result, simcall_HANDLER_comm_iprobe(simcall, simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[0]), simgrid::simix::unmarshal<int>(simcall->args[1]), simgrid::simix::unmarshal<int>(simcall->args[2]), simgrid::simix::unmarshal<int>(simcall->args[3]), simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]), simgrid::simix::unmarshal<void*>(simcall->args[5])));
- SIMIX_simcall_answer(simcall);
- break;
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(
+ simcall->result, simcall_HANDLER_comm_iprobe(simcall, simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[0]),
+ simgrid::simix::unmarshal<int>(simcall->args[1]),
+ simgrid::simix::unmarshal<int>(simcall->args[2]),
+ simgrid::simix::unmarshal<int>(simcall->args[3]),
+ simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]),
+ simgrid::simix::unmarshal<void*>(simcall->args[5])));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_COMM_SEND:
simcall_HANDLER_comm_send(simcall, simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]), simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]), simgrid::simix::unmarshal<double>(simcall->args[2]), simgrid::simix::unmarshal<double>(simcall->args[3]), simgrid::simix::unmarshal<void*>(simcall->args[4]), simgrid::simix::unmarshal<size_t>(simcall->args[5]), simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[6]), simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[7]), simgrid::simix::unmarshal<void*>(simcall->args[8]), simgrid::simix::unmarshal<double>(simcall->args[9]));
break;
case SIMCALL_COMM_ISEND:
- simgrid::simix::marshal<smx_activity_t>(simcall->result, simcall_HANDLER_comm_isend(simcall, simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]), simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]), simgrid::simix::unmarshal<double>(simcall->args[2]), simgrid::simix::unmarshal<double>(simcall->args[3]), simgrid::simix::unmarshal<void*>(simcall->args[4]), simgrid::simix::unmarshal<size_t>(simcall->args[5]), simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[6]), simgrid::simix::unmarshal<simix_clean_func_t>(simcall->args[7]), simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[8]), simgrid::simix::unmarshal<void*>(simcall->args[9]), simgrid::simix::unmarshal<int>(simcall->args[10])));
- SIMIX_simcall_answer(simcall);
- break;
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(
+ simcall->result,
+ simcall_HANDLER_comm_isend(
+ simcall, simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]),
+ simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]),
+ simgrid::simix::unmarshal<double>(simcall->args[2]), simgrid::simix::unmarshal<double>(simcall->args[3]),
+ simgrid::simix::unmarshal<void*>(simcall->args[4]), simgrid::simix::unmarshal<size_t>(simcall->args[5]),
+ simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[6]),
+ simgrid::simix::unmarshal<simix_clean_func_t>(simcall->args[7]),
+ simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[8]),
+ simgrid::simix::unmarshal<void*>(simcall->args[9]), simgrid::simix::unmarshal<int>(simcall->args[10])));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_COMM_RECV:
simcall_HANDLER_comm_recv(simcall, simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]), simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]), simgrid::simix::unmarshal<void*>(simcall->args[2]), simgrid::simix::unmarshal<size_t*>(simcall->args[3]), simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]), simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[5]), simgrid::simix::unmarshal<void*>(simcall->args[6]), simgrid::simix::unmarshal<double>(simcall->args[7]), simgrid::simix::unmarshal<double>(simcall->args[8]));
break;
case SIMCALL_COMM_IRECV:
- simgrid::simix::marshal<smx_activity_t>(simcall->result, simcall_HANDLER_comm_irecv(simcall, simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]), simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]), simgrid::simix::unmarshal<void*>(simcall->args[2]), simgrid::simix::unmarshal<size_t*>(simcall->args[3]), simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]), simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[5]), simgrid::simix::unmarshal<void*>(simcall->args[6]), simgrid::simix::unmarshal<double>(simcall->args[7])));
- SIMIX_simcall_answer(simcall);
- break;
+ simgrid::simix::marshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(
+ simcall->result, simcall_HANDLER_comm_irecv(simcall, simgrid::simix::unmarshal<smx_actor_t>(simcall->args[0]),
+ simgrid::simix::unmarshal<smx_mailbox_t>(simcall->args[1]),
+ simgrid::simix::unmarshal<void*>(simcall->args[2]),
+ simgrid::simix::unmarshal<size_t*>(simcall->args[3]),
+ simgrid::simix::unmarshal<simix_match_func_t>(simcall->args[4]),
+ simgrid::simix::unmarshal<simix_copy_data_func_t>(simcall->args[5]),
+ simgrid::simix::unmarshal<void*>(simcall->args[6]),
+ simgrid::simix::unmarshal<double>(simcall->args[7])));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_COMM_WAITANY:
simcall_HANDLER_comm_waitany(simcall, simgrid::simix::unmarshal<xbt_dynar_t>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]));
break;
case SIMCALL_COMM_WAIT:
- simcall_HANDLER_comm_wait(simcall, simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]), simgrid::simix::unmarshal<double>(simcall->args[1]));
- break;
+ simcall_HANDLER_comm_wait(
+ simcall,
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]),
+ simgrid::simix::unmarshal<double>(simcall->args[1]));
+ break;
case SIMCALL_COMM_TEST:
- simcall_HANDLER_comm_test(simcall, simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]));
- break;
+ simcall_HANDLER_comm_test(
+ simcall,
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]));
+ break;
case SIMCALL_COMM_TESTANY:
- simcall_HANDLER_comm_testany(simcall, simgrid::simix::unmarshal<smx_activity_t*>(simcall->args[0]), simgrid::simix::unmarshal<size_t>(simcall->args[1]));
- break;
+ simcall_HANDLER_comm_testany(
+ simcall,
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>*>(simcall->args[0]),
+ simgrid::simix::unmarshal<size_t>(simcall->args[1]));
+ break;
case SIMCALL_MUTEX_INIT:
simgrid::simix::marshal<smx_mutex_t>(simcall->result, simcall_HANDLER_mutex_init(simcall));
break;
case SIMCALL_SET_CATEGORY:
- SIMIX_set_category(simgrid::simix::unmarshal<smx_activity_t>(simcall->args[0]), simgrid::simix::unmarshal<const char*>(simcall->args[1]));
- SIMIX_simcall_answer(simcall);
- break;
+ SIMIX_set_category(
+ simgrid::simix::unmarshal<boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>>(simcall->args[0]),
+ simgrid::simix::unmarshal<const char*>(simcall->args[1]));
+ SIMIX_simcall_answer(simcall);
+ break;
case SIMCALL_RUN_KERNEL:
SIMIX_run_kernel(simgrid::simix::unmarshal<std::function<void()> const*>(simcall->args[0]));
#ifdef __cplusplus
+/* Defines the marshal/unmarshal functions for each type of parameters.
+ *
+ * They will be used in popping_accessors.h to define the functions allowing
+ * to retrieve/set each parameter of each simcall.
+ *
+ * There is an unmarshal_raw() function, which is exactly the same as
+ * unmarshal() for all types but boost::intrusive_ptr<T>. For that type, the
+ * unmarshal() function builds a new intrusive_ptr wrapping the pointer (that
+ * is stored raw within the simcall) while unmarshal_raw() retrieves the raw
+ * pointer.
+ *
+ * This is used in the <simcall>_getraw_<param> functions, which allow the
+ * model-checker to read the data in the remote memory of the MCed process.
+ */
+
namespace simgrid {
namespace simix {
};
template<typename T> struct marshal_t {};
-#define SIMIX_MARSHAL(T, field) \
- inline void marshal(type<T>, u_smx_scalar& simcall, T value) \
- { \
- simcall.field = value; \
- } \
- inline T unmarshal(type<T>, u_smx_scalar const& simcall) \
- { \
- return simcall.field; \
- }
+#define SIMIX_MARSHAL(T, field) \
+ inline void marshal(type<T>, u_smx_scalar& simcall, T value) { simcall.field = value; } \
+ inline T unmarshal(type<T>, u_smx_scalar const& simcall) { return simcall.field; } \
+ inline T unmarshal_raw(type<T>, u_smx_scalar const& simcall) \
+ { /* Exactly the same as unmarshal(). It differs only for intrusive_ptr */ return simcall.field; }
SIMIX_MARSHAL(char, c);
SIMIX_MARSHAL(short, s);
SIMIX_MARSHAL(double, d);
SIMIX_MARSHAL(FPtr, fp);
-inline
-void unmarshal(type<void>, u_smx_scalar const& simcall) {}
+inline void unmarshal(type<void>, u_smx_scalar const& simcall)
+{
+}
+inline void unmarshal_raw(type<void>, u_smx_scalar const& simcall)
+{
+}
template<class T> inline
void marshal(type<T*>, u_smx_scalar& simcall, T* value)
{
return static_cast<T*>(simcall.dp);
}
+template <class T> inline T* unmarshal_raw(type<T*>, u_smx_scalar const& simcall)
+{
+ return static_cast<T*>(simcall.dp);
+}
template <class T>
inline void marshal(type<boost::intrusive_ptr<T>>, u_smx_scalar& simcall, boost::intrusive_ptr<T> value)
{
- intrusive_ptr_add_ref(&*value);
- simcall.dp = static_cast<void*>(&*value);
+ if (value.get() == nullptr) { // Sometimes we return nullptr in an intrusive_ptr...
+ simcall.dp = nullptr;
+ } else {
+ intrusive_ptr_add_ref(&*value);
+ simcall.dp = static_cast<void*>(&*value);
+ }
}
template <class T> inline boost::intrusive_ptr<T> unmarshal(type<boost::intrusive_ptr<T>>, u_smx_scalar const& simcall)
{
+ // refcount was already increased during the marshaling, thus the "false" as last argument
boost::intrusive_ptr<T> res = boost::intrusive_ptr<T>(static_cast<T*>(simcall.dp), false);
- intrusive_ptr_release(&*res);
return res;
}
+template <class T> inline T* unmarshal_raw(type<boost::intrusive_ptr<T>>, u_smx_scalar const& simcall)
+{
+ return static_cast<T*>(simcall.dp);
+}
template<class R, class... T> inline
void marshal(type<R(*)(T...)>, u_smx_scalar& simcall, R(*value)(T...))
{
return (R(*)(T...)) simcall.fp;
}
+template <class R, class... T> inline auto unmarshal_raw(type<R (*)(T...)>, u_smx_scalar simcall) -> R (*)(T...)
+{
+ return (R(*)(T...))simcall.fp;
+}
template<class T> inline
void marshal(u_smx_scalar& simcall, T const& value)
{
return unmarshal(type<T>(), simcall);
}
+template <class T> inline typename std::remove_reference<T>::type unmarshal_raw(u_smx_scalar& simcall)
+{
+ return unmarshal(type<T>(), simcall);
+}
template<std::size_t I>
inline void marshalArgs(smx_simcall_t simcall) {}
int process_join(smx_actor_t process, double timeout) [[block]];
int process_sleep(double duration) [[block]];
-smx_activity_t execution_start(const char* name, double flops_amount, double priority, double bound);
-smx_activity_t execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate, double timeout) [[nohandler]];
-void execution_cancel(smx_activity_t execution) [[nohandler]];
-void execution_set_priority(smx_activity_t execution, double priority) [[nohandler]];
-void execution_set_bound(smx_activity_t execution, double bound) [[nohandler]];
-int execution_wait(smx_activity_t execution) [[block]];
+boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution_start(const char* name, double flops_amount, double priority, double bound);
+boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list, double* flops_amount, double* bytes_amount, double amount, double rate, double timeout) [[nohandler]];
+void execution_cancel(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution) [[nohandler]];
+void execution_set_priority(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution, double priority) [[nohandler]];
+void execution_set_bound(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution, double bound) [[nohandler]];
+int execution_wait(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> execution) [[block]];
void process_on_exit(smx_actor_t process, int_f_pvoid_pvoid_t fun, void* data) [[nohandler]];
void process_auto_restart_set(smx_actor_t process, int auto_restart) [[nohandler]];
smx_actor_t process_restart(smx_actor_t process);
-smx_activity_t comm_iprobe(smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data);
+boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm_iprobe(smx_mailbox_t mbox, int type, int src, int tag, simix_match_func_t match_fun, void* data);
void comm_send(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout) [[block]];
-smx_activity_t comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data, int detached);
+boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double task_size, double rate, void* src_buff, size_t src_buff_size, simix_match_func_t match_fun, simix_clean_func_t clean_fun, simix_copy_data_func_t copy_data_fun, void* data, int detached);
void comm_recv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double timeout, double rate) [[block]];
-smx_activity_t comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate);
+boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff, size_t* dst_buff_size, simix_match_func_t match_fun, simix_copy_data_func_t copy_data_fun, void* data, double rate);
int comm_waitany(xbt_dynar_t comms, double timeout) [[block]];
-void comm_wait(smx_activity_t comm, double timeout) [[block]];
-int comm_test(smx_activity_t comm) [[block]];
-int comm_testany(smx_activity_t* comms, size_t count) [[block]];
+void comm_wait(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm, double timeout) [[block]];
+int comm_test(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> comm) [[block]];
+int comm_testany(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl>* comms, size_t count) [[block]];
smx_mutex_t mutex_init();
void mutex_lock(smx_mutex_t mutex) [[block]];
int file_move(smx_file_t fd, const char* fullpath);
int mc_random(int min, int max);
-void set_category(smx_activity_t synchro, const char* category) [[nohandler]];
+void set_category(boost::intrusive_ptr<simgrid::kernel::activity::ActivityImpl> synchro, const char* category) [[nohandler]];
void run_kernel(std::function<void()> const* code) [[nohandler]];
void run_blocking(std::function<void()> const* code) [[block,nohandler]];
def accessors(self):
res = []
res.append('')
+ regex = re.compile(r"^boost::intrusive_ptr<(.*?)>(.*)$") # to compute the raw type
# Arguments getter/setters
for i in range(len(self.args)):
arg = self.args[i]
+ rawtype = regex.sub(r'\1*\2', arg.rettype())
res.append('static inline %s simcall_%s__get__%s(smx_simcall_t simcall) {' % (
arg.rettype(), self.name, arg.name))
res.append(
' return simgrid::simix::unmarshal<%s>(simcall->args[%i]);' % (arg.rettype(), i))
res.append('}')
+ res.append('static inline %s simcall_%s__getraw__%s(smx_simcall_t simcall) {' % (
+ rawtype, self.name, arg.name))
+ res.append(
+ ' return simgrid::simix::unmarshal_raw<%s>(simcall->args[%i]);' % (rawtype, i))
+ res.append('}')
res.append('static inline void simcall_%s__set__%s(smx_simcall_t simcall, %s arg) {' % (
self.name, arg.name, arg.rettype()))
res.append(' simgrid::simix::marshal<%s>(simcall->args[%i], arg);' % (arg.rettype(), i))
# Return value getter/setters
if self.res.type != 'void':
+ rawtype = regex.sub(r'\1*\2', self.res.rettype())
res.append(
'static inline %s simcall_%s__get__result(smx_simcall_t simcall){' % (self.res.rettype(), self.name))
res.append(' return simgrid::simix::unmarshal<%s>(simcall->result);' % self.res.rettype())
res.append('}')
+ res.append(
+ 'static inline %s simcall_%s__getraw__result(smx_simcall_t simcall){' % (rawtype, self.name))
+ res.append(' return simgrid::simix::unmarshal_raw<%s>(simcall->result);' % rawtype)
+ res.append('}')
res.append(
'static inline void simcall_%s__set__result(smx_simcall_t simcall, %s result){' % (self.name, self.res.rettype()))
res.append(' simgrid::simix::marshal<%s>(simcall->result, result);' % (self.res.rettype()))
XBT_DEBUG("Handling the processes whose action failed (if any)");
while ((action = surf_model_extract_failed_action_set(model))) {
XBT_DEBUG(" Handling Action %p",action);
- SIMIX_simcall_exit((smx_activity_t) action->getData());
+ SIMIX_simcall_exit(static_cast<simgrid::kernel::activity::ActivityImpl*>(action->getData()));
}
XBT_DEBUG("Handling the processes whose action terminated normally (if any)");
while ((action = surf_model_extract_done_action_set(model))) {
if (action->getData() == nullptr)
XBT_DEBUG("probably vcpu's action %p, skip", action);
else
- SIMIX_simcall_exit((smx_activity_t) action->getData());
+ SIMIX_simcall_exit(static_cast<simgrid::kernel::activity::ActivityImpl*>(action->getData()));
}
}
}
const char* synchro_description = "unknown";
- if (dynamic_cast<simgrid::kernel::activity::ExecImpl*>(process->waiting_synchro) != nullptr)
+ if (boost::dynamic_pointer_cast<simgrid::kernel::activity::ExecImpl>(process->waiting_synchro) != nullptr)
synchro_description = "execution";
- if (dynamic_cast<simgrid::kernel::activity::CommImpl*>(process->waiting_synchro) != nullptr)
+ if (boost::dynamic_pointer_cast<simgrid::kernel::activity::CommImpl>(process->waiting_synchro) != nullptr)
synchro_description = "communication";
- if (dynamic_cast<simgrid::kernel::activity::SleepImpl*>(process->waiting_synchro) != nullptr)
+ if (boost::dynamic_pointer_cast<simgrid::kernel::activity::SleepImpl>(process->waiting_synchro) != nullptr)
synchro_description = "sleeping";
- if (dynamic_cast<simgrid::kernel::activity::Raw*>(process->waiting_synchro) != nullptr)
+ if (boost::dynamic_pointer_cast<simgrid::kernel::activity::RawImpl>(process->waiting_synchro) != nullptr)
synchro_description = "synchronization";
- if (dynamic_cast<simgrid::kernel::activity::Io*>(process->waiting_synchro) != nullptr)
+ if (boost::dynamic_pointer_cast<simgrid::kernel::activity::IoImpl>(process->waiting_synchro) != nullptr)
synchro_description = "I/O";
*/
XBT_INFO("Process %lu (%s@%s): waiting for %s synchro %p (%s) in state %d to finish", process->pid,
- process->cname(), process->host->cname(), synchro_description, process->waiting_synchro,
+ process->cname(), process->host->cname(), synchro_description, process->waiting_synchro.get(),
process->waiting_synchro->name.c_str(), (int)process->waiting_synchro->state);
}
else {
double bound){
/* alloc structures and initialize */
- simgrid::kernel::activity::ExecImpl* exec = new simgrid::kernel::activity::ExecImpl(name, issuer->host);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ simgrid::kernel::activity::ExecImplPtr(new simgrid::kernel::activity::ExecImpl(name, issuer->host));
/* set surf's action */
if (not MC_is_active() && not MC_record_replay_is_active()) {
exec->surf_exec = issuer->host->pimpl_cpu->execution_start(flops_amount);
- exec->surf_exec->setData(exec);
+ exec->surf_exec->setData(exec.get());
exec->surf_exec->setPriority(priority);
if (bound > 0)
static_cast<simgrid::surf::CpuAction*>(exec->surf_exec)->setBound(bound);
}
- XBT_DEBUG("Create execute synchro %p: %s", exec, exec->name.c_str());
+ XBT_DEBUG("Create execute synchro %p: %s", exec.get(), exec->name.c_str());
return exec;
}
{
/* alloc structures and initialize */
- simgrid::kernel::activity::ExecImpl* exec = new simgrid::kernel::activity::ExecImpl(name, nullptr);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ simgrid::kernel::activity::ExecImplPtr(new simgrid::kernel::activity::ExecImpl(name, nullptr));
/* set surf's synchro */
sg_host_t *host_list_cpy = xbt_new0(sg_host_t, host_nb);
/* set surf's synchro */
if (not MC_is_active() && not MC_record_replay_is_active()) {
exec->surf_exec = surf_host_model->executeParallelTask(host_nb, host_list_cpy, flops_amount, bytes_amount, rate);
- exec->surf_exec->setData(exec);
+ exec->surf_exec->setData(exec.get());
if (timeout > 0) {
exec->timeoutDetector = host_list[0]->pimpl_cpu->sleep(timeout);
- exec->timeoutDetector->setData(exec);
+ exec->timeoutDetector->setData(exec.get());
}
}
- XBT_DEBUG("Create parallel execute synchro %p", exec);
+ XBT_DEBUG("Create parallel execute synchro %p", exec.get());
return exec;
}
void SIMIX_execution_cancel(smx_activity_t synchro)
{
- XBT_DEBUG("Cancel synchro %p", synchro);
- simgrid::kernel::activity::ExecImpl* exec = static_cast<simgrid::kernel::activity::ExecImpl*>(synchro);
+ XBT_DEBUG("Cancel synchro %p", synchro.get());
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(synchro);
if (exec->surf_exec)
exec->surf_exec->cancel();
void SIMIX_execution_set_priority(smx_activity_t synchro, double priority)
{
- simgrid::kernel::activity::ExecImpl* exec = static_cast<simgrid::kernel::activity::ExecImpl*>(synchro);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(synchro);
if(exec->surf_exec)
exec->surf_exec->setPriority(priority);
}
void SIMIX_execution_set_bound(smx_activity_t synchro, double bound)
{
- simgrid::kernel::activity::ExecImpl* exec = static_cast<simgrid::kernel::activity::ExecImpl*>(synchro);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(synchro);
if(exec->surf_exec)
static_cast<simgrid::surf::CpuAction*>(exec->surf_exec)->setBound(bound);
}
void simcall_HANDLER_execution_wait(smx_simcall_t simcall, smx_activity_t synchro)
{
- simgrid::kernel::activity::ExecImpl* exec = static_cast<simgrid::kernel::activity::ExecImpl*>(synchro);
- XBT_DEBUG("Wait for execution of synchro %p, state %d", synchro, (int)synchro->state);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(synchro);
+ XBT_DEBUG("Wait for execution of synchro %p, state %d", synchro.get(), (int)synchro->state);
/* Associate this simcall to the synchro */
synchro->simcalls.push_back(simcall);
SIMIX_execution_finish(exec);
}
-void SIMIX_execution_finish(simgrid::kernel::activity::ExecImpl* exec)
+void SIMIX_execution_finish(simgrid::kernel::activity::ExecImplPtr exec)
{
for (smx_simcall_t simcall : exec->simcalls) {
switch (exec->state) {
simcall_execution_wait__set__result(simcall, exec->state);
SIMIX_simcall_answer(simcall);
}
-
- /* We no longer need it */
- exec->unref();
}
void SIMIX_set_category(smx_activity_t synchro, const char *category)
if (synchro->state != SIMIX_RUNNING)
return;
- simgrid::kernel::activity::ExecImpl* exec = dynamic_cast<simgrid::kernel::activity::ExecImpl*>(synchro);
+ simgrid::kernel::activity::ExecImplPtr exec =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::ExecImpl>(synchro);
if (exec != nullptr) {
exec->surf_exec->setCategory(category);
return;
}
- simgrid::kernel::activity::CommImpl* comm = dynamic_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
if (comm != nullptr) {
comm->surf_comm->setCategory(category);
}
int auto_restart);
XBT_PRIVATE void SIMIX_host_autorestart(sg_host_t host);
-XBT_PRIVATE smx_activity_t SIMIX_execution_start(smx_actor_t issuer, const char *name,
- double flops_amount, double priority, double bound);
-XBT_PRIVATE smx_activity_t SIMIX_execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list,
- double* flops_amount, double* bytes_amount, double amount,
- double rate, double timeout);
XBT_PRIVATE void SIMIX_execution_cancel(smx_activity_t synchro);
XBT_PRIVATE void SIMIX_execution_set_priority(smx_activity_t synchro, double priority);
XBT_PRIVATE void SIMIX_execution_set_bound(smx_activity_t synchro, double bound);
-XBT_PRIVATE void SIMIX_execution_finish(simgrid::kernel::activity::ExecImpl* exec);
+XBT_PRIVATE void SIMIX_execution_finish(simgrid::kernel::activity::ExecImplPtr exec);
XBT_PRIVATE void SIMIX_set_category(smx_activity_t synchro, const char *category);
SG_END_DECL()
+XBT_PRIVATE smx_activity_t SIMIX_execution_start(smx_actor_t issuer, const char* name, double flops_amount,
+ double priority, double bound);
+XBT_PRIVATE smx_activity_t SIMIX_execution_parallel_start(const char* name, int host_nb, sg_host_t* host_list,
+ double* flops_amount, double* bytes_amount, double amount,
+ double rate, double timeout);
+
#endif
if (host->isOff())
THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->cname());
- simgrid::kernel::activity::Io *synchro = new simgrid::kernel::activity::Io();
+ simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
synchro->host = host;
synchro->surf_io = surf_host_read(host, fd->surf_file, size);
if (host->isOff())
THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->cname());
- simgrid::kernel::activity::Io *synchro = new simgrid::kernel::activity::Io();
+ simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
synchro->host = host;
synchro->surf_io = surf_host_write(host, fd->surf_file, size);
synchro->surf_io->setData(synchro);
if (host->isOff())
THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->cname());
- simgrid::kernel::activity::Io *synchro = new simgrid::kernel::activity::Io();
+ simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
synchro->host = host;
synchro->surf_io = surf_host_open(host, fullpath);
synchro->surf_io->setData(synchro);
if (host->isOff())
THROWF(host_error, 0, "Host %s failed, you cannot call this function", host->cname());
- simgrid::kernel::activity::Io *synchro = new simgrid::kernel::activity::Io();
+ simgrid::kernel::activity::IoImpl* synchro = new simgrid::kernel::activity::IoImpl();
synchro->host = host;
synchro->surf_io = surf_host_close(host, fd->surf_file);
synchro->surf_io->setData(synchro);
void SIMIX_io_destroy(smx_activity_t synchro)
{
- simgrid::kernel::activity::Io *io = static_cast<simgrid::kernel::activity::Io*>(synchro);
- XBT_DEBUG("Destroy synchro %p", synchro);
+ simgrid::kernel::activity::IoImplPtr io = boost::static_pointer_cast<simgrid::kernel::activity::IoImpl>(synchro);
+ XBT_DEBUG("Destroy synchro %p", synchro.get());
if (io->surf_io)
io->surf_io->unref();
- delete io;
}
void SIMIX_io_finish(smx_activity_t synchro)
static void SIMIX_waitany_remove_simcall_from_actions(smx_simcall_t simcall);
static void SIMIX_comm_copy_data(smx_activity_t comm);
-static void SIMIX_comm_start(smx_activity_t synchro);
-static simgrid::kernel::activity::CommImpl*
-_find_matching_comm(boost::circular_buffer_space_optimized<smx_activity_t>* deque, e_smx_comm_type_t type,
- int (*match_fun)(void*, void*, smx_activity_t), void* user_data, smx_activity_t my_synchro,
- bool remove_matching);
+static void SIMIX_comm_start(simgrid::kernel::activity::CommImplPtr synchro);
/**
* \brief Checks if there is a communication activity queued in a deque matching our needs
* \param type The type of communication we are looking for (comm_send, comm_recv)
* \return The communication activity if found, nullptr otherwise
*/
-static simgrid::kernel::activity::CommImpl*
+static simgrid::kernel::activity::CommImplPtr
_find_matching_comm(boost::circular_buffer_space_optimized<smx_activity_t>* deque, e_smx_comm_type_t type,
int (*match_fun)(void*, void*, smx_activity_t), void* this_user_data, smx_activity_t my_synchro,
bool remove_matching)
for(auto it = deque->begin(); it != deque->end(); it++){
smx_activity_t synchro = *it;
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::CommImpl>(std::move(synchro));
if (comm->type == SIMIX_COMM_SEND) {
other_user_data = comm->src_data;
} else if (comm->type == SIMIX_COMM_RECEIVE) {
other_user_data = comm->dst_data;
}
- if (comm->type == type && (match_fun == nullptr || match_fun(this_user_data, other_user_data, synchro)) &&
+ if (comm->type == type && (match_fun == nullptr || match_fun(this_user_data, other_user_data, comm)) &&
(not comm->match_fun || comm->match_fun(other_user_data, this_user_data, my_synchro))) {
- XBT_DEBUG("Found a matching communication synchro %p", comm);
+ XBT_DEBUG("Found a matching communication synchro %p", comm.get());
if (remove_matching)
deque->erase(it);
- comm->ref();
#if SIMGRID_HAVE_MC
comm->mbox_cpy = comm->mbox;
#endif
}
XBT_DEBUG("Sorry, communication synchro %p does not match our needs:"
" its type is %d but we are looking for a comm of type %d (or maybe the filtering didn't match)",
- comm, (int)comm->type, (int)type);
+ comm.get(), (int)comm->type, (int)type);
}
XBT_DEBUG("No matching communication synchro found");
return nullptr;
void (*copy_data_fun)(smx_activity_t, void*, size_t),// used to copy data if not default one
void *data, int detached)
{
- XBT_DEBUG("send from %p", mbox);
+ XBT_DEBUG("send from mailbox %p", mbox);
/* Prepare a synchro describing us, so that it gets passed to the user-provided filter of other side */
- simgrid::kernel::activity::CommImpl* this_comm = new simgrid::kernel::activity::CommImpl(SIMIX_COMM_SEND);
+ simgrid::kernel::activity::CommImplPtr this_comm =
+ simgrid::kernel::activity::CommImplPtr(new simgrid::kernel::activity::CommImpl(SIMIX_COMM_SEND));
/* Look for communication synchro matching our needs. We also provide a description of
* ourself so that the other side also gets a chance of choosing if it wants to match with us.
*
* If it is not found then push our communication into the rendez-vous point */
- simgrid::kernel::activity::CommImpl* other_comm =
+ simgrid::kernel::activity::CommImplPtr other_comm =
_find_matching_comm(&mbox->comm_queue, SIMIX_COMM_RECEIVE, match_fun, data, this_comm, /*remove_matching*/ true);
if (not other_comm) {
- other_comm = this_comm;
+ other_comm = std::move(this_comm);
- if (mbox->permanent_receiver!=nullptr){
+ if (mbox->permanent_receiver != nullptr) {
//this mailbox is for small messages, which have to be sent right now
other_comm->state = SIMIX_READY;
other_comm->dst_proc=mbox->permanent_receiver.get();
- other_comm->ref();
mbox->done_comm_queue.push_back(other_comm);
- XBT_DEBUG("pushing a message into the permanent receive list %p, comm %p", mbox, other_comm);
+ XBT_DEBUG("pushing a message into the permanent receive list %p, comm %p", mbox, other_comm.get());
}else{
- mbox->push(this_comm);
+ mbox->push(other_comm);
}
} else {
XBT_DEBUG("Receive already pushed");
- this_comm->unref();
- this_comm->unref();
other_comm->state = SIMIX_READY;
other_comm->type = SIMIX_COMM_READY;
void (*copy_data_fun)(smx_activity_t, void*, size_t), // used to copy data if not default one
void *data, double rate)
{
- simgrid::kernel::activity::CommImpl* this_synchro = new simgrid::kernel::activity::CommImpl(SIMIX_COMM_RECEIVE);
- XBT_DEBUG("recv from %p %p. this_synchro=%p", mbox, &mbox->comm_queue, this_synchro);
+ simgrid::kernel::activity::CommImplPtr this_synchro =
+ simgrid::kernel::activity::CommImplPtr(new simgrid::kernel::activity::CommImpl(SIMIX_COMM_RECEIVE));
+ XBT_DEBUG("recv from mbox %p. this_synchro=%p", mbox, this_synchro.get());
- simgrid::kernel::activity::CommImpl* other_comm;
+ simgrid::kernel::activity::CommImplPtr other_comm;
//communication already done, get it inside the list of completed comms
if (mbox->permanent_receiver != nullptr && not mbox->done_comm_queue.empty()) {
- this_synchro->unref();
XBT_DEBUG("We have a comm that has probably already been received, trying to match it, to skip the communication");
//find a match in the list of already received comms
other_comm = _find_matching_comm(&mbox->done_comm_queue, SIMIX_COMM_SEND, match_fun, data, this_synchro,
mbox->push(this_synchro);
} else {
if (other_comm->surf_comm && other_comm->remains() < 1e-12) {
- XBT_DEBUG("comm %p has been already sent, and is finished, destroy it",other_comm);
+ XBT_DEBUG("comm %p has been already sent, and is finished, destroy it", other_comm.get());
other_comm->state = SIMIX_DONE;
other_comm->type = SIMIX_COMM_DONE;
other_comm->mbox = nullptr;
- other_comm->unref();
}
- other_comm->unref();
- this_synchro->unref();
}
} else {
/* Prepare a comm describing us, so that it gets passed to the user-provided filter of other side */
other_comm = _find_matching_comm(&mbox->comm_queue, SIMIX_COMM_SEND, match_fun, data, this_synchro,
/*remove_matching*/ true);
- if (not other_comm) {
- XBT_DEBUG("Receive pushed first %zu", mbox->comm_queue.size());
+ if (other_comm == nullptr) {
+ XBT_DEBUG("Receive pushed first (%zu comm enqueued so far)", mbox->comm_queue.size());
other_comm = this_synchro;
mbox->push(this_synchro);
} else {
- XBT_DEBUG("Match my %p with the existing %p", this_synchro, other_comm);
-
- other_comm = static_cast<simgrid::kernel::activity::CommImpl*>(other_comm);
+ XBT_DEBUG("Match my %p with the existing %p", this_synchro.get(), other_comm.get());
other_comm->state = SIMIX_READY;
other_comm->type = SIMIX_COMM_READY;
- this_synchro->unref();
- this_synchro->unref();
}
dst_proc->comms.push_back(other_comm);
}
int tag, int (*match_fun)(void *, void *, smx_activity_t), void *data)
{
XBT_DEBUG("iprobe from %p %p", mbox, &mbox->comm_queue);
- simgrid::kernel::activity::CommImpl* this_comm;
+ simgrid::kernel::activity::CommImplPtr this_comm;
int smx_type;
if(type == 1){
- this_comm = new simgrid::kernel::activity::CommImpl(SIMIX_COMM_SEND);
+ this_comm = simgrid::kernel::activity::CommImplPtr(new simgrid::kernel::activity::CommImpl(SIMIX_COMM_SEND));
smx_type = SIMIX_COMM_RECEIVE;
} else{
- this_comm = new simgrid::kernel::activity::CommImpl(SIMIX_COMM_RECEIVE);
+ this_comm = simgrid::kernel::activity::CommImplPtr(new simgrid::kernel::activity::CommImpl(SIMIX_COMM_RECEIVE));
smx_type = SIMIX_COMM_SEND;
}
smx_activity_t other_synchro=nullptr;
(e_smx_comm_type_t) smx_type, match_fun, data, this_comm,/*remove_matching*/false);
}
- if(other_synchro)
- other_synchro->unref();
-
- this_comm->unref();
return other_synchro;
}
void simcall_HANDLER_comm_wait(smx_simcall_t simcall, smx_activity_t synchro, double timeout)
{
/* Associate this simcall to the wait synchro */
- XBT_DEBUG("simcall_HANDLER_comm_wait, %p", synchro);
+ XBT_DEBUG("simcall_HANDLER_comm_wait, %p", synchro.get());
synchro->simcalls.push_back(simcall);
simcall->issuer->waiting_synchro = synchro;
if (timeout < 0.0)
THROW_IMPOSSIBLE;
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
if (comm->src_proc == simcall->issuer)
comm->state = SIMIX_SRC_TIMEOUT;
else
SIMIX_comm_finish(synchro);
} else { /* if (timeout >= 0) { we need a surf sleep action even when there is no timeout, otherwise surf won't tell us when the host fails */
surf_action_t sleep = simcall->issuer->host->pimpl_cpu->sleep(timeout);
- sleep->setData(synchro);
+ sleep->setData(&*synchro);
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
if (simcall->issuer == comm->src_proc)
comm->src_timeout = sleep;
else
void simcall_HANDLER_comm_test(smx_simcall_t simcall, smx_activity_t synchro)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
if (MC_is_active() || MC_record_replay_is_active()){
simcall_comm_test__set__result(simcall, comm->src_proc && comm->dst_proc);
}
}
-void simcall_HANDLER_comm_testany(
- smx_simcall_t simcall, simgrid::kernel::activity::ActivityImpl* comms[], size_t count)
+void simcall_HANDLER_comm_testany(smx_simcall_t simcall, simgrid::kernel::activity::ActivityImplPtr comms[],
+ size_t count)
{
// The default result is -1 -- this means, "nothing is ready".
// It can be changed below, but only if something matches.
if(idx == -1){
SIMIX_simcall_answer(simcall);
}else{
- simgrid::kernel::activity::ActivityImpl* synchro = comms[idx];
+ simgrid::kernel::activity::ActivityImplPtr synchro = comms[idx];
simcall_comm_testany__set__result(simcall, idx);
synchro->simcalls.push_back(simcall);
synchro->state = SIMIX_DONE;
}
for (std::size_t i = 0; i != count; ++i) {
- simgrid::kernel::activity::ActivityImpl* synchro = comms[i];
+ simgrid::kernel::activity::ActivityImplPtr synchro = comms[i];
if (synchro->state != SIMIX_WAITING && synchro->state != SIMIX_RUNNING) {
simcall_comm_testany__set__result(simcall, i);
synchro->simcalls.push_back(simcall);
void simcall_HANDLER_comm_waitany(smx_simcall_t simcall, xbt_dynar_t synchros, double timeout)
{
- smx_activity_t synchro;
- unsigned int cursor = 0;
-
if (MC_is_active() || MC_record_replay_is_active()){
if (timeout > 0.0)
xbt_die("Timeout not implemented for waitany in the model-checker");
int idx = SIMCALL_GET_MC_VALUE(simcall);
- synchro = xbt_dynar_get_as(synchros, idx, smx_activity_t);
+ smx_activity_t synchro = xbt_dynar_get_as(synchros, idx, smx_activity_t);
synchro->simcalls.push_back(simcall);
simcall_comm_waitany__set__result(simcall, idx);
synchro->state = SIMIX_DONE;
});
}
- xbt_dynar_foreach(synchros, cursor, synchro){
+ unsigned int cursor;
+ simgrid::kernel::activity::ActivityImpl* ptr;
+ xbt_dynar_foreach(synchros, cursor, ptr){
+ smx_activity_t synchro = simgrid::kernel::activity::ActivityImplPtr(ptr);
/* associate this simcall to the the synchro */
synchro->simcalls.push_back(simcall);
void SIMIX_waitany_remove_simcall_from_actions(smx_simcall_t simcall)
{
- smx_activity_t synchro;
unsigned int cursor = 0;
xbt_dynar_t synchros = simcall_comm_waitany__get__comms(simcall);
- xbt_dynar_foreach(synchros, cursor, synchro) {
+ simgrid::kernel::activity::ActivityImpl* ptr;
+ xbt_dynar_foreach(synchros, cursor, ptr){
+ smx_activity_t synchro = simgrid::kernel::activity::ActivityImplPtr(ptr);
+
// Remove the first occurence of simcall:
auto i = boost::range::find(synchro->simcalls, simcall);
if (i != synchro->simcalls.end())
* \brief Starts the simulation of a communication synchro.
* \param synchro the communication synchro
*/
-static inline void SIMIX_comm_start(smx_activity_t synchro)
+static inline void SIMIX_comm_start(simgrid::kernel::activity::CommImplPtr comm)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
-
/* If both the sender and the receiver are already there, start the communication */
- if (synchro->state == SIMIX_READY) {
+ if (comm->state == SIMIX_READY) {
simgrid::s4u::Host* sender = comm->src_proc->host;
simgrid::s4u::Host* receiver = comm->dst_proc->host;
comm->surf_comm = surf_network_model->communicate(sender, receiver, comm->task_size, comm->rate);
- comm->surf_comm->setData(synchro);
+ comm->surf_comm->setData(comm.get());
comm->state = SIMIX_RUNNING;
- XBT_DEBUG("Starting communication %p from '%s' to '%s' (surf_action: %p)", synchro, sender->cname(),
+ XBT_DEBUG("Starting communication %p from '%s' to '%s' (surf_action: %p)", comm.get(), sender->cname(),
receiver->cname(), comm->surf_comm);
/* If a link is failed, detect it immediately */
*/
void SIMIX_comm_finish(smx_activity_t synchro)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
while (not synchro->simcalls.empty()) {
smx_simcall_t simcall = synchro->simcalls.front();
switch (comm->state) {
case SIMIX_DONE:
- XBT_DEBUG("Communication %p complete!", synchro);
+ XBT_DEBUG("Communication %p complete!", synchro.get());
SIMIX_comm_copy_data(synchro);
break;
case SIMIX_LINK_FAILURE:
XBT_DEBUG("Link failure in synchro %p between '%s' and '%s': posting an exception to the issuer: %s (%p) "
"detached:%d",
- synchro, comm->src_proc ? comm->src_proc->host->cname() : nullptr,
+ synchro.get(), comm->src_proc ? comm->src_proc->host->cname() : nullptr,
comm->dst_proc ? comm->dst_proc->host->cname() : nullptr, simcall->issuer->cname(), simcall->issuer,
comm->detached);
if (comm->src_proc == simcall->issuer) {
void SIMIX_comm_copy_pointer_callback(smx_activity_t synchro, void* buff, size_t buff_size)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
xbt_assert((buff_size == sizeof(void *)), "Cannot copy %zu bytes: must be sizeof(void*)", buff_size);
*(void **) (comm->dst_buff) = buff;
void SIMIX_comm_copy_buffer_callback(smx_activity_t synchro, void* buff, size_t buff_size)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
XBT_DEBUG("Copy the data over");
memcpy(comm->dst_buff, buff, buff_size);
*/
void SIMIX_comm_copy_data(smx_activity_t synchro)
{
- simgrid::kernel::activity::CommImpl* comm = static_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
size_t buff_size = comm->src_buff_size;
/* If there is no data to copy then return */
if (not comm->src_buff || not comm->dst_buff || comm->copied)
return;
- XBT_DEBUG("Copying comm %p data from %s (%p) -> %s (%p) (%zu bytes)", comm,
+ XBT_DEBUG("Copying comm %p data from %s (%p) -> %s (%p) (%zu bytes)", comm.get(),
comm->src_proc ? comm->src_proc->host->cname() : "a finished process", comm->src_buff,
comm->dst_proc ? comm->dst_proc->host->cname() : "a finished process", comm->dst_buff, buff_size);
{
XBT_IN("(%p, %f)",smx_host,timeout);
- simgrid::kernel::activity::Raw *sync = new simgrid::kernel::activity::Raw();
+ simgrid::kernel::activity::RawImplPtr sync =
+ simgrid::kernel::activity::RawImplPtr(new simgrid::kernel::activity::RawImpl());
sync->sleep = smx_host->pimpl_cpu->sleep(timeout);
- sync->sleep->setData(sync);
+ sync->sleep->setData(&*sync);
XBT_OUT();
return sync;
}
void SIMIX_synchro_finish(smx_activity_t synchro)
{
- XBT_IN("(%p)",synchro);
+ XBT_IN("(%p)", synchro.get());
smx_simcall_t simcall = synchro->simcalls.front();
synchro->simcalls.pop_front();
SIMIX_synchro_stop_waiting(simcall->issuer, simcall);
simcall->issuer->waiting_synchro = nullptr;
- delete synchro;
SIMIX_simcall_answer(simcall);
XBT_OUT();
}
if (xbt_swag_size(this->sleeping) > 0) {
/*process to wake up */
smx_actor_t p = (smx_actor_t) xbt_swag_extract(this->sleeping);
- delete p->waiting_synchro;
p->waiting_synchro = nullptr;
this->owner = p;
SIMIX_simcall_answer(&p->simcall);
if ((proc = (smx_actor_t) xbt_swag_extract(cond->sleeping))) {
/* Destroy waiter's synchronization */
- delete proc->waiting_synchro;
proc->waiting_synchro = nullptr;
/* Now transform the cond wait simcall into a mutex lock one */
XBT_DEBUG("Sem release semaphore %p", sem);
if ((proc = (smx_actor_t) xbt_swag_extract(sem->sleeping))) {
- delete proc->waiting_synchro;
proc->waiting_synchro = nullptr;
SIMIX_simcall_answer(&proc->simcall);
} else {
void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t buff_size)
{
- simgrid::kernel::activity::CommImpl* comm = dynamic_cast<simgrid::kernel::activity::CommImpl*>(synchro);
+ simgrid::kernel::activity::CommImplPtr comm =
+ boost::dynamic_pointer_cast<simgrid::kernel::activity::CommImpl>(synchro);
int src_shared = 0;
int dst_shared = 0;
size_t src_offset = 0;
count++;
if (status != MPI_STATUSES_IGNORE)
status[i] = *pstat;
- if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->flags_ & NON_PERSISTENT)
+ if ((requests[i] != MPI_REQUEST_NULL) && (requests[i]->flags_ & NON_PERSISTENT))
requests[i] = MPI_REQUEST_NULL;
}
} else {
int Request::testany(int count, MPI_Request requests[], int *index, MPI_Status * status)
{
- std::vector<simgrid::kernel::activity::ActivityImpl*> comms;
+ std::vector<simgrid::kernel::activity::ActivityImplPtr> comms;
comms.reserve(count);
int i;
}
if (request->action_ != nullptr){
- simgrid::kernel::activity::CommImpl* sync_comm =
- static_cast<simgrid::kernel::activity::CommImpl*>(request->action_);
+ simgrid::kernel::activity::CommImplPtr sync_comm =
+ boost::static_pointer_cast<simgrid::kernel::activity::CommImpl>(request->action_);
MPI_Request req = static_cast<MPI_Request>(sync_comm->src_data);
*flag = 1;
if(status != MPI_STATUS_IGNORE && (req->flags_ & PREPARED) == 0) {
int Request::waitany(int count, MPI_Request requests[], MPI_Status * status)
{
s_xbt_dynar_t comms; // Keep it on stack to save some extra mallocs
- int i;
int size = 0;
int index = MPI_UNDEFINED;
- int *map;
if(count > 0) {
// Wait for a request to complete
- xbt_dynar_init(&comms, sizeof(smx_activity_t), nullptr);
- map = xbt_new(int, count);
+ xbt_dynar_init(&comms, sizeof(smx_activity_t), [](void*ptr){
+ intrusive_ptr_release(*(simgrid::kernel::activity::ActivityImpl**)ptr);
+ });
+ int *map = xbt_new(int, count);
XBT_DEBUG("Wait for one of %d", count);
- for(i = 0; i < count; i++) {
+ for(int i = 0; i < count; i++) {
if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED) &&
not(requests[i]->flags_ & FINISHED)) {
if (requests[i]->action_ != nullptr) {
XBT_DEBUG("Waiting any %p ", requests[i]);
- xbt_dynar_push(&comms, &requests[i]->action_);
+ intrusive_ptr_add_ref(requests[i]->action_.get());
+ xbt_dynar_push_as(&comms, simgrid::kernel::activity::ActivityImpl*, requests[i]->action_.get());
map[size] = i;
size++;
} else {
}
}
}
- if(size > 0) {
- i = simcall_comm_waitany(&comms, -1);
+ if (size > 0) {
+ XBT_DEBUG("Enter waitany for %lu comms", xbt_dynar_length(&comms));
+ int i = simcall_comm_waitany(&comms, -1);
// not MPI_UNDEFINED, as this is a simix return code
if (i != -1) {
std::vector<MPI_Request> accumulates;
int index;
MPI_Status stat;
- MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
+ MPI_Status *pstat = (status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat);
int retvalue = MPI_SUCCESS;
//tag invalid requests in the set
if (status != MPI_STATUSES_IGNORE) {
wait(&requests[c],pstat);
index = c;
} else {
- index = waitany(count, requests, pstat);
+ index = waitany(count, (MPI_Request*)requests, pstat);
if (index == MPI_UNDEFINED)
break;
int *periodic_;
int *position_;
public:
- Topo_Cart(int ndims);
+ explicit Topo_Cart(int ndims);
~Topo_Cart();
Topo_Cart(MPI_Comm comm_old, int ndims, int dims[], int periods[], int reorder, MPI_Comm *comm_cart);
Topo_Cart* sub(const int remain_dims[], MPI_Comm *newcomm) ;
{
/* iterate for all virtual machines */
for (s4u::VirtualMachine* ws_vm : vm::VirtualMachineImpl::allVms_) {
-
Cpu* cpu = ws_vm->pimpl_cpu;
+ int active_tasks = lmm_constraint_get_variable_amount(cpu->constraint());
- int is_active = lmm_constraint_used(cpu->model()->getMaxminSystem(), cpu->constraint());
-
- if (is_active) {
- /* some tasks exist on this VM */
- XBT_DEBUG("set the weight of the dummy CPU action on PM to 1");
-
- /* FIXME: If we assign 1.05 and 0.05, the system makes apparently wrong values. */
- ws_vm->pimpl_vm_->action_->setPriority(1);
+ /* The impact of the VM over its PM is the min between its vCPU amount and the amount of tasks it contains */
+ int impact = std::min(active_tasks, ws_vm->pimpl_vm_->coreAmount());
- } else {
- /* no task exist on this VM */
- XBT_DEBUG("set the weight of the dummy CPU action on PM to 0");
-
- ws_vm->pimpl_vm_->action_->setPriority(0);
- }
+ XBT_DEBUG("set the weight of the dummy CPU action of VM%p on PM to %d (#tasks: %d)", ws_vm, impact, active_tasks);
+ ws_vm->pimpl_vm_->action_->setPriority(impact);
}
}
#include "simgrid/s4u/Host.hpp"
#include "src/surf/maxmin_private.hpp"
+#include <list>
+
/***********
* Classes *
***********/
}
return usage;
}
+int lmm_constraint_get_variable_amount(lmm_constraint_t cnst) {
+ int usage = 0;
+ xbt_swag_t elem_list = &(cnst->enabled_element_set);
+ void *_elem;
+
+ xbt_swag_foreach(_elem, elem_list) {
+ lmm_element_t elem = (lmm_element_t)_elem;
+ if (elem->value > 0)
+ usage++;
+ }
+ return usage;
+}
void lmm_check_concurrency(lmm_system_t sys){
//These checks are very expensive, so do them only if we want to debug SURF LMM
#include "simgrid/s4u/Engine.hpp"
+#include <algorithm>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <string>
xbt_free(msg);
}
/* watts_off is 0 by default */
+
+ if (ptr->coreCount() == 1)
+ xbt_assert(std::all_of(power_range_watts_list.begin(), power_range_watts_list.end(),
+ [](PowerRange power_range) { return power_range.min == power_range.max; }),
+ "You only have one core in host %s, but the \
+ energy consumption for one core does not match the energy consumption for all (here: 1) cores). This is an error in your platform, please fix it.",
+ host->cname());
}
HostEnergy::~HostEnergy() = default;
return host->extension<HostEnergy>()->getWattMaxAt(pstate);
}
+/** @brief Returns the current consumption of the host */
+double sg_host_get_current_consumption(sg_host_t host)
+{
+ xbt_assert(HostEnergy::EXTENSION_ID.valid(),
+ "The Energy plugin is not active. Please call sg_energy_plugin_init() during initialization.");
+ double cpu_load = lmm_constraint_get_usage(host->pimpl_cpu->constraint()) / host->speed();
+ return host->extension<HostEnergy>()->getCurrentWattsValue(cpu_load);
+}
+
SG_END_DECL()
if (current_routing == nullptr) { /* it is the first one */
xbt_assert(simgrid::s4u::Engine::instance()->pimpl->netRoot_ == nullptr,
- "All defined components must belong to a AS");
+ "All defined components must belong to a networking zone.");
simgrid::s4u::Engine::instance()->pimpl->netRoot_ = new_as;
} else {
delete stype->model_properties;
free(stype);
}
+ for (auto s : *simgrid::surf::StorageImpl::storages)
+ delete s.second;
delete simgrid::surf::StorageImpl::storages;
for (auto model : *all_existing_models)
XBT_PUBLIC(void) surf_parse_close();
XBT_PUBLIC(void) surf_parse_assert(bool cond, const char *fmt, ...) XBT_ATTRIB_PRINTF(2,3);
XBT_PUBLIC(void) XBT_ATTRIB_NORETURN surf_parse_error(const char *msg,...) XBT_ATTRIB_PRINTF(1,2);
+XBT_PUBLIC(void) surf_parse_assert_netpoint(char* hostname, const char* pre, const char* post);
XBT_PUBLIC(void) surf_parse_warn(const char *msg,...) XBT_ATTRIB_PRINTF(1,2);
XBT_PUBLIC(double) surf_parse_get_double(const char *string);
* New in DTD version 4.1 (in SimGrid 3.16): backward compatible change (v4 files are valid v4.1 files)
- <zone> can be used as a synonym for the now deprecated <as>
- - <zoneZoute> an be used as a synonym for the now deprecated <asroute>
+ - <zoneRoute> can be used as a synonym for the now deprecated <asroute>
- <bypassZoneRoute> can be used as a synonym for the now deprecated <bypassAsRoute>
- <actor> can be used as a synonym for the now deprecated <process>
* To upgrade your files, use the tool simgrid_update_xml
* * New in DTD version 4.1 (in SimGrid 3.16): backward compatible change (v4 files are valid v4.1 files)
* - <zone> can be used as a synonym for the now deprecated <as>
- * - <zoneZoute> an be used as a synonym for the now deprecated <asroute>
+ * - <zoneRoute> can be used as a synonym for the now deprecated <asroute>
* - <bypassZoneRoute> can be used as a synonym for the now deprecated <bypassAsRoute>
* - <actor> can be used as a synonym for the now deprecated <process>
* * New in DTD version 4 (in SimGrid 3.13):
surf_exit();
xbt_die("Exiting now");
}
+void surf_parse_assert_netpoint(char* hostname, const char* pre, const char* post)
+{
+ if (sg_netpoint_by_name_or_null(hostname) != nullptr) // found
+ return;
+
+ std::string msg = std::string(pre);
+ msg += hostname;
+ msg += post;
+ msg += " Existing netpoints: \n";
+
+ std::vector<simgrid::kernel::routing::NetPoint*> list;
+ simgrid::s4u::Engine::instance()->netpointList(&list);
+ bool first = true;
+ for (auto np : list) {
+ if (np->isNetZone())
+ continue;
+
+ if (not first)
+ msg += ",";
+ first = false;
+ msg += "'" + np->name() + "'";
+ if (msg.length() > 4096) {
+ msg.pop_back(); // remove trailing quote
+ msg += "...(list truncated)......";
+ break;
+ }
+ }
+ surf_parse_error("%s", msg.c_str());
+}
+
void surf_parse_warn(const char *fmt, ...) {
va_list va;
va_start(va,fmt);
}
void STag_surfxml_route(){
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_route_src), "Route src='%s' does name a node.",
- A_surfxml_route_src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_route_dst), "Route dst='%s' does name a node.",
- A_surfxml_route_dst);
+ surf_parse_assert_netpoint(A_surfxml_route_src, "Route src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_route_dst, "Route dst='", "' does name a node.");
}
void STag_surfxml_ASroute(){
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_ASroute_src), "ASroute src='%s' does name a node.",
- A_surfxml_ASroute_src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_ASroute_dst), "ASroute dst='%s' does name a node.",
- A_surfxml_ASroute_dst);
+ surf_parse_assert_netpoint(A_surfxml_ASroute_src, "ASroute src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_ASroute_dst, "ASroute dst='", "' does name a node.");
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_ASroute_gw___src), "ASroute gw_src='%s' does name a node.",
- A_surfxml_ASroute_gw___src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_ASroute_gw___dst), "ASroute gw_dst='%s' does name a node.",
- A_surfxml_ASroute_gw___dst);
+ surf_parse_assert_netpoint(A_surfxml_ASroute_gw___src, "ASroute gw_src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_ASroute_gw___dst, "ASroute gw_dst='", "' does name a node.");
}
void STag_surfxml_zoneRoute(){
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_zoneRoute_src), "zoneRoute src='%s' does name a node.",
- A_surfxml_zoneRoute_src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_zoneRoute_dst), "zoneRoute dst='%s' does name a node.",
- A_surfxml_zoneRoute_dst);
-
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_zoneRoute_gw___src), "zoneRoute gw_src='%s' does name a node.",
- A_surfxml_zoneRoute_gw___src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_zoneRoute_gw___dst), "zoneRoute gw_dst='%s' does name a node.",
- A_surfxml_zoneRoute_gw___dst);
+ surf_parse_assert_netpoint(A_surfxml_zoneRoute_src, "zoneRoute src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_zoneRoute_dst, "zoneRoute dst='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_zoneRoute_gw___src, "zoneRoute gw_src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_zoneRoute_gw___dst, "zoneRoute gw_dst='", "' does name a node.");
}
void STag_surfxml_bypassRoute(){
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassRoute_src), "bypassRoute src='%s' does name a node.",
- A_surfxml_bypassRoute_src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassRoute_dst), "bypassRoute dst='%s' does name a node.",
- A_surfxml_bypassRoute_dst);
+ surf_parse_assert_netpoint(A_surfxml_bypassRoute_src, "bypassRoute src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassRoute_dst, "bypassRoute dst='", "' does name a node.");
}
void STag_surfxml_bypassASroute(){
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassASroute_src),
- "bypassASroute src='%s' does name a node.", A_surfxml_bypassASroute_src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassASroute_dst),
- "bypassASroute dst='%s' does name a node.", A_surfxml_bypassASroute_dst);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassASroute_gw___src),
- "bypassASroute gw_src='%s' does name a node.", A_surfxml_bypassASroute_gw___src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassASroute_gw___dst),
- "bypassASroute gw_dst='%s' does name a node.", A_surfxml_bypassASroute_gw___dst);
+ surf_parse_assert_netpoint(A_surfxml_bypassASroute_src, "bypassASroute src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassASroute_dst, "bypassASroute dst='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassASroute_gw___src, "bypassASroute gw_src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassASroute_gw___dst, "bypassASroute gw_dst='", "' does name a node.");
}
void STag_surfxml_bypassZoneRoute(){
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassZoneRoute_src),
- "bypassASroute src='%s' does name a node.", A_surfxml_bypassZoneRoute_src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassZoneRoute_dst),
- "bypassASroute dst='%s' does name a node.", A_surfxml_bypassZoneRoute_dst);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassZoneRoute_gw___src),
- "bypassASroute gw_src='%s' does name a node.", A_surfxml_bypassZoneRoute_gw___src);
- surf_parse_assert(sg_netpoint_by_name_or_null(A_surfxml_bypassZoneRoute_gw___dst),
- "bypassASroute gw_dst='%s' does name a node.", A_surfxml_bypassZoneRoute_gw___dst);
+ surf_parse_assert_netpoint(A_surfxml_bypassZoneRoute_src, "bypassZoneRoute src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassZoneRoute_dst, "bypassZoneRoute dst='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassZoneRoute_gw___src, "bypassZoneRoute gw_src='", "' does name a node.");
+ surf_parse_assert_netpoint(A_surfxml_bypassZoneRoute_gw___dst, "bypassZoneRoute gw_dst='", "' does name a node.");
}
void ETag_surfxml_route(){
-/* Copyright (c) 2005-2016. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2005-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <cstddef>
#include <cstdlib>
+#include <cstring>
#include <vector>
}
+static bool startWith(std::string str, const char* prefix)
+{
+ return strncmp(str.c_str(), prefix, strlen(prefix)) == 0;
+}
+
void xbt_backtrace_display(xbt_backtrace_location_t* loc, std::size_t count)
{
#ifdef HAVE_BACKTRACE
return;
}
fprintf(stderr, "Backtrace (displayed in process %s):\n", SIMIX_process_self_get_name());
- for (std::string const& s : backtrace)
- fprintf(stderr, "---> %s\n", s.c_str());
+ for (std::string const& s : backtrace) {
+ if (startWith(s, "xbt_backtrace_display_current"))
+ continue;
+
+ std::fprintf(stderr, "---> '%s'\n", s.c_str());
+ if (startWith(s, "SIMIX_simcall_handle") ||
+ startWith(s, "simgrid::xbt::MainFunction") /* main used with thread factory */)
+ break;
+ }
#else
XBT_ERROR("Cannot display backtrace when compiled without libunwind.");
#endif
${CMAKE_CURRENT_SOURCE_DIR}/trace_integration/test-hbp1-c1s1-c3s2.xml
${CMAKE_CURRENT_SOURCE_DIR}/trace_integration/test-hbp2.5-hbp1.5.xml PARENT_SCOPE)
-foreach(x cloud-sharing get_sender host_on_off host_on_off_processes host_on_off_recv task_destroy_cancel task_listen_from trace_integration)
+foreach(x get_sender host_on_off host_on_off_processes host_on_off_recv task_destroy_cancel task_listen_from trace_integration)
ADD_TESH_FACTORIES(tesh-msg-${x} "thread;boost;ucontext;raw" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/msg/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x}/${x}.tesh)
endforeach()
+
+
+# One context factory is enough for these ones
+
+foreach(x cloud-sharing)
+ ADD_TESH(tesh-msg-${x} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/msg/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/msg/${x}/${x}.tesh)
+endforeach()
#include "simgrid/msg.h"
XBT_LOG_NEW_DEFAULT_CATEGORY(msg_test, "Messages specific for this msg example");
+const int FAIL_ON_ERROR = 0;
+const int flop_amount = 100000000;
+int failed_test = 0;
+
static int computation_fun(int argc, char* argv[])
{
- msg_task_t task = MSG_task_create("Task", 100000000, 0, NULL);
+ int *size = MSG_process_get_data(MSG_process_self());
+ msg_task_t task = MSG_task_create("Task", *size, 0, NULL);
- double clock_sta = MSG_get_clock();
+ double begin = MSG_get_clock();
MSG_task_execute(task);
- double clock_end = MSG_get_clock();
+ double end = MSG_get_clock();
- XBT_INFO("Task took %gs to execute", clock_end - clock_sta);
+ if (0.1 - (end - begin) > 0.001) {
+ xbt_assert(! FAIL_ON_ERROR, "%s with %.4g load (%dflops) took %.4fs instead of 0.1s",
+ MSG_process_get_name(MSG_process_self()), ((double)*size/flop_amount),*size, (end-begin));
+ XBT_INFO("FAILED TEST: %s with %.4g load (%dflops) took %.4fs instead of 0.1s",
+ MSG_process_get_name(MSG_process_self()),((double)*size/flop_amount), *size, (end-begin));
+ failed_test ++;
+ } else {
+ XBT_INFO("Passed: %s with %.4g load (%dflops) took 0.1s as expected",
+ MSG_process_get_name(MSG_process_self()), ((double)*size/flop_amount), *size);
+ }
MSG_task_destroy(task);
+ free(size);
return 0;
}
+static void run_test(const char *name, msg_host_t location, int size) {
+ int* data = xbt_new(int, 1);
+ *data = size;
+ MSG_process_create(name, computation_fun, data, location);
+}
+
static int master_main(int argc, char* argv[])
{
- msg_host_t pm0 = MSG_host_by_name("node-0.acme.org");
- msg_host_t pm1 = MSG_host_by_name("node-1.acme.org");
- xbt_assert(pm0, "Host node-0.acme.org does not seem to exist");
+
+ XBT_INFO("# TEST ON SINGLE-CORE PMs");
+
+ msg_host_t pm0 = MSG_host_by_name("node-0.1core.org");
+ msg_host_t pm1 = MSG_host_by_name("node-1.1core.org");
+ msg_host_t vm0;
+ xbt_assert(pm0, "Host node-0.1core.org does not seem to exist");
+
+ // syntax of the process name:
+ // "( )1" means PM with one core; "( )2" means PM with 2 cores
+ // "( [ ]2 )4" means a VM with 2 cores, on a PM with 4 cores.
+ // "o" means another process is there
+ // "X" means the process which holds this name
XBT_INFO("## Test 1 (started): check computation on normal PMs");
XBT_INFO("### Put a task on a PM");
- MSG_process_create("compute", computation_fun, NULL, pm0);
+ run_test("(X)1", pm0, flop_amount);
MSG_process_sleep(2);
XBT_INFO("### Put two tasks on a PM");
- MSG_process_create("compute", computation_fun, NULL, pm0);
- MSG_process_create("compute", computation_fun, NULL, pm0);
+ run_test("(Xo)1", pm0, flop_amount/2);
+ run_test("(oX)1", pm0, flop_amount/2);
MSG_process_sleep(2);
XBT_INFO("### Put a task on each PM");
- MSG_process_create("compute", computation_fun, NULL, pm0);
- MSG_process_create("compute", computation_fun, NULL, pm1);
+ run_test("(X)1 (o)1", pm0, flop_amount);
+ run_test("(o)1 (X)1", pm1, flop_amount);
MSG_process_sleep(2);
XBT_INFO("## Test 1 (ended)");
+ XBT_INFO("# TEST ON SINGLE-CORE PMs AND SINGLE-CORE VMs");
- XBT_INFO("## Test 2 (started): check impact of running a task inside a VM (there is no degradation for the moment)");
+ XBT_INFO("## Test 2 (started): check impact of running tasks inside a VM (there is no degradation for the moment)");
XBT_INFO("### Put a VM on a PM, and put a task to the VM");
- msg_vm_t vm0 = MSG_vm_create_core(pm0, "VM0");
+ vm0 = MSG_vm_create_core(pm0, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [X]1 )1", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two task to the VM");
+ vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- MSG_process_create("compute", computation_fun, NULL, vm0);
+ run_test("( [Xo]1 )1", vm0, flop_amount/2);
+ run_test("( [oX]1 )1", vm0, flop_amount/2);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
XBT_INFO("## Test 2 (ended)");
+
+ XBT_INFO("## Test 3 (started): check impact of running tasks collocated with VMs (there is no VM noise for the moment)");
- XBT_INFO(
- "## Test 3 (started): check impact of running a task collocated with a VM (there is no VM noise for the moment)");
+ XBT_INFO("### Put a task on a PM collocated with an empty VM");
- XBT_INFO("### Put a VM on a PM, and put a task to the PM");
vm0 = MSG_vm_create_core(pm0, "VM0");
MSG_vm_start(vm0);
- MSG_process_create("compute", computation_fun, NULL, pm0);
+ run_test("( [ ]1 X )1", pm0, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put a task to the PM and a task to the VM");
+ vm0 = MSG_vm_create_core(pm0, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [X]1 o )1", vm0, flop_amount/2);
+ run_test("( [o]1 X )1", pm0, flop_amount/2);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put a task to the PM and two tasks to the VM");
+
+ vm0 = MSG_vm_create_core(pm0, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [Xo]1 o )1", vm0, flop_amount/4);
+ run_test("( [oX]1 o )1", vm0, flop_amount/4);
+ run_test("( [oo]1 X )1", pm0, flop_amount/2);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
XBT_INFO("## Test 3 (ended)");
+
+ XBT_INFO("# TEST ON TWO-CORE PMs");
+
+ msg_host_t pm2 = MSG_host_by_name("node-0.2cores.org"); // 2 cores
+ xbt_assert(pm2, "Host node-0.2cores.org does not seem to exist");
+
+ XBT_INFO("## Test 4 (started): check computation on 2 cores PMs");
+
+ XBT_INFO("### Put a task on a PM");
+ run_test("(X)2", pm2, flop_amount);
+ MSG_process_sleep(2);
+
+ XBT_INFO("### Put two tasks on a PM");
+ run_test("(Xx)2", pm2, flop_amount);
+ run_test("(xX)2", pm2, flop_amount);
+ MSG_process_sleep(2);
+
+ XBT_INFO("### Put three tasks on a PM");
+ run_test("(Xxx)2", pm2, flop_amount*2/3);
+ run_test("(xXx)2", pm2, flop_amount*2/3);
+ run_test("(xxX)2", pm2, flop_amount*2/3);
+ MSG_process_sleep(2);
- XBT_INFO("## Test 4 (started): compare the cost of running two tasks inside two different VMs collocated or not (for"
- " the moment, there is no degradation for the VMs. Hence, the time should be equals to the time of test 1");
+ XBT_INFO("## Test 4 (ended)");
+
+ XBT_INFO("# TEST ON TWO-CORE PMs AND SINGLE-CORE VMs");
+
+ XBT_INFO("## Test 5 (started): check impact of a single VM (there is no degradation for the moment)");
+ XBT_INFO("### Put a VM on a PM, and put a task to the VM");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [X]1 )2", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [Xx]1 )2", vm0, flop_amount/2);
+ run_test("( [xX]1 )2", vm0, flop_amount/2);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put a task to the PM");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [ ]1 X )2", pm2, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put a task to the PM and a task to the VM");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ MSG_vm_start(vm0);
+ run_test("( [X]1 x )2", vm0, flop_amount);
+ run_test("( [x]1 X )2", pm2, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("## Test 5 (ended)");
+
+ XBT_INFO("## Test 6 (started): check impact of a several VMs (there is no degradation for the moment)");
+
+ XBT_INFO("### Put two VMs on a PM, and put a task to one VM");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ msg_vm_t vm1 = MSG_vm_create_core(pm2, "VM1");
+ MSG_vm_start(vm0);
+ MSG_vm_start(vm1);
+ run_test("( [X]1 [ ]1 )2", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+ MSG_vm_destroy(vm1);
+
XBT_INFO("### Put two VMs on a PM, and put a task to each VM");
- vm0 = MSG_vm_create_core(pm0, "VM0");
- msg_vm_t vm1 = MSG_vm_create_core(pm0, "VM1");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ vm1 = MSG_vm_create_core(pm2, "VM1");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- MSG_process_create("compute", computation_fun, NULL, vm0);
- MSG_process_create("compute", computation_fun, NULL, vm1);
+ run_test("( [X]1 [x]1 )2", vm0, flop_amount);
+ run_test("( [x]1 [X]1 )2", vm1, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
-
- XBT_INFO("### Put a VM on each PM, and put a task to each VM");
- vm0 = MSG_vm_create_core(pm0, "VM0");
- vm1 = MSG_vm_create_core(pm1, "VM1");
+
+ XBT_INFO("### Put three VMs on a PM, and put a task to two VMs");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ vm1 = MSG_vm_create_core(pm2, "VM1");
+ msg_vm_t vm2 = MSG_vm_create_core(pm2, "VM2");
MSG_vm_start(vm0);
MSG_vm_start(vm1);
- MSG_process_create("compute", computation_fun, NULL, vm0);
- MSG_process_create("compute", computation_fun, NULL, vm1);
+ MSG_vm_start(vm2);
+ run_test("( [X]1 [x]1 [ ]1 )2", vm0, flop_amount);
+ run_test("( [x]1 [X]1 [ ]1 )2", vm1, flop_amount);
MSG_process_sleep(2);
MSG_vm_destroy(vm0);
MSG_vm_destroy(vm1);
- XBT_INFO("## Test 4 (ended)");
+ MSG_vm_destroy(vm2);
+
+ XBT_INFO("### Put three VMs on a PM, and put a task to each VM");
+ vm0 = MSG_vm_create_core(pm2, "VM0");
+ vm1 = MSG_vm_create_core(pm2, "VM1");
+ vm2 = MSG_vm_create_core(pm2, "VM2");
+ MSG_vm_start(vm0);
+ MSG_vm_start(vm1);
+ MSG_vm_start(vm2);
+ run_test("( [X]1 [o]1 [o]1 )2", vm0, flop_amount*2/3);
+ run_test("( [o]1 [X]1 [o]1 )2", vm1, flop_amount*2/3);
+ run_test("( [o]1 [o]1 [X]1 )2", vm2, flop_amount*2/3);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+ MSG_vm_destroy(vm1);
+ MSG_vm_destroy(vm2);
+
+ XBT_INFO("## Test 6 (ended)");
+
+ XBT_INFO("# TEST ON TWO-CORE PMs AND TWO-CORE VMs");
+
+ XBT_INFO("## Test 7 (started): check impact of a single VM (there is no degradation for the moment)");
+
+ XBT_INFO("### Put a VM on a PM, and put a task to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [X]2 )2", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xo]2 )2", vm0, flop_amount);
+ run_test("( [oX]2 )2", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put three tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xoo]2 )2", vm0, flop_amount*2/3);
+ run_test("( [oXo]2 )2", vm0, flop_amount*2/3);
+ run_test("( [ooX]2 )2", vm0, flop_amount*2/3);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("## Test 7 (ended)");
+
+ XBT_INFO("## Test 8 (started): check impact of a single VM collocated with a task (there is no degradation for the moment)");
+
+ XBT_INFO("### Put a VM on a PM, and put a task to the PM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ ]2 X )2", pm2, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put one task to the PM and one task to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [o]2 X )2", pm2, flop_amount);
+ run_test("( [X]2 o )2", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put one task to the PM and two tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [oo]2 X )2", pm2, flop_amount*2/3);
+ run_test("( [Xo]2 o )2", vm0, flop_amount*2/3);
+ run_test("( [oX]2 o )2", vm0, flop_amount*2/3);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ooo]2 X )2", pm2, flop_amount*2/3);
+ run_test("( [Xoo]2 o )2", vm0, flop_amount*2/9);
+ run_test("( [oXo]2 o )2", vm0, flop_amount*2/9);
+ run_test("( [ooX]2 o )2", vm0, flop_amount*2/9);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the PM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ ]2 Xo )2", pm2, flop_amount);
+ run_test("( [ ]2 oX )2", pm2, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put one task to the PM and one task to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [o]2 Xo )2", pm2, flop_amount*2/3);
+ run_test("( [o]2 oX )2", pm2, flop_amount*2/3);
+ run_test("( [X]2 oo )2", vm0, flop_amount*2/3);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put one task to the PM and two tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [oo]2 Xo )2", pm2, flop_amount/2);
+ run_test("( [oo]2 oX )2", pm2, flop_amount/2);
+ run_test("( [Xo]2 oo )2", vm0, flop_amount/2);
+ run_test("( [oX]2 oo )2", vm0, flop_amount/2);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, put one task to the PM and three tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm2, "VM0", 2);
+ MSG_vm_start(vm0);
+ run_test("( [ooo]2 Xo )2", pm2, flop_amount*2/4);
+ run_test("( [ooo]2 oX )2", pm2, flop_amount*2/4);
+ run_test("( [Xoo]2 oo )2", vm0, flop_amount/3);
+ run_test("( [oXo]2 oo )2", vm0, flop_amount/3);
+ run_test("( [ooX]2 oo )2", vm0, flop_amount/3);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("## Test 8 (ended)");
+
+ XBT_INFO("# TEST ON FOUR-CORE PMs AND TWO-CORE VMs");
+
+ msg_host_t pm4 = MSG_host_by_name("node-0.4cores.org");
+ xbt_assert(pm4, "Host node-0.4cores.org does not seem to exist");
+
+ XBT_INFO("## Test 9 (started): check impact of a single VM");
+
+ XBT_INFO("### Put a VM on a PM, and put a task to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
+ MSG_vm_start(vm0);
+ run_test("( [X]2 )4", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xo]2 )4", vm0, flop_amount);
+ run_test("( [oX]2 )4", vm0, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xoo]2 )4", vm0, flop_amount*2/3);
+ run_test("( [oXo]2 )4", vm0, flop_amount*2/3);
+ run_test("( [ooX]2 )4", vm0, flop_amount*2/3);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("## Test 9 (ended)");
+
+ XBT_INFO("## Test 10 (started): check impact of a single emtpy VM collocated with tasks");
+
+ XBT_INFO("### Put a VM on a PM, and put a task to the PM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ ]2 X )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the PM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ ]2 Xo )4", pm4, flop_amount);
+ run_test("( [ ]2 oX )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put three tasks to the PM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ ]2 Xoo )4", pm4, flop_amount);
+ run_test("( [ ]2 oXo )4", pm4, flop_amount);
+ run_test("( [ ]2 ooX )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put four tasks to the PM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [ ]2 Xooo )4", pm4, flop_amount);
+ run_test("( [ ]2 oXoo )4", pm4, flop_amount);
+ run_test("( [ ]2 ooXo )4", pm4, flop_amount);
+ run_test("( [ ]2 oooX )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("## Test 10 (ended)");
+
+ XBT_INFO("## Test 11 (started): check impact of a single working VM collocated with tasks");
+
+ XBT_INFO("### Put a VM on a PM, and put one task to the PM and one task to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0", 2);
+ MSG_vm_start(vm0);
+ run_test("( [X]2 o )4", vm0, flop_amount);
+ run_test("( [o]2 X )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and one task to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [X]2 oo )4", vm0, flop_amount);
+ run_test("( [o]2 Xo )4", pm4, flop_amount);
+ run_test("( [o]2 oX )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xo]2 oo )4", vm0, flop_amount);
+ run_test("( [oX]2 oo )4", vm0, flop_amount);
+ run_test("( [oo]2 Xo )4", pm4, flop_amount);
+ run_test("( [oo]2 oX )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [X]2 ooo )4", vm0, flop_amount);
+ run_test("( [o]2 Xoo )4", pm4, flop_amount);
+ run_test("( [o]2 oXo )4", pm4, flop_amount);
+ run_test("( [o]2 ooX )4", pm4, flop_amount);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xo]2 ooo )4", vm0, flop_amount*4/5);
+ run_test("( [oX]2 ooo )4", vm0, flop_amount*4/5);
+ run_test("( [oo]2 Xoo )4", pm4, flop_amount*4/5);
+ run_test("( [oo]2 oXo )4", pm4, flop_amount*4/5);
+ run_test("( [oo]2 ooX )4", pm4, flop_amount*4/5);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM");
+ vm0 = MSG_vm_create_multicore(pm4, "VM0",2);
+ MSG_vm_start(vm0);
+ run_test("( [Xoo]2 ooo )4", vm0, flop_amount*(8/5) * 1/3); // The VM has 8/5 of the PM
+ run_test("( [oXo]2 ooo )4", vm0, flop_amount*(8/5) * 1/3);
+ run_test("( [ooX]2 ooo )4", vm0, flop_amount*(8/5) * 1/3);
+
+ run_test("( [ooo]2 Xoo )4", pm4, flop_amount*4/5);
+ run_test("( [ooo]2 oXo )4", pm4, flop_amount*4/5);
+ run_test("( [ooo]2 ooX )4", pm4, flop_amount*4/5);
+ MSG_process_sleep(2);
+ MSG_vm_destroy(vm0);
+
+ XBT_INFO("## Test 11 (ended)");
+ XBT_INFO(" ");
+ XBT_INFO(" ");
+ XBT_INFO("## %d test failed", failed_test);
+ XBT_INFO(" ");
return 0;
}
MSG_init(&argc, argv);
/* load the platform file */
- const char* platform = "../../platforms/cluster.xml";
+ const char* platform = "../../../platforms/cloud-sharing.xml";
if (argc == 2)
platform = argv[1];
MSG_create_environment(platform);
- msg_host_t pm0 = MSG_host_by_name("node-0.acme.org");
- xbt_assert(pm0, "Host 'node-0.acme.org' not found");
+ msg_host_t pm0 = MSG_host_by_name("node-0.1core.org");
+ xbt_assert(pm0, "Host 'node-0.1core.org' not found");
MSG_process_create("master", master_main, NULL, pm0);
- return MSG_main() != MSG_OK;
+ return MSG_main() != MSG_OK || failed_test;
}
#! ./tesh
-$ $SG_TEST_EXENV ${bindir:=.}/cloud-sharing$EXEEXT --log=no_loc ${srcdir:=.}/../../../examples/platforms/cluster.xml
-> [node-0.acme.org:master:(1) 0.000000] [msg_test/INFO] ## Test 1 (started): check computation on normal PMs
-> [node-0.acme.org:master:(1) 0.000000] [msg_test/INFO] ### Put a task on a PM
-> [node-0.acme.org:compute:(2) 0.100000] [msg_test/INFO] Task took 0.1s to execute
-> [node-0.acme.org:master:(1) 2.000000] [msg_test/INFO] ### Put two tasks on a PM
-> [node-0.acme.org:compute:(4) 2.200000] [msg_test/INFO] Task took 0.2s to execute
-> [node-0.acme.org:compute:(3) 2.200000] [msg_test/INFO] Task took 0.2s to execute
-> [node-0.acme.org:master:(1) 4.000000] [msg_test/INFO] ### Put a task on each PM
-> [node-0.acme.org:compute:(5) 4.100000] [msg_test/INFO] Task took 0.1s to execute
-> [node-1.acme.org:compute:(6) 4.100000] [msg_test/INFO] Task took 0.1s to execute
-> [node-0.acme.org:master:(1) 6.000000] [msg_test/INFO] ## Test 1 (ended)
-> [node-0.acme.org:master:(1) 6.000000] [msg_test/INFO] ## Test 2 (started): check impact of running a task inside a VM (there is no degradation for the moment)
-> [node-0.acme.org:master:(1) 6.000000] [msg_test/INFO] ### Put a VM on a PM, and put a task to the VM
-> [VM0:compute:(7) 6.100000] [msg_test/INFO] Task took 0.1s to execute
-> [node-0.acme.org:master:(1) 8.000000] [msg_test/INFO] ## Test 2 (ended)
-> [node-0.acme.org:master:(1) 8.000000] [msg_test/INFO] ## Test 3 (started): check impact of running a task collocated with a VM (there is no VM noise for the moment)
-> [node-0.acme.org:master:(1) 8.000000] [msg_test/INFO] ### Put a VM on a PM, and put a task to the PM
-> [node-0.acme.org:compute:(8) 8.100000] [msg_test/INFO] Task took 0.1s to execute
-> [node-0.acme.org:master:(1) 10.000000] [msg_test/INFO] ## Test 3 (ended)
-> [node-0.acme.org:master:(1) 10.000000] [msg_test/INFO] ## Test 4 (started): compare the cost of running two tasks inside two different VMs collocated or not (for the moment, there is no degradation for the VMs. Hence, the time should be equals to the time of test 1
-> [node-0.acme.org:master:(1) 10.000000] [msg_test/INFO] ### Put two VMs on a PM, and put a task to each VM
-> [VM0:compute:(9) 10.200000] [msg_test/INFO] Task took 0.2s to execute
-> [VM1:compute:(10) 10.200000] [msg_test/INFO] Task took 0.2s to execute
-> [node-0.acme.org:master:(1) 12.000000] [msg_test/INFO] ### Put a VM on each PM, and put a task to each VM
-> [VM0:compute:(11) 12.100000] [msg_test/INFO] Task took 0.1s to execute
-> [VM1:compute:(12) 12.100000] [msg_test/INFO] Task took 0.1s to execute
-> [node-0.acme.org:master:(1) 14.000000] [msg_test/INFO] ## Test 4 (ended)
+$ $SG_TEST_EXENV ${bindir:=.}/cloud-sharing$EXEEXT --log=root.fmt:%m%n ${srcdir:=.}/../../../examples/platforms/cluster_backbone.xml
+> # TEST ON SINGLE-CORE PMs
+> ## Test 1 (started): check computation on normal PMs
+> ### Put a task on a PM
+> Passed: (X)1 with 1 load (100000000flops) took 0.1s as expected
+> ### Put two tasks on a PM
+> Passed: (oX)1 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: (Xo)1 with 0.5 load (50000000flops) took 0.1s as expected
+> ### Put a task on each PM
+> Passed: (X)1 (o)1 with 1 load (100000000flops) took 0.1s as expected
+> Passed: (o)1 (X)1 with 1 load (100000000flops) took 0.1s as expected
+> ## Test 1 (ended)
+> # TEST ON SINGLE-CORE PMs AND SINGLE-CORE VMs
+> ## Test 2 (started): check impact of running tasks inside a VM (there is no degradation for the moment)
+> ### Put a VM on a PM, and put a task to the VM
+> Passed: ( [X]1 )1 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two task to the VM
+> Passed: ( [oX]1 )1 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [Xo]1 )1 with 0.5 load (50000000flops) took 0.1s as expected
+> ## Test 2 (ended)
+> ## Test 3 (started): check impact of running tasks collocated with VMs (there is no VM noise for the moment)
+> ### Put a task on a PM collocated with an empty VM
+> Passed: ( [ ]1 X )1 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put a task to the PM and a task to the VM
+> Passed: ( [o]1 X )1 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [X]1 o )1 with 0.5 load (50000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put a task to the PM and two tasks to the VM
+> Passed: ( [oo]1 X )1 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [oX]1 o )1 with 0.25 load (25000000flops) took 0.1s as expected
+> Passed: ( [Xo]1 o )1 with 0.25 load (25000000flops) took 0.1s as expected
+> ## Test 3 (ended)
+> # TEST ON TWO-CORE PMs
+> ## Test 4 (started): check computation on 2 cores PMs
+> ### Put a task on a PM
+> Passed: (X)2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put two tasks on a PM
+> Passed: (xX)2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: (Xx)2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put three tasks on a PM
+> Passed: (xxX)2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: (Xxx)2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: (xXx)2 with 0.6667 load (66666666flops) took 0.1s as expected
+> ## Test 4 (ended)
+> # TEST ON TWO-CORE PMs AND SINGLE-CORE VMs
+> ## Test 5 (started): check impact of a single VM (there is no degradation for the moment)
+> ### Put a VM on a PM, and put a task to the VM
+> Passed: ( [X]1 )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the VM
+> Passed: ( [xX]1 )2 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [Xx]1 )2 with 0.5 load (50000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put a task to the PM
+> Passed: ( [ ]1 X )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put a task to the PM and a task to the VM
+> Passed: ( [x]1 X )2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [X]1 x )2 with 1 load (100000000flops) took 0.1s as expected
+> ## Test 5 (ended)
+> ## Test 6 (started): check impact of a several VMs (there is no degradation for the moment)
+> ### Put two VMs on a PM, and put a task to one VM
+> Passed: ( [X]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put two VMs on a PM, and put a task to each VM
+> Passed: ( [X]1 [x]1 )2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [x]1 [X]1 )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put three VMs on a PM, and put a task to two VMs
+> Passed: ( [X]1 [x]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [x]1 [X]1 [ ]1 )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put three VMs on a PM, and put a task to each VM
+> Passed: ( [X]1 [o]1 [o]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [o]1 [o]1 [X]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [o]1 [X]1 [o]1 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> ## Test 6 (ended)
+> # TEST ON TWO-CORE PMs AND TWO-CORE VMs
+> ## Test 7 (started): check impact of a single VM (there is no degradation for the moment)
+> ### Put a VM on a PM, and put a task to the VM
+> Passed: ( [X]2 )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the VM
+> Passed: ( [oX]2 )2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [Xo]2 )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put three tasks to the VM
+> Passed: ( [ooX]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [Xoo]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [oXo]2 )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> ## Test 7 (ended)
+> ## Test 8 (started): check impact of a single VM collocated with a task (there is no degradation for the moment)
+> ### Put a VM on a PM, and put a task to the PM
+> Passed: ( [ ]2 X )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put one task to the PM and one task to the VM
+> Passed: ( [o]2 X )2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [X]2 o )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put one task to the PM and two tasks to the VM
+> Passed: ( [oo]2 X )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [oX]2 o )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [Xo]2 o )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> ### Put a VM on a PM, put one task to the PM and three tasks to the VM
+> Passed: ( [ooo]2 X )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [ooX]2 o )2 with 0.2222 load (22222222flops) took 0.1s as expected
+> Passed: ( [Xoo]2 o )2 with 0.2222 load (22222222flops) took 0.1s as expected
+> Passed: ( [oXo]2 o )2 with 0.2222 load (22222222flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the PM
+> Passed: ( [ ]2 oX )2 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 Xo )2 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put one task to the PM and one task to the VM
+> Passed: ( [o]2 Xo )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [o]2 oX )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [X]2 oo )2 with 0.6667 load (66666666flops) took 0.1s as expected
+> ### Put a VM on a PM, put one task to the PM and two tasks to the VM
+> Passed: ( [oo]2 Xo )2 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [oo]2 oX )2 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [oX]2 oo )2 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [Xo]2 oo )2 with 0.5 load (50000000flops) took 0.1s as expected
+> ### Put a VM on a PM, put one task to the PM and three tasks to the VM
+> Passed: ( [ooo]2 Xo )2 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [ooo]2 oX )2 with 0.5 load (50000000flops) took 0.1s as expected
+> Passed: ( [ooX]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
+> Passed: ( [Xoo]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
+> Passed: ( [oXo]2 oo )2 with 0.3333 load (33333333flops) took 0.1s as expected
+> ## Test 8 (ended)
+> # TEST ON FOUR-CORE PMs AND TWO-CORE VMs
+> ## Test 9 (started): check impact of a single VM
+> ### Put a VM on a PM, and put a task to the VM
+> Passed: ( [X]2 )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the VM
+> Passed: ( [oX]2 )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [Xo]2 )4 with 1 load (100000000flops) took 0.1s as expected
+> ### ( [ooo]2 )4: Put a VM on a PM, and put three tasks to the VM
+> Passed: ( [ooX]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [Xoo]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
+> Passed: ( [oXo]2 )4 with 0.6667 load (66666666flops) took 0.1s as expected
+> ## Test 9 (ended)
+> ## Test 10 (started): check impact of a single emtpy VM collocated with tasks
+> ### Put a VM on a PM, and put a task to the PM
+> Passed: ( [ ]2 X )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the PM
+> Passed: ( [ ]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put three tasks to the PM
+> Passed: ( [ ]2 ooX )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 Xoo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 oXo )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put four tasks to the PM
+> Passed: ( [ ]2 oooX )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 ooXo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 oXoo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [ ]2 Xooo )4 with 1 load (100000000flops) took 0.1s as expected
+> ## Test 10 (ended)
+> ## Test 11 (started): check impact of a single working VM collocated with tasks
+> ### Put a VM on a PM, and put one task to the PM and one task to the VM
+> Passed: ( [o]2 X )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [X]2 o )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the PM and one task to the VM
+> Passed: ( [o]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [o]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [X]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put two tasks to the PM and two tasks to the VM
+> Passed: ( [oo]2 Xo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [oo]2 oX )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [oX]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [Xo]2 oo )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put three tasks to the PM and one tasks to the VM
+> Passed: ( [o]2 ooX )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [o]2 Xoo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [o]2 oXo )4 with 1 load (100000000flops) took 0.1s as expected
+> Passed: ( [X]2 ooo )4 with 1 load (100000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put three tasks to the PM and two tasks to the VM
+> Passed: ( [oo]2 ooX )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [oo]2 Xoo )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [oo]2 oXo )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [oX]2 ooo )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [Xo]2 ooo )4 with 0.8 load (80000000flops) took 0.1s as expected
+> ### Put a VM on a PM, and put three tasks to the PM and three tasks to the VM
+> Passed: ( [ooo]2 ooX )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [ooo]2 Xoo )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [ooo]2 oXo )4 with 0.8 load (80000000flops) took 0.1s as expected
+> Passed: ( [ooX]2 ooo )4 with 0.3333 load (33333333flops) took 0.1s as expected
+> Passed: ( [Xoo]2 ooo )4 with 0.3333 load (33333333flops) took 0.1s as expected
+> Passed: ( [oXo]2 ooo )4 with 0.3333 load (33333333flops) took 0.1s as expected
+> ## Test 11 (ended)
+>
+>
+> ## 15 test failed
> [Tremblay:test_launcher:(1) 10.000000] [msg_test/INFO] Test 6 is also weird: when the node Jupiter is turned off once again, the VM and its daemon are not killed. However, the issue regarding the shutdown of hosted VMs can be seen a feature not a bug ;)
> [Tremblay:test_launcher:(1) 10.000000] [msg_test/INFO] Test done. See you!
> [10.000000] [msg_test/INFO] Simulation time 10
+> [10.000000] [surf_maxmin/WARNING] Probable bug: a simgrid::surf::CpuCas01Action variable (#13) not removed before the LMM system destruction.
+> [10.000000] [surf_maxmin/WARNING] Probable bug: a simgrid::surf::CpuCas01Action variable (#2) not removed before the LMM system destruction.
+
+p The previous test suffers of bug https://github.com/simgrid/simgrid/issues/120
+p but the code is still not clean enough to really solve it.
-foreach(x actor concurrent_rw host_on_off_wait listen_async pid storage_client_server)
+foreach(x actor comm-pt2pt comm-waitany concurrent_rw host_on_off_wait listen_async pid storage_client_server)
add_executable (${x} ${x}/${x}.cpp)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
- set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.cpp)
endforeach()
+## Add the tests.
+## Some need to be run with all factories, some need not tesh to run
+foreach(x actor concurrent_rw)
+ set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
+ ADD_TESH_FACTORIES(tesh-s4u-${x} "thread;boost;ucontext;raw" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/s4u/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x}/${x}.tesh)
+endforeach()
+
+foreach(x host_on_off_wait listen_async pid storage_client_server)
+ set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
+ ADD_TESH(tesh-s4u-${x} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/s4u/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x}/${x}.tesh)
+endforeach()
+
+# The output is not relevant
+ADD_TEST(tesh-s4u-comm-pt2pt ${CMAKE_BINARY_DIR}/teshsuite/s4u/comm-pt2pt/comm-pt2pt ${CMAKE_HOME_DIRECTORY}/examples/platforms/cluster.xml)
+ADD_TEST(tesh-s4u-comm-waitany ${CMAKE_BINARY_DIR}/teshsuite/s4u/comm-waitany/comm-waitany ${CMAKE_HOME_DIRECTORY}/examples/platforms/two_hosts.xml)
+
+
+
set(teshsuite_src ${teshsuite_src} PARENT_SCOPE)
set(tesh_files ${tesh_files} PARENT_SCOPE)
set(xml_files ${xml_files} PARENT_SCOPE)
-
-foreach(x actor concurrent_rw host_on_off_wait listen_async pid storage_client_server)
- ADD_TESH_FACTORIES(tesh-s4u-${x} "thread;boost;ucontext;raw" --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x} --cd ${CMAKE_BINARY_DIR}/teshsuite/s4u/${x} ${CMAKE_HOME_DIRECTORY}/teshsuite/s4u/${x}/${x}.tesh)
-endforeach()
--- /dev/null
+/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* We want this test to be exhaustive in term of:
+ * - communication involved (regular, asynchronous, detached, with a permanent receiver declared)
+ * - whether the send or the receive was posted first
+ *
+ * FIXME: Missing elements: timeouts, host/actor failures, link failures
+ */
+
+#include "simgrid/s4u.hpp"
+
+#include <cstring>
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(s4u_test, "Messages specific for this s4u example");
+
+using namespace simgrid::s4u;
+
+/* Print command-line usage: the letters accepted in the sender and receiver
+ * specification strings (one test is run per letter), plus the default specs,
+ * then terminate the program with exit status 1.
+ * Fix: the descriptions of 'p' and 'j' wrongly claimed "(after a little
+ * delay)"; in receiver(), only the uppercase variants sleep before receiving. */
+static void usage(const char* binaryName, const char* defaultSend, const char* defaultRecv)
+{
+ std::fprintf(stderr, "Usage: %s examples/platforms/cluster.xml <send_spec> <recv_spec>\n"
+ "where spec is a list of letters giving the kind of tests you want to see.\n"
+ "Existing sender spec:\n"
+ " r regular send\n"
+ " R regular send (after a little delay)\n"
+ " i asynchronous isend\n"
+ " I asynchronous isend (after a little delay)\n"
+ " d detached send\n"
+ " D detached send (after a little delay)\n"
+ "Existing receiver spec:\n"
+ " r regular receive\n"
+ " R regular receive (after a little delay)\n"
+ " i asynchronous irecv\n"
+ " I asynchronous irecv (after a little delay)\n"
+ " p regular receive on permanent mailbox\n"
+ " P regular receive on permanent mailbox (after a little delay)\n"
+ " j irecv on permanent mailbox\n"
+ " J irecv on permanent mailbox (after a little delay)\n"
+ "\n"
+ "Example 1: %s examples/platforms/cluster.xml rRiIdD rrrrrr # testing all send functions\n"
+ "Default specs: %s %s (all possible pairs)\n",
+ binaryName, binaryName, defaultSend, defaultRecv);
+ exit(1);
+}
+
+/* Sender actor: runs one send test per letter of args[0], each on a dedicated
+ * mailbox named "Test #<n>". Tests start at t = 5*(n-1) so consecutive tests
+ * cannot overlap. The payload sent is the mailbox name itself, which lets the
+ * receiver assert that the right message arrived; ownership of the bprintf'ed
+ * string transfers with the message, and the receiver frees it. */
+static void sender(std::vector<std::string> args)
+{
+ XBT_INFO("Sender spec: %s", args[0].c_str());
+ for (unsigned int test = 1; test <= args[0].size(); test++) {
+ this_actor::sleep_until(test * 5 - 5);
+ char* mboxName = bprintf("Test #%u", test);
+ simgrid::s4u::MailboxPtr mbox = simgrid::s4u::Mailbox::byName(mboxName);
+
+ // Uppercase letters sleep 0.5s first, so the matching receive is posted before the send.
+ switch (args[0][test - 1]) {
+ case 'r':
+ XBT_INFO("Test %d: r (regular send)", test);
+ simgrid::s4u::this_actor::send(mbox, (void*)mboxName, 42.0);
+ break;
+ case 'R':
+ XBT_INFO("Test %d: R (sleep + regular send)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ simgrid::s4u::this_actor::send(mbox, (void*)mboxName, 42.0);
+ break;
+
+ case 'i':
+ XBT_INFO("Test %d: i (asynchronous isend)", test);
+ simgrid::s4u::this_actor::isend(mbox, (void*)mboxName, 42.0)->wait();
+ break;
+ case 'I':
+ XBT_INFO("Test %d: I (sleep + isend)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ simgrid::s4u::this_actor::isend(mbox, (void*)mboxName, 42.0)->wait();
+ break;
+
+ case 'd':
+ XBT_INFO("Test %d: d (detached send)", test);
+ simgrid::s4u::this_actor::dsend(mbox, (void*)mboxName, 42.0);
+ break;
+ case 'D':
+ XBT_INFO("Test %d: D (sleep + detached send)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ simgrid::s4u::this_actor::dsend(mbox, (void*)mboxName, 42.0);
+ break;
+ default:
+ xbt_die("Unknown sender spec for test %d: '%c'", test, args[0][test - 1]);
+ }
+ XBT_INFO("Test %d OK", test);
+ }
+ // Give detached sends a chance to complete before this actor ends.
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ // FIXME: we should test what happens when the process ends before the end of remote comm instead of hiding it
+}
+
+/* Receiver actor: mirrors sender(), running one receive test per letter of
+ * args[0] on mailbox "Test #<n>", starting at t = 5*(n-1). After each receive
+ * it asserts that the payload equals the mailbox name, then frees both the
+ * received string (the sender's allocation) and its own local copy. */
+static void receiver(std::vector<std::string> args)
+{
+ XBT_INFO("Receiver spec: %s", args[0].c_str());
+ for (unsigned int test = 1; test <= args[0].size(); test++) {
+ this_actor::sleep_until(test * 5 - 5);
+ char* mboxName = bprintf("Test #%u", test);
+ simgrid::s4u::MailboxPtr mbox = simgrid::s4u::Mailbox::byName(mboxName);
+ void* received = nullptr;
+
+ // 'p'/'P'/'j'/'J' first declare this actor as the mailbox's permanent receiver;
+ // uppercase letters sleep 0.5s so the send is posted before the receive.
+ switch (args[0][test - 1]) {
+ case 'r':
+ XBT_INFO("Test %d: r (regular receive)", test);
+ received = simgrid::s4u::this_actor::recv(mbox);
+ break;
+ case 'R':
+ XBT_INFO("Test %d: R (sleep + regular receive)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ received = simgrid::s4u::this_actor::recv(mbox);
+ break;
+
+ case 'i':
+ XBT_INFO("Test %d: i (asynchronous irecv)", test);
+ simgrid::s4u::this_actor::irecv(mbox, &received)->wait();
+ break;
+ case 'I':
+ XBT_INFO("Test %d: I (sleep + asynchronous irecv)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ simgrid::s4u::this_actor::irecv(mbox, &received)->wait();
+ break;
+ case 'p':
+ XBT_INFO("Test %d: p (regular receive on permanent mailbox)", test);
+ mbox->setReceiver(Actor::self());
+ received = simgrid::s4u::this_actor::recv(mbox);
+ break;
+ case 'P':
+ XBT_INFO("Test %d: P (sleep + regular receive on permanent mailbox)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ mbox->setReceiver(Actor::self());
+ received = simgrid::s4u::this_actor::recv(mbox);
+ break;
+ case 'j':
+ XBT_INFO("Test %d: j (irecv on permanent mailbox)", test);
+ mbox->setReceiver(Actor::self());
+ simgrid::s4u::this_actor::irecv(mbox, &received)->wait();
+ break;
+ case 'J':
+ XBT_INFO("Test %d: J (sleep + irecv on permanent mailbox)", test);
+ simgrid::s4u::this_actor::sleep_for(0.5);
+ mbox->setReceiver(Actor::self());
+ simgrid::s4u::this_actor::irecv(mbox, &received)->wait();
+ break;
+ default:
+ xbt_die("Unknown receiver spec for test %d: '%c'", test, args[0][test - 1]);
+ }
+
+ // The payload must be the mailbox name sent by sender().
+ xbt_assert(strcmp(static_cast<char*>(received), mboxName) == 0);
+ xbt_free(received);
+ xbt_free(mboxName);
+ XBT_INFO("Test %d OK", test);
+ }
+ simgrid::s4u::this_actor::sleep_for(0.5);
+}
+
+/* Entry point: build the default spec strings pairing every sender letter
+ * with every receiver letter (6 x 8 = 48 tests), allow argv[2]/argv[3] to
+ * override them, then start one sender and one receiver actor on the first
+ * two hosts of the platform given as argv[1].
+ * NOTE(review): assumes the platform defines at least two hosts, and the
+ * Engine object is never deleted -- acceptable for a test binary. */
+int main(int argc, char* argv[])
+{
+ std::string specSend;
+ std::string specRecv;
+ for (char s : {'r', 'R', 'i', 'I', 'd', 'D'})
+ for (char r : {'r', 'R', 'i', 'I', 'p', 'P', 'j', 'J'}) {
+ specSend += s;
+ specRecv += r;
+ }
+ std::vector<std::string> argSend{specSend.c_str()};
+ std::vector<std::string> argRecv{specRecv.c_str()};
+
+ simgrid::s4u::Engine* e = new simgrid::s4u::Engine(&argc, argv);
+ if (argc < 2)
+ usage(argv[0], specSend.c_str(), specRecv.c_str());
+
+ e->loadPlatform(argv[1]);
+
+ if (argc >= 3) {
+ argSend.clear();
+ argSend.push_back(argv[2]);
+ }
+ if (argc >= 4) {
+ argRecv.clear();
+ argRecv.push_back(argv[3]);
+ }
+ xbt_assert(argSend.front().size() == argRecv.front().size(), "Sender and receiver spec must be of the same size");
+
+ simgrid::s4u::Host** hosts = sg_host_list();
+ simgrid::s4u::Actor::createActor("sender", hosts[0], sender, argSend);
+ simgrid::s4u::Actor::createActor("recver", hosts[1], receiver, argRecv);
+ xbt_free(hosts);
+
+ e->run();
+ XBT_INFO("Simulation time %g", e->getClock());
+
+ return 0;
+}
--- /dev/null
+#include <iostream>
+#include <simgrid/s4u.hpp>
+#include <stdlib.h>
+#include <vector>
+
+#define NUM_COMMS 1
+
+XBT_LOG_NEW_DEFAULT_CATEGORY(mwe, "Minimum Working Example");
+
+/* Receiver: posts NUM_COMMS asynchronous receives up-front, then repeatedly
+ * sleeps 3 seconds and calls wait_any() on the still-pending comms, logging
+ * how many it thinks have completed (this is the minimum working example
+ * probing the wait_any() bug referenced in the tesh file).
+ * NOTE(review): every recv_async() shares the single 'data' buffer -- only
+ * harmless because NUM_COMMS is 1; 'theirmailbox' is declared but unused;
+ * and the completed count is derived from the iterator's distance to the end
+ * of the vector, which is exactly the semantics this MWE is checking. */
+static void receiver()
+{
+ simgrid::s4u::MailboxPtr mymailbox = simgrid::s4u::Mailbox::byName("receiver_mailbox");
+ simgrid::s4u::MailboxPtr theirmailbox = simgrid::s4u::Mailbox::byName("sender_mailbox");
+
+ std::vector<simgrid::s4u::CommPtr> pending_comms;
+
+ XBT_INFO("Placing %d asynchronous recv requests", NUM_COMMS);
+ void* data;
+ for (int i = 0; i < NUM_COMMS; i++) {
+ simgrid::s4u::CommPtr comm = simgrid::s4u::Comm::recv_async(mymailbox, &data);
+ pending_comms.push_back(comm);
+ }
+
+ for (int i = 0; i < NUM_COMMS; i++) {
+ XBT_INFO("Sleeping for 3 seconds (for the %dth time)...", i + 1);
+ simgrid::s4u::this_actor::sleep_for(3.0);
+ XBT_INFO("Calling wait_any() for %zu pending comms", pending_comms.size());
+ std::vector<simgrid::s4u::CommPtr>::iterator ret_it =
+ simgrid::s4u::Comm::wait_any(pending_comms.begin(), pending_comms.end());
+ XBT_INFO("Counting the number of completed comms...");
+
+ // Count elements from the returned iterator to the end of the vector.
+ int count = 0;
+ for (; ret_it != pending_comms.end(); count++, ret_it++)
+ ;
+
+ XBT_INFO("wait_any() replied that %d comms have completed", count);
+ // xbt_assert(count == 1, "wait_any() replied that %d comms have completed, which is broken!", count);
+ }
+}
+
+/* Sender: sends NUM_COMMS messages to the receiver's mailbox with a simulated
+ * size of 4 bytes, sleeping 1000 simulated seconds after each send.
+ * NOTE(review): the payload passed is &data (the address of the local
+ * pointer), not data itself; the receiver never dereferences the payload, so
+ * this MWE only exercises communication completion, not data transfer.
+ * 'mymailbox' is declared but unused. */
+static void sender()
+{
+ simgrid::s4u::MailboxPtr mymailbox = simgrid::s4u::Mailbox::byName("sender_mailbox");
+ simgrid::s4u::MailboxPtr theirmailbox = simgrid::s4u::Mailbox::byName("receiver_mailbox");
+
+ void* data = (void*)"data";
+
+ for (int i = 0; i < NUM_COMMS; i++) {
+ XBT_INFO("Sending a message to the receiver");
+ simgrid::s4u::this_actor::send(theirmailbox, &data, 4);
+ XBT_INFO("Sleeping for 1000 seconds");
+ simgrid::s4u::this_actor::sleep_for(1000.0);
+ }
+}
+
+/* Entry point: load the platform file given as argv[1] and run the Receiver
+ * and Sender actors, both on host "Tremblay" (the platform must define it).
+ * NOTE(review): the Engine object is never deleted -- acceptable for a test
+ * binary. */
+int main(int argc, char** argv)
+{
+
+ simgrid::s4u::Engine* engine = new simgrid::s4u::Engine(&argc, argv);
+
+ if (argc < 2) {
+ std::cerr << "Usage: " << argv[0] << " <xml platform file>" << std::endl;
+ exit(1);
+ }
+
+ engine->loadPlatform(argv[1]);
+ simgrid::s4u::Host* host = simgrid::s4u::Host::by_name("Tremblay");
+
+ simgrid::s4u::Actor::createActor("Receiver", host, receiver);
+ simgrid::s4u::Actor::createActor("Sender", host, sender);
+
+ simgrid::s4u::Engine::instance()->run();
+
+ return 0;
+}
> [ 0.000195] (test@Tremblay) Data successfully received from regular mailbox
> [ 0.000195] (test@Tremblay) Task listen works on asynchronous mailboxes
> [ 0.000195] (test@Tremblay) Data successfully received from asynchronous mailbox
-> [ 0.000195] (maestro@) Probable bug: a simgrid::surf::NetworkCm02Action variable (#3) not removed before the LMM system destruction.
! expect signal SIGABRT
$ ${bindir:=.}/flatifier ../platforms/bogus_missing_src_gateway.xml "--log=root.fmt:[%10.6r]%e[%i:%P@%h]%e%m%n"
> [ 0.000000] [0:maestro@] Switching to the L07 model to handle parallel tasks.
-> [ 0.000000] [0:maestro@] Parse error at ../platforms/bogus_missing_src_gateway.xml:14: zoneRoute gw_src='nod-cluster_router.cluster.us' does name a node.
+> [ 0.000000] [0:maestro@] Parse error at ../platforms/bogus_missing_src_gateway.xml:14: zoneRoute gw_src='nod-cluster_router.cluster.us' does name a node. Existing netpoints:
+> 'noeud-2.grappe.fr','noeud-4.grappe.fr','noeud-1.grappe.fr','noeud-3.grappe.fr','node-1.cluster.us','noeud-grappe_router.grappe.fr','node-2.cluster.us','node-3.cluster.us','node-4.cluster.us','node-cluster_router.cluster.us'
> [ 0.000000] [0:maestro@] Exiting now
! expect signal SIGABRT
$ ${bindir:=.}/flatifier ../platforms/bogus_missing_dst_gateway.xml "--log=root.fmt:[%10.6r]%e[%i:%P@%h]%e%m%n"
> [ 0.000000] [0:maestro@] Switching to the L07 model to handle parallel tasks.
-> [ 0.000000] [0:maestro@] Parse error at ../platforms/bogus_missing_dst_gateway.xml:14: zoneRoute gw_dst='neud-grappe_router.grappe.fr' does name a node.
+> [ 0.000000] [0:maestro@] Parse error at ../platforms/bogus_missing_dst_gateway.xml:14: zoneRoute gw_dst='neud-grappe_router.grappe.fr' does name a node. Existing netpoints:
+> 'noeud-2.grappe.fr','noeud-4.grappe.fr','noeud-1.grappe.fr','noeud-3.grappe.fr','node-1.cluster.us','noeud-grappe_router.grappe.fr','node-2.cluster.us','node-3.cluster.us','node-4.cluster.us','node-cluster_router.cluster.us'
> [ 0.000000] [0:maestro@] Exiting now
#include "src/kernel/routing/NetPoint.hpp"
#include "src/surf/network_interface.hpp"
+#include <algorithm>
XBT_LOG_NEW_DEFAULT_CATEGORY(flatifier, "Logging specific to this platform parsing tool");
#include "simgrid/s4u/Host.hpp"
#include "simgrid/simdag.h"
#include "src/kernel/routing/NetPoint.hpp"
+#include <algorithm>
#include <stdio.h>
int main(int argc, char **argv)
! output sort
p Test separate clusters
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/two_clusters.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
-> [rank 0] -> node-0.acme.org
-> [rank 1] -> node-1.acme.org
-> [rank 2] -> node-2.acme.org
-> [rank 3] -> node-3.acme.org
-> [rank 4] -> node-4.acme.org
-> [rank 5] -> node-5.acme.org
-> [rank 6] -> node-6.acme.org
-> [rank 7] -> node-7.acme.org
-> [rank 8] -> node-8.acme.org
-> [rank 9] -> node-9.acme.org
-> [rank 10] -> node-10.acme.org
-> [rank 11] -> node-11.acme.org
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples/platforms/cluster_backbone.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+> [rank 0] -> node-0.1core.org
+> [rank 1] -> node-1.1core.org
+> [rank 2] -> node-2.1core.org
+> [rank 3] -> node-3.1core.org
+> [rank 4] -> node-4.1core.org
+> [rank 5] -> node-5.1core.org
+> [rank 6] -> node-6.1core.org
+> [rank 7] -> node-7.1core.org
+> [rank 8] -> node-0.2cores.org
+> [rank 9] -> node-1.2cores.org
+> [rank 10] -> node-2.2cores.org
+> [rank 11] -> node-3.2cores.org
> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 ]
> [1] sndbuf=[12 13 14 15 16 17 18 19 20 21 22 23 ]
> [2] sndbuf=[24 25 26 27 28 29 30 31 32 33 34 35 ]
examples/platforms/bypassRoute.xml
examples/platforms/cloud.xml
examples/platforms/cluster.xml
+ examples/platforms/cluster_backbone.xml
examples/platforms/cluster_and_one_host.xml
examples/platforms/cluster_prototype.lua
examples/platforms/cluster_no_backbone.xml
examples/platforms/syscoord/median_meridian.syscoord
examples/platforms/syscoord/median_p2psim.syscoord
examples/platforms/three_multicore_hosts.xml
- examples/platforms/two_clusters.xml
examples/platforms/two_hosts.xml
examples/platforms/two_hosts_platform_shared.xml
examples/platforms/two_hosts_platform_with_availability.xml
ADD_TESH_FACTORIES(mc-bugged1 "ucontext;raw" --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1.tesh)
ADD_TESH_FACTORIES(mc-bugged2 "ucontext;raw" --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged2.tesh)
IF(HAVE_UCONTEXT_CONTEXTS AND SIMGRID_PROCESSOR_x86_64) # liveness model-checking works only on 64bits (for now ...)
-# ADD_TESH(mc-bugged1-liveness-ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness.tesh)
-# ADD_TESH(mc-bugged1-liveness-ucontext-sparse --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_sparse.tesh)
-# ADD_TESH(mc-bugged1-liveness-visited-ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited.tesh)
-# ADD_TESH(mc-bugged1-liveness-visited-ucontext-sparse --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited_sparse.tesh)
+ ADD_TESH(mc-bugged1-liveness-ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness.tesh)
+ ADD_TESH(mc-bugged1-liveness-ucontext-sparse --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_sparse.tesh)
+ ADD_TESH(mc-bugged1-liveness-visited-ucontext --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited.tesh)
+ ADD_TESH(mc-bugged1-liveness-visited-ucontext-sparse --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/mc --cd ${CMAKE_HOME_DIRECTORY}/examples/msg/mc bugged1_liveness_visited_sparse.tesh)
IF(HAVE_C_STACK_CLEANER)
# This test checks if the stack cleaner is making a difference:
-# ADD_TEST(mc-bugged1-liveness-stack-cleaner ${CMAKE_HOME_DIRECTORY}/examples/msg/mc/bugged1_liveness_stack_cleaner ${CMAKE_HOME_DIRECTORY}/examples/msg/mc/ ${CMAKE_BINARY_DIR}/examples/msg/mc/)
+ ADD_TEST(mc-bugged1-liveness-stack-cleaner ${CMAKE_HOME_DIRECTORY}/examples/msg/mc/bugged1_liveness_stack_cleaner ${CMAKE_HOME_DIRECTORY}/examples/msg/mc/ ${CMAKE_BINARY_DIR}/examples/msg/mc/)
ENDIF()
ENDIF()
ENDIF()