#define BAND_OP(a, b) (b) &= (a)
#define BOR_OP(a, b) (b) |= (a)
#define BXOR_OP(a, b) (b) ^= (a)
-#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : (a)
-#define MINLOC_OP(a, b) (b) = (a.value) < (b.value) ? (a) : (b)
+#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (a))
+#define MINLOC_OP(a, b) (b) = (a.value) < (b.value) ? (a) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (b))
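The added tie-break mirrors the MPI standard's MAXLOC/MINLOC reductions: when two values compare equal, the pair carrying the smaller index wins. A minimal standalone sketch of the expected behaviour (the int_int pair type is our stand-in for the value/index structs these macros operate on):

#include <stdio.h>

typedef struct { int value; int index; } int_int;  /* stand-in pair type */

#define MAXLOC_OP(a, b) (b) = (a.value) < (b.value) ? (b) : ((a.value) == (b.value) ? ((a.index) < (b.index) ? (a) : (b)) : (a))

int main(void)
{
    int_int a = {42, 3};
    int_int b = {42, 1};
    MAXLOC_OP(a, b);
    /* Equal values: the smaller index must survive, per MPI_MAXLOC. */
    printf("value=%d index=%d\n", b.value, b.index);  /* value=42 index=1 */
    return 0;
}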
#define APPLY_FUNC(a, b, length, type, func) \
{ \
//for dynamic windows, target_disp is an address and can wrongly appear negative, so it is not range-checked here
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
- (origin_addr==nullptr && origin_count > 0) ||
+ (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
(result_addr==nullptr && result_count > 0)){
retval = MPI_ERR_COUNT;
- } else if ((!origin_datatype->is_valid()) ||
+ } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
(!target_datatype->is_valid())||
(!result_datatype->is_valid())) {
retval = MPI_ERR_TYPE;
//for dynamic windows, target_disp is an address and can wrongly appear negative, so it is not range-checked here
retval = MPI_ERR_ARG;
} else if ((origin_count < 0 || target_count < 0 || result_count <0) ||
- (origin_addr==nullptr && origin_count > 0) ||
+ (origin_addr==nullptr && origin_count > 0 && op != MPI_NO_OP) ||
(result_addr==nullptr && result_count > 0)){
retval = MPI_ERR_COUNT;
- } else if ((!origin_datatype->is_valid()) ||
+ } else if ((origin_datatype!=MPI_DATATYPE_NULL && !origin_datatype->is_valid()) ||
(!target_datatype->is_valid())||
(!result_datatype->is_valid())) {
retval = MPI_ERR_TYPE;
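Both relaxed checks encode MPI-3 semantics: with op == MPI_NO_OP, MPI_Get_accumulate degenerates to an atomic read and the origin arguments are ignored, so a null origin_addr (and a null origin datatype) must not be rejected. A minimal sketch of such a read, assuming a plain allocated window:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int *win_buf;
    int result = -1;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Win_allocate(sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD,
                     &win_buf, &win);
    *win_buf = 42;
    MPI_Win_fence(0, win);

    /* Atomic read: origin_addr is NULL, which the relaxed check accepts
     * because MPI_NO_OP never touches the origin buffer. */
    MPI_Get_accumulate(NULL, 1, MPI_INT, &result, 1, MPI_INT,
                       0 /* target rank */, 0, 1, MPI_INT, MPI_NO_OP, win);
    MPI_Win_fence(0, win);

    printf("read %d from rank 0\n", result);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}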
at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc fetch_and_op flush req_example rmanull rmazero badrma
lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed strided_getacc_indexed compare_and_swap
lockall_dt lockall_dt_flushall lock_dt_flush lockall_dt_flush lockall_dt_flushlocalall lockall_dt_flushlocal lock_dt_flushlocal
- strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget)
- # not compiled files: acc-loc adlb_mimic1
- # contention_putget contig_displ fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
- # fkeyvalwin get_acc_local get_accumulate linked_list_bench_lock_all
- # linked_list_bench_lock_excl linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
- # manyrma2 mcs-mutex mixedsync mutex_bench lockcontention3 reqops
- # pscw_ordering put_base put_bottom rma-contig selfrma
- # strided_getacc_indexed_shared
+ strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget
+ adlb_mimic1 lock_contention_dt acc-loc get_acc_local get_accumulate put_base put_bottom
+ linked_list_bench_lock_all linked_list_bench_lock_excl manyrma2 pscw_ordering rma-contig get-struct
+ rput_local_comp racc_local_comp)
+ # not compiled files:
+ # fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
+ # linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
+ # mcs-mutex mixedsync mutex_bench lockcontention3 reqops
+ # strided_getacc_indexed_shared rget-unlock
# win_flavors win_shared win_shared_noncontig win_shared_noncontig_put
- # win_large_shm win_zero wintest get-struct atomic_rmw_fop atomic_rmw_gacc rget-unlock atomic_get atomic_rmw_cas
+ # win_large_shm win_zero wintest atomic_rmw_fop atomic_rmw_gacc atomic_get atomic_rmw_cas
# win_shared_zerobyte aint derived-acc-flush_local large-acc-flush_local
- # lock_contention_dt
- #racc_local_comp rput_local_comp win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
+ # win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
# wrma_flush_get
add_executable(${file} ${file}.c)
target_link_libraries(${file} simgrid mtest_c)
int errors = 0;
const int NITER = 1000;
-const int acc_val = 3;
+
int main(int argc, char **argv)
{
int rank, nproc;
int out_val, i, counter = 0;
MPI_Win win;
-
+ int acc_val = 3;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int *my_elems_size, int *my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
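Passing the bookkeeping triple explicitly (instead of file-scope statics) keeps alloc_elem reentrant, which matters when every rank runs in one simulated address space. The caller now owns cleanup as well; a hypothetical teardown helper in the same style (free_elems is our name, not part of the test; llist_elem_t comes from the test source):

#include <stdlib.h>
#include <mpi.h>

/* Frees every locally allocated element recorded by alloc_elem, then the
 * tracking array itself. Elements were obtained with MPI_Alloc_mem. */
static void free_elems(llist_elem_t ***my_elems, int *my_elems_size, int *my_elems_count)
{
    for (int i = 0; i < *my_elems_count; i++)
        MPI_Free_mem((*my_elems)[i]);
    free(*my_elems);
    *my_elems = NULL;
    *my_elems_size = 0;
    *my_elems_count = 0;
}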
#define MIN_NPROBE 1
#define ELEM_PER_ROW 16
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) (((X) < (Y)) ? (X) : (Y))
+#define MYMAX(X,Y) (((X) > (Y)) ? (X) : (Y))
/* Linked list pointer */
typedef struct {
static const int verbose = 0;
static const int print_perf = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int *my_elems_size, int *my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &procid);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(procid, llist_win);
+ head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
(void *) next_tail_ptr.disp);
tail_ptr = next_tail_ptr;
- pollint = MAX(MIN_NPROBE, pollint / 2);
+ pollint = MYMAX(MIN_NPROBE, pollint / 2);
}
else {
for (j = 0; j < pollint; j++)
MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag,
MPI_STATUS_IGNORE);
- pollint = MIN(MAX_NPROBE, pollint * 2);
+ pollint = MYMIN(MAX_NPROBE, pollint * 2);
}
}
} while (!success);
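The pollint updates implement a multiplicative backoff on the probe loop: halve the interval after making progress (never below MIN_NPROBE), double it while stalled (never above MAX_NPROBE). A standalone sketch of the policy with the renamed macros:

#include <stdio.h>

#define MYMIN(X,Y) (((X) < (Y)) ? (X) : (Y))
#define MYMAX(X,Y) (((X) > (Y)) ? (X) : (Y))

/* Sketch of the adaptive polling above: shrink the interval after progress,
 * grow it while waiting, clamped to [min_probe, max_probe]. */
int main(void)
{
    int min_probe = 1, max_probe = 8, pollint = min_probe;
    int progress[] = {0, 0, 0, 1, 0, 1, 1};  /* hypothetical trace */

    for (int i = 0; i < 7; i++) {
        if (progress[i])
            pollint = MYMAX(min_probe, pollint / 2);
        else
            pollint = MYMIN(max_probe, pollint * 2);
        printf("step %d: pollint=%d\n", i, pollint);
    }
    return 0;
}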
#include <unistd.h>
#endif
-#define NUM_ELEMS 1000
+#define NUM_ELEMS 100
#define MAX_NPROBE nproc
#define MIN_NPROBE 1
#define ELEM_PER_ROW 16
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) (((X) < (Y)) ? (X) : (Y))
+#define MYMAX(X,Y) (((X) > (Y)) ? (X) : (Y))
/* Linked list pointer */
typedef struct {
static const int verbose = 0;
static const int print_perf = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int *my_elems_size, int *my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
double time;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(procid, llist_win);
+ head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
(void *) next_tail_ptr.disp);
tail_ptr = next_tail_ptr;
- pollint = MAX(MIN_NPROBE, pollint / 2);
+ pollint = MYMAX(MIN_NPROBE, pollint / 2);
}
else {
for (j = 0; j < pollint; j++)
MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag,
MPI_STATUS_IGNORE);
- pollint = MIN(MAX_NPROBE, pollint * 2);
+ pollint = MYMIN(MAX_NPROBE, pollint * 2);
}
}
} while (!success);
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int *my_elems_size, int *my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
static const int verbose = 0;
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
/* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
{
MPI_Aint disp;
llist_elem_t *elem_ptr;
MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
/* Add the element to the list of local elements so we can free it later. */
- if (my_elems_size == my_elems_count) {
- my_elems_size += 100;
- my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+ if (*my_elems_size == *my_elems_count) {
+ *my_elems_size += 100;
+ *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
}
- my_elems[my_elems_count] = elem_ptr;
- my_elems_count++;
+ (*my_elems)[*my_elems_count] = elem_ptr;
+ (*my_elems_count)++;
MPI_Get_address(elem_ptr, &disp);
return disp;
int procid, nproc, i;
MPI_Win llist_win;
llist_ptr_t head_ptr, tail_ptr;
+ /* List of locally allocated list elements. */
+ llist_elem_t **my_elems = NULL;
+ int my_elems_size = 0;
+ int my_elems_count = 0;
MPI_Init(&argc, &argv);
/* Process 0 creates the head node */
if (procid == 0)
- head_ptr.disp = alloc_elem(-1, llist_win);
+ head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Broadcast the head pointer to everyone */
head_ptr.rank = 0;
/* Create a new list element and register it with the window */
new_elem_ptr.rank = procid;
- new_elem_ptr.disp = alloc_elem(procid, llist_win);
+ new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
/* Append the new node to the list. This might take multiple attempts if
* others have already appended and our tail pointer is stale. */
case 6: /* a few small puts (like strided put, but 1 word at a time) */
- /* FIXME: The conditional and increment are reversed below. This looks
- * like a bug, and currently prevents the following test from running. */
- for (j = 0; j++; j < veccount) {
+ for (j = 0; j < veccount; j++) {
if (buf[j * stride] != PUT_VAL + j) {
errs++;
printf("case %d: value is %d should be %d\n", i, buf[j * stride], PUT_VAL + j);
case 7: /* a few small accumulates (like strided acc, but 1 word at a time) */
- /* FIXME: The conditional and increment are reversed below. This looks
- * like a bug, and currently prevents the following test from running. */
- for (j = 0; j++; j < veccount) {
+ for (j = 0; j < veccount; j++) {
if (buf[j * stride] != ACC_VAL + j + OFFSET_2 + j * stride) {
errs++;
printf("case %d: value is %d should be %d\n", i,
#include <stdlib.h>
#include <string.h>
-#define MAX_COUNT 65536*4/16
+#define MAX_COUNT 512
#define MAX_RMA_SIZE 2 /* 16 in manyrma performance test */
#define MAX_RUNS 8
#define MAX_ITER_TIME 5.0 /* seconds */
#else
#ifdef USE_WIN_ALLOC_SHM
MPI_Info_create(&hdl->win_info);
- MPI_Info_set(hdl->win_info, "alloc_shm", "true");
+ MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"true");
#else
MPI_Info_create(&hdl->win_info);
- MPI_Info_set(hdl->win_info, "alloc_shm", "false");
+ MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"false");
#endif
MPI_Win_allocate(2 * sizeof(int), sizeof(int), hdl->win_info, hdl->comm,
&hdl->base, &hdl->window);
#include <string.h>
#include <mpi.h>
-#define MAX_DATA_SIZE (1024*128*16)
-#define MAX_NUM_ITERATIONS (8192*4)
+#define MAX_DATA_SIZE (1024)
+#define MAX_NUM_ITERATIONS (1024)
#define MIN_NUM_ITERATIONS 8
#define NUM_WARMUP_ITER 1
const int verbose = 0;
static int rank;
-void run_test(int lock_mode, int lock_assert)
+static void run_test(int lock_mode, int lock_assert)
{
int nproc, test_iter, target_rank, data_size;
int *buf, *win_buf;
putfidx 4
getfence1 4
accfence1 4
-#Needs lock, unlock
-#adlb_mimic1 3
+adlb_mimic1 3
accfence2 4
putpscw1 4
accpscw1 4
transpose1 2
transpose2 2
transpose3 2
-#Needs MPI_Win_allocate
transpose3_shm 2
transpose5 2
transpose6 1
test3 2
test4 2
test5 2
-#Needs lock, unlock
lockcontention 3
lockcontention2 4
lockcontention2 8
#Buggy one.
#lockcontention3 8
lockopts 2
-#needs get_accumulate
lock_dt 2
lock_dt_flush 2
lock_dt_flushlocal 2
lockall_dt_flushall 4 timeLimit=240
lockall_dt_flushlocal 4 timeLimit=240
lockall_dt_flushlocalall 4 timeLimit=240
-#lock_contention_dt 4 timeLimit=240
+lock_contention_dt 4 timeLimit=240
transpose4 2
#fetchandadd 7
#fetchandadd_tree 7
window_creation 2
contention_put 4
contention_putget 4
-#put_base 2
-#put_bottom 2
+put_base 2
+put_bottom 2
#win_flavors 4 mpiversion=3.0
#win_flavors 3 mpiversion=3.0
-#manyrma2 2 timeLimit=500
+manyrma2 2 timeLimit=500
manyrma3 2
#win_shared 4 mpiversion=3.0
#win_shared_create_allocshm 4 mpiversion=3.0
#win_shared_noncontig_put 4 mpiversion=3.0
#win_zero 4 mpiversion=3.0
win_dynamic_acc 4
-#get_acc_local 1 mpiversion=3.0
+get_acc_local 1
+#Issues with concurrent updates.
#linked_list 4 mpiversion=3.0
#linked_list_fop 4 mpiversion=3.0
compare_and_swap 4
#fetch_and_op_long_double 4 mpiversion=3.0
#get_accumulate_double 4 mpiversion=3.0
#get_accumulate_double_derived 4 mpiversion=3.0
-#get_accumulate_int 4 mpiversion=3.0
+get_accumulate 4
#get_accumulate_int_derived 4 mpiversion=3.0
#get_accumulate_long 4 mpiversion=3.0
#get_accumulate_long_derived 4 mpiversion=3.0
flush 4
#reqops 4
req_example 4
+rput_local_comp 2 mpiversion=3.0
+racc_local_comp 2 mpiversion=3.0
win_info 4
+#Issues with concurrent updates.
#linked_list_lockall 4 mpiversion=3.0
-#pscw_ordering 4 mpiversion=3.0
-#linked_list_bench_lock_all 4 mpiversion=3.0
-#linked_list_bench_lock_excl 4 mpiversion=3.0
+pscw_ordering 4
+linked_list_bench_lock_all 4
+linked_list_bench_lock_excl 4 mpiversion=3.0
#linked_list_bench_lock_shr 4 mpiversion=3.0
#linked_list_bench_lock_shr_nocheck 4 mpiversion=3.0
#mutex_bench_shm 4 mpiversion=3.0
#mutex_bench_shm_ordered 4 mpiversion=3.0
-#rma-contig 2 mpiversion=3.0 timeLimit=720
+rma-contig 2 timeLimit=720
badrma 2
-#acc-loc 4
+acc-loc 4
#fence_shm 2 mpiversion=3.0
#mutex_bench 4 mpiversion=3.0
#mutex_bench_shared 4 mpiversion=3.0
win_shared_zerobyte 4 mpiversion=3.0
win_shared_put_flush_get 4 mpiversion=3.0
-#get-struct 2
+get-struct 2
at_complete 2
#atomic_rmw_fop 3
#atomic_rmw_cas 3
#gacc_flush_get 3 mpiversion=3.0
#fop_flush_get 3 mpiversion=3.0
#cas_flush_get 3 mpiversion=3.0
-#rget-unlock 2 mpiversion=3.0
+#Known issue: unlock should complete outstanding R* calls, but doing so still causes problems here.
+#rget-unlock 2
#overlap_wins_put 3
#overlap_wins_acc 3
#overlap_wins_gacc 3
endif()
endif()
+if(enable_model-checking)
+ install(
+ PROGRAMS ${CMAKE_BINARY_DIR}/bin/simgrid-mc
+ DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
+endif()
+
install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/tesh DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)
install(PROGRAMS ${CMAKE_BINARY_DIR}/bin/graphicator DESTINATION $ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/bin/)