From 3037a0963f5d462b145ee127ffaed836dffd0085 Mon Sep 17 00:00:00 2001
From: degomme
Date: Thu, 6 Apr 2017 00:39:03 +0200
Subject: [PATCH] activate some more rma tests

---
 teshsuite/smpi/mpich3-test/rma/CMakeLists.txt | 21 +++++------
 .../smpi/mpich3-test/rma/get_acc_local.c      |  4 +-
 teshsuite/smpi/mpich3-test/rma/linked_list.c  | 25 ++++++-------
 .../rma/linked_list_bench_lock_all.c          | 33 ++++++++---------
 .../rma/linked_list_bench_lock_excl.c         | 35 +++++++++---------
 .../smpi/mpich3-test/rma/linked_list_fop.c    | 25 ++++++-------
 .../mpich3-test/rma/linked_list_lockall.c     | 25 ++++++-------
 .../smpi/mpich3-test/rma/lockcontention3.c    |  4 +-
 teshsuite/smpi/mpich3-test/rma/manyrma2.c     |  2 +-
 teshsuite/smpi/mpich3-test/rma/mcs-mutex.c    |  4 +-
 teshsuite/smpi/mpich3-test/rma/rma-contig.c   |  6 +--
 teshsuite/smpi/mpich3-test/rma/testlist       | 37 ++++++++++---------
 12 files changed, 108 insertions(+), 113 deletions(-)

diff --git a/teshsuite/smpi/mpich3-test/rma/CMakeLists.txt b/teshsuite/smpi/mpich3-test/rma/CMakeLists.txt
index 55223b844a..65821229e2 100644
--- a/teshsuite/smpi/mpich3-test/rma/CMakeLists.txt
+++ b/teshsuite/smpi/mpich3-test/rma/CMakeLists.txt
@@ -16,19 +16,18 @@ if(enable_smpi AND enable_smpi_MPICH3_testsuite)
                 at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc fetch_and_op flush req_example
                 rmanull rmazero badrma lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed
                 strided_getacc_indexed compare_and_swap lockall_dt lockall_dt_flushall lock_dt_flush lockall_dt_flush lockall_dt_flushlocalall lockall_dt_flushlocal lock_dt_flushlocal
-                strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget)
-  # not compiled files: acc-loc adlb_mimic1
-  # contention_putget contig_displ fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
-  # fkeyvalwin get_acc_local get_accumulate linked_list_bench_lock_all
-  # linked_list_bench_lock_excl linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
-  # manyrma2 mcs-mutex mixedsync mutex_bench lockcontention3 reqops
-  # pscw_ordering put_base put_bottom rma-contig selfrma
-  # strided_getacc_indexed_shared
+                strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget
+                adlb_mimic1 lock_contention_dt acc-loc get_acc_local get_accumulate put_base put_bottom
+                linked_list_bench_lock_all linked_list_bench_lock_excl manyrma2 pscw_ordering rma-contig get-struct
+                rput_local_comp racc_local_comp)
+  # fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
+  # linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
+  # mcs-mutex mixedsync mutex_bench lockcontention3 reqops
+  # strided_getacc_indexed_shared rget-unlock
   # win_flavors win_shared win_shared_noncontig win_shared_noncontig_put
-  # win_large_shm win_zero wintest get-struct atomic_rmw_fop atomic_rmw_gacc rget-unlock atomic_get atomic_rmw_cas
+  # win_large_shm win_zero wintest atomic_rmw_fop atomic_rmw_gacc atomic_get atomic_rmw_cas
   # win_shared_zerobyte aint derived-acc-flush_local large-acc-flush_local
-  # lock_contention_dt
-  #racc_local_comp rput_local_comp win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
+  # win_shared_create win_shared_put_flush_get win_shared_rma_flush_load
   # wrma_flush_get
   add_executable(${file} ${file}.c)
   target_link_libraries(${file} simgrid mtest_c)
diff --git a/teshsuite/smpi/mpich3-test/rma/get_acc_local.c b/teshsuite/smpi/mpich3-test/rma/get_acc_local.c
index c1a38db86c..2b29c692c4 100644
--- a/teshsuite/smpi/mpich3-test/rma/get_acc_local.c
+++ b/teshsuite/smpi/mpich3-test/rma/get_acc_local.c
@@ -12,14 +12,14 @@
 
 int errors = 0;
 const int NITER = 1000;
-const int acc_val = 3;
+
 
 int main(int argc, char **argv)
 {
     int rank, nproc;
     int out_val, i, counter = 0;
     MPI_Win win;
-
+    int acc_val = 3;
     MPI_Init(&argc, &argv);
 
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
diff --git a/teshsuite/smpi/mpich3-test/rma/linked_list.c b/teshsuite/smpi/mpich3-test/rma/linked_list.c
index 126ab62ca0..3838e39879 100644
--- a/teshsuite/smpi/mpich3-test/rma/linked_list.c
+++ b/teshsuite/smpi/mpich3-test/rma/linked_list.c
@@ -47,13 +47,8 @@ static const llist_ptr_t nil = { -1, (MPI_Aint) MPI_BOTTOM };
 
 static const int verbose = 0;
 
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
 /* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
 {
     MPI_Aint disp;
     llist_elem_t *elem_ptr;
@@ -65,12 +60,12 @@ MPI_Aint alloc_elem(int value, MPI_Win win)
     MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
 
     /* Add the element to the list of local elements so we can free it later. */
-    if (my_elems_size == my_elems_count) {
-        my_elems_size += 100;
-        my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+    if (*my_elems_size == *my_elems_count) {
+        *my_elems_size += 100;
+        *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
     }
-    my_elems[my_elems_count] = elem_ptr;
-    my_elems_count++;
+    (*my_elems)[*my_elems_count] = elem_ptr;
+    (*my_elems_count)++;
 
     MPI_Get_address(elem_ptr, &disp);
     return disp;
@@ -81,6 +76,10 @@ int main(int argc, char **argv)
     int procid, nproc, i;
     MPI_Win llist_win;
     llist_ptr_t head_ptr, tail_ptr;
+    /* List of locally allocated list elements. */
+    llist_elem_t **my_elems = NULL;
+    int my_elems_size = 0;
+    int my_elems_count = 0;
 
     MPI_Init(&argc, &argv);
 
@@ -91,7 +90,7 @@ int main(int argc, char **argv)
 
     /* Process 0 creates the head node */
     if (procid == 0)
-        head_ptr.disp = alloc_elem(-1, llist_win);
+        head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
     /* Broadcast the head pointer to everyone */
     head_ptr.rank = 0;
@@ -105,7 +104,7 @@ int main(int argc, char **argv)
 
         /* Create a new list element and register it with the window */
         new_elem_ptr.rank = procid;
-        new_elem_ptr.disp = alloc_elem(procid, llist_win);
+        new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
         /* Append the new node to the list.  This might take multiple attempts if
          * others have already appended and our tail pointer is stale. */
diff --git a/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_all.c b/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_all.c
index 5e0dfa481e..fa0f49bf39 100644
--- a/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_all.c
+++ b/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_all.c
@@ -28,8 +28,8 @@
 #define MIN_NPROBE 1
 #define ELEM_PER_ROW 16
 
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) ((X < Y) ? (X) : (Y))
+#define MYMAX(X,Y) ((X > Y) ? (X) : (Y))
 
 /* Linked list pointer */
 typedef struct {
@@ -48,13 +48,8 @@ static const llist_ptr_t nil = { -1, (MPI_Aint) MPI_BOTTOM };
 static const int verbose = 0;
 static const int print_perf = 0;
 
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
 /* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
 {
     MPI_Aint disp;
     llist_elem_t *elem_ptr;
@@ -66,12 +61,12 @@ MPI_Aint alloc_elem(int value, MPI_Win win)
     MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
 
     /* Add the element to the list of local elements so we can free it later. */
-    if (my_elems_size == my_elems_count) {
-        my_elems_size += 100;
-        my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+    if (*my_elems_size == *my_elems_count) {
+        *my_elems_size += 100;
+        *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
     }
-    my_elems[my_elems_count] = elem_ptr;
-    my_elems_count++;
+    (*my_elems)[*my_elems_count] = elem_ptr;
+    (*my_elems_count)++;
 
     MPI_Get_address(elem_ptr, &disp);
     return disp;
@@ -85,6 +80,10 @@ int main(int argc, char **argv)
     MPI_Win llist_win;
     llist_ptr_t head_ptr, tail_ptr;
 
+    /* List of locally allocated list elements. */
+    llist_elem_t **my_elems = NULL;
+    int my_elems_size = 0;
+    int my_elems_count = 0;
     MPI_Init(&argc, &argv);
 
     MPI_Comm_rank(MPI_COMM_WORLD, &procid);
@@ -94,7 +93,7 @@ int main(int argc, char **argv)
 
     /* Process 0 creates the head node */
     if (procid == 0)
-        head_ptr.disp = alloc_elem(procid, llist_win);
+        head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
     /* Broadcast the head pointer to everyone */
     head_ptr.rank = 0;
@@ -126,7 +125,7 @@ int main(int argc, char **argv)
 
         /* Create a new list element and register it with the window */
         new_elem_ptr.rank = procid;
-        new_elem_ptr.disp = alloc_elem(procid, llist_win);
+        new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
         /* Append the new node to the list.  This might take multiple attempts if
          * others have already appended and our tail pointer is stale. */
@@ -169,14 +168,14 @@ int main(int argc, char **argv)
                     printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
                            (void *) next_tail_ptr.disp);
                 tail_ptr = next_tail_ptr;
-                pollint = MAX(MIN_NPROBE, pollint / 2);
+                pollint = MYMAX(MIN_NPROBE, pollint / 2);
             }
             else {
                 for (j = 0; j < pollint; j++)
                     MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE);
 
-                pollint = MIN(MAX_NPROBE, pollint * 2);
+                pollint = MYMIN(MAX_NPROBE, pollint * 2);
             }
         }
     } while (!success);
 
diff --git a/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_excl.c b/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_excl.c
index c52208e2ac..607ca12c01 100644
--- a/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_excl.c
+++ b/teshsuite/smpi/mpich3-test/rma/linked_list_bench_lock_excl.c
@@ -23,13 +23,13 @@
 #include <unistd.h>
 #endif
 
-#define NUM_ELEMS 1000
+#define NUM_ELEMS 100
 #define MAX_NPROBE nproc
 #define MIN_NPROBE 1
 #define ELEM_PER_ROW 16
 
-#define MIN(X,Y) ((X < Y) ? (X) : (Y))
-#define MAX(X,Y) ((X > Y) ? (X) : (Y))
+#define MYMIN(X,Y) ((X < Y) ? (X) : (Y))
+#define MYMAX(X,Y) ((X > Y) ? (X) : (Y))
 
 /* Linked list pointer */
 typedef struct {
@@ -48,13 +48,8 @@ static const llist_ptr_t nil = { -1, (MPI_Aint) MPI_BOTTOM };
 static const int verbose = 0;
 static const int print_perf = 0;
 
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
 /* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
 {
     MPI_Aint disp;
     llist_elem_t *elem_ptr;
@@ -66,12 +61,12 @@ MPI_Aint alloc_elem(int value, MPI_Win win)
     MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
 
     /* Add the element to the list of local elements so we can free it later. */
-    if (my_elems_size == my_elems_count) {
-        my_elems_size += 100;
-        my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+    if (*my_elems_size == *my_elems_count) {
+        *my_elems_size += 100;
+        *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
     }
-    my_elems[my_elems_count] = elem_ptr;
-    my_elems_count++;
+    (*my_elems)[*my_elems_count] = elem_ptr;
+    (*my_elems_count)++;
 
     MPI_Get_address(elem_ptr, &disp);
     return disp;
@@ -84,6 +79,10 @@ int main(int argc, char **argv)
     double time;
     MPI_Win llist_win;
     llist_ptr_t head_ptr, tail_ptr;
+    /* List of locally allocated list elements. */
+    llist_elem_t **my_elems = NULL;
+    int my_elems_size = 0;
+    int my_elems_count = 0;
 
     MPI_Init(&argc, &argv);
 
@@ -94,7 +93,7 @@ int main(int argc, char **argv)
 
     /* Process 0 creates the head node */
     if (procid == 0)
-        head_ptr.disp = alloc_elem(procid, llist_win);
+        head_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
     /* Broadcast the head pointer to everyone */
     head_ptr.rank = 0;
@@ -122,7 +121,7 @@ int main(int argc, char **argv)
 
         /* Create a new list element and register it with the window */
         new_elem_ptr.rank = procid;
-        new_elem_ptr.disp = alloc_elem(procid, llist_win);
+        new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
         /* Append the new node to the list.  This might take multiple attempts if
          * others have already appended and our tail pointer is stale. */
@@ -173,14 +172,14 @@ int main(int argc, char **argv)
                     printf("%d: Chasing to <%d, %p>\n", procid, next_tail_ptr.rank,
                            (void *) next_tail_ptr.disp);
                 tail_ptr = next_tail_ptr;
-                pollint = MAX(MIN_NPROBE, pollint / 2);
+                pollint = MYMAX(MIN_NPROBE, pollint / 2);
             }
             else {
                 for (j = 0; j < pollint; j++)
                     MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE);
 
-                pollint = MIN(MAX_NPROBE, pollint * 2);
+                pollint = MYMIN(MAX_NPROBE, pollint * 2);
             }
         }
     } while (!success);
 
diff --git a/teshsuite/smpi/mpich3-test/rma/linked_list_fop.c b/teshsuite/smpi/mpich3-test/rma/linked_list_fop.c
index 61b7618487..55d4c0ab54 100644
--- a/teshsuite/smpi/mpich3-test/rma/linked_list_fop.c
+++ b/teshsuite/smpi/mpich3-test/rma/linked_list_fop.c
@@ -47,13 +47,8 @@ static const llist_ptr_t nil = { -1, (MPI_Aint) MPI_BOTTOM };
 
 static const int verbose = 0;
 
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
 /* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
 {
     MPI_Aint disp;
     llist_elem_t *elem_ptr;
@@ -65,12 +60,12 @@ MPI_Aint alloc_elem(int value, MPI_Win win)
     MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
 
     /* Add the element to the list of local elements so we can free it later. */
-    if (my_elems_size == my_elems_count) {
-        my_elems_size += 100;
-        my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+    if (*my_elems_size == *my_elems_count) {
+        *my_elems_size += 100;
+        *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
     }
-    my_elems[my_elems_count] = elem_ptr;
-    my_elems_count++;
+    (*my_elems)[*my_elems_count] = elem_ptr;
+    (*my_elems_count)++;
 
     MPI_Get_address(elem_ptr, &disp);
     return disp;
@@ -81,6 +76,10 @@ int main(int argc, char **argv)
     int procid, nproc, i;
     MPI_Win llist_win;
     llist_ptr_t head_ptr, tail_ptr;
+    /* List of locally allocated list elements. */
+    llist_elem_t **my_elems = NULL;
+    int my_elems_size = 0;
+    int my_elems_count = 0;
 
     MPI_Init(&argc, &argv);
 
@@ -91,7 +90,7 @@ int main(int argc, char **argv)
 
     /* Process 0 creates the head node */
     if (procid == 0)
-        head_ptr.disp = alloc_elem(-1, llist_win);
+        head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
     /* Broadcast the head pointer to everyone */
     head_ptr.rank = 0;
@@ -105,7 +104,7 @@ int main(int argc, char **argv)
 
         /* Create a new list element and register it with the window */
         new_elem_ptr.rank = procid;
-        new_elem_ptr.disp = alloc_elem(procid, llist_win);
+        new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
         /* Append the new node to the list.  This might take multiple attempts if
          * others have already appended and our tail pointer is stale. */
diff --git a/teshsuite/smpi/mpich3-test/rma/linked_list_lockall.c b/teshsuite/smpi/mpich3-test/rma/linked_list_lockall.c
index 699623c6f2..eed66f3a37 100644
--- a/teshsuite/smpi/mpich3-test/rma/linked_list_lockall.c
+++ b/teshsuite/smpi/mpich3-test/rma/linked_list_lockall.c
@@ -47,13 +47,8 @@ static const llist_ptr_t nil = { -1, (MPI_Aint) MPI_BOTTOM };
 
 static const int verbose = 0;
 
-/* List of locally allocated list elements. */
-static llist_elem_t **my_elems = NULL;
-static int my_elems_size = 0;
-static int my_elems_count = 0;
-
 /* Allocate a new shared linked list element */
-MPI_Aint alloc_elem(int value, MPI_Win win)
+static MPI_Aint alloc_elem(int value, MPI_Win win, llist_elem_t ***my_elems, int* my_elems_size, int* my_elems_count)
 {
     MPI_Aint disp;
     llist_elem_t *elem_ptr;
@@ -65,12 +60,12 @@ MPI_Aint alloc_elem(int value, MPI_Win win)
     MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));
 
     /* Add the element to the list of local elements so we can free it later. */
-    if (my_elems_size == my_elems_count) {
-        my_elems_size += 100;
-        my_elems = realloc(my_elems, my_elems_size * sizeof(void *));
+    if (*my_elems_size == *my_elems_count) {
+        *my_elems_size += 100;
+        *my_elems = realloc(*my_elems, *my_elems_size * sizeof(void *));
     }
-    my_elems[my_elems_count] = elem_ptr;
-    my_elems_count++;
+    (*my_elems)[*my_elems_count] = elem_ptr;
+    (*my_elems_count)++;
 
     MPI_Get_address(elem_ptr, &disp);
     return disp;
@@ -81,6 +76,10 @@ int main(int argc, char **argv)
     int procid, nproc, i;
     MPI_Win llist_win;
     llist_ptr_t head_ptr, tail_ptr;
+    /* List of locally allocated list elements. */
+    llist_elem_t **my_elems = NULL;
+    int my_elems_size = 0;
+    int my_elems_count = 0;
 
     MPI_Init(&argc, &argv);
 
@@ -91,7 +90,7 @@ int main(int argc, char **argv)
 
     /* Process 0 creates the head node */
     if (procid == 0)
-        head_ptr.disp = alloc_elem(-1, llist_win);
+        head_ptr.disp = alloc_elem(-1, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
     /* Broadcast the head pointer to everyone */
     head_ptr.rank = 0;
@@ -108,7 +107,7 @@ int main(int argc, char **argv)
 
         /* Create a new list element and register it with the window */
         new_elem_ptr.rank = procid;
-        new_elem_ptr.disp = alloc_elem(procid, llist_win);
+        new_elem_ptr.disp = alloc_elem(procid, llist_win, &my_elems, &my_elems_size, &my_elems_count);
 
         /* Append the new node to the list.  This might take multiple attempts if
          * others have already appended and our tail pointer is stale. */
diff --git a/teshsuite/smpi/mpich3-test/rma/lockcontention3.c b/teshsuite/smpi/mpich3-test/rma/lockcontention3.c
index 8be8a50cb5..134f41240b 100644
--- a/teshsuite/smpi/mpich3-test/rma/lockcontention3.c
+++ b/teshsuite/smpi/mpich3-test/rma/lockcontention3.c
@@ -315,7 +315,7 @@ int RMACheck(int i, int *buf, MPI_Aint bufsize)
     case 6:        /* a few small puts (like strided put, but 1 word at a time) */
         /* FIXME: The conditional and increment are reversed below.  This looks
          * like a bug, and currently prevents the following test from running. */
-        for (j = 0; j++; j < veccount) {
+        for (j = 0; j < veccount; j++) {
             if (buf[j * stride] != PUT_VAL + j) {
                 errs++;
                 printf("case %d: value is %d should be %d\n", i, buf[j * stride], PUT_VAL + j);
@@ -326,7 +326,7 @@ int RMACheck(int i, int *buf, MPI_Aint bufsize)
     case 7:        /* a few small accumulates (like strided acc, but 1 word at a time) */
         /* FIXME: The conditional and increment are reversed below.  This looks
          * like a bug, and currently prevents the following test from running. */
-        for (j = 0; j++; j < veccount) {
+        for (j = 0; j < veccount; j++) {
             if (buf[j * stride] != ACC_VAL + j + OFFSET_2 + j * stride) {
                 errs++;
                 printf("case %d: value is %d should be %d\n", i,
diff --git a/teshsuite/smpi/mpich3-test/rma/manyrma2.c b/teshsuite/smpi/mpich3-test/rma/manyrma2.c
index 67739199a0..c7436bcc29 100644
--- a/teshsuite/smpi/mpich3-test/rma/manyrma2.c
+++ b/teshsuite/smpi/mpich3-test/rma/manyrma2.c
@@ -15,7 +15,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#define MAX_COUNT 65536*4/16
+#define MAX_COUNT 512
 #define MAX_RMA_SIZE 2  /* 16 in manyrma performance test */
 #define MAX_RUNS 8
 #define MAX_ITER_TIME 5.0       /* seconds */
diff --git a/teshsuite/smpi/mpich3-test/rma/mcs-mutex.c b/teshsuite/smpi/mpich3-test/rma/mcs-mutex.c
index a972a55c44..96b0f7b580 100644
--- a/teshsuite/smpi/mpich3-test/rma/mcs-mutex.c
+++ b/teshsuite/smpi/mpich3-test/rma/mcs-mutex.c
@@ -44,10 +44,10 @@ int MCS_Mutex_create(int tail_rank, MPI_Comm comm, MCS_Mutex * hdl_out)
 #else
 #ifdef USE_WIN_ALLOC_SHM
     MPI_Info_create(&hdl->win_info);
-    MPI_Info_set(hdl->win_info, "alloc_shm", "true");
+    MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"true");
 #else
     MPI_Info_create(&hdl->win_info);
-    MPI_Info_set(hdl->win_info, "alloc_shm", "false");
+    MPI_Info_set(hdl->win_info, (char*)"alloc_shm", (char*)"false");
 #endif
 
     MPI_Win_allocate(2 * sizeof(int), sizeof(int), hdl->win_info, hdl->comm, &hdl->base, &hdl->window);
diff --git a/teshsuite/smpi/mpich3-test/rma/rma-contig.c b/teshsuite/smpi/mpich3-test/rma/rma-contig.c
index 581b95fe54..4ca7127ad7 100644
--- a/teshsuite/smpi/mpich3-test/rma/rma-contig.c
+++ b/teshsuite/smpi/mpich3-test/rma/rma-contig.c
@@ -9,15 +9,15 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#define MAX_DATA_SIZE (1024*128*16)
-#define MAX_NUM_ITERATIONS (8192*4)
+#define MAX_DATA_SIZE (1024)
+#define MAX_NUM_ITERATIONS (1024)
 #define MIN_NUM_ITERATIONS 8
 #define NUM_WARMUP_ITER 1
 
 const int verbose = 0;
 static int rank;
 
-void run_test(int lock_mode, int lock_assert)
+static void run_test(int lock_mode, int lock_assert)
 {
     int nproc, test_iter, target_rank, data_size;
     int *buf, *win_buf;
diff --git a/teshsuite/smpi/mpich3-test/rma/testlist b/teshsuite/smpi/mpich3-test/rma/testlist
index bd357d2fed..3cf6fc838e 100644
--- a/teshsuite/smpi/mpich3-test/rma/testlist
+++ b/teshsuite/smpi/mpich3-test/rma/testlist
@@ -4,8 +4,7 @@ putfence1 4
 putfidx 4
 getfence1 4
 accfence1 4
-#Needs lock, unlock
-#adlb_mimic1 3
+adlb_mimic1 3
 accfence2 4
 putpscw1 4
 accpscw1 4
@@ -13,7 +12,6 @@ getgroup 4
 transpose1 2
 transpose2 2
 transpose3 2
-#Needs MPI_Win_allocate
 transpose3_shm 2
 transpose5 2
 transpose6 1
@@ -25,14 +23,12 @@ test2 2
 test3 2
 test4 2
 test5 2
-#Needs lock, unlock
 lockcontention 3
 lockcontention2 4
 lockcontention2 8
 #Buggy one.
 #lockcontention3 8
 lockopts 2
-#needs get_accumulate
 lock_dt 2
 lock_dt_flush 2
 lock_dt_flushlocal 2
@@ -41,7 +37,7 @@ lockall_dt_flush 4 timeLimit=240
 lockall_dt_flushall 4 timeLimit=240
 lockall_dt_flushlocal 4 timeLimit=240
 lockall_dt_flushlocalall 4 timeLimit=240
-#lock_contention_dt 4 timeLimit=240
+lock_contention_dt 4 timeLimit=240
 transpose4 2
 #fetchandadd 7
 #fetchandadd_tree 7
@@ -82,11 +78,11 @@ strided_getacc_indexed 4
 window_creation 2
 contention_put 4
 contention_putget 4
-#put_base 2
-#put_bottom 2
+put_base 2
+put_bottom 2
 #win_flavors 4 mpiversion=3.0
 #win_flavors 3 mpiversion=3.0
-#manyrma2 2 timeLimit=500
+manyrma2 2 timeLimit=500
 manyrma3 2
 #win_shared 4 mpiversion=3.0
 #win_shared_create_allocshm 4 mpiversion=3.0
@@ -95,7 +91,8 @@ manyrma3 2
 #win_shared_noncontig_put 4 mpiversion=3.0
 #win_zero 4 mpiversion=3.0
 win_dynamic_acc 4
-#get_acc_local 1 mpiversion=3.0
+get_acc_local 1
+#issues with concurrent updates..
 #linked_list 4 mpiversion=3.0
 #linked_list_fop 4 mpiversion=3.0
 compare_and_swap 4
@@ -108,7 +105,7 @@ fetch_and_op 4
 #fetch_and_op_long_double 4 mpiversion=3.0
 #get_accumulate_double 4 mpiversion=3.0
 #get_accumulate_double_derived 4 mpiversion=3.0
-#get_accumulate_int 4 mpiversion=3.0
+get_accumulate 4
 #get_accumulate_int_derived 4 mpiversion=3.0
 #get_accumulate_long 4 mpiversion=3.0
 #get_accumulate_long_derived 4 mpiversion=3.0
@@ -117,24 +114,27 @@ fetch_and_op 4
 flush 4
 #reqops 4
 req_example 4
+rput_local_comp 2 mpiversion=3.0
+racc_local_comp 2 mpiversion=3.0
 win_info 4
+#issues with concurrent updates..
 #linked_list_lockall 4 mpiversion=3.0
-#pscw_ordering 4 mpiversion=3.0
-#linked_list_bench_lock_all 4 mpiversion=3.0
-#linked_list_bench_lock_excl 4 mpiversion=3.0
+pscw_ordering 4
+linked_list_bench_lock_all 4
+linked_list_bench_lock_excl 4 mpiversion=3.0
 #linked_list_bench_lock_shr 4 mpiversion=3.0
 #linked_list_bench_lock_shr_nocheck 4 mpiversion=3.0
 #mutex_bench_shm 4 mpiversion=3.0
 #mutex_bench_shm_ordered 4 mpiversion=3.0
-#rma-contig 2 mpiversion=3.0 timeLimit=720
+rma-contig 2 timeLimit=720
 badrma 2
-#acc-loc 4
+acc-loc 4
 #fence_shm 2 mpiversion=3.0
 #mutex_bench 4 mpiversion=3.0
 #mutex_bench_shared 4 mpiversion=3.0
 win_shared_zerobyte 4 mpiversion=3.0
 win_shared_put_flush_get 4 mpiversion=3.0
-#get-struct 2
+get-struct 2
 at_complete 2
 #atomic_rmw_fop 3
 #atomic_rmw_cas 3
@@ -156,7 +156,8 @@ large-small-acc 2
 #gacc_flush_get 3 mpiversion=3.0
 #fop_flush_get 3 mpiversion=3.0
 #cas_flush_get 3 mpiversion=3.0
-#rget-unlock 2 mpiversion=3.0
+#We still have an issue here, unlock should finish R* calls, but this causes issues.
+#rget-unlock 2
 #overlap_wins_put 3
 #overlap_wins_acc 3
 #overlap_wins_gacc 3
-- 
2.20.1
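
Postscript, outside the patch proper: the change repeated across the five linked_list* tests above is a single refactor. The element registry (my_elems, my_elems_size, my_elems_count) moves from file-scope statics into main(), and alloc_elem() now receives it through pointer parameters. The likely motivation is that SMPI runs the simulated MPI ranks inside one process, where file-scope statics would be shared (and corrupted) by all ranks unless privatized, so rank-local state has to live on the stack. Below is a minimal stand-alone sketch of the pattern in plain C without MPI; elem_t and the growth step of 100 mirror the test code, but the type and names are illustrative only.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int value;
} elem_t;

/* Mirrors the patched alloc_elem(): the registry of allocated elements is
 * caller-owned and threaded through by pointer instead of living in
 * file-scope statics. */
static elem_t *alloc_elem(int value, elem_t ***my_elems, int *my_elems_size, int *my_elems_count)
{
    elem_t *elem_ptr = malloc(sizeof(elem_t));
    elem_ptr->value = value;

    /* Grow the registry in blocks of 100, as the tests do. */
    if (*my_elems_size == *my_elems_count) {
        *my_elems_size += 100;
        *my_elems = realloc(*my_elems, *my_elems_size * sizeof(elem_t *));
    }
    (*my_elems)[*my_elems_count] = elem_ptr;
    (*my_elems_count)++;
    return elem_ptr;
}

int main(void)
{
    /* Formerly file-scope statics; as locals, each simulated rank
     * (a thread under SMPI) owns a private copy. */
    elem_t **my_elems = NULL;
    int my_elems_size = 0;
    int my_elems_count = 0;
    int i;

    for (i = 0; i < 250; i++)
        alloc_elem(i, &my_elems, &my_elems_size, &my_elems_count);

    printf("allocated %d elements (capacity %d)\n", my_elems_count, my_elems_size);

    for (i = 0; i < my_elems_count; i++)
        free(my_elems[i]);
    free(my_elems);
    return 0;
}

The triple pointer looks heavy, but it is what lets realloc() move the array while the caller keeps the authoritative pointer; an alternative would be to bundle the three fields in a small struct passed by address, which changes every call site in the same way.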