We almost have everything in MPI-3 RMA now.
I will try to activate more tests soon.
foreach(file accfence1 accfence2_am accfence2 accpscw1 allocmem epochtest getfence1 getgroup manyrma3 nullpscw lockcontention lockopts contig_displ
putfence1 putfidx putpscw1 test1_am test1 test1_dt test2_am test2 test3 test3_am test4 test4_am test5_am test5 transpose1 transpose2 lockcontention2
transpose3 transpose4 transpose5 transpose6 transpose7 window_creation selfrma locknull win_info
- at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc fetch_and_op flush
- lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed strided_getacc_indexed
- lockall_dt lockall_dt_flushall lock_dt_flush lockall_dt_flush lockall_dt_flushlocalall lockall_dt_flushlocal lock_dt_flushlocal
+ at_complete acc-pairtype manyget large-small-acc lock_dt win_dynamic_acc fetch_and_op flush req_example rmanull rmazero badrma
+ lock_nested winname attrorderwin baseattrwin fkeyvalwin strided_acc_indexed strided_getacc_indexed compare_and_swap
+ lockall_dt lockall_dt_flushall lock_dt_flush lockall_dt_flush lockall_dt_flushlocalall lockall_dt_flushlocal lock_dt_flushlocal
strided_acc_onelock strided_get_indexed strided_putget_indexed contention_put contention_putget)
- # not compiled files: acc-loc adlb_mimic1 badrma compare_and_swap contention_put
+ # not compiled files: acc-loc adlb_mimic1
# contention_putget contig_displ fence_shm fetchandadd_am fetchandadd fetchandadd_tree_am fetchandadd_tree
# fkeyvalwin get_acc_local get_accumulate linked_list_bench_lock_all
# linked_list_bench_lock_excl linked_list_bench_lock_shr linked_list linked_list_fop linked_list_lockall
- # manyrma2 mcs-mutex mixedsync mutex_bench lockcontention3
- # pscw_ordering put_base put_bottom req_example reqops rmanull rmazero rma-contig selfrma
+ # manyrma2 mcs-mutex mixedsync mutex_bench lockcontention3 reqops
+ # pscw_ordering put_base put_bottom rma-contig selfrma
# strided_getacc_indexed_shared
# win_flavors win_shared win_shared_noncontig win_shared_noncontig_put
# win_large_shm win_zero wintest get-struct atomic_rmw_fop atomic_rmw_gacc rget-unlock atomic_get atomic_rmw_cas
MPI_Win win;
int win_buf[SIZE], origin_buf[SIZE], result_buf[SIZE];
-
+int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
+ MPI_Datatype result_type, int target_count, MPI_Datatype target_type);
int do_test(int origin_count, MPI_Datatype origin_type, int result_count,
MPI_Datatype result_type, int target_count, MPI_Datatype target_type)
{
/* Use a global variable to inhibit compiler optimizations in the compute
* function. */
double junk = 0.0;
-
+void compute(int step, double *data);
void compute(int step, double *data)
{
int i;
MPI_Info_create(&win_info);
#ifdef USE_WIN_ALLOC_SHM
- MPI_Info_set(win_info, "alloc_shm", "true");
+ MPI_Info_set(win_info, (char*)"alloc_shm", (char*)"true");
#else
- MPI_Info_set(win_info, "alloc_shm", "false");
+ MPI_Info_set(win_info, (char*)"alloc_shm", (char*)"false");
#endif
MPI_Win_allocate(NSTEPS * N * sizeof(double), sizeof(double), win_info,
#mixedsync 4
epochtest 3
locknull 2
-#Needs MPI_Rput, MPI_Rget, MPI_Raccumulate, MPI_Fetch_and_op, MPI_Compare_and_swap
-#rmanull 2
-#rmazero 2
+rmanull 2
+rmazero 2
strided_acc_indexed 2
strided_acc_onelock 2
#needs MPI_Type_create_subarray
#get_acc_local 1 mpiversion=3.0
#linked_list 4 mpiversion=3.0
#linked_list_fop 4 mpiversion=3.0
-#compare_and_swap 4 mpiversion=3.0
+compare_and_swap 4
fetch_and_op 4
#fetch_and_op_char 4 mpiversion=3.0
#fetch_and_op_short 4 mpiversion=3.0
#get_accumulate_short 4 mpiversion=3.0
#get_accumulate_short_derived 4 mpiversion=3.0
flush 4
-#reqops 4 mpiversion=3.0
-#req_example 4 mpiversion=3.0
+#reqops 4
+req_example 4
win_info 4
#linked_list_lockall 4 mpiversion=3.0
#pscw_ordering 4 mpiversion=3.0
#mutex_bench_shm 4 mpiversion=3.0
#mutex_bench_shm_ordered 4 mpiversion=3.0
#rma-contig 2 mpiversion=3.0 timeLimit=720
-#badrma 2 mpiversion=3.0
+badrma 2
#acc-loc 4
#fence_shm 2 mpiversion=3.0
#mutex_bench 4 mpiversion=3.0