/* Configuration callback for the "smpi/coll/barrier" option: forwards the
 * chosen implementation name (and its position in the config set) to the
 * generic collective-selector helper, using the barrier description table.
 * NOTE(review): the actual lookup/validation happens in _sg_cfg_cb__coll,
 * which is defined elsewhere in this file. */
static void _sg_cfg_cb__coll_barrier(const char *name, int pos){
_sg_cfg_cb__coll("barrier", mpi_coll_barrier_description, name, pos);
}
+
+static void _sg_cfg_cb__wtime_sleep(const char *name, int pos){
+ smpi_wtime_sleep = xbt_cfg_get_double(_sg_cfg_set, name);
+}
+
+static void _sg_cfg_cb__iprobe_sleep(const char *name, int pos){
+ smpi_iprobe_sleep = xbt_cfg_get_double(_sg_cfg_set, name);
+}
+
+static void _sg_cfg_cb__test_sleep(const char *name, int pos){
+ smpi_test_sleep = xbt_cfg_get_double(_sg_cfg_set, name);
+}
+
+
+
#endif
/* callback of the inclusion path */
xbt_cfg_register(&_sg_cfg_set, "smpi/iprobe",
"Minimum time to inject inside a call to MPI_Iprobe",
- xbt_cfgelm_double, 1, 1, NULL, NULL);
+ xbt_cfgelm_double, 1, 1, _sg_cfg_cb__iprobe_sleep, NULL);
xbt_cfg_setdefault_double(_sg_cfg_set, "smpi/iprobe", 1e-4);
xbt_cfg_register(&_sg_cfg_set, "smpi/test",
"Minimum time to inject inside a call to MPI_Test",
- xbt_cfgelm_double, 1, 1, NULL, NULL);
+ xbt_cfgelm_double, 1, 1, _sg_cfg_cb__test_sleep, NULL);
xbt_cfg_setdefault_double(_sg_cfg_set, "smpi/test", 1e-4);
xbt_cfg_register(&_sg_cfg_set, "smpi/wtime",
"Minimum time to inject inside a call to MPI_Wtime",
- xbt_cfgelm_double, 1, 1, NULL, NULL);
- xbt_cfg_setdefault_double(_sg_cfg_set, "smpi/wtime", 3e-8);
+ xbt_cfgelm_double, 1, 1, _sg_cfg_cb__wtime_sleep, NULL);
+ xbt_cfg_setdefault_double(_sg_cfg_set, "smpi/wtime", 0.0);
xbt_cfg_register(&_sg_cfg_set, "smpi/coll_selector",
"Which collective selector to use",
/* File-scope state shared by the SMPI timing-injection machinery. */
xbt_dynar_t smpi_or_values  = NULL;   /* parsed "or" timing factors   */
xbt_dynar_t smpi_ois_values = NULL;   /* parsed "ois" timing factors  */

/* Cached copies of the "smpi/wtime", "smpi/iprobe" and "smpi/test"
 * config options; updated by the _sg_cfg_cb__*_sleep callbacks.
 * Defaults match the xbt_cfg_setdefault_double() values. */
double smpi_wtime_sleep  = 0.0;
double smpi_iprobe_sleep = 1e-4;
double smpi_test_sleep   = 1e-4;
// Methods used to parse and store the values for timing injections in smpi
// These are taken from surf/network.c and generalized to have more factors
// These methods should be merged with those in surf/network.c (moved somewhere in xbt ?)
smpi_bench_end();
time = SIMIX_get_clock();
//to avoid deadlocks if called too many times
- double sleeptime= sg_cfg_get_double("smpi/wtime");
- simcall_process_sleep(sleeptime);
+ if(smpi_wtime_sleep > 0) simcall_process_sleep(smpi_wtime_sleep);
smpi_bench_begin();
} else {
time = SIMIX_get_clock();
//assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
//to avoid deadlocks
- double sleeptime= sg_cfg_get_double("smpi/test");
//multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
static int nsleeps = 1;
- simcall_process_sleep(nsleeps*sleeptime);
+ if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
smpi_empty_status(status);
flag = 1;
}
}
if(size > 0) {
- //to avoid deadlocks
- double sleeptime= sg_cfg_get_double("smpi/test");
//multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
static int nsleeps = 1;
- simcall_process_sleep(nsleeps*sleeptime);
+ if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
i = simcall_comm_testany(comms);
// not MPI_UNDEFINED, as this is a simix return code
comm, PERSISTENT | RECV);
//to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
- double sleeptime= sg_cfg_get_double("smpi/iprobe");
//multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
static int nsleeps = 1;
-
- simcall_process_sleep(nsleeps*sleeptime);
-
+ if(smpi_iprobe_sleep > 0) simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
// behave like a receive, but don't do it
smx_rdv_t mailbox;