details:
- by default, it is set to 0
- if it is 0, the behavior of each MPI process (at the interface
level: MPI_Send, MPI_Recv, ...) is not grouped by host (this means
that you cannot tell where it was executed by looking only at the
trace)
- otherwise, processes are grouped by host
info:
- traditional MPI-focused visualization tools usually do not
group by host: that is the reason we are keeping the default value
at 0 (not grouped)
git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/simgrid/simgrid/trunk@8498 48e7efb5-ca39-0410-a469-dd3cf9ba447f
#ifdef HAVE_TRACING
#define OPT_TRACING_SMPI "tracing/smpi"
#ifdef HAVE_TRACING
#define OPT_TRACING_SMPI "tracing/smpi"
+#define OPT_TRACING_SMPI_GROUP "tracing/smpi/group"
#define OPT_TRACING_PLATFORM "tracing/platform"
#define OPT_TRACING_MSG_TASK "tracing/msg/task"
#define OPT_TRACING_MSG_PROCESS "tracing/msg/process"
#define OPT_TRACING_PLATFORM "tracing/platform"
#define OPT_TRACING_MSG_TASK "tracing/msg/task"
#define OPT_TRACING_MSG_PROCESS "tracing/msg/process"
return xbt_cfg_get_int(_surf_cfg_set, OPT_TRACING_SMPI);
}
return xbt_cfg_get_int(_surf_cfg_set, OPT_TRACING_SMPI);
}
+/* Return non-zero when traced MPI processes should be grouped by host
+ * (config option "tracing/smpi/group"; registered with default 0, i.e.
+ * not grouped). */
+int TRACE_smpi_is_grouped(void)
+{
+  return xbt_cfg_get_int(_surf_cfg_set, OPT_TRACING_SMPI_GROUP);
+}
+
int TRACE_platform_is_enabled(void)
{
return xbt_cfg_get_int(_surf_cfg_set, OPT_TRACING_PLATFORM);
int TRACE_platform_is_enabled(void)
{
return xbt_cfg_get_int(_surf_cfg_set, OPT_TRACING_PLATFORM);
xbt_cfgelm_int, &default_tracing_smpi, 0, 1,
NULL, NULL);
xbt_cfgelm_int, &default_tracing_smpi, 0, 1,
NULL, NULL);
+ /* smpi grouped */
+ int default_tracing_smpi_grouped = 0;
+ xbt_cfg_register(&_surf_cfg_set, OPT_TRACING_SMPI_GROUP,
+ "Group MPI processes by host.",
+ xbt_cfgelm_int, &default_tracing_smpi_grouped, 0, 1,
+ NULL, NULL);
+
+
/* platform */
int default_tracing_platform = 0;
xbt_cfg_register(&_surf_cfg_set, OPT_TRACING_PLATFORM,
/* platform */
int default_tracing_platform = 0;
xbt_cfg_register(&_surf_cfg_set, OPT_TRACING_PLATFORM,
- pajeDefineContainerType("MPI_PROCESS", "HOST", "MPI_PROCESS");
+ if (TRACE_smpi_is_grouped()){
+ pajeDefineContainerType("MPI_PROCESS", "HOST", "MPI_PROCESS");
+ }else{
+ pajeDefineContainerType("MPI_PROCESS", "PLATFORM", "MPI_PROCESS");
+ }
pajeDefineStateType("MPI_STATE", "MPI_PROCESS", "MPI_STATE");
pajeDefineLinkType("MPI_LINK", "0", "MPI_PROCESS", "MPI_PROCESS",
"MPI_LINK");
pajeDefineStateType("MPI_STATE", "MPI_PROCESS", "MPI_STATE");
pajeDefineLinkType("MPI_LINK", "0", "MPI_PROCESS", "MPI_PROCESS",
"MPI_LINK");
/* from instr_config.c */
int TRACE_is_configured(void);
int TRACE_smpi_is_enabled(void);
/* from instr_config.c */
int TRACE_is_configured(void);
int TRACE_smpi_is_enabled(void);
+int TRACE_smpi_is_grouped(void);
int TRACE_platform_is_enabled(void);
int TRACE_msg_task_is_enabled(void);
int TRACE_msg_process_is_enabled(void);
int TRACE_platform_is_enabled(void);
int TRACE_msg_task_is_enabled(void);
int TRACE_msg_process_is_enabled(void);
char str[INSTR_DEFAULT_STR_SIZE];
TRACE_smpi_container(rank, str, INSTR_DEFAULT_STR_SIZE);
char str[INSTR_DEFAULT_STR_SIZE];
TRACE_smpi_container(rank, str, INSTR_DEFAULT_STR_SIZE);
- pajeCreateContainer(SIMIX_get_clock(), str, "MPI_PROCESS",
+ if (TRACE_smpi_is_grouped()){
+ pajeCreateContainer(SIMIX_get_clock(), str, "MPI_PROCESS",
SIMIX_host_get_name(SIMIX_host_self()), str);
SIMIX_host_get_name(SIMIX_host_self()), str);
+ }else{
+ pajeCreateContainer(SIMIX_get_clock(), str, "MPI_PROCESS",
+ "platform", str);
+ }
}
void TRACE_smpi_finalize(int rank)
}
void TRACE_smpi_finalize(int rank)