### MC ###
IF(HAVE_MC)
+ ADD_TESH(page_store --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc page_store.tesh)
ADD_TESH(tesh-mc-dwarf --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc/dwarf --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf dwarf.tesh)
ADD_TESH(tesh-mc-dwarf-expression --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/mc/dwarf_expression --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf_expression dwarf_expression.tesh)
set(MC_SRC
src/mc/mc_checkpoint.c
+ src/mc/mc_snapshot.c
+ src/mc/mc_page_store.cpp
+ src/mc/mc_page_snapshot.cpp
src/mc/mc_comm_determinism.c
- src/mc/mc_compare.c
+ src/mc/mc_compare.cpp
src/mc/mc_diff.c
src/mc/mc_dwarf.c
src/mc/mc_dwarf_attrnames.h
if(enable_compile_optimizations)
set(optCFLAGS "-O3 -finline-functions -funroll-loops -fno-strict-aliasing ")
- if(CMAKE_COMPILER_IS_GNUCC)
+ if(CMAKE_COMPILER_IS_GNUCC AND (NOT enable_model-checking))
if(WIN32)
if (COMPILER_C_VERSION_MAJOR_MINOR STRGREATER "4.7")
# On windows, we need 4.8 or higher to enable lto because of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=50293
set(optCFLAGS "-O0 ")
endif()
+if(enable_model-checking AND enable_compile_optimizations)
+ # Forget it, do not optimize the code (because it confuses the MC):
+ set(optCFLAGS "-O0 ")
+ # But you can still optimize this:
+ foreach(s
+ src/xbt/mmalloc/mm.c
+ src/xbt/snprintf.c src/xbt/log.c
+ src/xbt/dynar.c src/xbt/set.c src/xbt/setset.c
+ src/xbt/backtrace_linux.c
+ src/mc/mc_dwarf_expression.c src/mc/mc_dwarf.c src/mc/mc_member.c
+ src/mc/mc_snapshot.c src/mc/mc_page_store.cpp src/mc/mc_page_snapshot.cpp
+ src/mc/mc_compare.cpp src/mc/mc_diff.c
+ src/mc/mc_dwarf.c src/mc/mc_dwarf_attrnames.h src/mc/mc_dwarf_expression.c src/mc/mc_dwarf_tagnames.h
+ src/mc/mc_set.cpp)
+ set_source_files_properties(${s} PROPERTIES COMPILE_FLAGS "-O3 -finline-functions -funroll-loops -fno-strict-aliasing")
+ endforeach()
+endif()
+
if(APPLE AND COMPILER_C_VERSION_MAJOR_MINOR MATCHES "4.6")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-deprecated-declarations")
set(optCFLAGS "-O0 ")
message(STATUS "Add LDFLAGS: \"$ENV{LDFLAGS}\" to CMAKE_C_LINK_FLAGS")
set(CMAKE_C_LINK_FLAGS "${CMAKE_C_LINK_FLAGS} $ENV{LDFLAGS}")
endif()
-
-if(enable_model-checking AND enable_compile_optimizations)
- message(WARNING "Sorry for now GCC optimizations does not work with model checking.\nPlease turn off optimizations with command:\ncmake -Denable_compile_optimizations=off .")
-endif()
add_subdirectory(${CMAKE_HOME_DIRECTORY}/examples/xbt)
+add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/mc)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf)
add_subdirectory(${CMAKE_HOME_DIRECTORY}/teshsuite/mc/dwarf_expression)
/** Cache the size of a memory page for the current system. */
XBT_PUBLIC_DATA(int) xbt_pagesize;
+/** Cache the number of bits of addresses inside a given page, log2(xbt_pagesize). */
+ XBT_PUBLIC_DATA(int) xbt_pagebits;
+
XBT_PUBLIC(const char *) xbt_procname(void);
#define XBT_BACKTRACE_SIZE 10 /* FIXME: better place? Do document */
#include "xbt/dynar.h"
#include "xbt/dict.h"
+SG_BEGIN_DECL()
+
/* Datatype representing a separate heap. The whole point of the mmalloc module
* is to allow several such heaps in the process. It thus works by redefining
* all the classical memory management functions (malloc and friends) with an
struct s_mc_snapshot;
struct s_dw_type;
-int mmalloc_compare_heap(struct s_mc_snapshot* snapshot1, struct s_mc_snapshot* snapshot2, xbt_mheap_t heap1, xbt_mheap_t heap2);
+int mmalloc_compare_heap(struct s_mc_snapshot* snapshot1, struct s_mc_snapshot* snapshot2);
int mmalloc_linear_compare_heap(xbt_mheap_t heap1, xbt_mheap_t heap2);
int init_heap_information(xbt_mheap_t heap1, xbt_mheap_t heap2, xbt_dynar_t to_ignore1, xbt_dynar_t to_ignore2);
int compare_heap_area(void *area1, void* area2, struct s_mc_snapshot* snapshot1, struct s_mc_snapshot* snapshot2, xbt_dynar_t previous, struct s_dw_type *type, int pointer_level);
size_t mmalloc_get_bytes_used(xbt_mheap_t);
ssize_t mmalloc_get_busy_size(xbt_mheap_t, void *ptr);
+SG_END_DECL()
+
#endif
#endif /* MMALLOC_H */
*/
#define xbt_die(...) \
do { \
- XBT_LOG_EXTERNAL_CATEGORY(xbt); \
XBT_CCRITICAL(xbt, __VA_ARGS__); \
xbt_abort(); \
} while (0)
/** @} */
+XBT_LOG_EXTERNAL_CATEGORY(xbt);
+
/* these ones live in str.h, but redeclare them here so that we do
not need to load the whole str.h and its heavy dependencies */
#ifndef __USE_GNU /* do not redeclare existing headers */
#include <xbt/misc.h> /* XBT_PUBLIC */
+SG_BEGIN_DECL()
+
/** @brief get time in seconds
* gives the number of seconds since the Epoch (00:00:00 UTC, January 1, 1970).
XBT_PUBLIC(void) xbt_os_threadtimer_start(xbt_os_timer_t timer);
XBT_PUBLIC(void) xbt_os_threadtimer_resume(xbt_os_timer_t timer);
XBT_PUBLIC(void) xbt_os_threadtimer_stop(xbt_os_timer_t timer);
+
+SG_END_DECL()
+
#endif
-/* Copyright (c) 2008-2014. The SimGrid Team.
+ /* Copyright (c) 2008-2014. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
/********************************** Configuration of MC **************************************/
extern int _sg_do_model_check;
extern int _sg_mc_checkpoint;
+extern int _sg_mc_sparse_checkpoint;
+extern int _sg_mc_soft_dirty;
extern char* _sg_mc_property_file;
extern int _sg_mc_timeout;
extern int _sg_mc_hash;
/********************************* Global *************************************/
void _mc_cfg_cb_reduce(const char *name, int pos);
void _mc_cfg_cb_checkpoint(const char *name, int pos);
+void _mc_cfg_cb_sparse_checkpoint(const char *name, int pos);
+void _mc_cfg_cb_soft_dirty(const char *name, int pos);
void _mc_cfg_cb_property(const char *name, int pos);
void _mc_cfg_cb_timeout(const char *name, int pos);
void _mc_cfg_cb_hash(const char *name, int pos);
#include "xbt/module.h"
#include <xbt/mmalloc.h>
#include "../smpi/private.h"
+#include <alloca.h>
#include "xbt/mmalloc/mmprivate.h"
#include <libelf.h>
#include "mc_private.h"
+#include <mc/mc.h>
+
+#include "mc_mmu.h"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_checkpoint, mc,
"Logging specific to mc_checkpoint");
{
//munmap(reg->data, reg->size);
xbt_free(reg->data);
+ if (reg->page_numbers) {
+ mc_free_page_snapshot_region(reg->page_numbers, mc_page_count(reg->size));
+ }
xbt_free(reg);
}
xbt_free(snapshot);
}
-
/******************************* Snapshot regions ********************************/
/*********************************************************************************/
-static mc_mem_region_t MC_region_new(int type, void *start_addr, size_t size)
+static mc_mem_region_t MC_region_new(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
{
+ if (_sg_mc_sparse_checkpoint) {
+ return mc_region_new_sparse(type, start_addr, size, ref_reg);
+ }
+
mc_mem_region_t new_reg = xbt_new(s_mc_mem_region_t, 1);
new_reg->start_addr = start_addr;
+ new_reg->data = NULL;
new_reg->size = size;
- //new_reg->data = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
- //if(new_reg->data==MAP_FAILED)
- //xbt_die("Could not mmap new memory for snapshot.");
+ new_reg->page_numbers = NULL;
new_reg->data = xbt_malloc(size);
memcpy(new_reg->data, start_addr, size);
- //madvise(new_reg->data, size, MADV_MERGEABLE);
-
XBT_DEBUG("New region : type : %d, data : %p (real addr %p), size : %zu",
type, new_reg->data, start_addr, size);
-
return new_reg;
+
}
-static void MC_region_restore(mc_mem_region_t reg)
+/** @brief Restore a region from a snapshot
+ *
+ * If we are using per page snapshots, it is possible to use the reference
+ * region in order to do an incremental restoration of the region: the
+ * softclean pages which are shared between the two snapshots do not need
+ * to be restored.
+ *
+ * @param reg Target region
+ * @param ref_reg Current region (if not NULL), used for lazy per page restoration
+ */
+static void MC_region_restore(mc_mem_region_t reg, mc_mem_region_t ref_reg)
{
/*FIXME: check if start_addr is still mapped, if it is not, then map it
- before copying the data */
-
- memcpy(reg->start_addr, reg->data, reg->size);
+ before copying the data */
+ if (!reg->page_numbers) {
+ memcpy(reg->start_addr, reg->data, reg->size);
+ } else {
+ mc_region_restore_sparse(reg, ref_reg);
+ }
return;
}
static void MC_snapshot_add_region(mc_snapshot_t snapshot, int type,
void *start_addr, size_t size)
+
{
- mc_mem_region_t new_reg = MC_region_new(type, start_addr, size);
+ mc_mem_region_t ref_reg =
+ mc_model_checker->parent_snapshot ? mc_model_checker->parent_snapshot->regions[type] : NULL;
+ mc_mem_region_t new_reg = MC_region_new(type, start_addr, size, ref_reg);
snapshot->regions[type] = new_reg;
return;
}
snapshot->privatization_regions =
xbt_new(mc_mem_region_t, SIMIX_process_count());
for (i = 0; i < SIMIX_process_count(); i++) {
+ mc_mem_region_t ref_reg =
+ mc_model_checker->parent_snapshot ? mc_model_checker->parent_snapshot->privatization_regions[i] : NULL;
snapshot->privatization_regions[i] =
- MC_region_new(-1, mappings[i], size_data_exe);
+ MC_region_new(-1, mappings[i], size_data_exe, ref_reg);
}
snapshot->privatization_index = loaded_page;
}
return result;
};
-static xbt_dynar_t MC_take_snapshot_stacks(mc_snapshot_t * snapshot, void *heap)
+static xbt_dynar_t MC_take_snapshot_stacks(mc_snapshot_t * snapshot)
{
xbt_dynar_t res =
st->local_variables = MC_get_local_variables_values(st->stack_frames);
unw_word_t sp = xbt_dynar_get_as(st->stack_frames, 0, mc_stack_frame_t)->sp;
- st->stack_pointer =
- ((char *) heap + (size_t) (((char *) ((long) sp) - (char *) std_heap)));
- st->real_address = current_stack->address;
xbt_dynar_push(res, &st);
(*snapshot)->stack_sizes =
xbt_realloc((*snapshot)->stack_sizes, (cursor + 1) * sizeof(size_t));
(*snapshot)->stack_sizes[cursor] =
- current_stack->size - ((char *) st->stack_pointer -
- (char *) ((char *) heap +
- ((char *) current_stack->address -
- (char *) std_heap)));
+ (char*) current_stack->address + current_stack->size - (char*) sp;
}
return res;
}
}
+/** @brief Can we remove this snapshot?
+ *
+ * Some snapshots cannot be removed (yet) because we need them
+ * at this point.
+ *
+ * @param snapshot
+ */
+int mc_important_snapshot(mc_snapshot_t snapshot)
+{
+  // We need this snapshot in order to know which
+  // pages need to be stored in the next snapshot:
+ if (_sg_mc_sparse_checkpoint && snapshot == mc_model_checker->parent_snapshot)
+ return true;
+
+ return false;
+}
+
mc_snapshot_t MC_take_snapshot(int num_state)
{
/* Save the std heap and the writable mapped pages of libsimgrid and binary */
MC_get_memory_regions(snapshot);
+ if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
+ mc_softdirty_reset();
+ }
snapshot->to_ignore = MC_take_snapshot_ignore();
if (_sg_mc_visited > 0 || strcmp(_sg_mc_property_file, "")) {
snapshot->stacks =
- MC_take_snapshot_stacks(&snapshot, snapshot->regions[0]->data);
+ MC_take_snapshot_stacks(&snapshot);
if (_sg_mc_hash && snapshot->stacks != NULL) {
snapshot->hash = mc_hash_processes_state(num_state, snapshot->stacks);
} else {
snapshot->hash = 0;
}
- // mprotect the region after zero-ing ignored parts:
- /*size_t i;
- for(i=0; i!=NB_REGIONS; ++i) {
- mc_mem_region_t region = snapshot->regions[i];
- mprotect(region->data, region->size, PROT_READ);
- } */
-
MC_snapshot_ignore_restore(snapshot);
-
+ mc_model_checker->parent_snapshot = snapshot;
return snapshot;
-
}
void MC_restore_snapshot(mc_snapshot_t snapshot)
{
+ mc_snapshot_t parent_snapshot = mc_model_checker->parent_snapshot;
+
unsigned int i;
for (i = 0; i < NB_REGIONS; i++) {
// For privatized, variables we decided it was not necessary to take the snapshot:
if (snapshot->regions[i])
- MC_region_restore(snapshot->regions[i]);
+ MC_region_restore(snapshot->regions[i],
+ parent_snapshot ? parent_snapshot->regions[i] : NULL);
}
if (snapshot->privatization_regions) {
for (i = 0; i < SIMIX_process_count(); i++) {
if (snapshot->privatization_regions[i]) {
- MC_region_restore(snapshot->privatization_regions[i]);
+ MC_region_restore(snapshot->privatization_regions[i],
+ parent_snapshot ? parent_snapshot->privatization_regions[i] : NULL);
}
}
switch_data_segment(snapshot->privatization_index);
}
- MC_snapshot_ignore_restore(snapshot);
-}
-
-void *mc_translate_address(uintptr_t addr, mc_snapshot_t snapshot)
-{
-
- // If not in a process state/clone:
- if (!snapshot) {
- return (uintptr_t *) addr;
- }
- // If it is in a snapshot:
- for (size_t i = 0; i != NB_REGIONS; ++i) {
- mc_mem_region_t region = snapshot->regions[i];
- uintptr_t start = (uintptr_t) region->start_addr;
- uintptr_t end = start + region->size;
-
- // The address is in this region:
- if (addr >= start && addr < end) {
- uintptr_t offset = addr - start;
- return (void *) ((uintptr_t) region->data + offset);
- }
-
+ if (_sg_mc_sparse_checkpoint && _sg_mc_soft_dirty) {
+ mc_softdirty_reset();
}
- // It is not in a snapshot:
- return (void *) addr;
-}
-
-uintptr_t mc_untranslate_address(void *addr, mc_snapshot_t snapshot)
-{
- if (!snapshot) {
- return (uintptr_t) addr;
- }
-
- for (size_t i = 0; i != NB_REGIONS; ++i) {
- mc_mem_region_t region = snapshot->regions[i];
- if (addr >= region->data
- && addr <= (void *) (((char *) region->data) + region->size)) {
- size_t offset = (size_t) ((char *) addr - (char *) region->data);
- return ((uintptr_t) region->start_addr) + offset;
- }
- }
-
- return (uintptr_t) addr;
+ MC_snapshot_ignore_restore(snapshot);
+ mc_model_checker->parent_snapshot = snapshot;
}
mc_snapshot_t SIMIX_pre_mc_snapshot(smx_simcall_t simcall)
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <inttypes.h>
+#include <boost/unordered_set.hpp>
#include "mc_private.h"
typedef struct s_pointers_pair {
void *p1;
void *p2;
+ bool operator==(s_pointers_pair const& x) const {
+ return this->p1 == x.p1 && this->p2 == x.p2;
+ }
+ bool operator<(s_pointers_pair const& x) const {
+ return this->p1 < x.p1 || (this->p1 == x.p1 && this->p2 < x.p2);
+ }
} s_pointers_pair_t, *pointers_pair_t;
-__thread xbt_dynar_t compared_pointers;
+namespace boost {
+ template<>
+ struct hash<s_pointers_pair> {
+ typedef uintptr_t result_type;
+ result_type operator()(s_pointers_pair const& x) const {
+ return (result_type) x.p1 ^
+ ((result_type) x.p2 << 8 | (result_type) x.p2 >> (8*sizeof(uintptr_t) - 8));
+ }
+ };
+}
+
+struct mc_compare_state {
+ boost::unordered_set<s_pointers_pair> compared_pointers;
+};
+
+extern "C" {
/************************** Free functions ****************************/
/********************************************************************/
* \result !=0 if the pointers were added (they were not in the set),
* 0 otherwise (they were already in the set)
*/
-static int add_compared_pointers(void *p1, void *p2)
+static int add_compared_pointers(mc_compare_state& state, void *p1, void *p2)
{
-
- pointers_pair_t new_pair = xbt_new0(s_pointers_pair_t, 1);
- new_pair->p1 = p1;
- new_pair->p2 = p2;
-
- if (xbt_dynar_is_empty(compared_pointers)) {
- xbt_dynar_push(compared_pointers, &new_pair);
- return 1;
- }
-
- unsigned int cursor = 0;
- int start = 0;
- int end = xbt_dynar_length(compared_pointers) - 1;
- pointers_pair_t pair = NULL;
-
- pointers_pair_t *p =
- (pointers_pair_t *) xbt_dynar_get_ptr(compared_pointers, 0);
-
- while (start <= end) {
- cursor = (start + end) / 2;
- pair = p[cursor];
- if (pair->p1 < p1) {
- start = cursor + 1;
- } else if (pair->p1 > p1) {
- end = cursor - 1;
- } else if (pair->p2 < p2) {
- start = cursor + 1;
- } else if (pair->p2 > p2) {
- end = cursor - 1;
- } else {
- pointers_pair_free(new_pair);
- return 0;
- }
- }
-
- if (pair->p1 < p1)
- xbt_dynar_insert_at(compared_pointers, cursor + 1, &new_pair);
- else if (pair->p1 > p1)
- xbt_dynar_insert_at(compared_pointers, cursor, &new_pair);
- else if (pair->p2 < p2)
- xbt_dynar_insert_at(compared_pointers, cursor + 1, &new_pair);
- else if (pair->p2 > p2)
- xbt_dynar_insert_at(compared_pointers, cursor, &new_pair);
- else
- xbt_die("Unrecheable");
-
- return 1;
+ s_pointers_pair_t new_pair;
+ new_pair.p1 = p1;
+ new_pair.p2 = p2;
+ return state.compared_pointers.insert(new_pair).second ? 1 : 0;
}
-static int compare_areas_with_type(void *area1, void *area2,
- mc_snapshot_t snapshot1,
- mc_snapshot_t snapshot2, dw_type_t type,
- int region_size, int region_type,
- void *start_data, int pointer_level)
+static int compare_areas_with_type(struct mc_compare_state& state,
+ void* real_area1, mc_snapshot_t snapshot1, mc_mem_region_t region1,
+ void* real_area2, mc_snapshot_t snapshot2, mc_mem_region_t region2,
+ dw_type_t type, int pointer_level)
{
-
unsigned int cursor = 0;
dw_type_t member, subtype, subsubtype;
int elm_size, i, res;
- void *addr_pointed1, *addr_pointed2;
+ top:
switch (type->type) {
case DW_TAG_unspecified_type:
return 1;
case DW_TAG_base_type:
case DW_TAG_enumeration_type:
case DW_TAG_union_type:
- return (memcmp(area1, area2, type->byte_size) != 0);
+ {
+ void* data1 =
+ mc_snapshot_read_region(real_area1, region1, alloca(type->byte_size), type->byte_size);
+ void* data2 =
+ mc_snapshot_read_region(real_area2, region2, alloca(type->byte_size), type->byte_size);
+ return (memcmp(data1, data2, type->byte_size) != 0);
break;
+ }
case DW_TAG_typedef:
case DW_TAG_volatile_type:
case DW_TAG_const_type:
- return compare_areas_with_type(area1, area2, snapshot1, snapshot2,
- type->subtype, region_size, region_type,
- start_data, pointer_level);
- break;
+ // Poor man's TCO:
+ type = type->subtype;
+ goto top;
case DW_TAG_array_type:
subtype = type->subtype;
switch (subtype->type) {
break;
}
for (i = 0; i < type->element_count; i++) {
- res =
- compare_areas_with_type((char *) area1 + (i * elm_size),
- (char *) area2 + (i * elm_size), snapshot1,
- snapshot2, type->subtype, region_size,
- region_type, start_data, pointer_level);
+ size_t off = i * elm_size;
+ res = compare_areas_with_type(state,
+ (char*) real_area1 + off, snapshot1, region1,
+ (char*) real_area2 + off, snapshot2, region2,
+ type->subtype, pointer_level);
if (res == 1)
return res;
}
case DW_TAG_pointer_type:
case DW_TAG_reference_type:
case DW_TAG_rvalue_reference_type:
-
- addr_pointed1 = *((void **) (area1));
- addr_pointed2 = *((void **) (area2));
+ {
+ void* addr_pointed1 = mc_snapshot_read_pointer_region(real_area1, region1);
+ void* addr_pointed2 = mc_snapshot_read_pointer_region(real_area2, region2);
if (type->subtype && type->subtype->type == DW_TAG_subroutine_type) {
return (addr_pointed1 != addr_pointed2);
if (addr_pointed1 == NULL && addr_pointed2 == NULL)
return 0;
- if (!add_compared_pointers(addr_pointed1, addr_pointed2))
+ if (!add_compared_pointers(state, addr_pointed1, addr_pointed2))
return 0;
pointer_level++;
snapshot2, NULL, type->subtype, pointer_level);
}
// The pointers are both in the current object R/W segment:
- else if (addr_pointed1 > start_data
- && (char *) addr_pointed1 <= (char *) start_data + region_size) {
+ else if (addr_pointed1 > region1->start_addr
+ && (char *) addr_pointed1 <= (char *) region1->start_addr + region1->size) {
if (!
- (addr_pointed2 > start_data
- && (char *) addr_pointed2 <= (char *) start_data + region_size))
+ (addr_pointed2 > region2->start_addr
+ && (char *) addr_pointed2 <= (char *) region2->start_addr + region2->size))
return 1;
if (type->dw_type_id == NULL)
return (addr_pointed1 != addr_pointed2);
else {
- void *translated_addr_pointer1 =
- mc_translate_address((uintptr_t) addr_pointed1, snapshot1);
- void *translated_addr_pointer2 =
- mc_translate_address((uintptr_t) addr_pointed2, snapshot2);
- return compare_areas_with_type(translated_addr_pointer1,
- translated_addr_pointer2, snapshot1,
- snapshot2, type->subtype, region_size,
- region_type, start_data,
- pointer_level);
+ return compare_areas_with_type(state,
+ addr_pointed1, snapshot1, region1,
+ addr_pointed2, snapshot2, region2,
+ type->subtype, pointer_level);
}
}
}
}
break;
+ }
case DW_TAG_structure_type:
case DW_TAG_class_type:
xbt_dynar_foreach(type->members, cursor, member) {
void *member1 =
- mc_member_snapshot_resolve(area1, type, member, snapshot1);
+ mc_member_resolve(real_area1, type, member, snapshot1);
void *member2 =
- mc_member_snapshot_resolve(area2, type, member, snapshot2);
+ mc_member_resolve(real_area2, type, member, snapshot2);
+ mc_mem_region_t subregion1 = mc_get_region_hinted(member1, snapshot1, region1);
+ mc_mem_region_t subregion2 = mc_get_region_hinted(member2, snapshot2, region2);
res =
- compare_areas_with_type(member1, member2, snapshot1, snapshot2,
- member->subtype, region_size, region_type,
- start_data, pointer_level);
+ compare_areas_with_type(state,
+ member1, snapshot1, subregion1,
+ member2, snapshot2, subregion2,
+ member->subtype, pointer_level);
if (res == 1)
return res;
}
mc_mem_region_t r2, mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2)
{
-
- if (!compared_pointers) {
- compared_pointers =
- xbt_dynar_new(sizeof(pointers_pair_t), pointers_pair_free_voidp);
- } else {
- xbt_dynar_reset(compared_pointers);
- }
+ xbt_assert(r1 && r2,
+ "Missing region. Did you enable SMPI privatisation? It is not compatible with state comparison.");
+ struct mc_compare_state state;
xbt_dynar_t variables;
int res;
unsigned int cursor = 0;
dw_variable_t current_var;
- size_t offset;
- void *start_data;
- void *start_data_binary = mc_binary_info->start_rw;
- void *start_data_libsimgrid = mc_libsimgrid_info->start_rw;
mc_object_info_t object_info = NULL;
if (region_type == 2) {
object_info = mc_binary_info;
- start_data = start_data_binary;
} else {
object_info = mc_libsimgrid_info;
- start_data = start_data_libsimgrid;
}
variables = object_info->global_variables;
|| (char *) current_var->address > (char *) object_info->end_rw)
continue;
- offset = (char *) current_var->address - (char *) object_info->start_rw;
-
dw_type_t bvariable_type = current_var->type;
res =
- compare_areas_with_type((char *) r1->data + offset,
- (char *) r2->data + offset, snapshot1,
- snapshot2, bvariable_type, r1->size,
- region_type, start_data, 0);
+ compare_areas_with_type(state,
+ (char *) current_var->address, snapshot1, r1,
+ (char *) current_var->address, snapshot2, r2,
+ bvariable_type, 0);
if (res == 1) {
- XBT_VERB("Global variable %s (%p - %p) is different between snapshots",
- current_var->name, (char *) r1->data + offset,
- (char *) r2->data + offset);
- xbt_dynar_free(&compared_pointers);
- compared_pointers = NULL;
+ XBT_VERB("Global variable %s (%p) is different between snapshots",
+ current_var->name, (char *) current_var->address);
return 1;
}
}
- xbt_dynar_free(&compared_pointers);
- compared_pointers = NULL;
-
return 0;
}
static int compare_local_variables(mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2,
mc_snapshot_stack_t stack1,
- mc_snapshot_stack_t stack2, void *heap1,
- void *heap2)
+ mc_snapshot_stack_t stack2)
{
- void *start_data_binary = mc_binary_info->start_rw;
- void *start_data_libsimgrid = mc_libsimgrid_info->start_rw;
-
- if (!compared_pointers) {
- compared_pointers =
- xbt_dynar_new(sizeof(pointers_pair_t), pointers_pair_free_voidp);
- } else {
- xbt_dynar_reset(compared_pointers);
- }
+ struct mc_compare_state state;
if (xbt_dynar_length(stack1->local_variables) !=
xbt_dynar_length(stack2->local_variables)) {
XBT_VERB("Different number of local variables");
- xbt_dynar_free(&compared_pointers);
- compared_pointers = NULL;
return 1;
} else {
unsigned int cursor = 0;
local_variable_t current_var1, current_var2;
- int offset1, offset2, res;
+ int res;
while (cursor < xbt_dynar_length(stack1->local_variables)) {
current_var1 =
(local_variable_t) xbt_dynar_get_as(stack1->local_variables, cursor,
if (strcmp(current_var1->name, current_var2->name) != 0
|| current_var1->subprogram != current_var1->subprogram
|| current_var1->ip != current_var2->ip) {
- xbt_dynar_free(&compared_pointers);
// TODO, fix current_varX->subprogram->name to include name if DW_TAG_inlined_subprogram
XBT_VERB
("Different name of variable (%s - %s) or frame (%s - %s) or ip (%lu - %lu)",
current_var1->ip, current_var2->ip);
return 1;
}
- offset1 = (char *) current_var1->address - (char *) std_heap;
- offset2 = (char *) current_var2->address - (char *) std_heap;
// TODO, fix current_varX->subprogram->name to include name if DW_TAG_inlined_subprogram
- if (current_var1->region == 1) {
dw_type_t subtype = current_var1->type;
res =
- compare_areas_with_type((char *) heap1 + offset1,
- (char *) heap2 + offset2, snapshot1,
- snapshot2, subtype, 0, 1,
- start_data_libsimgrid, 0);
- } else {
- dw_type_t subtype = current_var2->type;
- res =
- compare_areas_with_type((char *) heap1 + offset1,
- (char *) heap2 + offset2, snapshot1,
- snapshot2, subtype, 0, 2, start_data_binary,
- 0);
- }
+ compare_areas_with_type(state,
+ current_var1->address, snapshot1, mc_get_snapshot_region(current_var1->address, snapshot1),
+ current_var2->address, snapshot2, mc_get_snapshot_region(current_var2->address, snapshot2),
+ subtype, 0);
+
if (res == 1) {
// TODO, fix current_varX->subprogram->name to include name if DW_TAG_inlined_subprogram
XBT_VERB
("Local variable %s (%p - %p) in frame %s is different between snapshots",
- current_var1->name, (char *) heap1 + offset1,
- (char *) heap2 + offset2, current_var1->subprogram->name);
- xbt_dynar_free(&compared_pointers);
- compared_pointers = NULL;
+ current_var1->name, current_var1->address, current_var2->address,
+ current_var1->subprogram->name);
return res;
}
cursor++;
}
- xbt_dynar_free(&compared_pointers);
- compared_pointers = NULL;
return 0;
}
}
XBT_VERB("(%d - %d) Different enabled processes", num1, num2);
}
- int i = 0;
+ unsigned long i = 0;
size_t size_used1, size_used2;
int is_diff = 0;
#endif
/* Init heap information used in heap comparison algorithm */
- res_init =
- init_heap_information((xbt_mheap_t) s1->regions[0]->data,
- (xbt_mheap_t) s2->regions[0]->data, s1->to_ignore,
- s2->to_ignore);
+ xbt_mheap_t heap1 = (xbt_mheap_t) mc_snapshot_read(std_heap, s1,
+ alloca(sizeof(struct mdesc)), sizeof(struct mdesc));
+ xbt_mheap_t heap2 = (xbt_mheap_t) mc_snapshot_read(std_heap, s2,
+ alloca(sizeof(struct mdesc)), sizeof(struct mdesc));
+ res_init = init_heap_information(heap1, heap2, s1->to_ignore, s2->to_ignore);
if (res_init == -1) {
#ifdef MC_DEBUG
XBT_DEBUG("(%d - %d) Different heap information", num1, num2);
(mc_snapshot_stack_t) xbt_dynar_get_as(s2->stacks, cursor,
mc_snapshot_stack_t);
diff_local =
- compare_local_variables(s1, s2, stack1, stack2, s1->regions[0]->data,
- s2->regions[0]->data);
+ compare_local_variables(s1, s2, stack1, stack2);
if (diff_local > 0) {
#ifdef MC_DEBUG
if (is_diff == 0) {
#endif
/* Compare heap */
- if (mmalloc_compare_heap(s1, s2, (xbt_mheap_t) s1->regions[0]->data,
- (xbt_mheap_t) s2->regions[0]->data) > 0) {
+ if (mmalloc_compare_heap(s1, s2) > 0) {
#ifdef MC_DEBUG
xbt_os_walltimer_stop(timer);
return simcall_mc_compare_snapshots(s1, s2);
}
+
+}
struct s_mc_diff {
/** \brief Base address of the real heap */
void *s_heap;
- /** \brief Base address of the first heap snapshot */
- void *heapbase1;
- /** \brief Base address of the second heap snapshot */
- void *heapbase2;
- malloc_info *heapinfo1, *heapinfo2;
size_t heaplimit;
// Number of blocks in the heaps:
size_t heapsize1, heapsize2;
return 0;
}
+// TODO, this should depend on the snapshot?
static int is_block_stack(int block)
{
unsigned int cursor = 0;
state->s_heap =
(char *) mmalloc_get_current_heap() - STD_HEAP_SIZE - xbt_pagesize;
- state->heapbase1 = (char *) heap1 + BLOCKSIZE;
- state->heapbase2 = (char *) heap2 + BLOCKSIZE;
-
- state->heapinfo1 =
- (malloc_info *) ((char *) heap1 +
- ((uintptr_t)
- ((char *) ((struct mdesc *) heap1)->heapinfo -
- (char *) state->s_heap)));
- state->heapinfo2 =
- (malloc_info *) ((char *) heap2 +
- ((uintptr_t)
- ((char *) ((struct mdesc *) heap2)->heapinfo -
- (char *) state->s_heap)));
-
state->heapsize1 = heap1->heapsize;
state->heapsize2 = heap2->heapsize;
memset(state->types2, 0,
state->heaplimit * MAX_FRAGMENT_PER_BLOCK * sizeof(type_name *));
- if (MC_is_active()) {
- MC_ignore_global_variable("mc_diff_info");
- }
-
return 0;
}
}
-int mmalloc_compare_heap(mc_snapshot_t snapshot1, mc_snapshot_t snapshot2,
- xbt_mheap_t heap1, xbt_mheap_t heap2)
+int mmalloc_compare_heap(mc_snapshot_t snapshot1, mc_snapshot_t snapshot2)
{
struct s_mc_diff *state = mc_diff_info;
- if (heap1 == NULL && heap2 == NULL) {
- XBT_DEBUG("Malloc descriptors null");
- return 0;
- }
-
/* Start comparison */
size_t i1, i2, j1, j2, k;
void *addr_block1, *addr_block2, *addr_frag1, *addr_frag2;
i1 = 1;
+ malloc_info heapinfo_temp1, heapinfo_temp2;
+ malloc_info heapinfo_temp2b;
+
+ mc_mem_region_t heap_region1 = snapshot1->regions[0];
+ mc_mem_region_t heap_region2 = snapshot2->regions[0];
+
+ // This is in snapshot do not use them directly:
+ malloc_info* heapinfos1 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot1);
+ malloc_info* heapinfos2 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot2);
+
while (i1 <= state->heaplimit) {
- if (state->heapinfo1[i1].type == -1) { /* Free block */
+ // TODO, lookup in the correct region in order to speed it up:
+ malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[i1], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
+ malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[i1], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
+
+ if (heapinfo1->type == -1) { /* Free block */
i1++;
continue;
}
((void *) (((ADDR2UINT(i1)) - 1) * BLOCKSIZE +
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
- if (state->heapinfo1[i1].type == 0) { /* Large block */
+ if (heapinfo1->type == 0) { /* Large block */
if (is_stack(addr_block1)) {
- for (k = 0; k < state->heapinfo1[i1].busy_block.size; k++)
+ for (k = 0; k < heapinfo1->busy_block.size; k++)
state->equals_to1_(i1 + k, 0) = make_heap_area(i1, -1);
- for (k = 0; k < state->heapinfo2[i1].busy_block.size; k++)
+ for (k = 0; k < heapinfo2->busy_block.size; k++)
state->equals_to2_(i1 + k, 0) = make_heap_area(i1, -1);
- i1 += state->heapinfo1[i1].busy_block.size;
+ i1 += heapinfo1->busy_block.size;
continue;
}
res_compare = 0;
/* Try first to associate to same block in the other heap */
- if (state->heapinfo2[i1].type == state->heapinfo1[i1].type) {
+ if (heapinfo2->type == heapinfo1->type) {
if (state->equals_to2_(i1, 0).valid == 0) {
NULL, NULL, 0);
if (res_compare != 1) {
- for (k = 1; k < state->heapinfo2[i1].busy_block.size; k++)
+ for (k = 1; k < heapinfo2->busy_block.size; k++)
state->equals_to2_(i1 + k, 0) = make_heap_area(i1, -1);
- for (k = 1; k < state->heapinfo1[i1].busy_block.size; k++)
+ for (k = 1; k < heapinfo1->busy_block.size; k++)
state->equals_to1_(i1 + k, 0) = make_heap_area(i1, -1);
equal = 1;
- i1 += state->heapinfo1[i1].busy_block.size;
+ i1 += heapinfo1->busy_block.size;
}
xbt_dynar_reset(previous);
continue;
}
- if (state->heapinfo2[i2].type != 0) {
+ malloc_info* heapinfo2b = mc_snapshot_read_region(&heapinfos2[i2], heap_region2, &heapinfo_temp2b, sizeof(malloc_info));
+
+ if (heapinfo2b->type != 0) {
i2++;
continue;
}
NULL, NULL, 0);
if (res_compare != 1) {
- for (k = 1; k < state->heapinfo2[i2].busy_block.size; k++)
+ for (k = 1; k < heapinfo2b->busy_block.size; k++)
state->equals_to2_(i2 + k, 0) = make_heap_area(i1, -1);
- for (k = 1; k < state->heapinfo1[i1].busy_block.size; k++)
+ for (k = 1; k < heapinfo1->busy_block.size; k++)
state->equals_to1_(i1 + k, 0) = make_heap_area(i2, -1);
equal = 1;
- i1 += state->heapinfo1[i1].busy_block.size;
+ i1 += heapinfo1->busy_block.size;
}
xbt_dynar_reset(previous);
if (!equal) {
XBT_DEBUG("Block %zu not found (size_used = %zu, addr = %p)", i1,
- state->heapinfo1[i1].busy_block.busy_size, addr_block1);
+ heapinfo1->busy_block.busy_size, addr_block1);
i1 = state->heaplimit + 1;
nb_diff1++;
//i1++;
} else { /* Fragmented block */
- for (j1 = 0; j1 < (size_t) (BLOCKSIZE >> state->heapinfo1[i1].type); j1++) {
+ for (j1 = 0; j1 < (size_t) (BLOCKSIZE >> heapinfo1->type); j1++) {
- if (state->heapinfo1[i1].busy_frag.frag_size[j1] == -1) /* Free fragment */
+ if (heapinfo1->busy_frag.frag_size[j1] == -1) /* Free fragment */
continue;
if (state->equals_to1_(i1, j1).valid)
continue;
addr_frag1 =
- (void *) ((char *) addr_block1 + (j1 << state->heapinfo1[i1].type));
+ (void *) ((char *) addr_block1 + (j1 << heapinfo1->type));
i2 = 1;
equal = 0;
/* Try first to associate to same fragment in the other heap */
- if (state->heapinfo2[i1].type == state->heapinfo1[i1].type) {
+ if (heapinfo2->type == heapinfo1->type) {
if (state->equals_to2_(i1, j1).valid == 0) {
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
addr_frag2 =
(void *) ((char *) addr_block2 +
- (j1 << state->heapinfo2[i1].type));
+ (j1 << heapinfo2->type));
res_compare =
compare_heap_area(addr_frag1, addr_frag2, snapshot1, snapshot2,
while (i2 <= state->heaplimit && !equal) {
- if (state->heapinfo2[i2].type <= 0) {
+ malloc_info* heapinfo2b = mc_snapshot_read_region(&heapinfos2[i2], heap_region2, &heapinfo_temp2b, sizeof(malloc_info));
+ if (heapinfo2b->type <= 0) {
i2++;
continue;
}
- for (j2 = 0; j2 < (size_t) (BLOCKSIZE >> state->heapinfo2[i2].type);
+ for (j2 = 0; j2 < (size_t) (BLOCKSIZE >> heapinfo2b->type);
j2++) {
if (i2 == i1 && j2 == j1)
(char *) ((xbt_mheap_t) state->s_heap)->heapbase));
addr_frag2 =
(void *) ((char *) addr_block2 +
- (j2 << state->heapinfo2[i2].type));
+ (j2 << heapinfo2b->type));
res_compare =
compare_heap_area(addr_frag1, addr_frag2, snapshot2, snapshot2,
if (!equal) {
XBT_DEBUG
("Block %zu, fragment %zu not found (size_used = %zd, address = %p)\n",
- i1, j1, state->heapinfo1[i1].busy_frag.frag_size[j1],
+ i1, j1, heapinfo1->busy_frag.frag_size[j1],
addr_frag1);
i2 = state->heaplimit + 1;
i1 = state->heaplimit + 1;
/* All blocks/fragments are equal to another block/fragment ? */
size_t i = 1, j = 0;
- void *real_addr_frag1 = NULL, *real_addr_block1 = NULL, *real_addr_block2 =
- NULL, *real_addr_frag2 = NULL;
- while (i <= state->heaplimit) {
- if (state->heapinfo1[i].type == 0) {
+ for(i = 1; i <= state->heaplimit; i++) {
+ malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[i], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
+ if (heapinfo1->type == 0) {
if (i1 == state->heaplimit) {
- if (state->heapinfo1[i].busy_block.busy_size > 0) {
+ if (heapinfo1->busy_block.busy_size > 0) {
if (state->equals_to1_(i, 0).valid == 0) {
if (XBT_LOG_ISENABLED(mc_diff, xbt_log_priority_debug)) {
- addr_block1 =
- ((void *) (((ADDR2UINT(i)) - 1) * BLOCKSIZE +
- (char *) state->heapbase1));
- XBT_DEBUG("Block %zu (%p) not found (size used = %zu)", i,
- addr_block1, state->heapinfo1[i].busy_block.busy_size);
+ // TODO, add address
+ XBT_DEBUG("Block %zu not found (size used = %zu)", i,
+ heapinfo1->busy_block.busy_size);
//mmalloc_backtrace_block_display((void*)heapinfo1, i);
}
nb_diff1++;
}
}
}
- if (state->heapinfo1[i].type > 0) {
- addr_block1 =
- ((void *) (((ADDR2UINT(i)) - 1) * BLOCKSIZE +
- (char *) state->heapbase1));
- real_addr_block1 =
- ((void *) (((ADDR2UINT(i)) - 1) * BLOCKSIZE +
- (char *) ((struct mdesc *) state->s_heap)->heapbase));
- for (j = 0; j < (size_t) (BLOCKSIZE >> state->heapinfo1[i].type); j++) {
+ if (heapinfo1->type > 0) {
+ for (j = 0; j < (size_t) (BLOCKSIZE >> heapinfo1->type); j++) {
if (i1 == state->heaplimit) {
- if (state->heapinfo1[i].busy_frag.frag_size[j] > 0) {
+ if (heapinfo1->busy_frag.frag_size[j] > 0) {
if (state->equals_to1_(i, j).valid == 0) {
if (XBT_LOG_ISENABLED(mc_diff, xbt_log_priority_debug)) {
- addr_frag1 =
- (void *) ((char *) addr_block1 +
- (j << state->heapinfo1[i].type));
- real_addr_frag1 =
- (void *) ((char *) real_addr_block1 +
- (j << state->heapinfo1[i].type));
+ // TODO, print fragment address
XBT_DEBUG
- ("Block %zu, Fragment %zu (%p - %p) not found (size used = %zd)",
- i, j, addr_frag1, real_addr_frag1,
- state->heapinfo1[i].busy_frag.frag_size[j]);
+ ("Block %zu, Fragment %zu not found (size used = %zd)",
+ i, j,
+ heapinfo1->busy_frag.frag_size[j]);
//mmalloc_backtrace_fragment_display((void*)heapinfo1, i, j);
}
nb_diff1++;
}
}
}
- i++;
}
if (i1 == state->heaplimit)
XBT_DEBUG("Number of blocks/fragments not found in heap1 : %d", nb_diff1);
- i = 1;
-
- while (i <= state->heaplimit) {
- if (state->heapinfo2[i].type == 0) {
+ for (i=1; i <= state->heaplimit; i++) {
+ malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[i], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
+ if (heapinfo2->type == 0) {
if (i1 == state->heaplimit) {
- if (state->heapinfo2[i].busy_block.busy_size > 0) {
+ if (heapinfo2->busy_block.busy_size > 0) {
if (state->equals_to2_(i, 0).valid == 0) {
if (XBT_LOG_ISENABLED(mc_diff, xbt_log_priority_debug)) {
- addr_block2 =
- ((void *) (((ADDR2UINT(i)) - 1) * BLOCKSIZE +
- (char *) state->heapbase2));
- XBT_DEBUG("Block %zu (%p) not found (size used = %zu)", i,
- addr_block2, state->heapinfo2[i].busy_block.busy_size);
+ // TODO, print address of the block
+ XBT_DEBUG("Block %zu not found (size used = %zu)", i,
+ heapinfo2->busy_block.busy_size);
//mmalloc_backtrace_block_display((void*)heapinfo2, i);
}
nb_diff2++;
}
}
}
- if (state->heapinfo2[i].type > 0) {
- addr_block2 =
- ((void *) (((ADDR2UINT(i)) - 1) * BLOCKSIZE +
- (char *) state->heapbase2));
- real_addr_block2 =
- ((void *) (((ADDR2UINT(i)) - 1) * BLOCKSIZE +
- (char *) ((struct mdesc *) state->s_heap)->heapbase));
- for (j = 0; j < (size_t) (BLOCKSIZE >> state->heapinfo2[i].type); j++) {
+ if (heapinfo2->type > 0) {
+ for (j = 0; j < (size_t) (BLOCKSIZE >> heapinfo2->type); j++) {
if (i1 == state->heaplimit) {
- if (state->heapinfo2[i].busy_frag.frag_size[j] > 0) {
+ if (heapinfo2->busy_frag.frag_size[j] > 0) {
if (state->equals_to2_(i, j).valid == 0) {
if (XBT_LOG_ISENABLED(mc_diff, xbt_log_priority_debug)) {
- addr_frag2 =
- (void *) ((char *) addr_block2 +
- (j << state->heapinfo2[i].type));
- real_addr_frag2 =
- (void *) ((char *) real_addr_block2 +
- (j << state->heapinfo2[i].type));
+ // TODO, print address of the block
XBT_DEBUG
- ("Block %zu, Fragment %zu (%p - %p) not found (size used = %zd)",
- i, j, addr_frag2, real_addr_frag2,
- state->heapinfo2[i].busy_frag.frag_size[j]);
+ ("Block %zu, Fragment %zu not found (size used = %zd)",
+ i, j,
+ heapinfo2->busy_frag.frag_size[j]);
//mmalloc_backtrace_fragment_display((void*)heapinfo2, i, j);
}
nb_diff2++;
}
}
}
- i++;
}
if (i1 == state->heaplimit)
XBT_DEBUG("Number of blocks/fragments not found in heap2 : %d", nb_diff2);
xbt_dynar_free(&previous);
- real_addr_frag1 = NULL, real_addr_block1 = NULL, real_addr_block2 =
- NULL, real_addr_frag2 = NULL;
-
return ((nb_diff1 > 0) || (nb_diff2 > 0));
}
* @param state
* @param real_area1 Process address for state 1
* @param real_area2 Process address for state 2
- * @param area1 Snapshot address for state 1
- * @param area2 Snapshot address for state 2
* @param snapshot1 Snapshot of state 1
* @param snapshot2 Snapshot of state 2
* @param previous
*/
static int compare_heap_area_without_type(struct s_mc_diff *state,
void *real_area1, void *real_area2,
- void *area1, void *area2,
mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2,
xbt_dynar_t previous, int size,
int pointer_align, res_compare;
ssize_t ignore1, ignore2;
+ mc_mem_region_t heap_region1 = snapshot1->regions[0];
+ mc_mem_region_t heap_region2 = snapshot2->regions[0];
+
while (i < size) {
if (check_ignore > 0) {
}
}
- if (memcmp(((char *) area1) + i, ((char *) area2) + i, 1) != 0) {
+ if (mc_snapshot_region_memcp(((char *) real_area1) + i, heap_region1, ((char *) real_area2) + i, heap_region2, 1) != 0) {
pointer_align = (i / sizeof(void *)) * sizeof(void *);
- addr_pointed1 = *((void **) ((char *) area1 + pointer_align));
- addr_pointed2 = *((void **) ((char *) area2 + pointer_align));
+ addr_pointed1 = mc_snapshot_read_pointer((char *) real_area1 + pointer_align, snapshot1);
+ addr_pointed2 = mc_snapshot_read_pointer((char *) real_area2 + pointer_align, snapshot2);
if (addr_pointed1 > maestro_stack_start
&& addr_pointed1 < maestro_stack_end
* @param state
* @param real_area1 Process address for state 1
* @param real_area2 Process address for state 2
- * @param area1 Snapshot address for state 1
- * @param area2 Snapshot address for state 2
* @param snapshot1 Snapshot of state 1
* @param snapshot2 Snapshot of state 2
* @param previous
*/
static int compare_heap_area_with_type(struct s_mc_diff *state,
void *real_area1, void *real_area2,
- void *area1, void *area2,
mc_snapshot_t snapshot1,
mc_snapshot_t snapshot2,
xbt_dynar_t previous, dw_type_t type,
int area_size, int check_ignore,
int pointer_level)
{
-
+top:
if (is_stack(real_area1) && is_stack(real_area2))
return 0;
dw_type_t member;
void *addr_pointed1, *addr_pointed2;;
+ mc_mem_region_t heap_region1 = snapshot1->regions[0];
+ mc_mem_region_t heap_region2 = snapshot2->regions[0];
+
switch (type->type) {
case DW_TAG_unspecified_type:
return 1;
if (real_area1 == real_area2)
return -1;
else
- return (memcmp(area1, area2, area_size) != 0);
+ return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, area_size) != 0);
} else {
if (area_size != -1 && type->byte_size != area_size)
return -1;
else {
- return (memcmp(area1, area2, type->byte_size) != 0);
+ return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
}
}
break;
if (area_size != -1 && type->byte_size != area_size)
return -1;
else
- return (memcmp(area1, area2, type->byte_size) != 0);
+ return (mc_snapshot_region_memcp(real_area1, heap_region1, real_area2, heap_region2, type->byte_size) != 0);
break;
case DW_TAG_typedef:
case DW_TAG_const_type:
case DW_TAG_volatile_type:
- return compare_heap_area_with_type(state, real_area1, real_area2, area1,
- area2, snapshot1, snapshot2, previous,
- type->subtype, area_size, check_ignore,
- pointer_level);
+ // Poor man's TCO:
+ type = type->subtype;
+ goto top;
break;
case DW_TAG_array_type:
subtype = type->subtype;
compare_heap_area_with_type(state,
(char *) real_area1 + (i * elm_size),
(char *) real_area2 + (i * elm_size),
- (char *) area1 + (i * elm_size),
- (char *) area2 + (i * elm_size),
snapshot1, snapshot2, previous,
type->subtype, subtype->byte_size,
check_ignore, pointer_level);
case DW_TAG_rvalue_reference_type:
case DW_TAG_pointer_type:
if (type->subtype && type->subtype->type == DW_TAG_subroutine_type) {
- addr_pointed1 = *((void **) (area1));
- addr_pointed2 = *((void **) (area2));
+ addr_pointed1 = mc_snapshot_read_pointer(real_area1, snapshot1);
+ addr_pointed2 = mc_snapshot_read_pointer(real_area2, snapshot2);
return (addr_pointed1 != addr_pointed2);;
} else {
pointer_level++;
if (pointer_level > 1) { /* Array of pointers */
for (i = 0; i < (area_size / sizeof(void *)); i++) {
- addr_pointed1 = *((void **) ((char *) area1 + (i * sizeof(void *))));
- addr_pointed2 = *((void **) ((char *) area2 + (i * sizeof(void *))));
+ addr_pointed1 = mc_snapshot_read_pointer((char*) real_area1 + i * sizeof(void *), snapshot1);
+ addr_pointed2 = mc_snapshot_read_pointer((char*) real_area2 + i * sizeof(void *), snapshot2);
if (addr_pointed1 > state->s_heap
&& addr_pointed1 < mc_snapshot_get_heap_end(snapshot1)
&& addr_pointed2 > state->s_heap
return res;
}
} else {
- addr_pointed1 = *((void **) (area1));
- addr_pointed2 = *((void **) (area2));
+ addr_pointed1 = mc_snapshot_read_pointer(real_area1, snapshot1);
+ addr_pointed2 = mc_snapshot_read_pointer(real_area2, snapshot2);
if (addr_pointed1 > state->s_heap
&& addr_pointed1 < mc_snapshot_get_heap_end(snapshot1)
&& addr_pointed2 > state->s_heap
for (i = 0; i < (area_size / type->byte_size); i++) {
res =
compare_heap_area_with_type(state,
- (char *) real_area1 +
- (i * type->byte_size),
- (char *) real_area2 +
- (i * type->byte_size),
- (char *) area1 +
- (i * type->byte_size),
- (char *) area2 +
- (i * type->byte_size), snapshot1,
- snapshot2, previous, type, -1,
+ (char *) real_area1 + i * type->byte_size,
+ (char *) real_area2 + i * type->byte_size,
+ snapshot1, snapshot2, previous, type, -1,
check_ignore, 0);
if (res == 1)
return res;
mc_member_resolve(real_area1, type, member, snapshot1);
char *real_member2 =
mc_member_resolve(real_area2, type, member, snapshot2);
- char *member1 =
- mc_translate_address((uintptr_t) real_member1, snapshot1);
- char *member2 =
- mc_translate_address((uintptr_t) real_member2, snapshot2);
res =
compare_heap_area_with_type(state, real_member1, real_member2,
- member1, member2, snapshot1, snapshot2,
+ snapshot1, snapshot2,
previous, member->subtype, -1,
check_ignore, 0);
if (res == 1) {
}
break;
case DW_TAG_union_type:
- return compare_heap_area_without_type(state, real_area1, real_area2, area1,
- area2, snapshot1, snapshot2, previous,
+ return compare_heap_area_without_type(state, real_area1, real_area2,
+ snapshot1, snapshot2, previous,
type->byte_size, check_ignore);
break;
default:
ssize_t size;
int check_ignore = 0;
- void *addr_block1, *addr_block2, *addr_frag1, *addr_frag2, *real_addr_block1,
- *real_addr_block2, *real_addr_frag1, *real_addr_frag2;
-
+ void *real_addr_block1, *real_addr_block2, *real_addr_frag1, *real_addr_frag2;
int type_size = -1;
int offset1 = 0, offset2 = 0;
int new_size1 = -1, new_size2 = -1;
int match_pairs = 0;
+ malloc_info* heapinfos1 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot1);
+ malloc_info* heapinfos2 = mc_snapshot_read_pointer(&((xbt_mheap_t)std_heap)->heapinfo, snapshot2);
+
+ malloc_info heapinfo_temp1, heapinfo_temp2;
+
void* real_area1_to_compare = area1;
void* real_area2_to_compare = area2;
- void *area1_to_compare = (char *) state->heapbase1 + ((char *)area1 - (char*)((xbt_mheap_t) state->s_heap)->heapbase);
- void *area2_to_compare = (char *) state->heapbase2 + ((char *)area2 - (char*)((xbt_mheap_t) state->s_heap)->heapbase);
if (previous == NULL) {
previous =
}
return 1;
}
- // Snapshot address of the block:
- addr_block1 =
- ((void *) (((ADDR2UINT(block1)) - 1) * BLOCKSIZE +
- (char *) state->heapbase1));
- addr_block2 =
- ((void *) (((ADDR2UINT(block2)) - 1) * BLOCKSIZE +
- (char *) state->heapbase2));
// Process address of the block:
real_addr_block1 =
}
- if ((state->heapinfo1[block1].type == -1) && (state->heapinfo2[block2].type == -1)) { /* Free block */
+ mc_mem_region_t heap_region1 = snapshot1->regions[0];
+ mc_mem_region_t heap_region2 = snapshot2->regions[0];
+
+ malloc_info* heapinfo1 = mc_snapshot_read_region(&heapinfos1[block1], heap_region1, &heapinfo_temp1, sizeof(malloc_info));
+ malloc_info* heapinfo2 = mc_snapshot_read_region(&heapinfos2[block2], heap_region2, &heapinfo_temp2, sizeof(malloc_info));
+
+ if ((heapinfo1->type == -1) && (heapinfo2->type == -1)) { /* Free block */
if (match_pairs) {
match_equals(state, previous);
}
return 0;
- } else if ((state->heapinfo1[block1].type == 0) && (state->heapinfo2[block2].type == 0)) { /* Complete block */
+ } else if ((heapinfo1->type == 0) && (heapinfo2->type == 0)) { /* Complete block */
// TODO, lookup variable type from block type as done for fragmented blocks
}
if (type_size != -1) {
- if (type_size != state->heapinfo1[block1].busy_block.busy_size
- && type_size != state->heapinfo2[block2].busy_block.busy_size
+ if (type_size != heapinfo1->busy_block.busy_size
+ && type_size != heapinfo2->busy_block.busy_size
&& type->name != NULL && !strcmp(type->name, "struct s_smx_context")) {
if (match_pairs) {
match_equals(state, previous);
}
}
- if (state->heapinfo1[block1].busy_block.size !=
- state->heapinfo2[block2].busy_block.size) {
+ if (heapinfo1->busy_block.size !=
+ heapinfo2->busy_block.size) {
if (match_pairs) {
xbt_dynar_free(&previous);
}
return 1;
}
- if (state->heapinfo1[block1].busy_block.busy_size !=
- state->heapinfo2[block2].busy_block.busy_size) {
+ if (heapinfo1->busy_block.busy_size !=
+ heapinfo2->busy_block.busy_size) {
if (match_pairs) {
xbt_dynar_free(&previous);
}
return 0;
}
- size = state->heapinfo1[block1].busy_block.busy_size;
+ size = heapinfo1->busy_block.busy_size;
// Remember (basic) type inference.
// The current data structure only allows us to do this for the whole block.
frag1 = -1;
frag2 = -1;
- if ((state->heapinfo1[block1].busy_block.ignore > 0)
- && (state->heapinfo2[block2].busy_block.ignore ==
- state->heapinfo1[block1].busy_block.ignore))
- check_ignore = state->heapinfo1[block1].busy_block.ignore;
+ if ((heapinfo1->busy_block.ignore > 0)
+ && (heapinfo2->busy_block.ignore ==
+ heapinfo1->busy_block.ignore))
+ check_ignore = heapinfo1->busy_block.ignore;
- } else if ((state->heapinfo1[block1].type > 0) && (state->heapinfo2[block2].type > 0)) { /* Fragmented block */
+ } else if ((heapinfo1->type > 0) && (heapinfo2->type > 0)) { /* Fragmented block */
// Fragment number:
frag1 =
- ((uintptr_t) (ADDR2UINT(area1) % (BLOCKSIZE))) >> state->
- heapinfo1[block1].type;
+ ((uintptr_t) (ADDR2UINT(area1) % (BLOCKSIZE))) >> heapinfo1->type;
frag2 =
- ((uintptr_t) (ADDR2UINT(area2) % (BLOCKSIZE))) >> state->
- heapinfo2[block2].type;
-
- // Snapshot address of the fragment:
- addr_frag1 =
- (void *) ((char *) addr_block1 +
- (frag1 << state->heapinfo1[block1].type));
- addr_frag2 =
- (void *) ((char *) addr_block2 +
- (frag2 << state->heapinfo2[block2].type));
+ ((uintptr_t) (ADDR2UINT(area2) % (BLOCKSIZE))) >> heapinfo2->type;
// Process address of the fragment:
real_addr_frag1 =
(void *) ((char *) real_addr_block1 +
- (frag1 << state->heapinfo1[block1].type));
+ (frag1 << heapinfo1->type));
real_addr_frag2 =
(void *) ((char *) real_addr_block2 +
- (frag2 << state->heapinfo2[block2].type));
+ (frag2 << heapinfo2->type));
// Check the size of the fragments against the size of the type:
if (type_size != -1) {
- if (state->heapinfo1[block1].busy_frag.frag_size[frag1] == -1
- || state->heapinfo2[block2].busy_frag.frag_size[frag2] == -1) {
+ if (heapinfo1->busy_frag.frag_size[frag1] == -1
+ || heapinfo2->busy_frag.frag_size[frag2] == -1) {
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
}
return -1;
}
-
// ?
- if (type_size != state->heapinfo1[block1].busy_frag.frag_size[frag1]
- || type_size != state->heapinfo2[block2].busy_frag.frag_size[frag2]) {
+ if (type_size != heapinfo1->busy_frag.frag_size[frag1]
+ || type_size != heapinfo2->busy_frag.frag_size[frag2]) {
if (match_pairs) {
match_equals(state, previous);
xbt_dynar_free(&previous);
}
}
// Compare the size of both fragments:
- if (state->heapinfo1[block1].busy_frag.frag_size[frag1] !=
- state->heapinfo2[block2].busy_frag.frag_size[frag2]) {
+ if (heapinfo1->busy_frag.frag_size[frag1] !=
+ heapinfo2->busy_frag.frag_size[frag2]) {
if (type_size == -1) {
if (match_pairs) {
match_equals(state, previous);
}
// Size of the fragment:
- size = state->heapinfo1[block1].busy_frag.frag_size[frag1];
+ size = heapinfo1->busy_frag.frag_size[frag1];
// Remember (basic) type inference.
// The current data structure only allows us to do this for the whole fragment.
return 0;
}
- if ((state->heapinfo1[block1].busy_frag.ignore[frag1] > 0)
- && (state->heapinfo2[block2].busy_frag.ignore[frag2] ==
- state->heapinfo1[block1].busy_frag.ignore[frag1]))
- check_ignore = state->heapinfo1[block1].busy_frag.ignore[frag1];
+ if ((heapinfo1->busy_frag.ignore[frag1] > 0)
+ && (heapinfo2->busy_frag.ignore[frag2] ==
+ heapinfo1->busy_frag.ignore[frag1]))
+ check_ignore = heapinfo1->busy_frag.ignore[frag1];
} else {
/* Start comparison */
if (type) {
res_compare =
- compare_heap_area_with_type(state, real_area1_to_compare, real_area2_to_compare,
- area1_to_compare, area2_to_compare,
- snapshot1, snapshot2,
+ compare_heap_area_with_type(state, real_area1_to_compare, real_area2_to_compare, snapshot1, snapshot2,
previous, type, size, check_ignore,
pointer_level);
} else {
res_compare =
- compare_heap_area_without_type(state, real_area1_to_compare, real_area2_to_compare,
- area1_to_compare, area2_to_compare,
- snapshot1, snapshot2,
+ compare_heap_area_without_type(state, real_area1_to_compare, real_area2_to_compare, snapshot1, snapshot2,
previous, size, check_ignore);
}
if (res_compare == 1) {
/*********************************************** Miscellaneous ***************************************************/
/****************************************************************************************************************/
+// Not used and broken code:
+# if 0
+
// Not used:
static int get_pointed_area_size(void *area, int heap)
{
((uintptr_t) (ADDR2UINT(area) % (BLOCKSIZE))) >> heapinfo[block].type;
return (int) heapinfo[block].busy_frag.frag_size[frag];
}
-
}
// Not used:
return distance;
}
+#endif
{
// Computed address:
uintptr_t address = (uintptr_t) state->stack[state->stack_size - 1];
- uintptr_t *p =
- (uintptr_t *) mc_translate_address(address, state->snapshot);
- state->stack[state->stack_size - 1] = *p;
+ uintptr_t temp;
+ uintptr_t* res = (uintptr_t*) mc_snapshot_read((void*) address, state->snapshot, &temp, sizeof(uintptr_t));
+ state->stack[state->stack_size - 1] = *res;
}
break;
int _sg_do_model_check = 0;
int _sg_mc_checkpoint = 0;
+int _sg_mc_sparse_checkpoint = 0;
+int _sg_mc_soft_dirty = 1;
char *_sg_mc_property_file = NULL;
int _sg_mc_timeout = 0;
int _sg_mc_hash = 0;
_sg_mc_checkpoint = xbt_cfg_get_int(_sg_cfg_set, name);
}
+/** Config callback for "model-check/sparse-checkpoint": enable per-page (sparse) snapshots.
+ * Refuses the change if model-checking was not enabled at config time. */
+void _mc_cfg_cb_sparse_checkpoint(const char *name, int pos) {
+ if (_sg_cfg_init_status && !_sg_do_model_check) {
+ xbt_die("You are specifying a checkpointing value after the initialization (through MSG_config?), but model-checking was not activated at config time (through --cfg=model-check:1). This won't work, sorry.");
+ }
+ _sg_mc_sparse_checkpoint = xbt_cfg_get_boolean(_sg_cfg_set, name);
+}
+
+/** Config callback for the soft-dirty tracking flag: toggles use of the kernel
+ * soft-dirty bits for incremental snapshots. Refuses the change if
+ * model-checking was not enabled at config time. */
+void _mc_cfg_cb_soft_dirty(const char *name, int pos) {
+ if (_sg_cfg_init_status && !_sg_do_model_check) {
+ xbt_die("You are specifying a soft dirty value after the initialization (through MSG_config?), but model-checking was not activated at config time (through --cfg=model-check:1). This won't work, sorry.");
+ }
+ _sg_mc_soft_dirty = xbt_cfg_get_boolean(_sg_cfg_set, name);
+}
+
void _mc_cfg_cb_property(const char *name, int pos)
{
if (_sg_cfg_init_status && !_sg_do_model_check) {
}
+mc_model_checker_t mc_model_checker = NULL;
+
void MC_init()
{
-
int raw_mem_set = (mmalloc_get_current_heap() == mc_heap);
mc_time = xbt_new0(double, simix_process_maxpid);
MC_SET_MC_HEAP;
+ mc_model_checker = xbt_new0(s_mc_model_checker_t, 1);
+ mc_model_checker->pages = mc_pages_store_new();
+ mc_model_checker->fd_clear_refs = -1;
+ mc_model_checker->fd_pagemap = -1;
+
mc_comp_times = xbt_new0(s_mc_comparison_times_t, 1);
/* Initialize statistics */
MC_ignore_local_variable("ctx", "*");
MC_ignore_local_variable("self", "simcall_BODY_mc_snapshot");
- MC_ignore_local_variable("next_context", "smx_ctx_sysv_suspend_serial");
+ MC_ignore_local_variable("next_cont"
+ "ext", "smx_ctx_sysv_suspend_serial");
MC_ignore_local_variable("i", "smx_ctx_sysv_suspend_serial");
/* Ignore local variable about time used for tracing */
MC_ignore_local_variable("start_time", "*");
+ MC_ignore_global_variable("mc_model_checker");
+
+ // Most of those things could be moved into mc_model_checker:
MC_ignore_global_variable("compared_pointers");
MC_ignore_global_variable("mc_comp_times");
MC_ignore_global_variable("mc_snapshot_comparison_time");
MC_ignore_global_variable("maestro_stack_end");
MC_ignore_global_variable("smx_total_comms");
+ if (MC_is_active()) {
+ MC_ignore_global_variable("mc_diff_info");
+ }
+
MC_ignore_heap(mc_time, simix_process_maxpid * sizeof(double));
smx_process_t process;
return (void *) state.stack[state.stack_size - 1];
}
-/** Resolve snapshot in the snapshot address space
- *
- * @param object Snapshot address of the struct/class
- * @param type Type of the struct/class
- * @param member Member description
- * @param snapshot Snapshot (or NULL)
- * @return Snapshot address of the given member of the 'object' struct/class
- */
-void *mc_member_snapshot_resolve(const void *object, dw_type_t type,
- dw_type_t member, mc_snapshot_t snapshot)
-{
- if (!member->location.size) {
- return (char *) object + member->offset;
- } else {
- // Translate the problem in the process address space:
- void *real_area =
- (void *) mc_untranslate_address((void *) object, snapshot);
- // Resolve the member in the process address space:
- void *real_member = mc_member_resolve(real_area, type, member, snapshot);
- // Translate back in the snapshot address space:
- return mc_translate_address((uintptr_t) real_member, snapshot);
- }
-}
--- /dev/null
+/* Copyright (c) 2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef MC_MMU_H
+#define MC_MMU_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+extern int xbt_pagesize;
+extern int xbt_pagebits;
+
+/** @brief How many memory pages are necessary to store size bytes?
+ *
+ * @param size Byte size
+ * @return Number of memory pages
+ */
+static inline __attribute__ ((always_inline))
+size_t mc_page_count(size_t size)
+{
+ size_t page_count = size >> xbt_pagebits;
+ // Round up: a trailing partial page still needs a whole page.
+ if (size & (xbt_pagesize-1)) {
+ page_count ++;
+ }
+ return page_count;
+}
+
+/** @brief Get the virtual memory page number of a given address
+ *
+ * @param base Base address the page index is computed from (may be NULL for absolute page numbers)
+ * @param address Address (must be >= base)
+ * @return Virtual memory page number of the given address, relative to base
+ */
+static inline __attribute__ ((always_inline))
+size_t mc_page_number(void* base, void* address)
+{
+ // NOTE(review): xbt_assert is used but this header only includes
+ // <stdint.h>/<stdbool.h> — confirm every includer pulls in xbt asserts first.
+ xbt_assert(address>=base, "The address is not in the range");
+ return ((uintptr_t) address - (uintptr_t) base) >> xbt_pagebits;
+}
+
+/** @brief Get the offset of an address within a memory page
+ *
+ * Relies on xbt_pagesize being a power of two (mask trick).
+ *
+ * @param address Address
+ * @return Offset within the memory page
+ */
+static inline __attribute__ ((always_inline))
+size_t mc_page_offset(void* address)
+{
+ return ((uintptr_t) address) & (xbt_pagesize-1);
+}
+
+/** @brief Get the virtual address of a virtual memory page
+ *
+ * @param base Address of the first page
+ * @param page Index of the page
+ * @return Address of the first byte of the given page
+ */
+static inline __attribute__ ((always_inline))
+void* mc_page_from_number(void* base, size_t page)
+{
+ return (void*) ((char*)base + (page << xbt_pagebits));
+}
+
+/** @brief Check whether two addresses lie on the same virtual memory page */
+static inline __attribute__ ((always_inline))
+bool mc_same_page(void* a, void* b)
+{
+ return ((uintptr_t) a >> xbt_pagebits) == ((uintptr_t) b >> xbt_pagebits);
+}
+
+#endif
--- /dev/null
+#include "mc_page_store.h"
+#include "mc_mmu.h"
+#include "mc_private.h"
+
+#include <xbt/mmalloc.h>
+
+#define SOFT_DIRTY_BIT_NUMBER 55
+#define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER)
+
+extern "C" {
+
+// ***** Region management:
+
+/** @brief Take a per-page snapshot of a region
+ *
+ * @param data The start of the region (must be at the beginning of a page)
+ * @param page_count Number of pages of the region
+ * @param pagemap Linux kernel pagemap values for this region (or NULL)
+ * @param reference_pages Snapshot page numbers of the previous soft_dirty_reset (or NULL)
+ * @return Snapshot page numbers of this new snapshot
+ */
+size_t* mc_take_page_snapshot_region(void* data, size_t page_count, uint64_t* pagemap, size_t* reference_pages)
+{
+ // NOTE(review): malloc result is not checked — confirm OOM is acceptable to crash on here.
+ size_t* pagenos = (size_t*) malloc(page_count * sizeof(size_t));
+
+ for (size_t i=0; i!=page_count; ++i) {
+ bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
+ if (softclean && reference_pages) {
+ // The page is softclean, it is the same page as the reference page:
+ // share it (bump its refcount) instead of storing a copy.
+ pagenos[i] = reference_pages[i];
+ mc_model_checker->pages->ref_page(reference_pages[i]);
+ } else {
+ // Otherwise, we need to store the page the hard way
+ // (by reading its content):
+ void* page = (char*) data + (i << xbt_pagebits);
+ pagenos[i] = mc_model_checker->pages->store_page(page);
+ }
+ }
+
+ return pagenos;
+}
+
+/** @brief Release the reference taken on each page of a per-page snapshot
+ *
+ * @param pagenos Snapshot page numbers (as returned by mc_take_page_snapshot_region)
+ * @param page_count Number of pages in the snapshot
+ */
+void mc_free_page_snapshot_region(size_t* pagenos, size_t page_count)
+{
+ for (size_t i=0; i!=page_count; ++i) {
+ mc_model_checker->pages->unref_page(pagenos[i]);
+ }
+}
+
+/** @brief Restore a snapshot of a region
+ *
+ * If possible, the restoration will be incremental
+ * (the unmodified pages will not be touched).
+ *
+ * @param region Region to restore (start address must be at the beginning of a page)
+ * @param page_count Number of pages of the region
+ * @param pagemap Linux kernel pagemap values for this region (or NULL)
+ * @param reference_region Region of the previous soft_dirty_reset (or NULL)
+ */
+void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region)
+{
+ for (size_t i=0; i!=page_count; ++i) {
+
+ bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
+ if (softclean && reference_region && reference_region->page_numbers[i] == region->page_numbers[i]) {
+ // The page is softclean and is the same as the reference one:
+ // the page is already in the target state.
+ continue;
+ }
+
+ // Otherwise, copy the page:
+ void* target_page = mc_page_from_number(region->start_addr, i);
+ const void* source_page = mc_model_checker->pages->get_page(region->page_numbers[i]);
+ memcpy(target_page, source_page, xbt_pagesize);
+ }
+}
+
+// ***** Soft dirty tracking
+
+/** @brief Like pread() but without partial reads
+ *
+ * Retries on EINTR and on partial reads until `count` bytes are read.
+ *
+ * @return Number of bytes read; a short count (possibly 0) means EOF was
+ * reached, and (size_t)-1 signals a read error.
+ */
+static size_t pread_whole(int fd, void* buf, size_t count, off_t offset) {
+ size_t res = 0;
+
+ char* data = (char*) buf;
+ while(count) {
+ // Read at the advancing cursor `data`, not at `buf`:
+ // reading into `buf` would clobber already-read bytes after a partial read.
+ ssize_t n = pread(fd, data, count, offset);
+ // EOF
+ if (n==0)
+ return res;
+
+ // Error (or EINTR):
+ if (n==-1) {
+ if (errno == EINTR)
+ continue;
+ else
+ return -1;
+ }
+
+ // It might be a partial read:
+ count -= n;
+ data += n;
+ offset += n;
+ res += n;
+ }
+
+ return res;
+}
+
+/** @brief Lazily open `path` into `*fd` with the given flags.
+ *
+ * No-op when `*fd` is already open (!= -1); dies if open() fails.
+ */
+static inline __attribute__ ((always_inline))
+void mc_ensure_fd(int* fd, const char* path, int flags) {
+ if (*fd != -1)
+ return;
+ *fd = open(path, flags);
+ if (*fd == -1) {
+ xbt_die("Could not open file %s", path);
+ }
+}
+
+/** @brief Reset the soft-dirty bits
+ *
+ * This is done after checkpointing and after checkpoint restoration
+ * (if per-page checkpointing is used) in order to know which pages were
+ * modified.
+ *
+ * See https://www.kernel.org/doc/Documentation/vm/soft-dirty.txt
+ * */
+void mc_softdirty_reset() {
+ mc_ensure_fd(&mc_model_checker->fd_clear_refs, "/proc/self/clear_refs", O_WRONLY|O_CLOEXEC);
+ // Writing "4" to clear_refs clears the soft-dirty bits of the whole process.
+ if( ::write(mc_model_checker->fd_clear_refs, "4\n", 2) != 2) {
+ xbt_die("Could not reset softdirty bits");
+ }
+}
+
+/** @brief Read memory page information
+ *
+ * For each virtual memory page of the process,
+ * /proc/self/pagemap provides a 64 bit field of information.
+ * We are interested in the soft-dirty bit: with this we can track which
+ * pages were modified between snapshots/restorations and avoid
+ * copying data which was not modified.
+ *
+ * See https://www.kernel.org/doc/Documentation/vm/pagemap.txt
+ *
+ * @param pagemap Output buffer for pagemap information (one uint64_t per page)
+ * @param page_start Index of the first page
+ * @param page_count Number of pages
+ */
+static void mc_read_pagemap(uint64_t* pagemap, size_t page_start, size_t page_count)
+{
+ mc_ensure_fd(&mc_model_checker->fd_pagemap, "/proc/self/pagemap", O_RDONLY|O_CLOEXEC);
+ size_t bytesize = sizeof(uint64_t) * page_count;
+ off_t offset = sizeof(uint64_t) * page_start;
+ if (pread_whole(mc_model_checker->fd_pagemap, pagemap, bytesize, offset) != bytesize) {
+ xbt_die("Could not read pagemap");
+ }
+}
+
+// ***** High level API
+
+/** @brief Create a new sparse (per-page) snapshot region
+ *
+ * When soft-dirty tracking is enabled and a parent snapshot exists, the
+ * snapshot is incremental: unmodified pages are shared with `ref_reg`.
+ *
+ * @param type Region type — NOTE(review): currently unused in this function; confirm it is intentional
+ * @param start_addr Start of the region (must be page-aligned)
+ * @param size Byte size of the region
+ * @param ref_reg Reference region of the previous snapshot (or NULL)
+ * @return Newly allocated sparse region
+ */
+mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
+{
+ mc_mem_region_t new_reg = xbt_new(s_mc_mem_region_t, 1);
+
+ new_reg->start_addr = start_addr;
+ // No flat copy of the data: pages are stored in the page store instead.
+ new_reg->data = NULL;
+ new_reg->size = size;
+ new_reg->page_numbers = NULL;
+
+ xbt_assert((((uintptr_t)start_addr) & (xbt_pagesize-1)) == 0,
+ "Not at the beginning of a page");
+ size_t page_count = mc_page_count(size);
+
+ uint64_t* pagemap = NULL;
+ if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
+ mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count);
+ }
+
+ // Take incremental snapshot:
+ new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap,
+ ref_reg==NULL ? NULL : ref_reg->page_numbers);
+
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
+ return new_reg;
+}
+
+/** @brief Restore a sparse memory region into the process address space
+ *
+ * @param reg     Sparse region to restore
+ * @param ref_reg Reference (parent snapshot) region, or NULL
+ */
+void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg)
+{
+ xbt_assert((((uintptr_t)reg->start_addr) & (xbt_pagesize-1)) == 0,
+ "Not at the beginning of a page");
+ size_t page_count = mc_page_count(reg->size);
+
+ uint64_t* pagemap = NULL;
+
+ // Read soft-dirty bits if necessary in order to know which pages have changed:
+ if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
+ mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count);
+ }
+
+ // Incremental per-page snapshot restoration:
+ mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+
+ // This is funny, the restoration can restore the state of the current heap,
+ // if this happen free(pagemap) would free from the wrong heap:
+ // (this is why pagemap lives on mc_heap and is released with mfree on mc_heap)
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
+}
+
+}
--- /dev/null
+/* Copyright (c) 2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <unistd.h>
+#include <string.h> // memcpy, memcmp
+
+#include <sys/mman.h>
+
+#include <boost/foreach.hpp>
+
+#include <xbt.h>
+
+#include "mc_page_store.h"
+
+#include "mc_mmu.h"
+
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_page_snapshot, mc,
+ "Logging specific to mc_page_snapshot");
+
+// ***** Utility:
+
+/** @brief Compute a hash for the given memory page
+ *
+ * The hash is used before inserting the page in the page store
+ * in order to find duplicates of this page in the page store.
+ *
+ * @param data Memory page (xbt_pagesize bytes)
+ * @return hash of the page
+ */
+static inline __attribute__ ((always_inline))
+uint64_t mc_hash_page(const void* data)
+{
+ const uint64_t* values = (const uint64_t*) data;
+ size_t n = xbt_pagesize / sizeof(uint64_t);
+
+ // This is djb2 (fast, non-cryptographic) hashing:
+ uint64_t hash = 5381;
+ for (size_t i=0; i!=n; ++i) {
+ hash = ((hash << 5) + hash) + values[i];
+ }
+ return hash;
+}
+
+// ***** snapshot_page_manager
+
+s_mc_pages_store::s_mc_pages_store(size_t size) :
+ memory_(NULL), capacity_(0), top_index_(0)
+{
+ // Using mmap in order to be able to expand the region
+ // by relocating it somewhere else in the virtual memory
+ // space (MAP_POPULATE prefaults the pages):
+ void * memory = ::mmap(NULL, size << xbt_pagebits, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_POPULATE, -1, 0);
+ if (memory==MAP_FAILED) {
+ xbt_die("Could not mmap initial snapshot pages.");
+ }
+
+ this->top_index_ = 0;
+ this->capacity_ = size;
+ this->memory_ = memory;
+ // One reference count per page, value-initialized to 0 by the vector:
+ this->page_counts_.resize(size);
+}
+
+s_mc_pages_store::~s_mc_pages_store()
+{
+ // Unmap the backing region; the index structures clean up themselves.
+ ::munmap(this->memory_, this->capacity_ << xbt_pagebits);
+}
+
+/** @brief Change the capacity of the page store to `size` pages */
+void s_mc_pages_store::resize(size_t size)
+{
+ size_t old_bytesize = this->capacity_ << xbt_pagebits;
+ size_t new_bytesize = size << xbt_pagebits;
+
+ // Expand the memory region by moving it into another
+ // virtual memory address if necessary:
+ void* new_memory = mremap(this->memory_, old_bytesize, new_bytesize, MREMAP_MAYMOVE);
+ if (new_memory == MAP_FAILED) {
+ xbt_die("Could not mremap snapshot pages.");
+ }
+
+ this->capacity_ = size;
+ this->memory_ = new_memory;
+ // New reference counters are zero-initialized:
+ this->page_counts_.resize(size, 0);
+}
+
+/** Allocate a free page
+ *
+ * Reuses a recycled page from `free_pages_` when possible, otherwise
+ * takes a fresh page from the top of the region, expanding it if full.
+ *
+ * @return index of the free page
+ */
+size_t s_mc_pages_store::alloc_page()
+{
+ if (this->free_pages_.empty()) {
+
+ // Expand the region:
+ if (this->top_index_ == this->capacity_) {
+ // All the pages are allocated, we need add more pages:
+ this->resize(2 * this->capacity_);
+ }
+
+ // Use a page from the top:
+ return this->top_index_++;
+
+ } else {
+
+ // Use a page from free_pages_ (inside of the region), LIFO order:
+ size_t res = this->free_pages_[this->free_pages_.size() - 1];
+ this->free_pages_.pop_back();
+ return res;
+
+ }
+}
+
+/** @brief Recycle a page (expected to have reached a zero reference count)
+ *
+ * The page is added to the free list and removed from the hash index
+ * so that store_page() cannot match it anymore.
+ */
+void s_mc_pages_store::remove_page(size_t pageno)
+{
+ this->free_pages_.push_back(pageno);
+ const void* page = this->get_page(pageno);
+ uint64_t hash = mc_hash_page(page);
+ this->hash_index_[hash].erase(pageno);
+}
+
+/** Store a page in memory
+ *
+ * If a page with the same content is already stored, it is reused
+ * (deduplication) and its reference count is incremented.
+ *
+ * @param page Start of the (page-aligned, xbt_pagesize bytes) data
+ * @return Index of the page in the store (reference count already taken)
+ */
+size_t s_mc_pages_store::store_page(void* page)
+{
+ xbt_assert(mc_page_offset(page)==0, "Not at the beginning of a page");
+ xbt_assert(top_index_ <= this->capacity_, "top_index is not consistent");
+
+ // First, we check if a page with the same content is already in the page
+ // store:
+ // 1. compute the hash of the page;
+ // 2. find pages with the same hash using `hash_index_`;
+ // 3. find a page with the same content.
+ uint64_t hash = mc_hash_page(page);
+ page_set_type& page_set = this->hash_index_[hash];
+ BOOST_FOREACH (size_t pageno, page_set) {
+ const void* snapshot_page = this->get_page(pageno);
+ if (memcmp(page, snapshot_page, xbt_pagesize) == 0) {
+
+ // If a page with the same content is already in the page store it is
+ // reused and its reference count is incremented.
+ page_counts_[pageno]++;
+ return pageno;
+
+ }
+ }
+
+ // Otherwise, a new page is allocated in the page store and the content
+ // of the page is `memcpy()`-ed to this new page.
+ size_t pageno = alloc_page();
+ xbt_assert(this->page_counts_[pageno]==0, "Allocated page is already used");
+ void* snapshot_page = (void*) this->get_page(pageno);
+ memcpy(snapshot_page, page, xbt_pagesize);
+ page_set.insert(pageno);
+ page_counts_[pageno]++;
+ return pageno;
+}
+
+// ***** Main C API
+
+extern "C" {
+
+/** @brief Create a new page store (C wrapper; initial capacity 500 pages) */
+mc_pages_store_t mc_pages_store_new()
+{
+ return new s_mc_pages_store_t(500);
+}
+
+}
--- /dev/null
+/* Copyright (c) 2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+#include <vector>
+
+#include <boost/utility.hpp>
+#include <boost/unordered_map.hpp>
+#include <boost/unordered_set.hpp>
+#endif
+
+#include <xbt.h>
+
+#include "mc_mmu.h"
+
+#ifndef MC_PAGE_STORE_H
+#define MC_PAGE_STORE_H
+
+struct s_mc_pages_store;
+
+#ifdef __cplusplus
+
+/** @brief Storage for snapshot memory pages
+ *
+ * The first (lower) layer of the per-page snapshot mechanism is a page
+ * store: its responsibility is to store immutable shareable
+ * reference-counted memory pages independently of the snapshotting
+ * logic. Snapshot management and representation, soft-dirty tracking,
+ * etc. are handled by a higher layer.
+ *
+ * Data structure:
+ *
+ * * A pointer (`memory_`) to a (currently anonymous) `mmap()`ed memory
+ * region holding the memory pages (the address of the first page).
+ *
+ * We want to keep this memory region aligned on the memory pages (so
+ * that we might be able to create non-linear memory mappings on those
+ * pages in the future) and be able to expand it without copying the
+ * data (there will be a lot of pages here): we will be able to
+ * efficiently expand the memory mapping using `mremap()`, moving it
+ * to another virtual address if necessary.
+ *
+ * Because we will move this memory mapping on the virtual address
+ * space, only the index of the page will be stored in the snapshots
+ * and the page will always be looked up by going through `memory`:
+ *
+ * void* page = (char*) page_store->memory + page_index << pagebits;
+ *
+ * * The number of pages mapped in virtual memory (`capacity_`). Once all
+ * those pages are used, we need to expand the page store with
+ * `mremap()`.
+ *
+ * * A reference count for each memory page `page_counts_`. Each time a
+ * snapshot references a page, the counter is incremented. If a
+ * snapshot is freed, the reference count is decremented. When the
+ * reference count of a page reaches 0, it is added to a list of available
+ * pages (`free_pages_`).
+ *
+ * * A list of free pages `free_pages_` which can be reused. This avoids having
+ * to scan the reference count list to find a free page.
+ *
+ * * When we are expanding the memory map we do not want to add thousand of page
+ * to the `free_pages_` list and remove them just afterwards. The `top_index_`
+ * field is an index after which all pages are free and are not in the `free_pages_`
+ * list.
+ *
+ * * When we are adding a page, we need to check if a page with the same
+ * content is already in the page store in order to reuse it. For this
+ * reason, we maintain an index (`hash_index_`) mapping the hash of a
+ * page to the list of page indices with this hash.
+ * We use a fast (non cryptographic) hash so there may be conflicts:
+ * we must be able to store multiple indices for the same hash.
+ *
+ */
+struct s_mc_pages_store {
+private: // Types
+ typedef uint64_t hash_type;
+ typedef boost ::unordered_set<size_t> page_set_type;
+ typedef boost::unordered_map<hash_type, page_set_type> pages_map_type;
+
+private: // Fields:
+ /** First page
+ *
+ * mc_page_store_get_page expects that this is the first field.
+ * */
+ void* memory_;
+ /** Number of available pages in virtual memory */
+ size_t capacity_;
+ /** Top of the used pages (index of the next available page) */
+ size_t top_index_;
+ /** Page reference count */
+ std::vector<uint64_t> page_counts_;
+ /** Index of available pages before the top */
+ std::vector<size_t> free_pages_;
+ /** Index from page hash to page index */
+ pages_map_type hash_index_;
+
+private: // Methods
+ void resize(size_t size);
+ size_t alloc_page();
+ void remove_page(size_t pageno);
+
+public: // Constructors
+ explicit s_mc_pages_store(size_t size);
+ ~s_mc_pages_store();
+
+public: // Methods
+
+ /** @brief Decrement the reference count for a given page
+ *
+ * Decrement the reference count of this page. Used when a snapshot is
+ * destroyed.
+ *
+ * If the reference count reaches zero, the page is recycled:
+ * it is added to the `free_pages_` list and removed from the `hash_index_`.
+ *
+ * */
+ void unref_page(size_t pageno);
+
+ /** @brief Increment the refcount for a given page
+ *
+ * This method used to increase a reference count of a page if we know
+ * that the content of a page is the same as a page already in the page
+ * store.
+ *
+ * This will be the case if a page is soft-clean: we know that it has not
+ * changed since the previous snapshot/restoration and we can avoid
+ * hashing the page, comparing byte-per-byte to candidates.
+ * */
+ void ref_page(size_t pageno);
+
+ /** @brief Store a page in the page store */
+ size_t store_page(void* page);
+
+ /** @brief Get a page from its page number
+ *
+ * @param pageno Number of the memory page in the store
+ * @return Start of the page
+ */
+ const void* get_page(size_t pageno) const;
+
+public: // Debug/test methods
+
+ /** @brief Get the number of references for a page */
+ size_t get_ref(size_t pageno);
+
+ /** @brief Get the number of used pages */
+ size_t size();
+
+ /** @brief Get the capacity of the page store
+ *
+ * The capacity is expanded by a system call (mremap).
+ * */
+ size_t capacity();
+
+};
+
+/* Decrement the reference count and recycle the page when it drops to zero. */
+inline __attribute__((always_inline))
+void s_mc_pages_store::unref_page(size_t pageno) {
+ if ((--this->page_counts_[pageno]) == 0) {
+ this->remove_page(pageno);
+ }
+}
+
+/* Increment the reference count of an already-stored page. */
+inline __attribute__((always_inline))
+void s_mc_pages_store::ref_page(size_t pageno) {
+ ++this->page_counts_[pageno];
+}
+
+/* Compute the address of a page from its index in the store. */
+inline __attribute__((always_inline))
+const void* s_mc_pages_store::get_page(size_t pageno) const {
+ return mc_page_from_number(this->memory_, pageno);
+}
+
+inline __attribute__((always_inline))
+size_t s_mc_pages_store::get_ref(size_t pageno) {
+ return this->page_counts_[pageno];
+}
+
+/* Number of pages currently in use: allocated minus recycled. */
+inline __attribute__((always_inline))
+size_t s_mc_pages_store::size() {
+ return this->top_index_ - this->free_pages_.size();
+}
+
+inline __attribute__((always_inline))
+size_t s_mc_pages_store::capacity() {
+ return this->capacity_;
+}
+
+#endif
+
+SG_BEGIN_DECL()
+
+typedef struct s_mc_pages_store s_mc_pages_store_t, * mc_pages_store_t;
+mc_pages_store_t mc_pages_store_new();
+
+/** @brief Get a page of the store from its number (C API)
+ *
+ * NOTE(review): this reads the first word of the opaque C++ structure
+ * directly, so it relies on `memory_` being the FIRST field of
+ * s_mc_pages_store (as documented on that field).
+ */
+static inline __attribute__((always_inline))
+const void* mc_page_store_get_page(mc_pages_store_t page_store, size_t pageno)
+{
+ // This is page_store->memory_:
+ void* memory = *(void**)page_store;
+ return mc_page_from_number(memory, pageno);
+}
+
+SG_END_DECL()
+
+#endif
#include "simgrid_config.h"
#include <stdio.h>
+#include <stdbool.h>
#ifndef WIN32
#include <sys/mman.h>
#endif
#include "msg/datatypes.h"
#include "xbt/strbuff.h"
#include "xbt/parmap.h"
+#include "mc_mmu.h"
+#include "mc_page_store.h"
+
+SG_BEGIN_DECL()
typedef struct s_dw_frame s_dw_frame_t, *dw_frame_t;
typedef struct s_mc_function_index_item s_mc_function_index_item_t, *mc_function_index_item_t;
void *data;
// Size of the data region:
size_t size;
+ // For per-page snapshots, this is the array of the page numbers (in the page store) of each page of the region
+ size_t* page_numbers;
} s_mc_mem_region_t, *mc_mem_region_t;
+/** @brief Check whether an address lies within the bounds of a memory region */
+static inline __attribute__ ((always_inline))
+bool mc_region_contain(mc_mem_region_t region, void* p)
+{
+ return p >= region->start_addr &&
+ p < (void*)((char*) region->start_addr + region->size);
+}
+
/** Ignored data
*
* Some parts of the snapshot are ignored by zeroing them out: the real
xbt_dynar_t ignored_data;
} s_mc_snapshot_t, *mc_snapshot_t;
+mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot);
+
+/** @brief Get the snapshot region of an address, using a hint
+ *
+ * If the hinted region contains the address it is returned directly;
+ * otherwise fall back to a full lookup in the snapshot.
+ */
+static inline __attribute__ ((always_inline))
+mc_mem_region_t mc_get_region_hinted(void* addr, mc_snapshot_t snapshot, mc_mem_region_t region)
+{
+ if (mc_region_contain(region, addr))
+ return region;
+ else
+ return mc_get_snapshot_region(addr, snapshot);
+}
+
/** Information about a given stack frame
*
*/
typedef struct s_mc_snapshot_stack{
xbt_dynar_t local_variables;
- void *stack_pointer;
- void *real_address;
xbt_dynar_t stack_frames; // mc_stack_frame_t
}s_mc_snapshot_stack_t, *mc_snapshot_stack_t;
size_t size;
}s_mc_checkpoint_ignore_region_t, *mc_checkpoint_ignore_region_t;
-inline static void* mc_snapshot_get_heap_end(mc_snapshot_t snapshot) {
- if(snapshot==NULL)
- xbt_die("snapshot is NULL");
- xbt_mheap_t heap = (xbt_mheap_t)snapshot->regions[0]->data;
- return heap->breakval;
-}
+static void* mc_snapshot_get_heap_end(mc_snapshot_t snapshot);
mc_snapshot_t SIMIX_pre_mc_snapshot(smx_simcall_t simcall);
mc_snapshot_t MC_take_snapshot(int num_state);
void MC_restore_snapshot(mc_snapshot_t);
void MC_free_snapshot(mc_snapshot_t);
-/** \brief Translate a pointer from process address space to snapshot address space
- *
- * The address space contains snapshot of the main/application memory:
- * this function finds the address in a given snaphot for a given
- * real/application address.
- *
- * For read only memory regions and other regions which are not int the
- * snapshot, the address is not changed.
- *
- * \param addr Application address
- * \param snapshot The snapshot of interest (if NULL no translation is done)
- * \return Translated address in the snapshot address space
- * */
-void* mc_translate_address(uintptr_t addr, mc_snapshot_t snapshot);
+int mc_important_snapshot(mc_snapshot_t snapshot);
-/** \brief Translate a pointer from the snapshot address space to the application address space
- *
- * This is the inverse of mc_translate_address.
+size_t* mc_take_page_snapshot_region(void* data, size_t page_count, uint64_t* pagemap, size_t* reference_pages);
+void mc_free_page_snapshot_region(size_t* pagenos, size_t page_count);
+void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region);
+
+mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg);
+void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg);
+void mc_softdirty_reset();
+
+/** @brief Is this region stored as a single linear (flat) block (or absent)? */
+static inline __attribute__((always_inline))
+bool mc_snapshot_region_linear(mc_mem_region_t region) {
+ return !region || !region->data;
+}
+
+void* mc_snapshot_read_fragmented(void* addr, mc_mem_region_t region, void* target, size_t size);
+
+void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, void* target, size_t size);
+int mc_snapshot_region_memcp(
+ void* addr1, mc_mem_region_t region1,
+ void* addr2, mc_mem_region_t region2, size_t size);
+int mc_snapshot_memcp(
+ void* addr1, mc_snapshot_t snapshot1,
+ void* addr2, mc_snapshot_t snapshot2, size_t size);
+
+static void* mc_snapshot_read_pointer(void* addr, mc_snapshot_t snapshot);
+
+/** @brief State of the model-checker (global variables for the model checker)
*
- * \param addr Address in the snapshot address space
- * \param snapsot Snapshot of interest (if NULL no translation is done)
- * \return Translated address in the application address space
+ * Each part of the state of the model checker represented as a global
+ * variable prevents some sharing between snapshots and must be ignored.
+ * By moving as much state as possible in this structure allocated
+ * on the model-checker heap, we avoid those issues.
*/
-uintptr_t mc_untranslate_address(void* addr, mc_snapshot_t snapshot);
+typedef struct s_mc_model_checker {
+ // This is the parent snapshot of the current state:
+ mc_snapshot_t parent_snapshot;
+ mc_pages_store_t pages;
+ int fd_clear_refs;
+ int fd_pagemap;
+} s_mc_model_checker_t, *mc_model_checker_t;
+
+extern mc_model_checker_t mc_model_checker;
extern xbt_dynar_t mc_checkpoint_ignore;
};
void* mc_member_resolve(const void* base, dw_type_t type, dw_type_t member, mc_snapshot_t snapshot);
-void* mc_member_snapshot_resolve(const void* base, dw_type_t type, dw_type_t member, mc_snapshot_t snapshot);
typedef struct s_dw_variable{
Dwarf_Off dwarf_offset; /* Global offset of the field. */
* */
uint64_t mc_hash_processes_state(int num_state, xbt_dynar_t stacks);
+/* *********** Snapshot *********** */
+
+/** @brief Translate an application address into a per-page snapshot region
+ *
+ * Only valid for sparse regions (region->page_numbers != NULL): the page
+ * index within the region is mapped to a page of the page store.
+ */
+static inline __attribute__((always_inline))
+void* mc_translate_address_region(uintptr_t addr, mc_mem_region_t region)
+{
+ size_t pageno = mc_page_number(region->start_addr, (void*) addr);
+ size_t snapshot_pageno = region->page_numbers[pageno];
+ const void* snapshot_page = mc_page_store_get_page(mc_model_checker->pages, snapshot_pageno);
+ return (char*) snapshot_page + mc_page_offset((void*) addr);
+}
+
+/** \brief Translate a pointer from process address space to snapshot address space
+ *
+ * The address space contains snapshot of the main/application memory:
+ * this function finds the address in a given snapshot for a given
+ * real/application address.
+ *
+ * For read only memory regions and other regions which are not in the
+ * snapshot, the address is not changed.
+ *
+ * \param addr Application address
+ * \param snapshot The snapshot of interest (if NULL no translation is done)
+ * \return Translated address in the snapshot address space
+ * */
+static inline __attribute__((always_inline))
+void* mc_translate_address(uintptr_t addr, mc_snapshot_t snapshot)
+{
+
+ // If not in a process state/clone:
+ if (!snapshot) {
+ return (void *) addr;
+ }
+
+ mc_mem_region_t region = mc_get_snapshot_region((void*) addr, snapshot);
+
+ // The address belongs to no tracked region (e.g. read-only memory):
+ // do not translate it. This check must come before the boundary assert,
+ // which dereferences `region`.
+ if (!region) {
+ return (void *) addr;
+ }
+
+ xbt_assert(mc_region_contain(region, (void*) addr), "Trying to read out of the region boundary.");
+
+ // Flat snapshot:
+ if (region->data) {
+ uintptr_t offset = addr - (uintptr_t) region->start_addr;
+ return (void *) ((uintptr_t) region->data + offset);
+ }
+
+ // Per-page snapshot:
+ else if (region->page_numbers) {
+ return mc_translate_address_region(addr, region);
+ }
+
+ else {
+ xbt_die("No data for this memory region");
+ }
+}
+
+/** @brief Get the heap break ("brk") recorded in a snapshot
+ *
+ * Reads the value `std_heap->breakval` had at snapshot time.
+ */
+static inline __attribute__ ((always_inline))
+ void* mc_snapshot_get_heap_end(mc_snapshot_t snapshot) {
+ if(snapshot==NULL)
+ xbt_die("snapshot is NULL");
+ void** addr = &((xbt_mheap_t)std_heap)->breakval;
+ return mc_snapshot_read_pointer(addr, snapshot);
+}
+
+/** @brief Read a pointer-sized value from a snapshot */
+static inline __attribute__ ((always_inline))
+void* mc_snapshot_read_pointer(void* addr, mc_snapshot_t snapshot)
+{
+ void* res;
+ return *(void**) mc_snapshot_read(addr, snapshot, &res, sizeof(void*));
+}
+
+/** @brief Read memory from a snapshot region
+ *
+ * @param addr Process (non-snapshot) address of the data
+ * @param region Snapshot memory region where the data is located
+ * @param target Buffer to store the value
+ * @param size Size of the data to read in bytes
+ * @return Pointer where the data is located (target buffer or original location)
+ */
+static inline __attribute__((always_inline))
+void* mc_snapshot_read_region(void* addr, mc_mem_region_t region, void* target, size_t size)
+{
+ uintptr_t offset = (uintptr_t) addr - (uintptr_t) region->start_addr;
+
+ xbt_assert(addr >= region->start_addr && (char*) addr+size <= (char*)region->start_addr+region->size,
+ "Trying to read out of the region boundary.");
+
+ // Linear memory region:
+ if (region->data) {
+ return (void*) ((uintptr_t) region->data + offset);
+ }
+
+ // Fragmented memory region:
+ else if (region->page_numbers) {
+ void* end = (char*) addr + size - 1;
+ if( mc_same_page(addr, end) ) {
+ // The memory is contained in a single page:
+ return mc_translate_address_region((uintptr_t) addr, region);
+ } else {
+ // The memory spans several pages:
+ // copy it page by page into the target buffer.
+ return mc_snapshot_read_fragmented(addr, region, target, size);
+ }
+ }
+
+ else {
+ xbt_die("No data available for this region");
+ }
+}
+
+/** @brief Read a pointer-sized value from a snapshot region */
+static inline __attribute__ ((always_inline))
+void* mc_snapshot_read_pointer_region(void* addr, mc_mem_region_t region)
+{
+ void* res;
+ return *(void**) mc_snapshot_read_region(addr, region, &res, sizeof(void*));
+}
+
+SG_END_DECL()
+
#endif
--- /dev/null
+/* Copyright (c) 2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include <stdbool.h>
+
+#include "mc_private.h"
+#include "mc_mmu.h"
+#include "mc_page_store.h"
+
+/** @brief Find the snapshot region to which an address belongs
+ *
+ * @param addr     Application address
+ * @param snapshot Snapshot to look into
+ * @return Region containing the address, or NULL if none does
+ */
+mc_mem_region_t mc_get_snapshot_region(void* addr, mc_snapshot_t snapshot)
+{
+ // NOTE(review): assumes all NB_REGIONS entries are populated — confirm.
+ for (size_t i = 0; i != NB_REGIONS; ++i) {
+ mc_mem_region_t region = snapshot->regions[i];
+ void* start = region->start_addr;
+ void* end = (char*) start + region->size;
+
+ if (addr >= start && addr < end) {
+ return region;
+ }
+ }
+
+ return NULL;
+}
+
+/** @brief Read memory from a snapshot region broken across fragmented pages
+ *
+ * @param addr Process (non-snapshot) address of the data
+ * @param region Snapshot memory region where the data is located
+ * @param target Buffer to store the value
+ * @param size Size of the data to read in bytes
+ * @return Pointer where the data is located (target buffer or original location)
+ */
+void* mc_snapshot_read_fragmented(void* addr, mc_mem_region_t region, void* target, size_t size)
+{
+ void* end = (char*) addr + size - 1;
+ size_t page_end = mc_page_number(NULL, end);
+ void* dest = target;
+
+ // Copy one page at a time until we reach the page holding the last byte:
+ while (mc_page_number(NULL, addr) != page_end) {
+ void* snapshot_addr = mc_translate_address_region((uintptr_t) addr, region);
+ void* next_page = mc_page_from_number(NULL, mc_page_number(NULL, addr) + 1);
+ size_t readable = (char*) next_page - (char*) addr;
+ memcpy(dest, snapshot_addr, readable);
+ addr = (char*) addr + readable;
+ dest = (char*) dest + readable;
+ size -= readable;
+ }
+
+ // Read the end (remaining bytes within the last page):
+ void* snapshot_addr = mc_translate_address_region((uintptr_t)addr, region);
+ memcpy(dest, snapshot_addr, size);
+
+ return target;
+}
+
+/** @brief Read memory from a snapshot
+ *
+ * @param addr Process (non-snapshot) address of the data
+ * @param snapshot Snapshot (or NULL if no snapshot)
+ * @param target Buffer to store the value
+ * @param size Size of the data to read in bytes
+ * @return Pointer where the data is located (target buffer or original location)
+ */
+void* mc_snapshot_read(void* addr, mc_snapshot_t snapshot, void* target, size_t size)
+{
+ if (snapshot) {
+ // NOTE(review): mc_get_snapshot_region() may return NULL for untracked
+ // addresses, and mc_snapshot_read_region() dereferences the region —
+ // confirm callers only pass tracked addresses here.
+ mc_mem_region_t region = mc_get_snapshot_region(addr, snapshot);
+ return mc_snapshot_read_region(addr, region, target, size);
+ } else {
+ // No snapshot: read from the live address space.
+ return addr;
+ }
+}
+
+/** Compare memory between snapshots (with known regions)
+ *
+ * @param addr1 Address in the first snapshot
+ * @param region1 Region of the address in the first snapshot
+ * @param addr2 Address in the second snapshot
+ * @param region2 Region of the address in the second snapshot
+ * @param size Number of bytes to compare
+ * @return same as memcmp
+ * */
+int mc_snapshot_region_memcp(
+ void* addr1, mc_mem_region_t region1,
+ void* addr2, mc_mem_region_t region2, size_t size)
+{
+ // Using alloca() for large allocations may trigger stack overflow:
+ // use malloc if the buffer is too big.
+ // (alloca()ed memory stays valid until this function returns.)
+
+ bool stack_alloc = size < 64;
+ void* buffer = stack_alloc ? alloca(2*size) : malloc(2*size);
+ void* buffer1 = mc_snapshot_read_region(addr1, region1, buffer, size);
+ void* buffer2 = mc_snapshot_read_region(addr2, region2, (char*) buffer + size, size);
+ int res;
+ // Both reads resolved to the same storage: contents are trivially equal.
+ if (buffer1 == buffer2) {
+ res = 0;
+ } else {
+ res = memcmp(buffer1, buffer2, size);
+ }
+ if (!stack_alloc) {
+ free(buffer);
+ }
+ return res;
+}
+
+/** Compare memory between snapshots
+ *
+ * @param addr1 Address in the first snapshot
+ * @param snapshot1 First snapshot
+ * @param addr2 Address in the second snapshot
+ * @param snapshot2 Second snapshot
+ * @param size Number of bytes to compare
+ * @return same as memcmp
+ * */
+int mc_snapshot_memcp(
+ void* addr1, mc_snapshot_t snapshot1,
+ void* addr2, mc_snapshot_t snapshot2, size_t size)
+{
+ mc_mem_region_t region1 = mc_get_snapshot_region(addr1, snapshot1);
+ mc_mem_region_t region2 = mc_get_snapshot_region(addr2, snapshot2);
+ return mc_snapshot_region_memcp(addr1, region1, addr2, region2, size);
+}
} else if (_sg_mc_liveness) {
nb_processes = ((mc_visited_pair_t) ref)->nb_processes;
heap_bytes_used = ((mc_visited_pair_t) ref)->heap_bytes_used;
+ } else {
+ xbt_die("Both liveness and safety are disabled.");
}
int start = 0;
(mc_visited_pair_t) xbt_dynar_get_as(list, cursor, mc_visited_pair_t);
nb_processes_test = ((mc_visited_pair_t) ref_test)->nb_processes;
heap_bytes_used_test = ((mc_visited_pair_t) ref_test)->heap_bytes_used;
+ } else {
+ nb_processes_test = 0;
+ heap_bytes_used_test = 0;
+ xbt_die("Both liveness and safety are disabled.");
}
if (nb_processes_test < nb_processes) {
start = cursor + 1;
int min2 = mc_stats->expanded_states;
unsigned int cursor2 = 0;
unsigned int index2 = 0;
- xbt_dynar_foreach(visited_states, cursor2, state_test) {
- if (state_test->num < min2) {
+ xbt_dynar_foreach(visited_states, cursor2, state_test){
+ if (!mc_important_snapshot(state_test->system_state) && state_test->num < min2) {
index2 = cursor2;
min2 = state_test->num;
}
unsigned int cursor2 = 0;
unsigned int index2 = 0;
xbt_dynar_foreach(visited_pairs, cursor2, pair_test) {
- if (pair_test->num < min2) {
+ if (!mc_important_snapshot(pair_test->graph_state->system_state) && pair_test->num < min2) {
index2 = cursor2;
min2 = pair_test->num;
}
xbt_cfgelm_int, 1, 1, _mc_cfg_cb_checkpoint, NULL);
xbt_cfg_setdefault_int(_sg_cfg_set, "model-check/checkpoint", 0);
+ /* use sparse (per-page) snapshots for stateful model-checking */
+ xbt_cfg_register(&_sg_cfg_set, "model-check/sparse-checkpoint",
+ "Use sparse per-page snapshots.",
+ xbt_cfgelm_boolean, 1, 1, _mc_cfg_cb_sparse_checkpoint, NULL);
+ xbt_cfg_setdefault_boolean(_sg_cfg_set, "model-check/sparse-checkpoint", "no");
+
+ /* track modified pages with soft-dirty bits */
+ xbt_cfg_register(&_sg_cfg_set, "model-check/soft-dirty",
+ "Use soft-dirty page tracking.",
+ xbt_cfgelm_boolean, 1, 1, _mc_cfg_cb_soft_dirty, NULL);
+ xbt_cfg_setdefault_boolean(_sg_cfg_set, "model-check/soft-dirty", "yes");
+
/* do liveness model-checking */
xbt_cfg_register(&_sg_cfg_set, "model-check/property",
"Specify the name of the file containing the property. It must be the result of the ltl2ba program.",
int _sg_do_clean_atexit = 1;
int xbt_pagesize;
+int xbt_pagebits = 0;
/* Declare xbt_preinit and xbt_postexit as constructor/destructor of the library.
* This is crude and rather compiler-specific, unfortunately.
GetSystemInfo(&si);
xbt_pagesize = si.dwPageSize;
#endif
+
+ xbt_pagebits = 0;
+ int x = xbt_pagesize;
+ while(x >>= 1) {
+ ++xbt_pagebits;
+ }
+
#ifdef MMALLOC_WANT_OVERRIDE_LEGACY
mmalloc_preinit();
#endif
#ifndef _WIN32
srand48(seed);
#endif
-
atexit(xbt_postexit);
}
--- /dev/null
+cmake_minimum_required(VERSION 2.6)
+
+# Build the page_store unit test only when the model checker is available:
+if(HAVE_MC)
+ set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_BINARY_DIR}")
+
+ add_executable(page_store page_store.cpp)
+ target_link_libraries(page_store simgrid)
+endif()
+
+# Register the tesh scenario and the test sources in the parent-scope lists:
+set(tesh_files
+ ${tesh_files}
+ ${CMAKE_CURRENT_SOURCE_DIR}/page_store.tesh
+ PARENT_SCOPE
+ )
+set(testsuite_src
+ ${testsuite_src}
+ ${CMAKE_CURRENT_SOURCE_DIR}/page_store.cpp
+ PARENT_SCOPE
+ )
--- /dev/null
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include "mc/mc_page_store.h"
+
+// Counter used to generate a different page content on each call:
+static int value = 0;
+
+/** Fill `data` with a content distinct from all previous calls */
+static void new_content(void* data, size_t size)
+{
+ memset(data, ++value, size);
+}
+
+/** Allocate a fresh anonymous page for the test */
+static void* getpage()
+{
+ void* page = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ // Fail fast instead of letting later memset/memcmp crash on MAP_FAILED:
+ if (page == MAP_FAILED) {
+ abort();
+ }
+ return page;
+}
+
+int main(int argc, char** argv)
+{
+ // Initialisation:
+ size_t pagesize = (size_t) getpagesize();
+ mc_pages_store_t store = new s_mc_pages_store(500);
+ void* data = getpage();
+
+ // The store is initially empty:
+ xbt_assert(store->size()==0, "Bad size");
+
+ // Store the page once:
+ new_content(data, pagesize);
+ size_t pageno1 = store->store_page(data);
+ xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
+ const void* copy = store->get_page(pageno1);
+ xbt_assert(memcmp(data, copy, pagesize)==0, "Page data should be the same");
+ xbt_assert(store->size()==1, "Bad size");
+
+ // Store the same page again: it must be deduplicated.
+ size_t pageno2 = store->store_page(data);
+ xbt_assert(pageno1==pageno2, "Page should be the same");
+ xbt_assert(store->get_ref(pageno1)==2, "Bad refcount");
+ xbt_assert(store->size()==1, "Bad size");
+
+ // Store a new page:
+ new_content(data, pagesize);
+ size_t pageno3 = store->store_page(data);
+ xbt_assert(pageno1 != pageno3, "New page should be different");
+ xbt_assert(store->size()==2, "Bad size");
+
+ // Unref pages (pageno1 and pageno2 are the same page: refcount 2 -> 1 -> 0):
+ store->unref_page(pageno1);
+ xbt_assert(store->get_ref(pageno1)==1, "Bad refcount");
+ xbt_assert(store->size()==2, "Bad size");
+ store->unref_page(pageno2);
+ xbt_assert(store->size()==1, "Bad size");
+
+ // Reallocate page: the freed slot must be reused.
+ new_content(data, pagesize);
+ size_t pageno4 = store->store_page(data);
+ xbt_assert(pageno1 == pageno4, "Page was not reused");
+ xbt_assert(store->get_ref(pageno4)==1, "Bad refcount");
+ xbt_assert(store->size()==2, "Bad size");
+
+ return 0;
+}
--- /dev/null
+#! ./tesh
+
+$ $SG_TEST_EXENV ${bindir:=.}/page_store