From 1123a69b82b73a4edd1cc9012330de33919d171e Mon Sep 17 00:00:00 2001 From: Gabriel Corona Date: Thu, 19 Jun 2014 15:13:42 +0200 Subject: [PATCH] [mc] Do not allocate pagemap buffer on the heap This won't work very well with small stacks. --- include/xbt/mmalloc.h | 4 ++++ src/mc/mc_page_snapshot.cpp | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/include/xbt/mmalloc.h b/include/xbt/mmalloc.h index e4bf2a5595..13222c6f1d 100644 --- a/include/xbt/mmalloc.h +++ b/include/xbt/mmalloc.h @@ -23,6 +23,8 @@ #include "xbt/dynar.h" #include "xbt/dict.h" +SG_BEGIN_DECL() + /* Datatype representing a separate heap. The whole point of the mmalloc module * is to allow several such heaps in the process. It thus works by redefining * all the classical memory management functions (malloc and friends) with an @@ -75,5 +77,7 @@ void reset_heap_information(void); size_t mmalloc_get_bytes_used(xbt_mheap_t); ssize_t mmalloc_get_busy_size(xbt_mheap_t, void *ptr); +SG_END_DECL() + #endif #endif /* MMALLOC_H */ diff --git a/src/mc/mc_page_snapshot.cpp b/src/mc/mc_page_snapshot.cpp index 833d70cf5b..c049b7d326 100644 --- a/src/mc/mc_page_snapshot.cpp +++ b/src/mc/mc_page_snapshot.cpp @@ -1,6 +1,8 @@ #include "mc_page_store.h" #include "mc_mmu.h" +#include + #define SOFT_DIRTY_BIT_NUMBER 55 #define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER) @@ -150,7 +152,7 @@ mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc uint64_t* pagemap = NULL; if (mc_model_checker->parent_snapshot) { - pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count); + pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count); mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count); } @@ -158,6 +160,9 @@ mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap, ref_reg==NULL ? 
NULL : ref_reg->page_numbers); + if(pagemap) { + mfree((xbt_mheap_t) mc_heap, pagemap); + } return new_reg; } @@ -171,12 +176,18 @@ void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg) // Read soft-dirty bits if necessary in order to know which pages have changed: if (mc_model_checker->parent_snapshot) { - pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count); + pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count); mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count); } // Incremental per-page snapshot restoration: mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg); + + // This is funny, the restoration can restore the state of the current heap, + // if this happens, free(pagemap) would free from the wrong heap: + if(pagemap) { + mfree((xbt_mheap_t) mc_heap, pagemap); + } } } -- 2.20.1