Using alloca() for the pagemap buffers won't work very well with small stacks.
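For scale (illustrative numbers only, not taken from this patch): the pagemap buffer holds one uint64_t per page of the snapshotted region, so a large region quickly outgrows a small stack if the buffer comes from alloca():

    /* Illustrative arithmetic, assuming 4 KiB pages: a 256 MiB region spans
     * 65536 pages, so the pagemap buffer is 65536 * sizeof(uint64_t) = 512 KiB.
     * alloca() would carve that out of the current stack frame; the patch
     * takes it from an explicit heap with mmalloc_no_memset() instead. */
    size_t region_size = 256 * 1024 * 1024;              /* 256 MiB      */
    size_t page_count  = region_size / 4096;             /* 65536 pages  */
    size_t buffer_size = page_count * sizeof(uint64_t);  /* 512 KiB      */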
#include "xbt/dynar.h"
#include "xbt/dict.h"
#include "xbt/dynar.h"
#include "xbt/dict.h"
/* Datatype representing a separate heap. The whole point of the mmalloc module
* is to allow several such heaps in the process. It thus works by redefining
* all the classical memory management functions (malloc and friends) with an
size_t mmalloc_get_bytes_used(xbt_mheap_t);
ssize_t mmalloc_get_busy_size(xbt_mheap_t, void *ptr);
#endif
#endif /* MMALLOC_H */
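As context for the heap arguments used in the change below, a minimal sketch of the multi-heap convention described in the header comment above; it only uses calls that appear in this patch and assumes heap is an xbt_mheap_t created elsewhere:

    /* Every call names the heap it operates on explicitly. */
    void *buf = mmalloc_no_memset(heap, 4096);    /* 4 KiB from 'heap', contents uninitialized */
    size_t used = mmalloc_get_bytes_used(heap);   /* accounting is per heap */
    mfree(heap, buf);                             /* must be released into the same heap */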
#include "mc_page_store.h"
#include "mc_mmu.h"
#include "mc_page_store.h"
#include "mc_mmu.h"
+#include <xbt/mmalloc.h>
+
#define SOFT_DIRTY_BIT_NUMBER 55
#define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER)
uint64_t* pagemap = NULL;
if (mc_model_checker->parent_snapshot) {
- pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count);
}
new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap,
ref_reg==NULL ? NULL : ref_reg->page_numbers);
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
// Read soft-dirty bits if necessary in order to know which pages have changed:
if (mc_model_checker->parent_snapshot) {
- pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count);
}
// Incremental per-page snapshot restoration:
mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+
+ // This is funny: the restoration can restore the state of the current heap;
+ // if this happens, free(pagemap) would free from the wrong heap:
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }