#include "mc_page_store.h"
#include "mc_mmu.h"
+#include <xbt/mmalloc.h>
+
#define SOFT_DIRTY_BIT_NUMBER 55
#define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER)
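// Illustration (a sketch, not part of the patch; assumes <stdint.h> and
// <stdbool.h>): each pagemap entry is one 64-bit value per virtual page,
// and bit 55 is the soft-dirty flag. The kernel sets it when the page is
// written to after a soft-dirty reset, so a clear bit means "unchanged".
// The helper name is hypothetical:
static inline bool mc_page_is_soft_dirty(uint64_t pagemap_entry)
{
  return (pagemap_entry & SOFT_DIRTY) != 0;
}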
// ***** Region management:
+/** @brief Take a per-page snapshot of a region
+ *
+ * @param data The start of the region (must be at the beginning of a page)
+ * @param page_count Number of pages in the region
+ * @param pagemap Linux kernel pagemap values for this region (or NULL)
+ * @param reference_pages Snapshot page numbers of the previous soft_dirty_reset (or NULL)
+ * @return Snapshot page numbers of this new snapshot
+ */
size_t* mc_take_page_snapshot_region(void* data, size_t page_count, uint64_t* pagemap, size_t* reference_pages)
{
size_t* pagenos = (size_t*) malloc(page_count * sizeof(size_t));
for (size_t i=0; i!=page_count; ++i) {
bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
- if (softclean) {
+ if (softclean && reference_pages) {
// The page is softclean, it is the same page as the reference page:
pagenos[i] = reference_pages[i];
mc_model_checker->pages->ref_page(reference_pages[i]);
}
}
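// The hunk above elides the complementary branch: when the page is dirty
// (or there is no reference snapshot), its content must actually be saved.
// A sketch, assuming a deduplicating page store with a store_page() method
// and an xbt_pagebits page-shift constant (both names are assumptions):
//
//   } else {
//     // Dirty page (or first snapshot): store the page content:
//     void* page = (char*) data + (i << xbt_pagebits);
//     pagenos[i] = mc_model_checker->pages->store_page(page);
//   }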
+/** @brief Restore a snapshot of a region
+ *
+ * If possible, the restoration will be incremental
+ * (the modified pages will not be touched).
+ *
+ * @param region The memory region to restore (must begin at a page boundary)
+ * @param page_count Number of pages in the region
+ * @param pagemap Linux kernel pagemap values for this region (or NULL)
+ * @param reference_region Region of the snapshot taken at the previous soft_dirty_reset (or NULL)
+ */
void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region)
{
for (size_t i=0; i!=page_count; ++i) {
if (n==0)
return res;
- // Error (or EAGAIN):
+ // Error (or EINTR):
if (n==-1) {
- if (errno == EAGAIN)
+ if (errno == EINTR)
continue;
else
return -1;
}
+ // It might be a partial read:
count -= n;
data += n;
offset += n;
return res;
}
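// A self-contained version of the loop above, as a sketch (the name and the
// end-of-file behavior are illustrative; assumes <unistd.h> and <errno.h>):
// read `count` bytes at `offset`, retrying after EINTR and resuming after
// partial reads.
static ssize_t read_whole(int fd, void* buf, size_t count, off_t offset)
{
  char* data = (char*) buf;
  size_t done = 0;
  while (done < count) {
    ssize_t n = pread(fd, data + done, count - done, offset + done);
    if (n == 0)               // Unexpected end of file: return a short count
      return done;
    if (n == -1) {
      if (errno == EINTR)     // Interrupted by a signal: just retry
        continue;
      return -1;              // Genuine I/O error
    }
    done += n;                // Partial read: account for it and loop again
  }
  return done;
}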
-static inline void mc_ensure_fd(int* fd, const char* path, int flags) {
+static inline __attribute__ ((always_inline))
+void mc_ensure_fd(int* fd, const char* path, int flags) {
if (*fd != -1)
return;
*fd = open(path, flags);
}
}
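// Usage sketch: the descriptor starts at -1 and is opened lazily on first
// use, then kept open so repeated snapshots do not pay for open() each time.
// For instance (as done below for the soft-dirty reset):
//
//   mc_ensure_fd(&mc_model_checker->fd_clear_refs,
//                "/proc/self/clear_refs", O_WRONLY|O_CLOEXEC);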
-/** @brief Reset the softdirty bits
+/** @brief Reset the soft-dirty bits
*
* This is done after checkpointing and after checkpoint restoration
 * (if per-page checkpointing is used) in order to know which pages were
* modified.
+ *
+ * See https://www.kernel.org/doc/Documentation/vm/soft-dirty.txt
* */
void mc_softdirty_reset() {
mc_ensure_fd(&mc_model_checker->fd_clear_refs, "/proc/self/clear_refs", O_WRONLY|O_CLOEXEC);
}
}
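// The hunk above only shows the lazy open. According to the kernel
// documentation linked in the comment, the soft-dirty bits are cleared by
// writing "4" to /proc/self/clear_refs; a minimal sketch (the error
// handling is illustrative):
//
//   if (write(mc_model_checker->fd_clear_refs, "4\n", 2) != 2)
//     xbt_die("Could not reset the soft-dirty bits");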
-/** @brief Read /proc/self/pagemap informations in order to find properties on the pages
+/** @brief Read memory page information
*
- * For each virtual memory page, this file provides informations.
+ * For each virtual memory page of the process,
+ * /proc/self/pagemap provides a 64-bit field of information.
* We are interested in the soft-dirty bit: with this we can track which
* pages were modified between snapshots/restorations and avoid
* copying data which was not modified.
*
+ * See https://www.kernel.org/doc/Documentation/vm/pagemap.txt
+ *
 * @param pagemap Output buffer for pagemap information
* @param start_addr Address of the first page
* @param page_count Number of pages
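 */

// Sketch of the corresponding read (illustrative: assumes the read_whole()
// helper sketched earlier and an fd_pagemap descriptor). Each virtual page
// has one 64-bit entry, so the entries for pages [page_start, page_start +
// page_count) start at byte offset sizeof(uint64_t) * page_start:
//
//   off_t offset = sizeof(uint64_t) * page_start;
//   size_t bytesize = sizeof(uint64_t) * page_count;
//   read_whole(mc_model_checker->fd_pagemap, pagemap, bytesize, offset);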
size_t page_count = mc_page_count(size);
uint64_t* pagemap = NULL;
- if (mc_model_checker->parent_snapshot) {
- pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
+ if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count);
}
new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap,
ref_reg==NULL ? NULL : ref_reg->page_numbers);
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
return new_reg;
}
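// Design note on the change above: the pagemap buffer used to be stack
// allocated with alloca(); sizeof(uint64_t) * page_count grows with the
// region size, so for large regions an explicit allocation (here on
// mc_heap) is the safer choice.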
uint64_t* pagemap = NULL;
// Read soft-dirty bits if necessary in order to know which pages have changed:
- if (mc_model_checker->parent_snapshot) {
- pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
+ if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count);
}
// Incremental per-page snapshot restoration:
mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+
+ // Beware: the restoration may have restored the state of the current heap
+ // itself; in that case, free(pagemap) would free from the wrong heap,
+ // so we explicitly free from mc_heap instead:
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
}
}