#include "mc_page_store.h"
#include "mc_mmu.h"
+#include <xbt/mmalloc.h>
+
#define SOFT_DIRTY_BIT_NUMBER 55
#define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER)
if (n==0)
return res;
- // Error (or EAGAIN):
+ // Error (or EINTR):
if (n==-1) {
- if (errno == EAGAIN)
+ if (errno == EINTR)
continue;
else
return -1;
}
+ // It might be a partial read:
count -= n;
data += n;
offset += n;
}
}
-/** @brief Reset the softdirty bits
+/** @brief Reset the soft-dirty bits
*
* This is done after checkpointing and after checkpoint restoration
* (if per page checkpoiting is used) in order to know which pages were
* modified.
+ *
+ * See https://www.kernel.org/doc/Documentation/vm/soft-dirty.txt
* */
void mc_softdirty_reset() {
mc_ensure_fd(&mc_model_checker->fd_clear_refs, "/proc/self/clear_refs", O_WRONLY|O_CLOEXEC);
}
}
-/** @brief Read /proc/self/pagemap informations in order to find properties on the pages
+/** @brief Read memory page information
*
- * For each virtual memory page, this file provides informations.
+ * For each virtual memory page of the process,
+ * /proc/self/pagemap provides a 64-bit field of information.
* We are interested in the soft-dirty bit: with this we can track which
* pages were modified between snapshots/restorations and avoid
* copying data which was not modified.
*
+ * See https://www.kernel.org/doc/Documentation/vm/pagemap.txt
+ *
* @param pagemap Output buffer for pagemap informations
* @param start_addr Address of the first page
* @param page_count Number of pages
size_t page_count = mc_page_count(size);
uint64_t* pagemap = NULL;
- if (mc_model_checker->parent_snapshot) {
- pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
+ if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count);
}
new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap,
ref_reg==NULL ? NULL : ref_reg->page_numbers);
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
return new_reg;
}
uint64_t* pagemap = NULL;
// Read soft-dirty bits if necessary in order to know which pages have changed:
- if (mc_model_checker->parent_snapshot) {
- pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
+ if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+ pagemap = (uint64_t*) mmalloc_no_memset((xbt_mheap_t) mc_heap, sizeof(uint64_t) * page_count);
mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count);
}
// Incremental per-page snapshot restoration:
mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+
+ // Note: the restoration may have restored the state of the current heap;
+ // if that happens, free(pagemap) would free from the wrong heap:
+ if(pagemap) {
+ mfree((xbt_mheap_t) mc_heap, pagemap);
+ }
}
}