X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/d47e21d93998034c0f8fa4a9a6c750002f7642fa..e3f5ca3fd3726e67046afbd03e48e9cd7294b835:/src/mc/mc_page_snapshot.cpp

diff --git a/src/mc/mc_page_snapshot.cpp b/src/mc/mc_page_snapshot.cpp
index 92959adc98..dcf0d9bbc8 100644
--- a/src/mc/mc_page_snapshot.cpp
+++ b/src/mc/mc_page_snapshot.cpp
@@ -1,5 +1,9 @@
 #include "mc_page_store.h"
 #include "mc_mmu.h"
+#include "mc_private.h"
+#include "mc_snapshot.h"
+
+#include 
 
 #define SOFT_DIRTY_BIT_NUMBER 55
 #define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER)
@@ -8,17 +12,26 @@ extern "C" {
 
 // ***** Region management:
 
+/** @brief Take a per-page snapshot of a region
+ *
+ * @param data The start of the region (must be at the beginning of a page)
+ * @param page_count Number of pages of the region
+ * @param pagemap Linux kernel pagemap values for this region (or NULL)
+ * @param reference_pages Snapshot page numbers of the previous soft_dirty_reset (or NULL)
+ * @return Snapshot page numbers of this new snapshot
+ */
 size_t* mc_take_page_snapshot_region(void* data, size_t page_count, uint64_t* pagemap, size_t* reference_pages)
 {
   size_t* pagenos = (size_t*) malloc(page_count * sizeof(size_t));
 
   for (size_t i=0; i!=page_count; ++i) {
-    if (pagemap && (pagemap[i] & SOFT_DIRTY)) {
+    bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
+    if (softclean && reference_pages) {
       // The page is softclean, it is the same page as the reference page:
       pagenos[i] = reference_pages[i];
       mc_model_checker->pages->ref_page(reference_pages[i]);
     } else {
-      // Otherwise, we need to store the page the hard hard
+      // Otherwise, we need to store the page the hard way
       // (by reading its content):
       void* page = (char*) data + (i << xbt_pagebits);
       pagenos[i] = mc_model_checker->pages->store_page(page);
@@ -35,20 +48,31 @@ void mc_free_page_snapshot_region(size_t* pagenos, size_t page_count)
   }
 }
 
-void mc_restore_page_snapshot_region(mc_mem_region_t region, size_t page_count, uint64_t* pagemap, mc_mem_region_t reference_region)
+/** @brief Restore a snapshot of a region
+ *
+ * If possible, the restoration will be incremental
+ * (the modified pages will not be touched).
+ *
+ * @param start_addr Address of the first page of the region
+ * @param page_count Number of pages of the region
+ * @param pagenos Snapshot page numbers to restore
+ * @param pagemap Linux kernel pagemap values for this region (or NULL)
+ * @param reference_pagenos Snapshot page numbers of the previous soft_dirty_reset (or NULL)
+ */
+void mc_restore_page_snapshot_region(void* start_addr, size_t page_count, size_t* pagenos, uint64_t* pagemap, size_t* reference_pagenos)
 {
   for (size_t i=0; i!=page_count; ++i) {
     bool softclean = pagemap && !(pagemap[i] & SOFT_DIRTY);
-    if (softclean && reference_region && reference_region->page_numbers[i] == region->page_numbers[i]) {
+    if (softclean && reference_pagenos && pagenos[i] == reference_pagenos[i]) {
       // The page is softclean and is the same as the reference one:
       // the page is already in the target state.
       continue;
     }
 
     // Otherwise, copy the page:
-    void* target_page = mc_page_from_number(region->start_addr, i);
-    const void* source_page = mc_model_checker->pages->get_page(region->page_numbers[i]);
+    void* target_page = mc_page_from_number(start_addr, i);
+    const void* source_page = mc_model_checker->pages->get_page(pagenos[i]);
     memcpy(target_page, source_page, xbt_pagesize);
   }
 }
 
@@ -57,7 +81,7 @@
 
 /** @brief Like pread() but without partial reads */
 static size_t pread_whole(int fd, void* buf, size_t count, off_t offset) {
-  size_t res;
+  size_t res = 0;
   char* data = (char*) buf;
 
   while(count) {
@@ -66,14 +90,15 @@
     if (n==0)
       return res;
 
-    // Error (or EAGAIN):
+    // Error (or EINTR):
     if (n==-1) {
-      if (errno == EAGAIN)
+      if (errno == EINTR)
         continue;
       else
         return -1;
     }
 
+    // It might be a partial read:
     count -= n;
     data += n;
     offset += n;
@@ -83,7 +108,8 @@
   return res;
 }
 
-static inline void mc_ensure_fd(int* fd, const char* path, int flags) {
+static inline __attribute__ ((always_inline))
+void mc_ensure_fd(int* fd, const char* path, int flags) {
   if (*fd != -1)
     return;
   *fd = open(path, flags);
@@ -92,11 +118,13 @@
   }
 }
-/** @brief Reset the softdirty bits
+/** @brief Reset the soft-dirty bits
  *
  * This is done after checkpointing and after checkpoint restoration
  * (if per page checkpoiting is used) in order to know which pages were
  * modified.
+ *
+ * See https://www.kernel.org/doc/Documentation/vm/soft-dirty.txt
  *
  */
 void mc_softdirty_reset() {
   mc_ensure_fd(&mc_model_checker->fd_clear_refs, "/proc/self/clear_refs", O_WRONLY|O_CLOEXEC);
@@ -105,13 +133,16 @@
   }
 }
 
-/** @brief Read /proc/self/pagemap informations in order to find properties on the pages
+/** @brief Read memory page information
  *
- * For each virtual memory page, this file provides informations.
+ * For each virtual memory page of the process,
+ * /proc/self/pagemap provides a 64-bit field of information.
  * We are interested in the soft-dirty bit: with this we can track which
  * pages were modified between snapshots/restorations and avoid
  * copying data which was not modified.
  *
+ * See https://www.kernel.org/doc/Documentation/vm/pagemap.txt
+ *
  * @param pagemap Output buffer for pagemap informations
  * @param start_addr Address of the first page
  * @param page_count Number of pages
@@ -122,53 +153,74 @@ static void mc_read_pagemap(uint64_t* pagemap, size_t page_start, size_t page_co
   size_t bytesize = sizeof(uint64_t) * page_count;
   off_t offset = sizeof(uint64_t) * page_start;
   if (pread_whole(mc_model_checker->fd_pagemap, pagemap, bytesize, offset) != bytesize) {
-    xbt_die("Coult not read pagemap");
+    xbt_die("Could not read pagemap");
   }
 }
 
 // ***** High level API
 
-mc_mem_region_t mc_region_new_sparse(int type, void *start_addr, size_t size, mc_mem_region_t ref_reg)
+mc_mem_region_t mc_region_new_sparse(mc_region_type_t region_type,
+  void *start_addr, void* permanent_addr, size_t size,
+  mc_mem_region_t ref_reg)
 {
-  mc_mem_region_t new_reg = xbt_new(s_mc_mem_region_t, 1);
-
-  new_reg->start_addr = start_addr;
-  new_reg->data = NULL;
-  new_reg->size = size;
-  new_reg->page_numbers = NULL;
+  mc_mem_region_t region = xbt_new(s_mc_mem_region_t, 1);
+  region->region_type = region_type;
+  region->storage_type = MC_REGION_STORAGE_TYPE_CHUNKED;
+  region->start_addr = start_addr;
+  region->permanent_addr = permanent_addr;
+  region->size = size;
 
   xbt_assert((((uintptr_t)start_addr) & (xbt_pagesize-1)) == 0,
     "Not at the beginning of a page");
+  xbt_assert((((uintptr_t)permanent_addr) & (xbt_pagesize-1)) == 0,
+    "Not at the beginning of a page");
   size_t page_count = mc_page_count(size);
 
   uint64_t* pagemap = NULL;
-  if (mc_model_checker->parent_snapshot) {
-    pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
-    mc_read_pagemap(pagemap, mc_page_number(NULL, start_addr), page_count);
+  if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+    pagemap = (uint64_t*) mmalloc_no_memset(mc_heap, sizeof(uint64_t) * page_count);
+    mc_read_pagemap(pagemap, mc_page_number(NULL, permanent_addr), page_count);
   }
 
+  size_t* reg_page_numbers = NULL;
+  if (ref_reg!=NULL && ref_reg->storage_type == MC_REGION_STORAGE_TYPE_CHUNKED)
+    reg_page_numbers = ref_reg->chunked.page_numbers;
+
   // Take incremental snapshot:
-  new_reg->page_numbers = mc_take_page_snapshot_region(start_addr, page_count, pagemap, ref_reg->page_numbers);
+  region->chunked.page_numbers = mc_take_page_snapshot_region(
+    permanent_addr, page_count, pagemap, reg_page_numbers);
 
-  return new_reg;
+  if (pagemap) {
+    mfree(mc_heap, pagemap);
+  }
+  return region;
 }
 
 void mc_region_restore_sparse(mc_mem_region_t reg, mc_mem_region_t ref_reg)
 {
-  xbt_assert((((uintptr_t)reg->start_addr) & (xbt_pagesize-1)) == 0,
+  xbt_assert((((uintptr_t)reg->permanent_addr) & (xbt_pagesize-1)) == 0,
     "Not at the beginning of a page");
   size_t page_count = mc_page_count(reg->size);
 
   uint64_t* pagemap = NULL;
 
   // Read soft-dirty bits if necessary in order to know which pages have changed:
-  if (mc_model_checker->parent_snapshot) {
-    pagemap = (uint64_t*) alloca(sizeof(uint64_t) * page_count);
-    mc_read_pagemap(pagemap, mc_page_number(NULL, reg->start_addr), page_count);
+  if (_sg_mc_soft_dirty && mc_model_checker->parent_snapshot) {
+    pagemap = (uint64_t*) mmalloc_no_memset(mc_heap, sizeof(uint64_t) * page_count);
+    mc_read_pagemap(pagemap, mc_page_number(NULL, reg->permanent_addr), page_count);
  }
 
-  // Incremental per-page snapshot restoration:
-  mc_restore_page_snapshot_region(reg, page_count, pagemap, ref_reg);
+  // Incremental per-page snapshot restoration:
+  size_t* reg_page_numbers = NULL;
+  if (ref_reg && ref_reg->storage_type == MC_REGION_STORAGE_TYPE_CHUNKED)
+    reg_page_numbers = ref_reg->chunked.page_numbers;
+
+  mc_restore_page_snapshot_region(reg->permanent_addr, page_count, reg->chunked.page_numbers,
+    pagemap, reg_page_numbers);
+
+  if (pagemap) {
+    mfree(mc_heap, pagemap);
+  }
 }
 
 }
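Note (not part of the patch above): the soft-dirty tracking that mc_softdirty_reset() and mc_read_pagemap() rely on is a stock Linux kernel interface, documented in the two kernel.org files referenced in the new comments. The standalone sketch below, assuming a kernel built with CONFIG_MEM_SOFT_DIRTY, shows the same workflow outside of SimGrid: writing "4" to /proc/self/clear_refs clears the soft-dirty bits of every page of the process, and bit 55 of the corresponding /proc/self/pagemap entry then tells whether a page has been written to since.

// Illustrative sketch only, not part of the commit above.
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define SOFT_DIRTY_BIT_NUMBER 55
#define SOFT_DIRTY (((uint64_t)1) << SOFT_DIRTY_BIT_NUMBER)

int main(void)
{
  long page_size = sysconf(_SC_PAGESIZE);

  // Map one private page and write to it so that it is backed by real memory:
  char* page = (char*) mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED)
    return 1;
  page[0] = 1;

  // Reset the soft-dirty bits of the whole process (what mc_softdirty_reset() does):
  int clear_refs = open("/proc/self/clear_refs", O_WRONLY);
  if (clear_refs == -1 || write(clear_refs, "4", 1) != 1)
    return 1;
  close(clear_refs);

  page[0] = 2;  // Touch the page again: the kernel marks it soft-dirty.

  // Read the 64-bit pagemap entry of that page (what mc_read_pagemap() does):
  int pagemap = open("/proc/self/pagemap", O_RDONLY);
  uint64_t entry = 0;
  off_t offset = (off_t) ((uintptr_t) page / page_size) * sizeof(uint64_t);
  if (pagemap == -1 || pread(pagemap, &entry, sizeof(entry), offset) != sizeof(entry))
    return 1;
  close(pagemap);

  printf("page is %s\n", (entry & SOFT_DIRTY) ? "soft-dirty" : "soft-clean");
  return 0;
}

This is why the patch can skip pages whose soft-dirty bit is clear: such a page cannot differ from the one stored at the previous soft_dirty_reset, so the snapshot only re-references the stored page and the restoration leaves that memory untouched.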