1 /* Copyright (c) 2015-2021. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #ifndef SIMGRID_MC_PAGESTORE_HPP
7 #define SIMGRID_MC_PAGESTORE_HPP
#include "src/mc/mc_forward.hpp"
#include "src/mc/mc_mmu.hpp"

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>
16 #ifndef XBT_ALWAYS_INLINE
17 #define XBT_ALWAYS_INLINE inline __attribute__((always_inline))
23 /** @brief Storage for snapshot memory pages
25 * The first (lower) layer of the per-page snapshot mechanism is a page store:
26 * its responsibility is to store immutable shareable reference-counted memory
27 * pages independently of the snapshotting logic. Snapshot management and
 * pages independently of the snapshotting logic. Snapshot management and
 * representation is handled by a higher layer. READMORE
32 * * A pointer (`memory_`) to a (currently anonymous) `mmap()`ed memory
33 * region holding the memory pages (the address of the first page).
35 * We want to keep this memory region aligned on the memory pages (so
36 * that we might be able to create non-linear memory mappings on those
37 * pages in the future) and be able to expand it without copying the
38 * data (there will be a lot of pages here): we will be able to
39 * efficiently expand the memory mapping using `mremap()`, moving it
40 * to another virtual address if necessary.
42 * Because we will move this memory mapping on the virtual address
43 * space, only the index of the page will be stored in the snapshots
44 * and the page will always be looked up by going through `memory`:
 *     void* page = (char*) page_store->memory + (page_index << pagebits);
48 * * The number of pages mapped in virtual memory (`capacity_`). Once all
49 * those pages are used, we need to expand the page store with
52 * * A reference count for each memory page `page_counts_`. Each time a
53 * snapshot references a page, the counter is incremented. If a
54 * snapshot is freed, the reference count is decremented. When the
 * reference count of a page reaches 0, it is added to a list of available
56 * pages (`free_pages_`).
58 * * A list of free pages `free_pages_` which can be reused. This avoids having
59 * to scan the reference count list to find a free page.
 * * When we are expanding the memory map we do not want to add thousands of pages
 *   to the `free_pages_` list and remove them just afterwards. The `top_index_`
 *   field is an index after which all pages are free and are not in the `free_pages_` list.
66 * * When we are adding a page, we need to check if a page with the same
67 * content is already in the page store in order to reuse it. For this
68 * reason, we maintain an index (`hash_index_`) mapping the hash of a
69 * page to the list of page indices with this hash.
70 * We use a fast (non cryptographic) hash so there may be conflicts:
71 * we must be able to store multiple indices for the same hash.
  // Hash of a page's content. A cheap (non-cryptographic) hash, so collisions
  // are expected and must be handled by the index below.
  using hash_type = std::uint64_t;
  // We are using a cheap hash to index a page.
  // We should expect collision and we need to associate multiple page indices
  // to the same hash value.
  using page_set_type = std::unordered_set<std::size_t>;
  using pages_map_type = std::unordered_map<hash_type, page_set_type>;

  /** Number of available pages in virtual memory */
  std::size_t capacity_;
  /** Top of the used pages (index of the next available page) */
  std::size_t top_index_;
  /** Page reference count (indexed by page number) */
  std::vector<std::uint64_t> page_counts_;
  /** Index of available pages before the top */
  std::vector<std::size_t> free_pages_;
  /** Index from page hash to page index (multiple pages may share a hash) */
  pages_map_type hash_index_;

  // Internal helpers (defined out of line):
  // Grow the backing mapping to hold `size` pages.
  void resize(std::size_t size);
  // Get a fresh page number (from `free_pages_` or by bumping `top_index_`).
  std::size_t alloc_page();
  // Recycle a page whose reference count dropped to zero.
  void remove_page(std::size_t pageno);

  // Non-copyable: copying would have to duplicate the whole mapped region.
  PageStore(PageStore const&) = delete;
  PageStore& operator=(PageStore const&) = delete;
  // Create a store with an initial capacity of `size` pages.
  explicit PageStore(std::size_t size);

  /** @brief Decrement the reference count for a given page
   *
   * Decrement the reference count of this page. Used when a snapshot is destroyed.
   *
   * If the reference count reaches zero, the page is recycled:
   * it is added to the `free_pages_` list and removed from the `hash_index_`.
   */
  void unref_page(std::size_t pageno);

  /** @brief Increment the refcount for a given page
   *
   * This method is used to increase the reference count of a page when we know
   * that the content of the page is the same as a page already in the page
   * store.
   *
   * This will be the case if a page is soft clean: we know that it has not
   * changed since the previous snapshot/restoration and we can avoid
   * hashing the page, comparing byte-per-byte to candidates.
   */
  void ref_page(size_t pageno);

  /** @brief Store a page in the page store */
  std::size_t store_page(const void* page);

  /** @brief Get a page from its page number
   *
   * @param pageno Number of the memory page in the store
   * @return Start of the page
   */
  void* get_page(std::size_t pageno) const;

  // Debug/test methods

  /** @brief Get the number of references for a page */
  std::size_t get_ref(std::size_t pageno) const;

  /** @brief Get the number of used pages */
  std::size_t size() const;

  /** @brief Get the capacity of the page store
   *
   * The capacity is expanded by a system call (mremap).
   */
  std::size_t capacity() const;
161 XBT_ALWAYS_INLINE void PageStore::unref_page(std::size_t pageno)
163 if ((--this->page_counts_[pageno]) == 0)
164 this->remove_page(pageno);
167 XBT_ALWAYS_INLINE void PageStore::ref_page(size_t pageno)
169 ++this->page_counts_[pageno];
172 XBT_ALWAYS_INLINE void* PageStore::get_page(std::size_t pageno) const
174 return (void*)simgrid::mc::mmu::join(pageno, (std::uintptr_t)this->memory_);
177 XBT_ALWAYS_INLINE std::size_t PageStore::get_ref(std::size_t pageno) const
179 return this->page_counts_[pageno];
182 XBT_ALWAYS_INLINE std::size_t PageStore::size() const
184 return this->top_index_ - this->free_pages_.size();
187 XBT_ALWAYS_INLINE std::size_t PageStore::capacity() const
189 return this->capacity_;
193 } // namespace simgrid