1 /* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 /* Shared allocations are handled through shared memory segments.
7 * Associated data and metadata are used as follows:
10 * `allocs' map ---- -.
11 * ---------- shared_data_t shared_metadata_t / | | |
12 * .->| <name> | ---> -------------------- <--. ----------------- | | | |
13 * | ---------- | fd of <name> | | | size of mmap | --| | | |
14 * | | count (2) | |-- | data | \ | | |
15 * `----------------- | <name> | | ----------------- ---- |
16 * -------------------- | ^ |
18 * | | `allocs_metadata' map |
19 * | | ---------------------- |
20 * | `-- | <addr of mmap #1> |<-'
21 * | .-- | <addr of mmap #2> |<-.
22 * | | ---------------------- |
28 * | shared_metadata_t / | |
29 * | ----------------- | | |
30 * | | size of mmap | --| | |
32 * ----------------- | | |
39 #include "private.hpp"
40 #include "xbt/config.hpp"
44 #include <sys/types.h>
53 #define MAP_ANONYMOUS MAP_ANON
57 #define MAP_POPULATE 0
// Define the "smpi_shared" logging channel (child of "smpi") used by the XBT_DEBUG/XBT_WARN
// calls throughout this file.
60 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_shared, smpi, "Logging specific to SMPI (shared memory macros)");
63 /** Some location in the source code
65  * This information is used by SMPI_SHARED_MALLOC to allocate some shared memory for all simulated processes.
// The key type of the `allocs' map below: a "<file>:<line>" string identifying one allocation
// site. It inherits from std::string so std::hash<std::string> can be reused as the map's hasher.
// NOTE(review): this excerpt is truncated (access specifier and closing brace are not visible).
68 class smpi_source_location : public std::string {
// Default location: the empty string (needed for map value semantics).
70   smpi_source_location() = default;
// Builds the "<filename>:<line>" key for one SMPI_SHARED_MALLOC call site.
71   smpi_source_location(const char* filename, int line) : std::string(std::string(filename) + ":" + std::to_string(line))
// Per-source-location data of one LOCAL shared segment. The fields used elsewhere in this file
// are `.fd' (the shm file descriptor) and `.count' (reference count) — see
// smpi_shared_malloc_local() and smpi_shared_free(). NOTE(review): the field declarations
// themselves are not visible in this truncated excerpt.
76 struct shared_data_t {
// One entry per allocation site ("file:line") -> its shared segment bookkeeping.
81 std::unordered_map<smpi_source_location, shared_data_t, std::hash<std::string>> allocs;
// Convenience alias: one (location, shared_data_t) entry of the `allocs' map.
82 typedef decltype(allocs)::value_type shared_data_key_type;
// Metadata of one shared mapping, keyed by the user-visible pointer in `allocs_metadata'.
// NOTE(review): truncated excerpt — a `size' field is also used below (meta->second.size in
// smpi_is_shared/smpi_shared_free) but its declaration is not visible here.
84 struct shared_metadata_t {
// Size of the underlying reservation (may exceed the user size for alignment, see
// smpi_shared_malloc_partial); this is what munmap() gets on free.
86   size_t allocated_size;
// (start, stop) byte ranges of this allocation that are NOT shared between ranks.
88   std::vector<std::pair<size_t, size_t>> private_blocks;
// Back-pointer into the `allocs' map entry (or a heap-allocated entry for GLOBAL allocations).
89   shared_data_key_type* data;
// user pointer -> metadata; ordered map so lower_bound() can find the enclosing mapping.
92 std::map<const void*, shared_metadata_t> allocs_metadata;
// Cache for SMPI_SHARED_CALL: "func:input" -> memoized result pointer.
93 std::map<std::string, void*> calls;
// Lazily-created fds for the "bogus" backing files used by GLOBAL shared malloc;
// -1 means "not created yet" (see smpi_shared_malloc_partial).
96 static int smpi_shared_malloc_bogusfile = -1;
97 static int smpi_shared_malloc_bogusfile_huge_page = -1;
// Folding granularity, default 1 MiB; overwritten from the smpi/shared-malloc-blocksize
// configuration option in smpi_shared_malloc_partial().
98 static unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
// Tear down the shared-allocation bookkeeping at simulation end.
103 void smpi_shared_destroy()
// Drop all recorded mmap metadata. NOTE(review): the excerpt is truncated here; presumably the
// `allocs' and `calls' maps are cleared as well — confirm against the full file.
106   allocs_metadata.clear();
// Returns the current size (in bytes) of the file behind `fd', as reported by fstat().
// Aborts the simulation (xbt_die) on failure.
// NOTE(review): truncated excerpt — the `struct stat st;' declaration and closing braces are
// not visible here.
110 static size_t shm_size(int fd) {
113   if(fstat(fd, &st) < 0) {
114     xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
116   return static_cast<size_t>(st.st_size);
// Maps `size' bytes of the shared segment `fd' into this process and records the mapping in
// `allocs_metadata' so smpi_is_shared()/smpi_shared_free() can find it later.
120 static void* shm_map(int fd, size_t size, shared_data_key_type* data) {
121   shared_metadata_t meta;
// Grow the backing file only when it is smaller than the requested size; an existing larger
// segment is reused as-is.
123   if(size > shm_size(fd) && (ftruncate(fd, static_cast<off_t>(size)) < 0)) {
124     xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));
127   void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
128   if(mem == MAP_FAILED) {
129     xbt_die("Failed to map fd %d with size %zu: %s\n"
130             "If you are running a lot of ranks, you may be exceeding the amount of mappings allowed per process.\n"
131             "On Linux systems, change this value with sudo sysctl -w vm.max_map_count=newvalue (default value: 65536)\n"
133             "https://simgrid.org/doc/latest/Configuring_SimGrid.html#configuring-the-user-code-virtualization for more "
135             fd, size, strerror(errno));
// Record where the mapping lives so it can be unmapped and refcounted on free.
// NOTE(review): truncated excerpt — upstream presumably also fills meta.size/meta.data and
// returns `mem' at the end; confirm against the full file.
139   meta.allocated_ptr = mem;
140   meta.allocated_size = size;
141   allocs_metadata[mem] = meta;
142   XBT_DEBUG("MMAP %zu to %p", size, mem);
// LOCAL flavor of SMPI_SHARED_MALLOC: one POSIX shm segment per source location ("file:line"),
// mapped once per call and reference-counted through the `allocs' map, so every rank calling
// from the same line shares the same physical memory.
// NOTE(review): truncated excerpt — the branch structure (new vs. already-known location) and
// several braces/returns are not visible; comments describe the visible lines only.
146 static void *smpi_shared_malloc_local(size_t size, const char *file, int line)
149   smpi_source_location loc(file, line);
// insert() tells us whether this call site is seen for the first time (res.second).
150   auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
151   auto data = res.first;
153   // The new element was inserted.
154   // Generate a shared memory name from the address of the shared_data:
155   char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on macOS (shm_open raises ENAMETOOLONG otherwise)
156   snprintf(shmname, 31, "/shmalloc%p", &*data);
// O_EXCL: fail loudly if a stale segment with this name survived a previous crash.
157   int fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
160   xbt_die("Please cleanup /dev/shm/%s", shmname);
162   xbt_die("An unhandled error occurred while opening %s. shm_open: %s", shmname, strerror(errno));
// First use of this call site: remember the fd, start the refcount at 1 and map the segment.
164   data->second.fd = fd;
165   data->second.count = 1;
166   mem = shm_map(fd, size, &*data);
// Early unlink: the mapping and fd keep the segment alive, but it no longer shows in /dev/shm.
167   if (shm_unlink(shmname) < 0) {
168     XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
170   XBT_DEBUG("Mapping %s at %p through %d", shmname, mem, fd);
// Call site already known: map the existing segment again and bump the refcount.
172   mem = shm_map(data->second.fd, size, &*data);
173   data->second.count++;
175   XBT_DEBUG("Shared malloc %zu in %p (metadata at %p)", size, mem, &*data);
179 // Align functions, from http://stackoverflow.com/questions/4840410/how-to-align-a-pointer-in-c
// Round `n' up/down to a multiple of `align'. Requires `align' to be a power of two:
// -(align) in two's complement is the mask that clears the low log2(align) bits.
180 #define ALIGN_UP(n, align) (((n) + (align)-1) & -(align))
181 #define ALIGN_DOWN(n, align) ((n) & -(align))
// 4 KiB classical page and 2 MiB huge page sizes used by the partial-sharing arithmetic below.
183 constexpr unsigned PAGE_SIZE = 0x1000;
184 constexpr unsigned HUGE_PAGE_SIZE = 1U << 21;
186 /* Similar to smpi_shared_malloc, but only sharing the blocks described by shared_block_offsets.
187 * This array contains the offsets (in bytes) of the block to share.
188 * Even indices are the start offsets (included), odd indices are the stop offsets (excluded).
189 * For instance, if shared_block_offsets == {27, 42}, then the elements mem[27], mem[28], ..., mem[41] are shared.
190 * The others are not.
// NOTE(review): this excerpt is truncated — several intermediate lines, else-branches and
// closing braces are missing. The comments below document the visible logic only.
193 void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks)
// A non-empty smpi/shared-malloc-hugepage mount point enables MAP_HUGETLB mappings.
195 std::string huge_page_mount_point = simgrid::config::get_value<std::string>("smpi/shared-malloc-hugepage");
196 bool use_huge_page = not huge_page_mount_point.empty();
197 #ifndef MAP_HUGETLB /* If the system header don't define that mmap flag */
198 xbt_assert(not use_huge_page,
199 "Huge pages are not available on your system, you cannot use the smpi/shared-malloc-hugepage option.");
// The folding granularity is configurable; with huge pages it must equal one huge page.
201 smpi_shared_malloc_blocksize =
202 static_cast<unsigned long>(simgrid::config::get_value<double>("smpi/shared-malloc-blocksize"));
204 size_t allocated_size;
206 xbt_assert(smpi_shared_malloc_blocksize == HUGE_PAGE_SIZE, "the block size of shared malloc should be equal to the size of a huge page.");
// Over-allocate by two blocks so the returned pointer can be aligned on a huge-page boundary.
207 allocated_size = size + 2*smpi_shared_malloc_blocksize;
210 xbt_assert(smpi_shared_malloc_blocksize % PAGE_SIZE == 0, "the block size of shared malloc should be a multiple of the page size.");
211 allocated_size = size;
215 /* First reserve memory area */
216 void* allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
218 xbt_assert(allocated_ptr != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
219 "to allow big allocations.\n",
// Align the user-visible pointer inside the (possibly over-sized) anonymous reservation.
222 mem = (void*)ALIGN_UP((int64_t)allocated_ptr, HUGE_PAGE_SIZE);
226 XBT_DEBUG("global shared allocation. Blocksize %lu", smpi_shared_malloc_blocksize);
227 /* Create a fd to a new file on disk, make it smpi_shared_malloc_blocksize big, and unlink it.
228 * It still exists in memory but not in the file system (thus it cannot be leaked). */
229 /* Create bogus file if not done already
230 * We need two different bogusfiles:
231 * smpi_shared_malloc_bogusfile_huge_page is used for calls to mmap *with* MAP_HUGETLB,
232 * smpi_shared_malloc_bogusfile is used for calls to mmap *without* MAP_HUGETLB.
233 * We cannot use a same file for the two type of calls, since the first one needs to be
234 * opened in a hugetlbfs mount point whereas the second needs to be a "classical" file. */
235 if(use_huge_page && smpi_shared_malloc_bogusfile_huge_page == -1) {
236 std::string huge_page_filename = huge_page_mount_point + "/simgrid-shmalloc-XXXXXX";
237 smpi_shared_malloc_bogusfile_huge_page = mkstemp((char*)huge_page_filename.c_str());
238 XBT_DEBUG("bogusfile_huge_page: %s\n", huge_page_filename.c_str());
// Unlink immediately: the open fd keeps the file alive, nothing is leaked on the filesystem.
239 unlink(huge_page_filename.c_str());
241 if(smpi_shared_malloc_bogusfile == -1) {
242 char name[] = "/tmp/simgrid-shmalloc-XXXXXX";
243 smpi_shared_malloc_bogusfile = mkstemp(name);
244 XBT_DEBUG("bogusfile : %s\n", name);
// Fill one block of zeros so mappings of [0, blocksize) are backed by real file content.
246 char* dumb = new char[smpi_shared_malloc_blocksize](); // zero initialized
247 ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
249 xbt_die("Could not write bogus file for shared malloc");
// MAP_FIXED: overwrite the anonymous reservation in place; MAP_POPULATE: prefault pages.
253 int mmap_base_flag = MAP_FIXED | MAP_SHARED | MAP_POPULATE;
254 int mmap_flag = mmap_base_flag;
255 int huge_fd = use_huge_page ? smpi_shared_malloc_bogusfile_huge_page : smpi_shared_malloc_bogusfile;
258 mmap_flag |= MAP_HUGETLB;
261 XBT_DEBUG("global shared allocation, begin mmap");
263 /* Map the bogus file in place of the anonymous memory */
264 for(int i_block = 0; i_block < nb_shared_blocks; i_block ++) {
265 XBT_DEBUG("\tglobal shared allocation, mmap block %d/%d", i_block+1, nb_shared_blocks);
266 size_t start_offset = shared_block_offsets[2*i_block];
267 size_t stop_offset = shared_block_offsets[2*i_block+1];
// Sanity checks: blocks are non-empty, in-bounds, sorted and non-overlapping.
268 xbt_assert(start_offset < stop_offset, "start_offset (%zu) should be lower than stop offset (%zu)", start_offset, stop_offset);
269 xbt_assert(stop_offset <= size, "stop_offset (%zu) should be lower than size (%zu)", stop_offset, size);
270 if(i_block < nb_shared_blocks-1)
271 xbt_assert(stop_offset < shared_block_offsets[2*i_block+2],
272 "stop_offset (%zu) should be lower than its successor start offset (%zu)", stop_offset, shared_block_offsets[2*i_block+2]);
// Fold every full block of the shared range onto the single bogus-file block: all these
// virtual ranges alias the same physical pages.
273 size_t start_block_offset = ALIGN_UP((int64_t)start_offset, smpi_shared_malloc_blocksize);
274 size_t stop_block_offset = ALIGN_DOWN((int64_t)stop_offset, smpi_shared_malloc_blocksize);
275 for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
276 XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
277 void* pos = (void*)((unsigned long)mem + offset);
278 void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag,
280 xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
281 "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ? "
282 "You can also try using the sysctl vm.max_map_count. "
283 "If you are using huge pages, check that you have at least one huge page (/proc/sys/vm/nr_hugepages) "
284 "and that the directory you are passing is mounted correctly (mount /path/to/huge -t hugetlbfs -o rw,mode=0777).",
// Handle the partial, page-aligned (but not block-aligned) pieces at both ends of the shared
// range; these are mapped without MAP_HUGETLB since they are smaller than a huge page.
287 size_t low_page_start_offset = ALIGN_UP((int64_t)start_offset, PAGE_SIZE);
288 size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN((int64_t)stop_offset, (int64_t)PAGE_SIZE);
289 if(low_page_start_offset < low_page_stop_offset) {
290 XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
291 void* pos = (void*)((unsigned long)mem + low_page_start_offset);
292 void* res = mmap(pos, low_page_stop_offset-low_page_start_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
293 smpi_shared_malloc_bogusfile, 0);
294 xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
295 "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
296 "You can also try using the sysctl vm.max_map_count",
299 if(low_page_stop_offset <= stop_block_offset) {
300 XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
301 size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE);
302 if(high_page_stop_offset > stop_block_offset) {
303 void* pos = (void*)((unsigned long)mem + stop_block_offset);
304 void* res = mmap(pos, high_page_stop_offset-stop_block_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
305 smpi_shared_malloc_bogusfile, 0);
306 xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
307 "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
308 "You can also try using the sysctl vm.max_map_count",
// Register metadata so smpi_is_shared()/smpi_shared_free() recognize this pointer. The
// heap-allocated shared_data_key_type (fd = -1) marks a GLOBAL allocation.
314 shared_metadata_t newmeta;
315 //register metadata for memcpy avoidance
316 shared_data_key_type* data = new shared_data_key_type;
317 data->second.fd = -1;
318 data->second.count = 1;
321 newmeta.allocated_ptr = allocated_ptr;
322 newmeta.allocated_size = allocated_size;
// Record the private (non-shared) ranges: before the first block, between consecutive blocks,
// and after the last block.
323 if(shared_block_offsets[0] > 0) {
324 newmeta.private_blocks.push_back(std::make_pair(0, shared_block_offsets[0]));
327 for(i_block = 0; i_block < nb_shared_blocks-1; i_block ++) {
328 newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], shared_block_offsets[2*i_block+2]));
330 if(shared_block_offsets[2*i_block+1] < size) {
331 newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], size));
333 allocs_metadata[mem] = newmeta;
335 XBT_DEBUG("global shared allocation, allocated_ptr %p - %p", allocated_ptr, (void*)(((uint64_t)allocated_ptr)+allocated_size));
336 XBT_DEBUG("global shared allocation, returned_ptr %p - %p", mem, (void*)(((uint64_t)mem)+size));
// malloc() interception entry point: allocations below the auto-shared threshold (or all of
// them when the threshold is 0, i.e. the feature is disabled) use plain operator new; larger
// ones are routed through smpi_shared_malloc().
// NOTE(review): truncated excerpt — the closing brace is not visible.
342 void *smpi_shared_malloc_intercept(size_t size, const char *file, int line) {
343   if( smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh())
344     return ::operator new(size);
346   return smpi_shared_malloc(size, file, line);
// calloc() interception: same threshold logic as smpi_shared_malloc_intercept(), with explicit
// zero-filling on the private path. The shared path relies on the fresh mapping being
// zero-initialized already.
// NOTE(review): truncated excerpt — the `return ptr;' of the private branch is not visible.
349 void* smpi_shared_calloc_intercept(size_t num_elm, size_t elem_size, const char* file, int line){
350   if( smpi_cfg_auto_shared_malloc_thresh() == 0 || elem_size*num_elm < smpi_cfg_auto_shared_malloc_thresh()){
351     void* ptr = ::operator new(elem_size*num_elm);
352     memset(ptr, 0, elem_size*num_elm);
355   return smpi_shared_malloc(elem_size*num_elm, file, line);
// Dispatch for SMPI_SHARED_MALLOC depending on the smpi/shared-malloc configuration:
// LOCAL  -> one POSIX shm segment per source location (smpi_shared_malloc_local);
// GLOBAL -> fold the whole allocation onto a bogus file (smpi_shared_malloc_partial);
// otherwise fall back to a classic private allocation.
359 void *smpi_shared_malloc(size_t size, const char *file, int line) {
360   if (size > 0 && smpi_cfg_shared_malloc() == SharedMallocType::LOCAL) {
361     return smpi_shared_malloc_local(size, file, line);
362   } else if (smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
// A single shared block covering [0, size): the entire buffer is shared between ranks.
363     int nb_shared_blocks = 1;
364     size_t shared_block_offsets[2] = {0, size};
365     return smpi_shared_malloc_partial(size, shared_block_offsets, nb_shared_blocks);
367   XBT_DEBUG("Classic allocation of %zu bytes", size);
368   return ::operator new(size);
// Tells whether `ptr' belongs to a shared allocation. On success, fills `private_blocks' with
// the allocation's non-shared ranges and `*offset' with ptr's offset inside the allocation.
// NOTE(review): truncated excerpt — the return statements, the decrement of `low' for the
// interior-pointer case and several braces are not visible; comments describe visible lines only.
371 int smpi_is_shared(const void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset){
372   private_blocks.clear(); // being paranoid
373   if (allocs_metadata.empty())
375   if (smpi_cfg_shared_malloc() == SharedMallocType::LOCAL || smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
// lower_bound: the first recorded mapping whose base address is >= ptr.
376     auto low = allocs_metadata.lower_bound(ptr);
// Exact hit: ptr is the base of a recorded mapping.
377     if (low != allocs_metadata.end() && low->first == ptr) {
378       private_blocks = low->second.private_blocks;
382     if (low == allocs_metadata.begin())
// Interior pointer: check whether ptr falls inside the preceding mapping's extent.
385     if (ptr < (char*)low->first + low->second.size) {
386       xbt_assert(ptr > (char*)low->first, "Oops, there seems to be a bug in the shared memory metadata.");
387       *offset = ((uint8_t*)ptr) - ((uint8_t*) low->first);
388       private_blocks = low->second.private_blocks;
/** @brief Shift the private blocks of a shared buffer by `offset' and clip them to [0, buff_size).
 *
 * Used when only the sub-range [offset, offset + buff_size) of a shared allocation is
 * communicated: each (start, stop) private block is translated into the coordinates of that
 * sub-range and clamped to its bounds. Blocks falling entirely outside the window are dropped.
 *
 * Fix: the previous code computed std::max((size_t)0, block.first - offset); with unsigned
 * arithmetic the subtraction wraps around when block.first < offset, making the max() a no-op,
 * so a private block straddling the window start was dropped instead of being clipped to 0.
 * The shift is now done with a saturating subtraction.
 *
 * @param vec       private blocks of the whole allocation, sorted and non-overlapping
 * @param offset    start of the communicated window inside the allocation (bytes)
 * @param buff_size size of the communicated window (bytes)
 * @return the private blocks expressed in window coordinates, clipped to [0, buff_size)
 */
std::vector<std::pair<size_t, size_t>> shift_and_frame_private_blocks(const std::vector<std::pair<size_t, size_t>>& vec,
                                                                      size_t offset, size_t buff_size)
{
  std::vector<std::pair<size_t, size_t>> result;
  for (auto const& block : vec) {
    // Saturating shift: anything before `offset' maps to 0 instead of wrapping around.
    size_t new_start = block.first <= offset ? 0 : block.first - offset;
    size_t new_stop  = block.second <= offset ? 0 : block.second - offset;
    auto new_block   = std::make_pair(std::min(new_start, buff_size), std::min(new_stop, buff_size));
    // Keep only blocks that intersect the window (non-empty after clipping).
    if (new_block.second > 0 && new_block.first < buff_size)
      result.push_back(new_block);
  }
  return result;
}
// Computes the intersection of two sorted private-block lists: a byte is private for the merged
// result only if it is private on both sides. Classic two-pointer sweep over src and dst.
// NOTE(review): truncated excerpt — the index declarations/increments, the bodies of the two
// disjoint-block branches and the final return are not visible; comments cover visible lines only.
410 std::vector<std::pair<size_t, size_t>> merge_private_blocks(const std::vector<std::pair<size_t, size_t>>& src,
411 const std::vector<std::pair<size_t, size_t>>& dst)
413   std::vector<std::pair<size_t, size_t>> result;
416   while(i_src < src.size() && i_dst < dst.size()) {
417     std::pair<size_t, size_t> block;
// src block entirely before the dst block: no intersection, advance src.
418     if(src[i_src].second <= dst[i_dst].first) {
// dst block entirely before the src block: no intersection, advance dst.
421     else if(dst[i_dst].second <= src[i_src].first) {
424     else { // src.second > dst.first && dst.second > src.first → the blocks are overlapping
// Emit the overlap and advance whichever block finishes first.
425       block = std::make_pair(std::max(src[i_src].first, dst[i_dst].first),
426                              std::min(src[i_src].second, dst[i_dst].second));
427       result.push_back(block);
428       if(src[i_src].second < dst[i_dst].second)
// Frees a pointer obtained from smpi_shared_malloc()/the interceptors, undoing whichever
// allocation flavor produced it (LOCAL shm segment, GLOBAL folded mapping, or plain new).
// NOTE(review): truncated excerpt — several braces, returns and the decrement of data->count in
// the LOCAL branch are not visible; comments document visible lines only.
437 void smpi_shared_free(void *ptr)
439   if (smpi_cfg_shared_malloc() == SharedMallocType::LOCAL) {
440     auto meta = allocs_metadata.find(ptr);
// Unknown pointer: it was allocated below the auto-shared threshold with operator new.
441     if (meta == allocs_metadata.end()) {
442       ::operator delete(ptr);
445     shared_data_t* data = &meta->second.data->second;
446     if (munmap(meta->second.allocated_ptr, meta->second.allocated_size) < 0) {
447       XBT_WARN("Unmapping of fd %d failed: %s", data->fd, strerror(errno));
// Last reference: drop the allocs entry (closing the segment) and the metadata.
450     if (data->count <= 0) {
452       allocs.erase(allocs.find(meta->second.data->first));
453       allocs_metadata.erase(ptr);
454       XBT_DEBUG("Shared free - Local - with removal - of %p", ptr);
456       XBT_DEBUG("Shared free - Local - no removal - of %p, count = %d", ptr, data->count);
459   } else if (smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
460     auto meta = allocs_metadata.find(ptr);
461     if (meta != allocs_metadata.end()){
462       meta->second.data->second.count--;
463       XBT_DEBUG("Shared free - Global - of %p", ptr);
464       munmap(ptr, meta->second.size);
// Last reference: free the heap-allocated bookkeeping created by smpi_shared_malloc_partial.
465       if(meta->second.data->second.count==0){
466         delete meta->second.data;
467         allocs_metadata.erase(ptr);
// Not a recorded GLOBAL mapping: plain operator new allocation.
470     ::operator delete(ptr);
// Shared malloc disabled: everything came from operator new.
475     XBT_DEBUG("Classic deallocation of %p", ptr);
476     ::operator delete(ptr);
481 int smpi_shared_known_call(const char* func, const char* input)
483 std::string loc = std::string(func) + ":" + input;
484 return calls.find(loc) != calls.end();
487 void* smpi_shared_get_call(const char* func, const char* input) {
488 std::string loc = std::string(func) + ":" + input;
490 return calls.at(loc);
493 void* smpi_shared_set_call(const char* func, const char* input, void* data) {
494 std::string loc = std::string(func) + ":" + input;