X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/cb746c8d5f7015456bbfd78b4ae303142599aabe..e4ecb51dcdf597fb02340d7855dafd0da9bd9018:/src/smpi/internals/smpi_shared.cpp

diff --git a/src/smpi/internals/smpi_shared.cpp b/src/smpi/internals/smpi_shared.cpp
index 9bf2045811..42108cd182 100644
--- a/src/smpi/internals/smpi_shared.cpp
+++ b/src/smpi/internals/smpi_shared.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007, 2009-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2023. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -33,20 +33,22 @@
  * \ | |
  *  ----
  */
 
-#include 
+#include 
+#include 
 #include 
+#include 
 
 #include "private.hpp"
+#include "xbt/config.hpp"
+#include "xbt/file.hpp"
+
 #include 
-#include 
-#ifndef WIN32
+#include "smpi_utils.hpp"
+#include 
 #include 
-#endif
-#include 
-#include 
-#include 
-
+#include 
+#include 
 
 #ifndef MAP_ANONYMOUS
 #define MAP_ANONYMOUS MAP_ANON
 #endif
@@ -57,9 +59,7 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_shared, smpi, "Logging specific to SMPI (shared memory macros)");
 
-#define PTR_STRLEN (2 + 2 * sizeof(void*) + 1)
-
-namespace{
+namespace {
 /** Some location in the source code
  *
  * This information is used by SMPI_SHARED_MALLOC to allocate some shared memory for all simulated processes.
@@ -79,7 +79,7 @@ struct shared_data_t {
 };
 
 std::unordered_map> allocs;
-typedef decltype(allocs)::value_type shared_data_key_type;
+using shared_data_key_type = decltype(allocs)::value_type;
 
 struct shared_metadata_t {
   size_t size;
@@ -89,16 +89,13 @@
   shared_data_key_type* data;
 };
 
-std::map allocs_metadata;
-std::map calls;
-
-#ifndef WIN32
-static int smpi_shared_malloc_bogusfile = -1;
-static int smpi_shared_malloc_bogusfile_huge_page = -1;
-static unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
-#endif
-}
+std::map allocs_metadata;
+std::map> calls;
+int smpi_shared_malloc_bogusfile = -1;
+int smpi_shared_malloc_bogusfile_huge_page = -1;
+unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
+} // namespace
 
 void smpi_shared_destroy()
 {
@@ -107,36 +104,14 @@
   calls.clear();
 }
 
-static size_t shm_size(int fd) {
-  struct stat st;
-
-  if(fstat(fd, &st) < 0) {
-    xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
-  }
-  return static_cast(st.st_size);
-}
-
-#ifndef WIN32
-static void* shm_map(int fd, size_t size, shared_data_key_type* data) {
-  char loc[PTR_STRLEN];
+static void* shm_map(int fd, size_t size, shared_data_key_type* data)
+{
+  void* mem = smpi_temp_shm_mmap(fd, size);
   shared_metadata_t meta;
-
-  if(size > shm_size(fd) && (ftruncate(fd, static_cast(size)) < 0)) {
-    xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));
-  }
-
-  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-  if(mem == MAP_FAILED) {
-    xbt_die(
-        "Failed to map fd %d with size %zu: %s\n"
-        "If you are running a lot of ranks, you may be exceeding the amount of mappings allowed per process.\n"
-        "On Linux systems, change this value with sudo sysctl -w vm.max_map_count=newvalue (default value: 65536)\n"
-        "Please see http://simgrid.gforge.inria.fr/simgrid/latest/doc/html/options.html#options_virt for more info.",
-        fd, size, strerror(errno));
-  }
-  snprintf(loc, PTR_STRLEN, "%p", mem);
   meta.size = size;
   meta.data = data;
+  meta.allocated_ptr = mem;
+  meta.allocated_size = size;
   allocs_metadata[mem] = meta;
   XBT_DEBUG("MMAP %zu to %p", size, mem);
   return mem;
@@ -146,41 +121,27 @@ static void *smpi_shared_malloc_local(size_t size, const char *file, int line)
 {
   void* mem;
   smpi_source_location loc(file, line);
-  auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
-  auto data = res.first;
-  if (res.second) {
+  auto [data, inserted] = allocs.try_emplace(loc);
+  if (inserted) {
     // The new element was inserted.
-    // Generate a shared memory name from the address of the shared_data:
-    char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on Mac OS X (shm_open raises ENAMETOOLONG otherwise)
-    snprintf(shmname, 31, "/shmalloc%p", &*data);
-    int fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-    if (fd < 0) {
-      if (errno == EEXIST)
-        xbt_die("Please cleanup /dev/shm/%s", shmname);
-      else
-        xbt_die("An unhandled error occurred while opening %s. shm_open: %s", shmname, strerror(errno));
-    }
-    data->second.fd = fd;
+    int fd = smpi_temp_shm_get();
+    data->second.fd = fd;
     data->second.count = 1;
     mem = shm_map(fd, size, &*data);
-    if (shm_unlink(shmname) < 0) {
-      XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
-    }
-    XBT_DEBUG("Mapping %s at %p through %d", shmname, mem, fd);
   } else {
     mem = shm_map(data->second.fd, size, &*data);
    data->second.count++;
   }
-  XBT_DEBUG("Shared malloc %zu in %p (metadata at %p)", size, mem, &*data);
+  XBT_DEBUG("Shared malloc %zu in %p through %d (metadata at %p)", size, mem, data->second.fd, &*data);
   return mem;
 }
 
 // Align functions, from http://stackoverflow.com/questions/4840410/how-to-align-a-pointer-in-c
-#define PAGE_SIZE 0x1000
-#define ALIGN_UP(n, align) (((n) + (align)-1) & -(align))
-#define ALIGN_DOWN(n, align) ((n) & -(align))
+#define ALIGN_UP(n, align) (((int64_t)(n) + (int64_t)(align) - 1) & -(int64_t)(align))
+#define ALIGN_DOWN(n, align) ((int64_t)(n) & -(int64_t)(align))
 
-#define HUGE_PAGE_SIZE 1<<21
+constexpr unsigned PAGE_SIZE = 0x1000;
+constexpr unsigned HUGE_PAGE_SIZE = 1U << 21;
 
 /* Similar to smpi_shared_malloc, but only sharing the blocks described by shared_block_offsets.
  * This array contains the offsets (in bytes) of the block to share.
@@ -189,16 +150,16 @@ static void *smpi_shared_malloc_local(size_t size, const char *file, int line)
  * The others are not.
  */
-void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks)
+void* smpi_shared_malloc_partial(size_t size, const size_t* shared_block_offsets, int nb_shared_blocks)
 {
-  std::string huge_page_mount_point = xbt_cfg_get_string("smpi/shared-malloc-hugepage");
+  std::string huge_page_mount_point = simgrid::config::get_value("smpi/shared-malloc-hugepage");
   bool use_huge_page = not huge_page_mount_point.empty();
 #ifndef MAP_HUGETLB /* If the system header don't define that mmap flag */
   xbt_assert(not use_huge_page, "Huge pages are not available on your system, you cannot use the smpi/shared-malloc-hugepage option.");
-  use_huge_page = 0;
 #endif
-  smpi_shared_malloc_blocksize = static_cast(xbt_cfg_get_double("smpi/shared-malloc-blocksize"));
+  smpi_shared_malloc_blocksize =
+      static_cast(simgrid::config::get_value("smpi/shared-malloc-blocksize"));
   void* mem;
   size_t allocated_size;
   if(use_huge_page) {
@@ -212,13 +173,13 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
 
   /* First reserve memory area */
-  void* allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  void* allocated_ptr = mmap(nullptr, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 
   xbt_assert(allocated_ptr != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
                                           "to allow big allocations.\n",
              size >> 20);
 
   if(use_huge_page)
-    mem = (void*)ALIGN_UP((uint64_t)allocated_ptr, HUGE_PAGE_SIZE);
+    mem = (void*)ALIGN_UP(allocated_ptr, HUGE_PAGE_SIZE);
   else
     mem = allocated_ptr;
@@ -242,11 +203,8 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
     smpi_shared_malloc_bogusfile = mkstemp(name);
     XBT_DEBUG("bogusfile : %s\n", name);
     unlink(name);
-    char* dumb = new char[smpi_shared_malloc_blocksize](); // zero initialized
-    ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
-    if(err<0)
-      xbt_die("Could not write bogus file for shared malloc");
-    delete[] dumb;
+    xbt_assert(ftruncate(smpi_shared_malloc_bogusfile, smpi_shared_malloc_blocksize) == 0,
+               "Could not write bogus file for shared malloc");
   }
 
   int mmap_base_flag = MAP_FIXED | MAP_SHARED | MAP_POPULATE;
@@ -273,25 +231,25 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
     size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
     for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
       XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
-      void* pos = (void*)((unsigned long)mem + offset);
-      void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag,
-                       huge_fd, 0);
+      void* pos = static_cast(mem) + offset;
+      const void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag, huge_fd, 0);
       xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
-                             "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ? "
+                             "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ? "
                              "You can also try using the sysctl vm.max_map_count. "
                             "If you are using huge pages, check that you have at least one huge page (/proc/sys/vm/nr_hugepages) "
                             "and that the directory you are passing is mounted correctly (mount /path/to/huge -t hugetlbfs -o rw,mode=0777).",
                 strerror(errno));
     }
 
    size_t low_page_start_offset = ALIGN_UP(start_offset, PAGE_SIZE);
-    size_t low_page_stop_offset = start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
+    size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
    if(low_page_start_offset < low_page_stop_offset) {
      XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
-      void* pos = (void*)((unsigned long)mem + low_page_start_offset);
-      void* res = mmap(pos, low_page_stop_offset-low_page_start_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
-                       smpi_shared_malloc_bogusfile, 0);
+      void* pos = static_cast(mem) + low_page_start_offset;
+      const void* res = mmap(pos, low_page_stop_offset - low_page_start_offset, PROT_READ | PROT_WRITE,
+                             mmap_base_flag, // not a full huge page
+                             smpi_shared_malloc_bogusfile, 0);
      xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
-                             "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+                             "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
                             "You can also try using the sysctl vm.max_map_count",
                 strerror(errno));
    }
@@ -299,11 +257,12 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
      XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
      size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
      if(high_page_stop_offset > stop_block_offset) {
-        void* pos = (void*)((unsigned long)mem + stop_block_offset);
-        void* res = mmap(pos, high_page_stop_offset-stop_block_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
-                         smpi_shared_malloc_bogusfile, 0);
+        void* pos = static_cast(mem) + stop_block_offset;
+        const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE,
+                               mmap_base_flag, // not a full huge page
+                               smpi_shared_malloc_bogusfile, 0);
        xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
-                               "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+                               "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
                               "You can also try using the sysctl vm.max_map_count",
                   strerror(errno));
      }
@@ -312,7 +271,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
 
   shared_metadata_t newmeta;
   //register metadata for memcpy avoidance
-  shared_data_key_type* data = new shared_data_key_type;
+  auto* data = new shared_data_key_type;
   data->second.fd = -1;
   data->second.count = 1;
   newmeta.size = size;
@@ -320,14 +279,14 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
   newmeta.allocated_ptr = allocated_ptr;
   newmeta.allocated_size = allocated_size;
   if(shared_block_offsets[0] > 0) {
-    newmeta.private_blocks.push_back(std::make_pair(0, shared_block_offsets[0]));
+    newmeta.private_blocks.emplace_back(0, shared_block_offsets[0]);
   }
   int i_block;
   for(i_block = 0; i_block < nb_shared_blocks-1; i_block ++) {
-    newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], shared_block_offsets[2*i_block+2]));
+    newmeta.private_blocks.emplace_back(shared_block_offsets[2 * i_block + 1], shared_block_offsets[2 * i_block + 2]);
   }
   if(shared_block_offsets[2*i_block+1] < size) {
-    newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], size));
+    newmeta.private_blocks.emplace_back(shared_block_offsets[2 * i_block + 1], size);
   }
   allocs_metadata[mem] = newmeta;
@@ -337,23 +296,79 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
   return mem;
 }
 
-void *smpi_shared_malloc(size_t size, const char *file, int line) {
-  if (size > 0 && smpi_cfg_shared_malloc == shmalloc_local) {
+void* smpi_shared_malloc_intercept(size_t size, const char* file, int line)
+{
+  if( smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh()){
+    void* ptr = xbt_malloc(size);
+    if(not smpi_cfg_trace_call_use_absolute_path())
+      simgrid::smpi::utils::account_malloc_size(size, simgrid::xbt::Path(file).get_base_name(), line, ptr);
+    else
+      simgrid::smpi::utils::account_malloc_size(size, file, line, ptr);
+    return ptr;
+  } else {
+    simgrid::smpi::utils::account_shared_size(size);
+    return smpi_shared_malloc(size, file, line);
+  }
+}
+
+void* smpi_shared_calloc_intercept(size_t num_elm, size_t elem_size, const char* file, int line)
+{
+  size_t size = elem_size * num_elm;
+  if (smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh()) {
+    void* ptr = xbt_malloc0(size);
+    if(not smpi_cfg_trace_call_use_absolute_path())
+      simgrid::smpi::utils::account_malloc_size(size, simgrid::xbt::Path(file).get_base_name(), line, ptr);
+    else
+      simgrid::smpi::utils::account_malloc_size(size, file, line, ptr);
+    return ptr;
+  } else {
+    simgrid::smpi::utils::account_shared_size(size);
+    return memset(smpi_shared_malloc(size, file, line), 0, size);
+  }
+}
+
+void* smpi_shared_realloc_intercept(void* data, size_t size, const char* file, int line)
+{
+  if (size == 0) {
+    smpi_shared_free(data);
+    return nullptr;
+  }
+  if (data == nullptr)
+    return smpi_shared_malloc_intercept(size, file, line);
+
+  auto meta = allocs_metadata.find(data);
+  if (meta == allocs_metadata.end()) {
+    XBT_DEBUG("Classical realloc(%p, %zu)", data, size);
+    return xbt_realloc(data, size);
+  }
+
+  XBT_DEBUG("Shared realloc(%p, %zu) (old size: %zu)", data, size, meta->second.size);
+  void* ptr = smpi_shared_malloc_intercept(size, file, line);
+  if (ptr != data) {
+    memcpy(ptr, data, std::min(size, meta->second.size));
+    smpi_shared_free(data);
+  }
+  return ptr;
+}
+
+void* smpi_shared_malloc(size_t size, const char* file, int line)
+{
+  if (size > 0 && smpi_cfg_shared_malloc() == SharedMallocType::LOCAL) {
     return smpi_shared_malloc_local(size, file, line);
-  } else if (smpi_cfg_shared_malloc == shmalloc_global) {
+  } else if (smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
     int nb_shared_blocks = 1;
-    size_t shared_block_offsets[2] = {0, size};
-    return smpi_shared_malloc_partial(size, shared_block_offsets, nb_shared_blocks);
+    const std::array shared_block_offsets = {{0, size}};
+    return smpi_shared_malloc_partial(size, shared_block_offsets.data(), nb_shared_blocks);
   }
   XBT_DEBUG("Classic allocation of %zu bytes", size);
-  return ::operator new(size);
+  return xbt_malloc(size);
 }
 
-int smpi_is_shared(void* ptr, std::vector> &private_blocks, size_t *offset){
+int smpi_is_shared(const void* ptr, std::vector> &private_blocks, size_t *offset){
   private_blocks.clear(); // being paranoid
   if (allocs_metadata.empty())
     return 0;
-  if ( smpi_cfg_shared_malloc == shmalloc_local || smpi_cfg_shared_malloc == shmalloc_global) {
+  if (smpi_cfg_shared_malloc() == SharedMallocType::LOCAL || smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
     auto low = allocs_metadata.lower_bound(ptr);
     if (low != allocs_metadata.end() && low->first == ptr) {
       private_blocks = low->second.private_blocks;
@@ -375,18 +390,22 @@ int smpi_is_shared(void* ptr, std::vector> &private_bl
   }
 }
 
-std::vector> shift_and_frame_private_blocks(const std::vector> vec, size_t offset, size_t buff_size) {
-  std::vector> result;
-  for (auto const& block : vec) {
-    auto new_block = std::make_pair(std::min(std::max((size_t)0, block.first - offset), buff_size),
-                                    std::min(std::max((size_t)0, block.second - offset), buff_size));
-    if (new_block.second > 0 && new_block.first < buff_size)
-      result.push_back(new_block);
-  }
-  return result;
+std::vector> shift_and_frame_private_blocks(const std::vector>& vec,
+                                            size_t offset, size_t buff_size)
+{
+  std::vector> result;
+  for (auto const& [block_begin, block_end] : vec) {
+    auto new_block = std::make_pair(std::clamp(block_begin - offset, (size_t)0, buff_size),
+                                    std::clamp(block_end - offset, (size_t)0, buff_size));
+    if (new_block.second > 0 && new_block.first < buff_size)
+      result.push_back(new_block);
+  }
+  return result;
 }
 
-std::vector> merge_private_blocks(std::vector> src, std::vector> dst) {
+std::vector> merge_private_blocks(const std::vector>& src,
+                                  const std::vector>& dst)
+{
   std::vector> result;
   unsigned i_src = 0;
   unsigned i_dst = 0;
@@ -413,12 +432,11 @@ std::vectorsecond.data->second;
@@ -429,27 +447,32 @@ void smpi_shared_free(void *ptr)
     if (data->count <= 0) {
       close(data->fd);
       allocs.erase(allocs.find(meta->second.data->first));
-      allocs_metadata.erase(ptr);
-      XBT_DEBUG("Shared free - with removal - of %p", ptr);
+      allocs_metadata.erase(meta);
+      XBT_DEBUG("Shared free - Local - with removal - of %p", ptr);
     } else {
-      XBT_DEBUG("Shared free - no removal - of %p, count = %d", ptr, data->count);
+      XBT_DEBUG("Shared free - Local - no removal - of %p, count = %d", ptr, data->count);
     }
-  } else if (smpi_cfg_shared_malloc == shmalloc_global) {
+  } else if (smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
     auto meta = allocs_metadata.find(ptr);
     if (meta != allocs_metadata.end()){
       meta->second.data->second.count--;
-      if(meta->second.data->second.count==0)
+      XBT_DEBUG("Shared free - Global - of %p", ptr);
+      munmap(ptr, meta->second.size);
+      if(meta->second.data->second.count==0){
        delete meta->second.data;
+        allocs_metadata.erase(meta);
+      }
+    }else{
+      xbt_free(ptr);
+      return;
    }
-    munmap(ptr, meta->second.size);
   } else {
     XBT_DEBUG("Classic deallocation of %p", ptr);
-    ::operator delete(ptr);
+    xbt_free(ptr);
   }
 }
 
-#endif
 
 int smpi_shared_known_call(const char* func, const char* input)
 {
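
Usage note (not part of the diff): the functions changed above back SMPI's shared-malloc macros. The sketch below shows how a simulated MPI application would typically exercise this code path; it assumes the SMPI_SHARED_MALLOC / SMPI_SHARED_FREE macros from <smpi/smpi.h>, which are expected to route into smpi_shared_malloc()/smpi_shared_free() as modified here, and the smpi/shared-malloc options referenced in the error messages of this patch.

/* Sketch only -- illustrative, not taken from the SimGrid sources. */
#include <mpi.h>
#include <smpi/smpi.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  /* Large buffer whose content is irrelevant to the simulated timing.
   * With --cfg=smpi/shared-malloc:global, every rank's allocation is folded
   * onto the same physical pages (the "bogus file" mmap'ed block by block in
   * smpi_shared_malloc_partial above), so host RAM usage stays roughly
   * bounded by smpi/shared-malloc-blocksize instead of growing per rank. */
  size_t count = 1000 * 1000;
  double* buf  = (double*)SMPI_SHARED_MALLOC(count * sizeof(double));

  MPI_Bcast(buf, (int)count, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  if (rank == 0)
    printf("broadcast of %zu doubles done\n", count);

  SMPI_SHARED_FREE(buf);
  MPI_Finalize();
  return 0;
}

Such a program would normally be built with smpicc and launched through smpirun with something like --cfg=smpi/shared-malloc:global (exact flags and platform/hostfile arguments depend on the SimGrid version and setup).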