/* Copyright (c) 2007-2021. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#include "private.hpp"
#include "smpi_utils.hpp"
#include "xbt/config.hpp"
#include "xbt/file.hpp"

#include <algorithm>
#include <array>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <map>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unordered_map>
#include <utility>
#include <vector>

#ifndef WIN32
#include <sys/mman.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_shared, smpi, "Logging specific to SMPI (shared memory macros)");

/* "0x" + hex digits of a pointer + NUL. Kept although the visible code no longer
 * uses it — parts of this file are not visible in this chunk. */
#define PTR_STRLEN (2 + 2 * sizeof(void*) + 1)
namespace{
/** Some location in the source code
 *
 * NOTE(review): the definition this doc-comment belongs to appears to be missing
 * from this chunk — judging by its use as the key type of 'allocs' below, it is
 * the struct/class smpi_source_location (a std::string-hashable source position).
 * Confirm against the full file / version-control history.
 */
};
std::unordered_map<smpi_source_location, shared_data_t, std::hash<std::string>> allocs;
-typedef decltype(allocs)::value_type shared_data_key_type;
+using shared_data_key_type = decltype(allocs)::value_type;
/* Metadata kept for every shared mapping, used to answer smpi_is_shared() and
 * to unmap/free correctly.
 * Defect fixed: the struct was missing the members that this very file assigns
 * and reads (allocated_ptr/allocated_size in shm_map and
 * smpi_shared_malloc_partial, private_blocks in smpi_is_shared). */
struct shared_metadata_t {
  size_t size;                                           // size requested by the user
  size_t allocated_size;                                 // size of the underlying mapping
  void* allocated_ptr;                                   // base address returned by mmap
  std::vector<std::pair<size_t, size_t>> private_blocks; // [begin,end) offsets NOT covered by shared blocks
  shared_data_key_type* data;                            // back-pointer into 'allocs' (heap-allocated in global mode)
};
-std::map<void*, shared_metadata_t> allocs_metadata;
-std::map<std::string, void*> calls;
+std::map<const void*, shared_metadata_t> allocs_metadata;
+std::map<std::string, void*, std::less<>> calls;
#ifndef WIN32
-static int smpi_shared_malloc_bogusfile = -1;
-static int smpi_shared_malloc_bogusfile_huge_page = -1;
-static unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
+int smpi_shared_malloc_bogusfile = -1;
+int smpi_shared_malloc_bogusfile_huge_page = -1;
+unsigned long smpi_shared_malloc_blocksize = 1UL << 20;
#endif
}
-
/** @brief Drop all shared-allocation and shared-call bookkeeping (SMPI teardown). */
void smpi_shared_destroy()
{
  allocs.clear();
  calls.clear();
}
-static size_t shm_size(int fd) {
- struct stat st;
-
- if(fstat(fd, &st) < 0) {
- xbt_die("Could not stat fd %d: %s", fd, strerror(errno));
- }
- return static_cast<size_t>(st.st_size);
-}
-
#ifndef WIN32
-static void* shm_map(int fd, size_t size, shared_data_key_type* data) {
- char loc[PTR_STRLEN];
+static void* shm_map(int fd, size_t size, shared_data_key_type* data)
+{
+ void* mem = smpi_temp_shm_mmap(fd, size);
shared_metadata_t meta;
-
- if(size > shm_size(fd) && (ftruncate(fd, static_cast<off_t>(size)) < 0)) {
- xbt_die("Could not truncate fd %d to %zu: %s", fd, size, strerror(errno));
- }
-
- void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- if(mem == MAP_FAILED) {
- xbt_die(
- "Failed to map fd %d with size %zu: %s\n"
- "If you are running a lot of ranks, you may be exceeding the amount of mappings allowed per process.\n"
- "On Linux systems, change this value with sudo sysctl -w vm.max_map_count=newvalue (default value: 65536)\n"
- "Please see http://simgrid.gforge.inria.fr/simgrid/latest/doc/html/options.html#options_virt for more info.",
- fd, size, strerror(errno));
- }
- snprintf(loc, PTR_STRLEN, "%p", mem);
meta.size = size;
meta.data = data;
+ meta.allocated_ptr = mem;
+ meta.allocated_size = size;
allocs_metadata[mem] = meta;
XBT_DEBUG("MMAP %zu to %p", size, mem);
return mem;
auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
auto data = res.first;
if (res.second) {
- // The insertion did not take place.
- // Generate a shared memory name from the address of the shared_data:
- char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on Mac OS X (shm_open raises ENAMETOOLONG otherwise)
- snprintf(shmname, 31, "/shmalloc%p", &*data);
- int fd = shm_open(shmname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
- if (fd < 0) {
- if (errno == EEXIST)
- xbt_die("Please cleanup /dev/shm/%s", shmname);
- else
- xbt_die("An unhandled error occurred while opening %s. shm_open: %s", shmname, strerror(errno));
- }
- data->second.fd = fd;
+ // The new element was inserted.
+ int fd = smpi_temp_shm_get();
+ data->second.fd = fd;
data->second.count = 1;
mem = shm_map(fd, size, &*data);
- if (shm_unlink(shmname) < 0) {
- XBT_WARN("Could not early unlink %s. shm_unlink: %s", shmname, strerror(errno));
- }
- XBT_DEBUG("Mapping %s at %p through %d", shmname, mem, fd);
} else {
mem = shm_map(data->second.fd, size, &*data);
data->second.count++;
}
- XBT_DEBUG("Shared malloc %zu in %p (metadata at %p)", size, mem, &*data);
+ XBT_DEBUG("Shared malloc %zu in %p through %d (metadata at %p)", size, mem, data->second.fd, &*data);
return mem;
}
// Align functions, from http://stackoverflow.com/questions/4840410/how-to-align-a-pointer-in-c
// Arguments go through int64_t so the macros accept pointers as well as sizes;
// 'align' must be a power of two for the mask trick (& -align) to be valid.
#define ALIGN_UP(n, align) (((int64_t)(n) + (int64_t)(align) - 1) & -(int64_t)(align))
#define ALIGN_DOWN(n, align) ((int64_t)(n) & -(int64_t)(align))

constexpr unsigned PAGE_SIZE      = 0x1000;   // 4 KiB: classic x86 page
constexpr unsigned HUGE_PAGE_SIZE = 1U << 21; // 2 MiB: x86-64 huge page
/* Similar to smpi_shared_malloc, but only sharing the blocks described by shared_block_offsets.
 * This array contains the offsets (in bytes) of the block to share.
 * The others are not.
 */
// NOTE(review): this function still contains unresolved diff markers ('-'/'+')
// and several interior spans appear to be missing (flagged below). Resolve
// against version-control history before building; do not hand-edit blindly.
-void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks)
+void* smpi_shared_malloc_partial(size_t size, const size_t* shared_block_offsets, int nb_shared_blocks)
{
- std::string huge_page_mount_point = xbt_cfg_get_string("smpi/shared-malloc-hugepage");
+ std::string huge_page_mount_point = simgrid::config::get_value<std::string>("smpi/shared-malloc-hugepage");
bool use_huge_page = not huge_page_mount_point.empty();
#ifndef MAP_HUGETLB /* If the system header don't define that mmap flag */
xbt_assert(not use_huge_page,
"Huge pages are not available on your system, you cannot use the smpi/shared-malloc-hugepage option.");
- use_huge_page = 0;
#endif
- smpi_shared_malloc_blocksize = static_cast<unsigned long>(xbt_cfg_get_double("smpi/shared-malloc-blocksize"));
+ smpi_shared_malloc_blocksize =
+ static_cast<unsigned long>(simgrid::config::get_value<double>("smpi/shared-malloc-blocksize"));
void* mem;
size_t allocated_size;
if(use_huge_page) {
// NOTE(review): the branch bodies computing 'allocated_size' (huge-page vs
// regular case) appear to be missing here — it is read uninitialized below.
/* First reserve memory area */
- void* allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ void* allocated_ptr = mmap(nullptr, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
xbt_assert(allocated_ptr != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
"to allow big allocations.\n",
size >> 20);
if(use_huge_page)
- mem = (void*)ALIGN_UP((uint64_t)allocated_ptr, HUGE_PAGE_SIZE);
+ mem = (void*)ALIGN_UP(allocated_ptr, HUGE_PAGE_SIZE);
else
mem = allocated_ptr;
// NOTE(review): missing here: declaration of 'name' (the mkstemp template)
// and the surrounding "create bogus file once" logic.
smpi_shared_malloc_bogusfile = mkstemp(name);
XBT_DEBUG("bogusfile : %s\n", name);
unlink(name);
- char* dumb = new char[smpi_shared_malloc_blocksize](); // zero initialized
- ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
- if(err<0)
- xbt_die("Could not write bogus file for shared malloc");
- delete[] dumb;
+ xbt_assert(ftruncate(smpi_shared_malloc_bogusfile, smpi_shared_malloc_blocksize) == 0,
+ "Could not write bogus file for shared malloc");
}
int mmap_base_flag = MAP_FIXED | MAP_SHARED | MAP_POPULATE;
// NOTE(review): missing here: the per-shared-block loop header defining
// start_offset/stop_offset, and the xbt_assert this message belongs to.
"stop_offset (%zu) should be lower than its successor start offset (%zu)", stop_offset, shared_block_offsets[2*i_block+2]);
size_t start_block_offset = ALIGN_UP(start_offset, smpi_shared_malloc_blocksize);
size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
- for (unsigned block_id=0, i = start_block_offset / smpi_shared_malloc_blocksize; i < stop_block_offset / smpi_shared_malloc_blocksize; block_id++, i++) {
- XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %u", block_id);
- void* pos = (void*)((unsigned long)mem + i * smpi_shared_malloc_blocksize);
- void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag,
- huge_fd, 0);
+ for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
+ XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
+ void* pos = static_cast<char*>(mem) + offset;
+ const void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag, huge_fd, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
- "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ? "
+ "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ? "
"You can also try using the sysctl vm.max_map_count. "
"If you are using huge pages, check that you have at least one huge page (/proc/sys/vm/nr_hugepages) "
"and that the directory you are passing is mounted correctly (mount /path/to/huge -t hugetlbfs -o rw,mode=0777).",
strerror(errno));
}
size_t low_page_start_offset = ALIGN_UP(start_offset, PAGE_SIZE);
- size_t low_page_stop_offset = start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
+ size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
if(low_page_start_offset < low_page_stop_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
- void* pos = (void*)((unsigned long)mem + low_page_start_offset);
- void* res = mmap(pos, low_page_stop_offset-low_page_start_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
- smpi_shared_malloc_bogusfile, 0);
+ void* pos = static_cast<char*>(mem) + low_page_start_offset;
+ const void* res = mmap(pos, low_page_stop_offset - low_page_start_offset, PROT_READ | PROT_WRITE,
+ mmap_base_flag, // not a full huge page
+ smpi_shared_malloc_bogusfile, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
- "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+ "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
"You can also try using the sysctl vm.max_map_count",
strerror(errno));
}
XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
if(high_page_stop_offset > stop_block_offset) {
- void* pos = (void*)((unsigned long)mem + stop_block_offset);
- void* res = mmap(pos, high_page_stop_offset-stop_block_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
- smpi_shared_malloc_bogusfile, 0);
+ void* pos = static_cast<char*>(mem) + stop_block_offset;
+ const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE,
+ mmap_base_flag, // not a full huge page
+ smpi_shared_malloc_bogusfile, 0);
xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
- "size of the mapped file using --cfg=smpi/shared-malloc-blocksize=newvalue (default 1048576) ?"
+ "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ?"
"You can also try using the sysctl vm.max_map_count",
strerror(errno));
}
// NOTE(review): missing here: the closing brace of the per-shared-block loop.
shared_metadata_t newmeta;
//register metadata for memcpy avoidance
- shared_data_key_type* data = new shared_data_key_type;
+ auto* data = new shared_data_key_type;
data->second.fd = -1;
data->second.count = 1;
newmeta.size = size;
newmeta.allocated_ptr = allocated_ptr;
newmeta.allocated_size = allocated_size;
// Record the ranges NOT covered by shared blocks as private blocks.
if(shared_block_offsets[0] > 0) {
- newmeta.private_blocks.push_back(std::make_pair(0, shared_block_offsets[0]));
+ newmeta.private_blocks.emplace_back(0, shared_block_offsets[0]);
}
int i_block;
for(i_block = 0; i_block < nb_shared_blocks-1; i_block ++) {
- newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], shared_block_offsets[2*i_block+2]));
+ newmeta.private_blocks.emplace_back(shared_block_offsets[2 * i_block + 1], shared_block_offsets[2 * i_block + 2]);
}
if(shared_block_offsets[2*i_block+1] < size) {
- newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], size));
+ newmeta.private_blocks.emplace_back(shared_block_offsets[2 * i_block + 1], size);
}
allocs_metadata[mem] = newmeta;
return mem;
}
-void *smpi_shared_malloc(size_t size, const char *file, int line) {
- if (size > 0 && smpi_cfg_shared_malloc == shmalloc_local) {
+void* smpi_shared_malloc_intercept(size_t size, const char* file, int line)
+{
+ if( smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh()){
+ void* ptr = xbt_malloc(size);
+ if(not smpi_cfg_trace_call_use_absolute_path())
+ simgrid::smpi::utils::account_malloc_size(size, simgrid::xbt::Path(file).get_base_name(), line, ptr);
+ else
+ simgrid::smpi::utils::account_malloc_size(size, file, line, ptr);
+ return ptr;
+ } else {
+ simgrid::smpi::utils::account_shared_size(size);
+ return smpi_shared_malloc(size, file, line);
+ }
+}
+
+void* smpi_shared_calloc_intercept(size_t num_elm, size_t elem_size, const char* file, int line)
+{
+ size_t size = elem_size * num_elm;
+ if (smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh()) {
+ void* ptr = xbt_malloc0(size);
+ if(not smpi_cfg_trace_call_use_absolute_path())
+ simgrid::smpi::utils::account_malloc_size(size, simgrid::xbt::Path(file).get_base_name(), line, ptr);
+ else
+ simgrid::smpi::utils::account_malloc_size(size, file, line, ptr);
+ return ptr;
+ } else {
+ simgrid::smpi::utils::account_shared_size(size);
+ return memset(smpi_shared_malloc(size, file, line), 0, size);
+ }
+}
+
+void* smpi_shared_realloc_intercept(void* data, size_t size, const char* file, int line)
+{
+ if (size == 0) {
+ smpi_shared_free(data);
+ return nullptr;
+ }
+ if (data == nullptr)
+ return smpi_shared_malloc_intercept(size, file, line);
+
+ auto meta = allocs_metadata.find(data);
+ if (meta == allocs_metadata.end()) {
+ XBT_DEBUG("Classical realloc(%p, %zu)", data, size);
+ return xbt_realloc(data, size);
+ }
+
+ XBT_DEBUG("Shared realloc(%p, %zu) (old size: %zu)", data, size, meta->second.size);
+ void* ptr = smpi_shared_malloc_intercept(size, file, line);
+ if (ptr != data) {
+ memcpy(ptr, data, std::min(size, meta->second.size));
+ smpi_shared_free(data);
+ }
+ return ptr;
+}
+
+void* smpi_shared_malloc(size_t size, const char* file, int line)
+{
+ if (size > 0 && smpi_cfg_shared_malloc() == SharedMallocType::LOCAL) {
return smpi_shared_malloc_local(size, file, line);
- } else if (smpi_cfg_shared_malloc == shmalloc_global) {
+ } else if (smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
int nb_shared_blocks = 1;
- size_t shared_block_offsets[2] = {0, size};
- return smpi_shared_malloc_partial(size, shared_block_offsets, nb_shared_blocks);
+ const std::array<size_t, 2> shared_block_offsets = {{0, size}};
+ return smpi_shared_malloc_partial(size, shared_block_offsets.data(), nb_shared_blocks);
}
XBT_DEBUG("Classic allocation of %zu bytes", size);
- return ::operator new(size);
+ return xbt_malloc(size);
}
// Tell whether 'ptr' belongs to a shared allocation; on success, 'private_blocks'
// receives the non-shared ranges of that allocation.
// NOTE(review): unresolved diff markers below, and the function tail (the code
// setting *offset, the return statements, and the handling of pointers landing
// strictly inside a mapping) appears to be truncated in this chunk — recover it
// from version-control history.
-int smpi_is_shared(void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset){
+int smpi_is_shared(const void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset){
private_blocks.clear(); // being paranoid
if (allocs_metadata.empty())
return 0;
- if ( smpi_cfg_shared_malloc == shmalloc_local || smpi_cfg_shared_malloc == shmalloc_global) {
+ if (smpi_cfg_shared_malloc() == SharedMallocType::LOCAL || smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
// Find the first mapping whose base address is >= ptr; exact hit means ptr is a mapping base.
auto low = allocs_metadata.lower_bound(ptr);
if (low != allocs_metadata.end() && low->first == ptr) {
private_blocks = low->second.private_blocks;
}
}
/** @brief Shift private-block ranges by 'offset' and clamp them to [0, buff_size).
 *
 *  Each [begin,end) range is translated by -offset and clipped to the buffer;
 *  ranges falling entirely outside the window are dropped. Note that size_t
 *  underflow (begin < offset) wraps to a huge value which the min() clamps to
 *  buff_size, so fully-before-the-window blocks are filtered by the final test.
 *  (Diff markers resolved: takes the vector by const reference.) */
std::vector<std::pair<size_t, size_t>> shift_and_frame_private_blocks(const std::vector<std::pair<size_t, size_t>>& vec,
                                                                      size_t offset, size_t buff_size)
{
  std::vector<std::pair<size_t, size_t>> result;
  for (auto const& block : vec) {
    auto new_block = std::make_pair(std::min(std::max((size_t)0, block.first - offset), buff_size),
                                    std::min(std::max((size_t)0, block.second - offset), buff_size));
    if (new_block.second > 0 && new_block.first < buff_size)
      result.push_back(new_block);
  }
  return result;
}
// Merge the private-block lists of a source and a destination buffer.
// NOTE(review): unresolved diff markers below, and the function body is
// truncated after the index declarations — the merge loop and return are
// missing from this chunk; recover them from version-control history.
-std::vector<std::pair<size_t, size_t>> merge_private_blocks(std::vector<std::pair<size_t, size_t>> src, std::vector<std::pair<size_t, size_t>> dst) {
+std::vector<std::pair<size_t, size_t>> merge_private_blocks(const std::vector<std::pair<size_t, size_t>>& src,
+ const std::vector<std::pair<size_t, size_t>>& dst)
+{
std::vector<std::pair<size_t, size_t>> result;
unsigned i_src = 0; // cursor into src
unsigned i_dst = 0; // cursor into dst
void smpi_shared_free(void *ptr)
{
- if (smpi_cfg_shared_malloc == shmalloc_local) {
- char loc[PTR_STRLEN];
- snprintf(loc, PTR_STRLEN, "%p", ptr);
+ simgrid::smpi::utils::account_free(ptr);
+ if (smpi_cfg_shared_malloc() == SharedMallocType::LOCAL) {
auto meta = allocs_metadata.find(ptr);
if (meta == allocs_metadata.end()) {
- XBT_WARN("Cannot free: %p was not shared-allocated by SMPI - maybe its size was 0?", ptr);
+ xbt_free(ptr);
return;
}
shared_data_t* data = &meta->second.data->second;
if (data->count <= 0) {
close(data->fd);
allocs.erase(allocs.find(meta->second.data->first));
- allocs_metadata.erase(ptr);
- XBT_DEBUG("Shared free - with removal - of %p", ptr);
+ allocs_metadata.erase(meta);
+ XBT_DEBUG("Shared free - Local - with removal - of %p", ptr);
} else {
- XBT_DEBUG("Shared free - no removal - of %p, count = %d", ptr, data->count);
+ XBT_DEBUG("Shared free - Local - no removal - of %p, count = %d", ptr, data->count);
}
- } else if (smpi_cfg_shared_malloc == shmalloc_global) {
+ } else if (smpi_cfg_shared_malloc() == SharedMallocType::GLOBAL) {
auto meta = allocs_metadata.find(ptr);
if (meta != allocs_metadata.end()){
meta->second.data->second.count--;
- if(meta->second.data->second.count==0)
+ XBT_DEBUG("Shared free - Global - of %p", ptr);
+ munmap(ptr, meta->second.size);
+ if(meta->second.data->second.count==0){
delete meta->second.data;
+ allocs_metadata.erase(meta);
+ }
+ }else{
+ xbt_free(ptr);
+ return;
}
- munmap(ptr, meta->second.size);
} else {
XBT_DEBUG("Classic deallocation of %p", ptr);
- ::operator delete(ptr);
+ xbt_free(ptr);
}
}
#endif