-/* Copyright (c) 2007-2020. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2022. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.hpp"
#include "xbt/config.hpp"
+#include "xbt/file.hpp"
#include <cerrno>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
-
+#include "smpi_utils.hpp"
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#endif
}
-
void smpi_shared_destroy()
{
allocs.clear();
{
void* mem;
smpi_source_location loc(file, line);
- auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
- auto data = res.first;
- if (res.second) {
+ auto [data, inserted] = allocs.try_emplace(loc);
+ if (inserted) {
// The new element was inserted.
int fd = smpi_temp_shm_get();
data->second.fd = fd;
smpi_shared_malloc_bogusfile = mkstemp(name);
XBT_DEBUG("bogusfile : %s\n", name);
unlink(name);
- int err = ftruncate(smpi_shared_malloc_bogusfile, smpi_shared_malloc_blocksize);
- if (err != 0)
- xbt_die("Could not write bogus file for shared malloc");
+ xbt_assert(ftruncate(smpi_shared_malloc_bogusfile, smpi_shared_malloc_blocksize) == 0,
+ "Could not write bogus file for shared malloc");
}
int mmap_base_flag = MAP_FIXED | MAP_SHARED | MAP_POPULATE;
void* smpi_shared_malloc_intercept(size_t size, const char* file, int line)
{
- if( smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh())
- return ::operator new(size);
- else
+ if( smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh()){
+ void* ptr = xbt_malloc(size);
+ if(not smpi_cfg_trace_call_use_absolute_path())
+ simgrid::smpi::utils::account_malloc_size(size, simgrid::xbt::Path(file).get_base_name(), line, ptr);
+ else
+ simgrid::smpi::utils::account_malloc_size(size, file, line, ptr);
+ return ptr;
+ } else {
+ simgrid::smpi::utils::account_shared_size(size);
return smpi_shared_malloc(size, file, line);
+ }
}
void* smpi_shared_calloc_intercept(size_t num_elm, size_t elem_size, const char* file, int line)
{
- if( smpi_cfg_auto_shared_malloc_thresh() == 0 || elem_size*num_elm < smpi_cfg_auto_shared_malloc_thresh()){
- void* ptr = ::operator new(elem_size*num_elm);
- memset(ptr, 0, elem_size*num_elm);
+ size_t size = elem_size * num_elm;
+ if (smpi_cfg_auto_shared_malloc_thresh() == 0 || size < smpi_cfg_auto_shared_malloc_thresh()) {
+ void* ptr = xbt_malloc0(size);
+ if(not smpi_cfg_trace_call_use_absolute_path())
+ simgrid::smpi::utils::account_malloc_size(size, simgrid::xbt::Path(file).get_base_name(), line, ptr);
+ else
+ simgrid::smpi::utils::account_malloc_size(size, file, line, ptr);
return ptr;
- } else
- return smpi_shared_malloc(elem_size*num_elm, file, line);
+ } else {
+ simgrid::smpi::utils::account_shared_size(size);
+ return memset(smpi_shared_malloc(size, file, line), 0, size);
+ }
+}
+
+void* smpi_shared_realloc_intercept(void* data, size_t size, const char* file, int line)
+{
+ if (size == 0) {
+ smpi_shared_free(data);
+ return nullptr;
+ }
+ if (data == nullptr)
+ return smpi_shared_malloc_intercept(size, file, line);
+
+ auto meta = allocs_metadata.find(data);
+ if (meta == allocs_metadata.end()) {
+ XBT_DEBUG("Classical realloc(%p, %zu)", data, size);
+ return xbt_realloc(data, size);
+ }
+
+ XBT_DEBUG("Shared realloc(%p, %zu) (old size: %zu)", data, size, meta->second.size);
+ void* ptr = smpi_shared_malloc_intercept(size, file, line);
+ if (ptr != data) {
+ memcpy(ptr, data, std::min(size, meta->second.size));
+ smpi_shared_free(data);
+ }
+ return ptr;
}
void* smpi_shared_malloc(size_t size, const char* file, int line)
return smpi_shared_malloc_partial(size, shared_block_offsets.data(), nb_shared_blocks);
}
XBT_DEBUG("Classic allocation of %zu bytes", size);
- return ::operator new(size);
+ return xbt_malloc(size);
}
int smpi_is_shared(const void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset){
size_t offset, size_t buff_size)
{
std::vector<std::pair<size_t, size_t>> result;
- for (auto const& block : vec) {
- auto new_block = std::make_pair(std::min(std::max((size_t)0, block.first - offset), buff_size),
- std::min(std::max((size_t)0, block.second - offset), buff_size));
+ for (auto const& [block_begin, block_end] : vec) {
+ auto new_block = std::make_pair(std::clamp(block_begin - offset, (size_t)0, buff_size),
+ std::clamp(block_end - offset, (size_t)0, buff_size));
if (new_block.second > 0 && new_block.first < buff_size)
result.push_back(new_block);
}
// Frees a pointer obtained through the SMPI allocation interceptors.
// NOTE(review): this span is a unified-diff hunk whose unchanged context lines
// are elided, so the brace structure shown here is incomplete — verify the
// branch nesting against the full source before editing.
void smpi_shared_free(void *ptr)
{
+  simgrid::smpi::utils::account_free(ptr);
  if (smpi_cfg_shared_malloc() == SharedMallocType::LOCAL) {
    auto meta = allocs_metadata.find(ptr);
    if (meta == allocs_metadata.end()) {
      // Not tracked: plain deallocation (xbt_free to match xbt_malloc at allocation time).
-      ::operator delete(ptr);
+      xbt_free(ptr);
      return;
    }
    shared_data_t* data = &meta->second.data->second;
    if (data->count <= 0) {
      // Last user of this shared block: release the backing fd and both map entries.
      close(data->fd);
      allocs.erase(allocs.find(meta->second.data->first));
      // Erase by iterator rather than by key: reuses the lookup done above.
-      allocs_metadata.erase(ptr);
+      allocs_metadata.erase(meta);
      XBT_DEBUG("Shared free - Local - with removal - of %p", ptr);
    } else {
      XBT_DEBUG("Shared free - Local - no removal - of %p, count = %d", ptr, data->count);
      // NOTE(review): in the full file this munmap presumably belongs to a
      // different (global/partial-shared) branch — elided hunk lines, confirm.
      munmap(ptr, meta->second.size);
      if(meta->second.data->second.count==0){
        delete meta->second.data;
-      allocs_metadata.erase(ptr);
+      allocs_metadata.erase(meta);
      }
    }else{
-    ::operator delete(ptr);
+    xbt_free(ptr);
      return;
    }
  } else {
    // Shared malloc disabled for this pointer: classic deallocation path.
    XBT_DEBUG("Classic deallocation of %p", ptr);
-    ::operator delete(ptr);
+    xbt_free(ptr);
  }
}
#endif