-/* Copyright (c) 2007, 2009-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2018. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include <cstring>
#include "private.hpp"
+#include "xbt/config.hpp"
+
#include <cerrno>
#include <sys/types.h>
auto res = allocs.insert(std::make_pair(loc, shared_data_t()));
auto data = res.first;
if (res.second) {
- // The insertion did not take place.
+ // The new element was inserted.
// Generate a shared memory name from the address of the shared_data:
char shmname[32]; // cannot be longer than PSHMNAMLEN = 31 on Mac OS X (shm_open raises ENAMETOOLONG otherwise)
snprintf(shmname, 31, "/shmalloc%p", &*data);
void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks)
{
- std::string huge_page_mount_point = xbt_cfg_get_string("smpi/shared-malloc-hugepage");
+ std::string huge_page_mount_point = simgrid::config::get_value<std::string>("smpi/shared-malloc-hugepage");
bool use_huge_page = not huge_page_mount_point.empty();
#ifndef MAP_HUGETLB /* If the system header don't define that mmap flag */
xbt_assert(not use_huge_page,
"Huge pages are not available on your system, you cannot use the smpi/shared-malloc-hugepage option.");
- use_huge_page = 0;
#endif
- smpi_shared_malloc_blocksize = static_cast<unsigned long>(xbt_cfg_get_double("smpi/shared-malloc-blocksize"));
+ smpi_shared_malloc_blocksize =
+ static_cast<unsigned long>(simgrid::config::get_value<double>("smpi/shared-malloc-blocksize"));
void* mem;
size_t allocated_size;
if(use_huge_page) {
"to allow big allocations.\n",
size >> 20);
if(use_huge_page)
- mem = (void*)ALIGN_UP((uint64_t)allocated_ptr, HUGE_PAGE_SIZE);
+ mem = (void*)ALIGN_UP((int64_t)allocated_ptr, HUGE_PAGE_SIZE);
else
mem = allocated_ptr;
if(i_block < nb_shared_blocks-1)
xbt_assert(stop_offset < shared_block_offsets[2*i_block+2],
"stop_offset (%zu) should be lower than its successor start offset (%zu)", stop_offset, shared_block_offsets[2*i_block+2]);
- size_t start_block_offset = ALIGN_UP(start_offset, smpi_shared_malloc_blocksize);
- size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
+ size_t start_block_offset = ALIGN_UP((int64_t)start_offset, smpi_shared_malloc_blocksize);
+ size_t stop_block_offset = ALIGN_DOWN((int64_t)stop_offset, smpi_shared_malloc_blocksize);
for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
void* pos = (void*)((unsigned long)mem + offset);
"and that the directory you are passing is mounted correctly (mount /path/to/huge -t hugetlbfs -o rw,mode=0777).",
strerror(errno));
}
- size_t low_page_start_offset = ALIGN_UP(start_offset, PAGE_SIZE);
- size_t low_page_stop_offset = start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
+ size_t low_page_start_offset = ALIGN_UP((int64_t)start_offset, PAGE_SIZE);
+ size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN((int64_t)stop_offset, (int64_t)PAGE_SIZE);
if(low_page_start_offset < low_page_stop_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
void* pos = (void*)((unsigned long)mem + low_page_start_offset);
}
if(low_page_stop_offset <= stop_block_offset) {
XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
- size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
+ size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN((int64_t)stop_offset, PAGE_SIZE);
if(high_page_stop_offset > stop_block_offset) {
void* pos = (void*)((unsigned long)mem + stop_block_offset);
void* res = mmap(pos, high_page_stop_offset-stop_block_offset, PROT_READ | PROT_WRITE, mmap_base_flag, // not a full huge page
}
}
/** @brief Shift a list of private-block intervals by @a offset and clip them to the frame [0, buff_size).
 *
 *  @param vec       intervals (start, stop) expressed in the original address space; assumed sorted/disjoint
 *  @param offset    amount subtracted from every bound to re-express it relative to the frame start
 *  @param buff_size length of the frame; bounds are clamped to it
 *  @return the shifted intervals, keeping only those that still intersect (0, buff_size)
 */
std::vector<std::pair<size_t, size_t>> shift_and_frame_private_blocks(const std::vector<std::pair<size_t, size_t>>& vec,
                                                                      size_t offset, size_t buff_size)
{
  std::vector<std::pair<size_t, size_t>> result;
  result.reserve(vec.size());
  for (auto const& block : vec) {
    // Clamp at 0 BEFORE subtracting: `block.first - offset` is a size_t subtraction, so a block
    // starting before `offset` used to wrap around to a huge value, get clamped to buff_size, and
    // be silently dropped even when its tail overlapped the frame. (The former
    // std::max((size_t)0, ...) was dead code: an unsigned value is never negative.)
    size_t start = block.first <= offset ? 0 : std::min(block.first - offset, buff_size);
    size_t stop  = block.second <= offset ? 0 : std::min(block.second - offset, buff_size);
    // Keep only intervals that still intersect the frame.
    if (stop > 0 && start < buff_size)
      result.emplace_back(start, stop);
  }
  return result;
}
-std::vector<std::pair<size_t, size_t>> merge_private_blocks(std::vector<std::pair<size_t, size_t>> src, std::vector<std::pair<size_t, size_t>> dst) {
+std::vector<std::pair<size_t, size_t>> merge_private_blocks(const std::vector<std::pair<size_t, size_t>>& src,
+ const std::vector<std::pair<size_t, size_t>>& dst)
+{
std::vector<std::pair<size_t, size_t>> result;
unsigned i_src = 0;
unsigned i_dst = 0;