X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/e709643ef0c5b61c6c878016c418bffa2b1b20cd..b60c8af597ab1859e4b804954e6e6df37e8cff06:/src/smpi/internals/smpi_shared.cpp

diff --git a/src/smpi/internals/smpi_shared.cpp b/src/smpi/internals/smpi_shared.cpp
index b2653ab587..869f72de8f 100644
--- a/src/smpi/internals/smpi_shared.cpp
+++ b/src/smpi/internals/smpi_shared.cpp
@@ -178,7 +178,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
 
 
   /* First reserve memory area */
-  void* allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  void* allocated_ptr = mmap(nullptr, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 
   xbt_assert(allocated_ptr != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
                                           "to allow big allocations.\n",
@@ -239,7 +239,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
     size_t stop_block_offset = ALIGN_DOWN(stop_offset, smpi_shared_malloc_blocksize);
     for (size_t offset = start_block_offset; offset < stop_block_offset; offset += smpi_shared_malloc_blocksize) {
       XBT_DEBUG("\t\tglobal shared allocation, mmap block offset %zx", offset);
-      void* pos = (void*)((unsigned long)mem + offset);
+      void* pos = static_cast<char*>(mem) + offset;
       const void* res = mmap(pos, smpi_shared_malloc_blocksize, PROT_READ | PROT_WRITE, mmap_flag, huge_fd, 0);
       xbt_assert(res == pos, "Could not map folded virtual memory (%s). Do you perhaps need to increase the "
                              "size of the mapped file using --cfg=smpi/shared-malloc-blocksize:newvalue (default 1048576) ? "
@@ -252,7 +252,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
     size_t low_page_stop_offset = (int64_t)start_block_offset < ALIGN_DOWN(stop_offset, PAGE_SIZE) ? start_block_offset : ALIGN_DOWN(stop_offset, PAGE_SIZE);
     if(low_page_start_offset < low_page_stop_offset) {
       XBT_DEBUG("\t\tglobal shared allocation, mmap block start");
-      void* pos = (void*)((unsigned long)mem + low_page_start_offset);
+      void* pos = static_cast<char*>(mem) + low_page_start_offset;
       const void* res = mmap(pos, low_page_stop_offset - low_page_start_offset, PROT_READ | PROT_WRITE,
                              mmap_base_flag, // not a full huge page
                              smpi_shared_malloc_bogusfile, 0);
@@ -265,7 +265,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
       XBT_DEBUG("\t\tglobal shared allocation, mmap block stop");
       size_t high_page_stop_offset = stop_offset == size ? size : ALIGN_DOWN(stop_offset, PAGE_SIZE);
       if(high_page_stop_offset > stop_block_offset) {
-        void* pos = (void*)((unsigned long)mem + stop_block_offset);
+        void* pos = static_cast<char*>(mem) + stop_block_offset;
         const void* res = mmap(pos, high_page_stop_offset - stop_block_offset, PROT_READ | PROT_WRITE,
                                mmap_base_flag, // not a full huge page
                                smpi_shared_malloc_bogusfile, 0);
@@ -279,7 +279,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
 
   shared_metadata_t newmeta;
   //register metadata for memcpy avoidance
-  shared_data_key_type* data = new shared_data_key_type;
+  auto* data = new shared_data_key_type;
   data->second.fd = -1;
   data->second.count = 1;
   newmeta.size = size;
@@ -287,14 +287,14 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int
   newmeta.allocated_ptr = allocated_ptr;
   newmeta.allocated_size = allocated_size;
   if(shared_block_offsets[0] > 0) {
-    newmeta.private_blocks.push_back(std::make_pair(0, shared_block_offsets[0]));
+    newmeta.private_blocks.emplace_back(0, shared_block_offsets[0]);
   }
   int i_block;
   for(i_block = 0; i_block < nb_shared_blocks-1; i_block ++) {
-    newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], shared_block_offsets[2*i_block+2]));
+    newmeta.private_blocks.emplace_back(shared_block_offsets[2 * i_block + 1], shared_block_offsets[2 * i_block + 2]);
   }
   if(shared_block_offsets[2*i_block+1] < size) {
-    newmeta.private_blocks.push_back(std::make_pair(shared_block_offsets[2*i_block+1], size));
+    newmeta.private_blocks.emplace_back(shared_block_offsets[2 * i_block + 1], size);
   }
   allocs_metadata[mem] = newmeta;