/* Memory allocator `malloc'. */
-/* Copyright (c) 2010-2015. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2023. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
static void initialize(xbt_mheap_t mdp);
static void *register_morecore(xbt_mheap_t mdp, size_t size);
-static void *align(xbt_mheap_t mdp, size_t size);
+static void* mmalloc_aligned(xbt_mheap_t mdp, size_t size);
/* Allocation aligned on block boundary.
*
* It never returns NULL, but dies verbosely on error.
*/
/* NOTE(review): this is a patch hunk with elided context — the lines that
 * obtain the initial `result` from the core and compute `adj` (the
 * misalignment to the next BLOCKSIZE boundary) are missing here, which is
 * why `result` and `adj` *look* uninitialized below. Confirm against the
 * full file before judging correctness. The diff renames `align` to
 * `mmalloc_aligned` (clearer, avoids clashing with other `align` symbols). */
-static void *align(struct mdesc *mdp, size_t size)
+static void* mmalloc_aligned(struct mdesc* mdp, size_t size)
{
void *result;
unsigned long int adj;
/* Grow the core by `adj` bytes so the returned pointer lands on a block
 * boundary (mmorecore is this allocator's sbrk-like primitive). */
mmorecore(mdp, adj);
result = (char *) result + adj;
}
- return (result);
+ return result;
}
-/** Initialise heapinfo about the heapinfo pages :)
- *
- */
-static void initialize_heapinfo_heapinfo(xbt_mheap_t mdp)
+/** Initialize heapinfo about the heapinfo pages :) */
+static void initialize_heapinfo_heapinfo(const s_xbt_mheap_t* mdp)
{
// Update heapinfo about the heapinfo pages (!):
/* The heapinfo table itself lives in heap blocks, so those blocks need
 * their own bookkeeping entries. The diff swaps xbt_assert for
 * mmalloc_assert (safe to call at malloc level) and widens `block` from
 * int to size_t to match BLOCK()'s domain. */
- xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
- int block = BLOCK(mdp->heapinfo);
+ mmalloc_assert((uintptr_t)mdp->heapinfo % BLOCKSIZE == 0, "Failed assert in initialize_heapinfo_heapinfo()");
+ size_t block = BLOCK(mdp->heapinfo);
size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
// Mark them as free:
/* NOTE(review): the loop body (the statements that actually mark each of
 * the `nblocks` entries starting at `block` as free) and this function's
 * closing brace were elided from this hunk — TODO confirm in full file. */
for (size_t j=0; j!=nblocks; ++j) {
}
/* Finish the initialization of the mheap. If we want to inline it
- * properly, we need to make the align function publicly visible, too */
+ * properly, we need to make the mmalloc_aligned function publicly visible, too */
/* Lazily called from mmalloc_no_memset() on the first allocation, guarded
 * by the MMALLOC_INITIALIZED flag. Sets up the heapinfo bookkeeping table
 * and the per-fragment-size free lists (fraghead swags). */
static void initialize(xbt_mheap_t mdp)
{
- int i;
- malloc_info mi; /* to compute the offset of the swag hook */
-
// Update mdp meta-data:
mdp->heapsize = HEAP / BLOCKSIZE;
/* The bookkeeping table is itself carved out of the heap, block-aligned. */
- mdp->heapinfo = (malloc_info *)
- align(mdp, mdp->heapsize * sizeof(malloc_info));
+ mdp->heapinfo = (malloc_info*)mmalloc_aligned(mdp, mdp->heapsize * sizeof(malloc_info))
mdp->heapbase = (void *) mdp->heapinfo;
mdp->flags |= MMALLOC_INITIALIZED;
initialize_heapinfo_heapinfo(mdp);
/* One swag (intrusive list) per power-of-two fragment size; `mi` exists
 * only so xbt_swag_offset can compute the offsetof the freehook member.
 * The diff narrows i and mi to loop scope — behavior unchanged. */
- for (i=0;i<BLOCKLOG;i++) {
- xbt_swag_init(&(mdp->fraghead[i]),
- xbt_swag_offset(mi, freehook));
+ for (int i = 0; i < BLOCKLOG; i++) {
+ malloc_info mi; /* to compute the offset of the swag hook */
+ xbt_swag_init(&(mdp->fraghead[i]), xbt_swag_offset(mi, freehook));
}
}
/* Shift a (possibly NULL) swag-hook pointer by `offset` bytes; used by
 * register_morecore() after the heapinfo table is memcpy'd to a new
 * location, so intra-table next/prev links keep pointing inside the new
 * copy. The diff replaces the multi-evaluation macro with a type-checked
 * static inline taking the pointer's address — a strict improvement.
 * NOTE(review): callers pass addresses of struct members as void** —
 * assumes those members are declared as void* (TODO confirm in xbt_swag). */
-#define update_hook(a,offset) do { if (a) { a = ((char*)a +(offset));} }while(0)
+static inline void update_hook(void **a, size_t offset)
+{
+ if (*a)
+ *a = (char*)*a + offset;
+}
/* Get neatly aligned memory from the low level layers, and register it
* into the heap info table as necessary. */
/* Returns `size` bytes of block-aligned core and, if the new high-water
 * mark outgrows the current heapinfo table, relocates that table to a
 * doubled-capacity copy, fixing up every swag hook by the relocation
 * offset. Never returns NULL (mmalloc_aligned dies on error). */
static void *register_morecore(struct mdesc *mdp, size_t size)
{
- int i;
- void *result;
- malloc_info *newinfo, *oldinfo;
- size_t newsize;
-
- result = align(mdp, size); // Never returns NULL
+ void* result = mmalloc_aligned(mdp, size); // Never returns NULL
/* Check if we need to grow the info table (in a multiplicative manner) */
- if ((size_t) BLOCK((char *) result + size) > mdp->heapsize) {
- int it;
-
- newsize = mdp->heapsize;
- while ((size_t) BLOCK((char *) result + size) > newsize)
+ if (BLOCK((char*)result + size) > mdp->heapsize) {
+ size_t newsize = mdp->heapsize;
/* Double until the table covers the new last block. */
+ while (BLOCK((char*)result + size) > newsize)
newsize *= 2;
/* Copy old info into new location */
- oldinfo = mdp->heapinfo;
- newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
+ malloc_info* oldinfo = mdp->heapinfo;
+ malloc_info* newinfo = (malloc_info*)mmalloc_aligned(mdp, newsize * sizeof(malloc_info));
memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
- /* Initialise the new blockinfo : */
+ /* Initialize the new blockinfo : */
memset((char*) newinfo + mdp->heapsize * sizeof(malloc_info), 0,
(newsize - mdp->heapsize)* sizeof(malloc_info));
/* Update the swag of busy blocks containing free fragments by applying the offset to all swag_hooks. Yeah. My hand is right in the fan and I still type */
/* `offset` may be "negative" in pointer terms but size_t arithmetic wraps
 * consistently, so the char*+offset adjustment still lands correctly —
 * NOTE(review): relies on wraparound of unsigned arithmetic; confirm this
 * matches the original macro's behavior with signed offsets. */
size_t offset=((char*)newinfo)-((char*)oldinfo);
- for (i=1/*first element of heapinfo describes the mdesc area*/;
- i<mdp->heaplimit;
- i++) {
- update_hook(newinfo[i].freehook.next,offset);
- update_hook(newinfo[i].freehook.prev,offset);
+ for (size_t i = 1 /*first element of heapinfo describes the mdesc area*/; i < mdp->heaplimit; i++) {
+ update_hook(&newinfo[i].freehook.next, offset);
+ update_hook(&newinfo[i].freehook.prev, offset);
}
// also update the starting points of the swag
- for (i=0;i<BLOCKLOG;i++) {
- update_hook(mdp->fraghead[i].head,offset);
- update_hook(mdp->fraghead[i].tail,offset);
+ for (int i = 0; i < BLOCKLOG; i++) {
+ update_hook(&mdp->fraghead[i].head, offset);
+ update_hook(&mdp->fraghead[i].tail, offset);
}
mdp->heapinfo = newinfo;
/* mark the space previously occupied by the block info as free by first marking it
* as occupied in the regular way, and then freing it */
/* NOTE(review): only the "mark as occupied" half is visible here; the
 * mfree() of the old table area appears to have been elided from this
 * hunk — TODO confirm against the full file. */
- for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++){
+ for (size_t it = 0; it < BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++) {
newinfo[BLOCK(oldinfo)+it].type = MMALLOC_TYPE_UNFRAGMENTED;
newinfo[BLOCK(oldinfo)+it].busy_block.ignore = 0;
}
}
mdp->heaplimit = BLOCK((char *) result + size);
- return (result);
+ return result;
}
-#undef update_hook
/* Allocate memory from the heap. */
/* NOTE(review): the entire body of this function was elided from this
 * patch hunk (only the braces and the trailing `return res;` survive).
 * Given the comment above mmalloc_no_memset below, this is presumably the
 * zero-initializing wrapper around mmalloc_no_memset() — TODO confirm. */
void *mmalloc(xbt_mheap_t mdp, size_t size) {
}
return res;
}
-/* Spliting mmalloc this way is mandated by a trick in mrealloc, that gives
+
/* Helper added by this patch: mark `nblocks` consecutive blocks starting
 * at `block` as one busy, unfragmented allocation of `requested_size`
 * user bytes, and bump the usage statistics. Factors out the two
 * identical loops that mmalloc_no_memset() previously duplicated (new-core
 * path and free-list path). Intermediate blocks get size 0; only the head
 * block records the run length and the user-requested byte count. */
+static void mmalloc_mark_used(xbt_mheap_t mdp, size_t block, size_t nblocks, size_t requested_size)
+{
+ for (size_t it = 0; it < nblocks; it++) {
+ mdp->heapinfo[block + it].type = MMALLOC_TYPE_UNFRAGMENTED;
+ mdp->heapinfo[block + it].busy_block.busy_size = 0;
+ mdp->heapinfo[block + it].busy_block.ignore = 0;
+ mdp->heapinfo[block + it].busy_block.size = 0;
+ }
+ mdp->heapinfo[block].busy_block.size = nblocks;
+ mdp->heapinfo[block].busy_block.busy_size = requested_size;
+ mdp->heapstats.chunks_used++;
+ mdp->heapstats.bytes_used += nblocks * BLOCKSIZE;
+}
+
+/* Splitting mmalloc this way is mandated by a trick in mrealloc, that gives
back the memory of big blocks to the system before reallocating them: we don't
want to loose the beginning of the area when this happens */
/* Core allocation routine (memory is NOT zeroed — hence the name).
 * Policy visible in this fragment: requests <= BLOCKSIZE/2 are served as a
 * power-of-two fragment of a shared block; larger requests are rounded up
 * to whole blocks (BLOCKIFY) taken from the circular free list, or from
 * fresh core via register_morecore() when the list has no large-enough run.
 * NOTE(review): this is a patch fragment — many context lines are elided
 * (candidate_block/candidate_info selection, free-list unlinking, several
 * closing braces), so control flow below is NOT contiguous as written. */
void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
{
void *result;
- size_t block, blocks, lastblocks, start;
- size_t i;
- size_t log;
- int it;
+ size_t block;
size_t requested_size = size; // The amount of memory requested by user, for real
/* Tiny requests are rounded up so every fragment can hold the free-list
 * linkage when released. */
if (size < SMALLEST_POSSIBLE_MALLOC)
size = SMALLEST_POSSIBLE_MALLOC;
- // printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);
-
if (!(mdp->flags & MMALLOC_INITIALIZED))
initialize(mdp);
- mmalloc_paranoia(mdp);
-
/* Determine the allocation policy based on the request size. */
if (size <= BLOCKSIZE / 2) {
/* Small allocation to receive a fragment of a block.
Determine the logarithm to base two of the fragment size. */
- log = 1;
+ size_t log = 1;
--size;
while ((size /= 2) != 0) {
++log;
/* NOTE(review): `candidate_frag`, `candidate_info` and `candidate_block`
 * are declared/selected in elided context lines (a block with free
 * fragments of this size was found in fraghead[log]). frag_size == -1
 * marks a free fragment slot. */
for (candidate_frag=0;candidate_frag<(size_t) (BLOCKSIZE >> log);candidate_frag++)
if (candidate_info->busy_frag.frag_size[candidate_frag] == -1)
break;
- xbt_assert(candidate_frag < (size_t) (BLOCKSIZE >> log),
- "Block %zu was registered as containing free fragments of type %zu, but I can't find any",candidate_block,log);
+ mmalloc_assert(candidate_frag < (size_t)(BLOCKSIZE >> log),
+ "Block %zu was registered as containing free fragments of type %zu, but I can't find any",
+ candidate_block, log);
result = (void*) (((char*)ADDRESS(candidate_block)) + (candidate_frag << log));
/* Update our metadata about this fragment */
candidate_info->busy_frag.frag_size[candidate_frag] = requested_size;
candidate_info->busy_frag.ignore[candidate_frag] = 0;
- //xbt_backtrace_no_malloc(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
- //xbt_libunwind_backtrace(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
/* Update the statistics. */
mdp -> heapstats.chunks_used++;
/* (elided context above) No block had a free fragment of this size:
 * allocate a fresh whole block and fragment it. */
result = mmalloc(mdp, BLOCKSIZE); // does not return NULL
block = BLOCK(result);
- mdp->heapinfo[block].type = log;
+ mdp->heapinfo[block].type = (int)log;
/* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags */
+ size_t i;
for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) {
mdp->heapinfo[block].busy_frag.frag_size[i] = -1;
mdp->heapinfo[block].busy_frag.ignore[i] = 0;
/* mark the fragment returned as busy */
mdp->heapinfo[block].busy_frag.frag_size[0] = requested_size;
mdp->heapinfo[block].busy_frag.ignore[0] = 0;
- //xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
- //xbt_libunwind_backtrace(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
/* update stats */
mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
/* --- Large allocation: whole blocks. (The opening of this comment was
 * elided from the hunk.) */
Search the free list in a circle starting at the last place visited.
If we loop completely around without finding a large enough
space we will have to get more memory from the system. */
- blocks = BLOCKIFY(size);
- start = block = MALLOC_SEARCH_START;
+ size_t blocks = BLOCKIFY(size);
+ size_t start = MALLOC_SEARCH_START;
+ block = MALLOC_SEARCH_START;
while (mdp->heapinfo[block].free_block.size < blocks) {
/* Free blocks have a negative type tag; a non-negative tag here means
 * the free list is corrupted. We abort by hand because higher-level
 * error paths may themselves call malloc. */
if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool!
- fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
+ fprintf(stderr,
+ "Internal error: found a free block not marked as such (block=%zu type=%d). Please report this bug.\n",
+ block, mdp->heapinfo[block].type);
abort();
}
/* (elided context) We wrapped around without a fit: grow the heap.
 * Check whether the new core will be contiguous with the final free... */
the new core will be contiguous with the final free
block; if so we don't need to get as much. */
block = mdp->heapinfo[0].free_block.prev;
- lastblocks = mdp->heapinfo[block].free_block.size;
+ size_t lastblocks = mdp->heapinfo[block].free_block.size;
if (mdp->heaplimit != 0 &&
block + lastblocks == mdp->heaplimit &&
mmorecore(mdp, 0) == ADDRESS(block + lastblocks) &&
result = register_morecore(mdp, blocks * BLOCKSIZE);
block = BLOCK(result);
- for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
- mdp->heapinfo[block+it].busy_block.busy_size = 0;
- mdp->heapinfo[block+it].busy_block.ignore = 0;
- mdp->heapinfo[block+it].busy_block.size = 0;
- }
- mdp->heapinfo[block].busy_block.size = blocks;
- mdp->heapinfo[block].busy_block.busy_size = requested_size;
- //mdp->heapinfo[block].busy_block.bt_size=xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
- //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
- mdp -> heapstats.chunks_used++;
- mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+ mmalloc_mark_used(mdp, block, blocks, requested_size);
return result;
}
/* NOTE(review): the left-hand side of this assignment chain was elided
 * (the dangling leading `=` is diff truncation, not real code). */
= mdp->heapindex = mdp->heapinfo[block].free_block.next;
}
/* Found a fit on the free list (unlinking elided above): claim it. */
- for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
- mdp->heapinfo[block+it].busy_block.busy_size = 0;
- mdp->heapinfo[block+it].busy_block.ignore = 0;
- mdp->heapinfo[block+it].busy_block.size = 0;
- }
- mdp->heapinfo[block].busy_block.size = blocks;
- mdp->heapinfo[block].busy_block.busy_size = requested_size;
- //mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
- //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
-
- mdp -> heapstats.chunks_used++;
- mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+ mmalloc_mark_used(mdp, block, blocks, requested_size);
mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
}
- //printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);
- return (result);
+ return result;
}