/* Memory allocator `malloc'. */
-/* Copyright (c) 2010-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2019. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
return (result);
}
+/** Initialise heapinfo about the heapinfo pages :)
+ *
+ * The blocks that hold the heapinfo table itself are typed
+ * MMALLOC_TYPE_FREE but deliberately kept OUT of the free-block list:
+ * next/prev stay 0 for every block, and only the first block of the run
+ * records the run length in free_block.size.
+ */
+static void initialize_heapinfo_heapinfo(xbt_mheap_t mdp)
+{
+ // Update heapinfo about the heapinfo pages (!):
+ xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
+ int block = BLOCK(mdp->heapinfo);
+ // NOTE(review): integer division truncates; this assumes the table size
+ // (heapsize * sizeof(malloc_info)) is a multiple of BLOCKSIZE — the
+ // assert above only checks pointer alignment, not size. Confirm.
+ size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
+ // Mark them as free:
+ for (size_t j=0; j!=nblocks; ++j) {
+ mdp->heapinfo[block+j].type = MMALLOC_TYPE_FREE;
+ mdp->heapinfo[block+j].free_block.size = 0;
+ mdp->heapinfo[block+j].free_block.next = 0;
+ mdp->heapinfo[block+j].free_block.prev = 0;
+ }
+ // Only the first block of the run carries the total length:
+ mdp->heapinfo[block].free_block.size = nblocks;
+}
+
/* Finish the initialization of the mheap. If we want to inline it
 * properly, we need to make the align function publicly visible, too */
static void initialize(xbt_mheap_t mdp)
int i;
malloc_info mi; /* to compute the offset of the swag hook */
+ // Update mdp meta-data:
mdp->heapsize = HEAP / BLOCKSIZE;
mdp->heapinfo = (malloc_info *)
align(mdp, mdp->heapsize * sizeof(malloc_info));
+ mdp->heapbase = (void *) mdp->heapinfo;
+ mdp->flags |= MMALLOC_INITIALIZED;
+ // NOTE(review): heapbase/flags assignments were moved up from the end of
+ // this function (see the removed lines below); confirm no code between
+ // the two locations depended on MMALLOC_INITIALIZED still being unset.
+ // Update root heapinfo:
memset((void *) mdp->heapinfo, 0, mdp->heapsize * sizeof(malloc_info));
- mdp->heapinfo[0].type=-1;
+ mdp->heapinfo[0].type = MMALLOC_TYPE_FREE;
mdp->heapinfo[0].free_block.size = 0;
mdp->heapinfo[0].free_block.next = mdp->heapinfo[0].free_block.prev = 0;
mdp->heapindex = 0;
- mdp->heapbase = (void *) mdp->heapinfo;
- mdp->flags |= MMALLOC_INITIALIZED;
+
+ // Record the heapinfo table's own pages inside the heapinfo:
+ initialize_heapinfo_heapinfo(mdp);
for (i=0;i<BLOCKLOG;i++) {
xbt_swag_init(&(mdp->fraghead[i]),
}
}
-#define update_hook(a,offset) do { if (a) { a = ((char*)a +(offset));} }while(0)
+/* Relocate one stored pointer by `offset` bytes (no-op on NULL).
+ * Replaces the old function-like macro with a type-checked static inline;
+ * callers now pass the ADDRESS of the pointer to patch.
+ * NOTE(review): call sites pass non-void pointer fields through void** —
+ * this relies on all object pointers sharing one representation and may
+ * draw -Wincompatible-pointer-types warnings; confirm the call sites cast
+ * appropriately. */
+static inline void update_hook(void **a, size_t offset)
+{
+ if (*a)
+ *a = (char*)*a + offset;
+}
/* Get neatly aligned memory from the low level layers, and register it
* into the heap info table as necessary. */
static void *register_morecore(struct mdesc *mdp, size_t size)
{
- int i;
- void *result;
- malloc_info *newinfo, *oldinfo;
- size_t newsize;
-
- result = align(mdp, size); // Never returns NULL
+ void* result = align(mdp, size); // Never returns NULL
/* Check if we need to grow the info table (in a multiplicative manner) */
if ((size_t) BLOCK((char *) result + size) > mdp->heapsize) {
int it;
- newsize = mdp->heapsize;
+ size_t newsize = mdp->heapsize;
while ((size_t) BLOCK((char *) result + size) > newsize)
newsize *= 2;
/* Copy old info into new location */
- oldinfo = mdp->heapinfo;
- newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
- memset(newinfo, 0, newsize * sizeof(malloc_info));
+ malloc_info* oldinfo = mdp->heapinfo;
+ malloc_info* newinfo = (malloc_info*)align(mdp, newsize * sizeof(malloc_info));
memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
+ /* Initialise the new blockinfo : */
+ // (only the tail beyond the copied region is zeroed, instead of the
+ // whole table as before — the memcpy above fills the head)
+ memset((char*) newinfo + mdp->heapsize * sizeof(malloc_info), 0,
+ (newsize - mdp->heapsize)* sizeof(malloc_info));
+
/* Update the swag of busy blocks containing free fragments by applying the offset to all swag_hooks. Yeah. My hand is right in the fan and I still type */
size_t offset=((char*)newinfo)-((char*)oldinfo);
- for (i=1/*first element of heapinfo describes the mdesc area*/;
- i<mdp->heaplimit;
- i++) {
- update_hook(newinfo[i].freehook.next,offset);
- update_hook(newinfo[i].freehook.prev,offset);
+ // NOTE(review): `offset` is a size_t holding a possibly-negative pointer
+ // difference; update_hook then relies on unsigned wrap-around to move
+ // pointers "backwards". Works in practice, but confirm it is intentional.
+ // Also: `i` is int while heaplimit appears wider — signed/unsigned compare.
+ for (int i = 1 /*first element of heapinfo describes the mdesc area*/; i < mdp->heaplimit; i++) {
+ update_hook(&newinfo[i].freehook.next, offset);
+ update_hook(&newinfo[i].freehook.prev, offset);
}
// also update the starting points of the swag
- for (i=0;i<BLOCKLOG;i++) {
- update_hook(mdp->fraghead[i].head,offset);
- update_hook(mdp->fraghead[i].tail,offset);
+ for (int i = 0; i < BLOCKLOG; i++) {
+ update_hook(&mdp->fraghead[i].head, offset);
+ update_hook(&mdp->fraghead[i].tail, offset);
}
mdp->heapinfo = newinfo;
/* mark the space previously occupied by the block info as free by first marking it
* as occupied in the regular way, and then freing it */
for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++){
- newinfo[BLOCK(oldinfo)+it].type = 0;
+ newinfo[BLOCK(oldinfo)+it].type = MMALLOC_TYPE_UNFRAGMENTED;
newinfo[BLOCK(oldinfo)+it].busy_block.ignore = 0;
}
newinfo[BLOCK(oldinfo)].busy_block.busy_size = size;
mfree(mdp, (void *) oldinfo);
mdp->heapsize = newsize;
+
+ // Re-register the (relocated) heapinfo table's own pages:
+ initialize_heapinfo_heapinfo(mdp);
}
mdp->heaplimit = BLOCK((char *) result + size);
return (result);
}
-#undef update_hook
/* Allocate memory from the heap. */
void *mmalloc(xbt_mheap_t mdp, size_t size) {
void *res= mmalloc_no_memset(mdp,size);
-// fprintf(stderr,"malloc(%zu)~>%p\n",size,res);
- memset(res,0,size);
+ // Zeroing the returned memory is now opt-in via XBT_MHEAP_OPTION_MEMSET
+ // instead of unconditional. NOTE(review): this is a behavior change —
+ // any caller that relied on mmalloc() always returning zeroed memory
+ // must now set that option on its heap. Confirm all heaps are updated.
+ if (mdp->options & XBT_MHEAP_OPTION_MEMSET) {
+ memset(res,0,size);
+ }
return res;
}
+
+/** Mark `nblocks` consecutive blocks, starting at `block`, as one busy
+ * unfragmented allocation of `requested_size` user bytes, and bump the
+ * usage statistics (chunks_used, bytes_used).
+ *
+ * Factors out the bookkeeping that was previously duplicated at the two
+ * big-block exits of mmalloc_no_memset(). Only the first block of the run
+ * carries the run length (busy_block.size) and the user-requested size
+ * (busy_block.busy_size); the others are zeroed.
+ */
+static void mmalloc_mark_used(xbt_mheap_t mdp, size_t block, size_t nblocks, size_t requested_size)
+{
+ // NOTE(review): `it` is int vs size_t `nblocks` — signed/unsigned
+ // comparison; harmless for realistic sizes but worth confirming.
+ for (int it = 0; it < nblocks; it++) {
+ mdp->heapinfo[block + it].type = MMALLOC_TYPE_UNFRAGMENTED;
+ mdp->heapinfo[block + it].busy_block.busy_size = 0;
+ mdp->heapinfo[block + it].busy_block.ignore = 0;
+ mdp->heapinfo[block + it].busy_block.size = 0;
+ }
+ mdp->heapinfo[block].busy_block.size = nblocks;
+ mdp->heapinfo[block].busy_block.busy_size = requested_size;
+ mdp->heapstats.chunks_used++;
+ mdp->heapstats.bytes_used += nblocks * BLOCKSIZE;
+}
+
/* Spliting mmalloc this way is mandated by a trick in mrealloc, that gives
back the memory of big blocks to the system before reallocating them: we don't
want to loose the beginning of the area when this happens */
void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
{
void *result;
- size_t block, blocks, lastblocks, start;
- register size_t i;
- register size_t log;
- int it;
+ size_t block;
size_t requested_size = size; // The amount of memory requested by user, for real
- /* Work even if the user was stupid enough to ask a ridicullously small block (even 0-length),
+ /* Work even if the user was stupid enough to ask a ridiculously small block (even 0-length),
* ie return a valid block that can be realloced and freed.
* glibc malloc does not use this trick but return a constant pointer, but we need to enlist the free fragments later on.
*/
if (size < SMALLEST_POSSIBLE_MALLOC)
size = SMALLEST_POSSIBLE_MALLOC;
- // printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);
-
if (!(mdp->flags & MMALLOC_INITIALIZED))
initialize(mdp);
- mmalloc_paranoia(mdp);
-
+ // NOTE(review): the mmalloc_paranoia() consistency check is dropped by
+ // this patch — confirm that is intended and not a debugging leftover.
/* Determine the allocation policy based on the request size. */
if (size <= BLOCKSIZE / 2) {
/* Small allocation to receive a fragment of a block.
Determine the logarithm to base two of the fragment size. */
+ // log = ceil(log2(size)) for the rounded-up fragment class:
+ size_t log = 1;
--size;
while ((size /= 2) != 0) {
++log;
/* Update our metadata about this fragment */
candidate_info->busy_frag.frag_size[candidate_frag] = requested_size;
candidate_info->busy_frag.ignore[candidate_frag] = 0;
- //xbt_backtrace_no_malloc(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
- //xbt_libunwind_backtrace(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
/* Update the statistics. */
mdp -> heapstats.chunks_used++;
mdp->heapinfo[block].type = log;
/* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags */
+ size_t i;
for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) {
mdp->heapinfo[block].busy_frag.frag_size[i] = -1;
mdp->heapinfo[block].busy_frag.ignore[i] = 0;
/* mark the fragment returned as busy */
mdp->heapinfo[block].busy_frag.frag_size[0] = requested_size;
mdp->heapinfo[block].busy_frag.ignore[0] = 0;
- //xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
- //xbt_libunwind_backtrace(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
-
+
/* update stats */
mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
Search the free list in a circle starting at the last place visited.
If we loop completely around without finding a large enough
space we will have to get more memory from the system. */
- blocks = BLOCKIFY(size);
- start = block = MALLOC_SEARCH_START;
+ size_t blocks = BLOCKIFY(size);
+ size_t start = MALLOC_SEARCH_START;
+ block = MALLOC_SEARCH_START;
while (mdp->heapinfo[block].free_block.size < blocks) {
if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool!
fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
the new core will be contiguous with the final free
block; if so we don't need to get as much. */
block = mdp->heapinfo[0].free_block.prev;
- lastblocks = mdp->heapinfo[block].free_block.size;
+ size_t lastblocks = mdp->heapinfo[block].free_block.size;
if (mdp->heaplimit != 0 &&
block + lastblocks == mdp->heaplimit &&
mmorecore(mdp, 0) == ADDRESS(block + lastblocks) &&
result = register_morecore(mdp, blocks * BLOCKSIZE);
block = BLOCK(result);
- for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = 0;
- mdp->heapinfo[block+it].busy_block.busy_size = 0;
- mdp->heapinfo[block+it].busy_block.ignore = 0;
- }
- mdp->heapinfo[block].busy_block.size = blocks;
- mdp->heapinfo[block].busy_block.busy_size = requested_size;
- //mdp->heapinfo[block].busy_block.bt_size=xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
- //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
- mdp -> heapstats.chunks_used++;
- mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+ // Freshly grown core: mark the new run busy (stats updated inside).
+ mmalloc_mark_used(mdp, block, blocks, requested_size);
return result;
}
= mdp->heapindex = mdp->heapinfo[block].free_block.next;
}
- for (it=0;it<blocks;it++){
- mdp->heapinfo[block+it].type = 0;
- mdp->heapinfo[block+it].busy_block.busy_size = 0;
- mdp->heapinfo[block+it].busy_block.ignore = 0;
- }
- mdp->heapinfo[block].busy_block.size = blocks;
- mdp->heapinfo[block].busy_block.busy_size = requested_size;
- //mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
- //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
-
- mdp -> heapstats.chunks_used++;
- mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+ // Found a fit on the free list: mark the run busy (stats updated
+ // inside); the free-bytes counter is adjusted just below.
+ mmalloc_mark_used(mdp, block, blocks, requested_size);
mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
}
- //printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);
return (result);
}