X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/0ab6b907319bfb2726f97ed2d21b9f4d6992ca86..b9625f82f86db0674e911887addce45dca31b57f:/src/xbt/mmalloc/mmalloc.c

diff --git a/src/xbt/mmalloc/mmalloc.c b/src/xbt/mmalloc/mmalloc.c
index 84f4a5412d..d3c26bbd26 100644
--- a/src/xbt/mmalloc/mmalloc.c
+++ b/src/xbt/mmalloc/mmalloc.c
@@ -1,7 +1,6 @@
 /* Memory allocator `malloc'. */
 
-/* Copyright (c) 2010-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2020. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -48,14 +47,14 @@ static void *align(struct mdesc *mdp, size_t size)
   return (result);
 }
 
-/** Initialise heapinfo about the heapinfo pages :)
+/** Initialize heapinfo about the heapinfo pages :)
  *
  */
-static void initialize_heapinfo_heapinfo(xbt_mheap_t mdp)
+static void initialize_heapinfo_heapinfo(const s_xbt_mheap_t* mdp)
 {
   // Update heapinfo about the heapinfo pages (!):
   xbt_assert((uintptr_t) mdp->heapinfo % BLOCKSIZE == 0);
-  int block = BLOCK(mdp->heapinfo);
+  size_t block = BLOCK(mdp->heapinfo);
   size_t nblocks = mdp->heapsize * sizeof(malloc_info) / BLOCKSIZE;
   // Mark them as free:
   for (size_t j=0; j!=nblocks; ++j) {
@@ -96,49 +95,46 @@ static void initialize(xbt_mheap_t mdp)
   }
 }
 
-#define update_hook(a,offset) do { if (a) { a = ((char*)a +(offset));} }while(0)
+static inline void update_hook(void **a, size_t offset)
+{
+  if (*a)
+    *a = (char*)*a + offset;
+}
 
 /* Get neatly aligned memory from the low level layers, and register it
  * into the heap info table as necessary. */
 static void *register_morecore(struct mdesc *mdp, size_t size)
 {
-  int i;
-  void *result;
-  malloc_info *newinfo, *oldinfo;
-  size_t newsize;
-
-  result = align(mdp, size); // Never returns NULL
+  void* result = align(mdp, size); // Never returns NULL
 
   /* Check if we need to grow the info table (in a multiplicative manner) */
   if ((size_t) BLOCK((char *) result + size) > mdp->heapsize) {
     int it;
 
-    newsize = mdp->heapsize;
+    size_t newsize = mdp->heapsize;
     while ((size_t) BLOCK((char *) result + size) > newsize)
       newsize *= 2;
 
     /* Copy old info into new location */
-    oldinfo = mdp->heapinfo;
-    newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
+    malloc_info* oldinfo = mdp->heapinfo;
+    malloc_info* newinfo = (malloc_info*)align(mdp, newsize * sizeof(malloc_info));
     memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
 
-    /* Initialise the new blockinfo : */
+    /* Initialize the new blockinfo : */
     memset((char*) newinfo + mdp->heapsize * sizeof(malloc_info), 0,
       (newsize - mdp->heapsize)* sizeof(malloc_info));
 
    /* Update the swag of busy blocks containing free fragments by applying the offset to all swag_hooks. Yeah. My hand is right in the fan and I still type */
    size_t offset=((char*)newinfo)-((char*)oldinfo);
 
-    for (i=1/*first element of heapinfo describes the mdesc area*/;
-         i<mdp->heaplimit;
-         i++) {
-      update_hook(newinfo[i].freehook.next,offset);
-      update_hook(newinfo[i].freehook.prev,offset);
+    for (int i = 1 /*first element of heapinfo describes the mdesc area*/; i < mdp->heaplimit; i++) {
+      update_hook(&newinfo[i].freehook.next, offset);
+      update_hook(&newinfo[i].freehook.prev, offset);
     }
 
     // also update the starting points of the swag
-    for (i=0;i<BLOCKLOG;i++) {
-      update_hook(mdp->fraghead[i].head,offset);
-      update_hook(mdp->fraghead[i].tail,offset);
+    for (int i = 0; i < BLOCKLOG; i++) {
+      update_hook(&mdp->fraghead[i].head, offset);
+      update_hook(&mdp->fraghead[i].tail, offset);
     }
     mdp->heapinfo = newinfo;
@@ -160,7 +156,6 @@ static void *register_morecore(struct mdesc *mdp, size_t size)
   mdp->heaplimit = BLOCK((char *) result + size);
   return (result);
 }
-#undef update_hook
 
 /* Allocate memory from the heap. */
 void *mmalloc(xbt_mheap_t mdp, size_t size) {
@@ -170,38 +165,46 @@ void *mmalloc(xbt_mheap_t mdp, size_t size) {
   }
   return res;
 }
-/* Spliting mmalloc this way is mandated by a trick in mrealloc, that gives
+
+static void mmalloc_mark_used(xbt_mheap_t mdp, size_t block, size_t nblocks, size_t requested_size)
+{
+  for (int it = 0; it < nblocks; it++) {
+    mdp->heapinfo[block + it].type = MMALLOC_TYPE_UNFRAGMENTED;
+    mdp->heapinfo[block + it].busy_block.busy_size = 0;
+    mdp->heapinfo[block + it].busy_block.ignore = 0;
+    mdp->heapinfo[block + it].busy_block.size = 0;
+  }
+  mdp->heapinfo[block].busy_block.size = nblocks;
+  mdp->heapinfo[block].busy_block.busy_size = requested_size;
+  mdp->heapstats.chunks_used++;
+  mdp->heapstats.bytes_used += nblocks * BLOCKSIZE;
+}
+
+/* Splitting mmalloc this way is mandated by a trick in mrealloc, that gives
    back the memory of big blocks to the system before reallocating them: we don't want to loose the beginning of the area when this happens */
 void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
 {
   void *result;
-  size_t block, blocks, lastblocks, start;
-  register size_t i;
-  register size_t log;
-  int it;
+  size_t block;
 
   size_t requested_size = size; // The amount of memory requested by user, for real
 
-  /* Work even if the user was stupid enough to ask a ridicullously small block (even 0-length),
+  /* Work even if the user was stupid enough to ask a ridiculously small block (even 0-length),
    * ie return a valid block that can be realloced and freed.
    * glibc malloc does not use this trick but return a constant pointer, but we need to enlist the free fragments later on. */
   if (size < SMALLEST_POSSIBLE_MALLOC)
     size = SMALLEST_POSSIBLE_MALLOC;
 
-  //  printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);
-
   if (!(mdp->flags & MMALLOC_INITIALIZED))
     initialize(mdp);
 
-  mmalloc_paranoia(mdp);
-
   /* Determine the allocation policy based on the request size. */
   if (size <= BLOCKSIZE / 2) {
     /* Small allocation to receive a fragment of a block.
        Determine the logarithm to base two of the fragment size. */
-    log = 1;
+    size_t log = 1;
     --size;
     while ((size /= 2) != 0) {
      ++log;
    }
@@ -233,8 +236,6 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
     /* Update our metadata about this fragment */
     candidate_info->busy_frag.frag_size[candidate_frag] = requested_size;
     candidate_info->busy_frag.ignore[candidate_frag] = 0;
-    //xbt_backtrace_no_malloc(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
-    //xbt_libunwind_backtrace(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
 
     /* Update the statistics. */
     mdp -> heapstats.chunks_used++;
@@ -249,8 +250,9 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
     result = mmalloc(mdp, BLOCKSIZE); // does not return NULL
     block = BLOCK(result);
 
-    mdp->heapinfo[block].type = log;
+    mdp->heapinfo[block].type = (int)log;
     /* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags */
+    size_t i;
     for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) {
       mdp->heapinfo[block].busy_frag.frag_size[i] = -1;
       mdp->heapinfo[block].busy_frag.ignore[i] = 0;
@@ -264,9 +266,7 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
     /* mark the fragment returned as busy */
     mdp->heapinfo[block].busy_frag.frag_size[0] = requested_size;
     mdp->heapinfo[block].busy_frag.ignore[0] = 0;
-    //xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
-    //xbt_libunwind_backtrace(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
-
+
     /* update stats */
     mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
     mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
@@ -277,11 +277,14 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
        Search the free list in a circle starting at the last place visited.
        If we loop completely around without finding a large enough
       space we will have to get more memory from the system. */
-    blocks = BLOCKIFY(size);
-    start = block = MALLOC_SEARCH_START;
+    size_t blocks = BLOCKIFY(size);
+    size_t start = MALLOC_SEARCH_START;
+    block = MALLOC_SEARCH_START;
     while (mdp->heapinfo[block].free_block.size < blocks) {
       if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool!
-        fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
+        fprintf(stderr,
+                "Internal error: found a free block not marked as such (block=%zu type=%d). Please report this bug.\n",
+                block, mdp->heapinfo[block].type);
         abort();
       }
 
@@ -291,7 +294,7 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
          the new core will be contiguous with the final free
          block; if so we don't need to get as much. */
       block = mdp->heapinfo[0].free_block.prev;
-      lastblocks = mdp->heapinfo[block].free_block.size;
+      size_t lastblocks = mdp->heapinfo[block].free_block.size;
       if (mdp->heaplimit != 0 &&
           block + lastblocks == mdp->heaplimit &&
           mmorecore(mdp, 0) == ADDRESS(block + lastblocks) &&
@@ -307,18 +310,7 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
 
       result = register_morecore(mdp, blocks * BLOCKSIZE);
       block = BLOCK(result);
-      for (it=0;it<blocks;it++){
-        mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
-        mdp->heapinfo[block+it].busy_block.busy_size = 0;
-        mdp->heapinfo[block+it].busy_block.ignore = 0;
-        mdp->heapinfo[block+it].busy_block.size = 0;
-      }
-      mdp->heapinfo[block].busy_block.size = blocks;
-      mdp->heapinfo[block].busy_block.busy_size = requested_size;
-      //mdp->heapinfo[block].busy_block.bt_size=xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
-      //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
-      mdp -> heapstats.chunks_used++;
-      mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+      mmalloc_mark_used(mdp, block, blocks, requested_size);
 
       return result;
     }
@@ -349,22 +341,10 @@ void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
             = mdp->heapindex = mdp->heapinfo[block].free_block.next;
     }
 
-    for (it=0;it<blocks;it++){
-      mdp->heapinfo[block+it].type = MMALLOC_TYPE_UNFRAGMENTED;
-      mdp->heapinfo[block+it].busy_block.busy_size = 0;
-      mdp->heapinfo[block+it].busy_block.ignore = 0;
-    }
-    mdp->heapinfo[block].busy_block.size = blocks;
-    mdp->heapinfo[block].busy_block.busy_size = requested_size;
-    //mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
-    //mdp->heapinfo[block].busy_block.bt_size = xbt_libunwind_backtrace(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
-
-    mdp -> heapstats.chunks_used++;
-    mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+    mmalloc_mark_used(mdp, block, blocks, requested_size);
     mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
 
   }
-  //printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);
 
   return (result);
 }
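
Note (not part of the patch above): a minimal, self-contained sketch of what the new update_hook() helper does when register_morecore() relocates the heapinfo table. The swag hooks store absolute addresses, so after the table is memcpy'd to a new location every non-NULL hook must be shifted by the old-to-new distance. The swag_hook_t struct and the main() driver below are simplified stand-ins for illustration, not SimGrid types.

/* Hypothetical stand-in for SimGrid's swag hookup: two untyped links. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void *next; void *prev; } swag_hook_t;

/* Same shape as the helper introduced by the patch: rebase a stored address,
 * leaving NULL hooks untouched. */
static inline void update_hook(void **a, size_t offset)
{
  if (*a)
    *a = (char*)*a + offset;
}

int main(void)
{
  /* "Old" table, with one hook pointing inside the table itself. */
  swag_hook_t *oldinfo = calloc(4, sizeof(*oldinfo));
  oldinfo[1].next = &oldinfo[2];

  /* Relocate (and grow) the table, as register_morecore() does. */
  swag_hook_t *newinfo = calloc(8, sizeof(*newinfo));
  memcpy(newinfo, oldinfo, 4 * sizeof(*oldinfo));

  /* Rebase every hook by the old-to-new distance; the unsigned offset wraps
   * around exactly as the size_t offset does in the patched loop. */
  size_t offset = (char*)newinfo - (char*)oldinfo;
  for (int i = 0; i < 4; i++) {
    update_hook(&newinfo[i].next, offset);
    update_hook(&newinfo[i].prev, offset);
  }

  printf("hook rebased correctly: %s\n", newinfo[1].next == &newinfo[2] ? "yes" : "no");
  free(oldinfo);
  free(newinfo);
  return 0;
}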