X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/33a9da85867c540b95d99573defe39b47c5f6f45..ea39fd08c260e7faa92334952d4acd37e3892b6c:/src/xbt/mmalloc/mmalloc.c

diff --git a/src/xbt/mmalloc/mmalloc.c b/src/xbt/mmalloc/mmalloc.c
index a67700096e..95c5db6671 100644
--- a/src/xbt/mmalloc/mmalloc.c
+++ b/src/xbt/mmalloc/mmalloc.c
@@ -2,335 +2,327 @@
    Copyright 1990, 1991, 1992 Free Software Foundation

    Written May 1989 by Mike Haertel.
-   Heavily modified Mar 1992 by Fred Fish for mmap'd version.
+   Heavily modified Mar 1992 by Fred Fish for mmap'd version. */

-The GNU C Library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Library General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
+/* Copyright (c) 2010. The SimGrid Team.
+ * All rights reserved. */

-The GNU C Library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-Library General Public License for more details.
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */

-You should have received a copy of the GNU Library General Public
-License along with the GNU C Library; see the file COPYING.LIB.  If
-not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA.
-
- The author may be reached (Email) at the address mike@ai.mit.edu,
- or (US mail) as Mike Haertel c/o Free Software Foundation. */
-
-#include <string.h>    /* Prototypes for memcpy, memmove, memset, etc */
+#include <string.h>    /* Prototypes for memcpy, memmove, memset, etc */
 #include <stdio.h>
 #include "mmprivate.h"

 /* Prototypes for local functions */

-static int initialize PARAMS ((struct mdesc *));
-static PTR morecore PARAMS ((struct mdesc *, size_t));
-static PTR align PARAMS ((struct mdesc *, size_t));
+static void initialize(xbt_mheap_t mdp);
+static void *register_morecore(xbt_mheap_t mdp, size_t size);
+static void *align(xbt_mheap_t mdp, size_t size);

-/* Aligned allocation.  */
-
-static PTR
-align (mdp, size)
-  struct mdesc *mdp;
-  size_t size;
+/* Allocation aligned on block boundary.
+ *
+ * It never returns NULL, but dies verbosely on error.
+ */
+static void *align(struct mdesc *mdp, size_t size)
 {
-  PTR result;
+  void *result;
   unsigned long int adj;

-  result = mdp -> morecore (mdp, size);
-  adj = RESIDUAL (result, BLOCKSIZE);
-  if (adj != 0)
-    {
-      adj = BLOCKSIZE - adj;
-      mdp -> morecore (mdp, adj);
-      result = (PTR) result + adj;
-    }
+  result = mmorecore(mdp, size);
+
+  /* if this reservation does not fill up the last block of our resa,
+   * complete the reservation by also asking for the full latest block.
+   *
+   * Also, the returned block is aligned to the end of block (but I've
+   * honestly no idea why -- http://abstrusegoose.com/432 --
+   * but not doing so seems to lead to issues).
+   */
+  adj = RESIDUAL(result, BLOCKSIZE);
+  if (adj != 0) {
+    adj = BLOCKSIZE - adj;
+    mmorecore(mdp, adj);
+    result = (char *) result + adj;
+  }
   return (result);
 }

-/* Set everything up and remember that we have.  */
-
-static int
-initialize (mdp)
-  struct mdesc *mdp;
+/* Finish the initialization of the mheap. If we want to inline it
+ * properly, we need to make the align function publicly visible, too */
+static void initialize(xbt_mheap_t mdp)
 {
-  mdp -> heapsize = HEAP / BLOCKSIZE;
-  mdp -> heapinfo = (malloc_info *)
-    align (mdp, mdp -> heapsize * sizeof (malloc_info));
-  if (mdp -> heapinfo == NULL)
-    {
-      return (0);
-    }
-  memset ((PTR)mdp -> heapinfo, 0, mdp -> heapsize * sizeof (malloc_info));
-  mdp -> heapinfo[0].free.size = 0;
-  mdp -> heapinfo[0].free.next = mdp -> heapinfo[0].free.prev = 0;
-  mdp -> heapindex = 0;
-  mdp -> heapbase = (PTR) mdp -> heapinfo;
-  mdp -> flags |= MMALLOC_INITIALIZED;
-  return (1);
+  int i;
+  malloc_info mi;     /* to compute the offset of the swag hook */
+
+  mdp->heapsize = HEAP / BLOCKSIZE;
+  mdp->heapinfo = (malloc_info *)
+    align(mdp, mdp->heapsize * sizeof(malloc_info));
+
+  memset((void *) mdp->heapinfo, 0, mdp->heapsize * sizeof(malloc_info));
+  mdp->heapinfo[0].type=-1;
+  mdp->heapinfo[0].free_block.size = 0;
+  mdp->heapinfo[0].free_block.next = mdp->heapinfo[0].free_block.prev = 0;
+  mdp->heapindex = 0;
+  mdp->heapbase = (void *) mdp->heapinfo;
+  mdp->flags |= MMALLOC_INITIALIZED;
+
+  for (i=0;i<BLOCKLOG;i++) {
+      xbt_swag_init(&(mdp->fraghead[i]),
+                    xbt_swag_offset(mi, freehook));
+  }
 }

-/* Get neatly aligned memory, initializing or
-   growing the heap info table as necessary. */
+#define update_hook(a,offset) do { if (a) { a = ((char*)a +(offset));} }while(0)

-static PTR
-morecore (mdp, size)
-  struct mdesc *mdp;
-  size_t size;
+/* Get neatly aligned memory from the low level layers, and register it
+ * into the heap info table as necessary. */
+static void *register_morecore(struct mdesc *mdp, size_t size)
 {
-  PTR result;
+  int i;
+  void *result;
   malloc_info *newinfo, *oldinfo;
   size_t newsize;

-  result = align (mdp, size);
-  if (result == NULL)
-    {
-      return (NULL);
-    }
+  result = align(mdp, size); // Never returns NULL
+
+  /* Check if we need to grow the info table (in a multiplicative manner) */
+  if ((size_t) BLOCK((char *) result + size) > mdp->heapsize) {
+    int it;

-  /* Check if we need to grow the info table.  */
-  if ((size_t) BLOCK ((PTR) result + size) > mdp -> heapsize)
-    {
-      newsize = mdp -> heapsize;
-      while ((size_t) BLOCK ((PTR) result + size) > newsize)
-        {
-          newsize *= 2;
-        }
-      newinfo = (malloc_info *) align (mdp, newsize * sizeof (malloc_info));
-      if (newinfo == NULL)
-        {
-          mdp -> morecore (mdp, -size);
-          return (NULL);
-        }
-      memset ((PTR) newinfo, 0, newsize * sizeof (malloc_info));
-      memcpy ((PTR) newinfo, (PTR) mdp -> heapinfo,
-              mdp -> heapsize * sizeof (malloc_info));
-      oldinfo = mdp -> heapinfo;
-      newinfo[BLOCK (oldinfo)].busy.type = 0;
-      newinfo[BLOCK (oldinfo)].busy.info.size
-        = BLOCKIFY (mdp -> heapsize * sizeof (malloc_info));
-      mdp -> heapinfo = newinfo;
-      __mmalloc_free (mdp, (PTR)oldinfo);
-      mdp -> heapsize = newsize;
+    newsize = mdp->heapsize;
+    while ((size_t) BLOCK((char *) result + size) > newsize)
+      newsize *= 2;
+
+    /* Copy old info into new location */
+    oldinfo = mdp->heapinfo;
+    newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
+    memset(newinfo, 0, newsize * sizeof(malloc_info));
+    memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
+
+    /* Update the swag of busy blocks containing free fragments by applying
+     * the offset to all swag_hooks. Yeah. My hand is right in the fan and I still type */
+    size_t offset=((char*)newinfo)-((char*)oldinfo);
+
+    for (i=1/*first element of heapinfo describes the mdesc area*/;
+         i<mdp->heaplimit;
+         i++) {
+      update_hook(newinfo[i].freehook.next,offset);
+      update_hook(newinfo[i].freehook.prev,offset);
+    }
+    // also update the starting points of the swag
+    for (i=0;i<BLOCKLOG;i++) {
+      update_hook(mdp->fraghead[i].head,offset);
+      update_hook(mdp->fraghead[i].tail,offset);
+    }
+    mdp->heapinfo = newinfo;

-      mdp -> heaplimit = BLOCK ((PTR) result + size);
+    /* mark the space previously occupied by the block info as free by first marking it
+     * as occupied in the regular way, and then freeing it */
+    for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++)
+      newinfo[BLOCK(oldinfo)+it].type = 0;
+
+    newinfo[BLOCK(oldinfo)].busy_block.size = BLOCKIFY(mdp->heapsize * sizeof(malloc_info));
+    newinfo[BLOCK(oldinfo)].busy_block.busy_size = size;
+    newinfo[BLOCK(oldinfo)].busy_block.bt_size = 0; // FIXME setup the backtrace
+    mfree(mdp, (void *) oldinfo);
+    mdp->heapsize = newsize;
+  }
+
+  mdp->heaplimit = BLOCK((char *) result + size);
   return (result);
 }
+#undef update_hook

 /* Allocate memory from the heap.  */
-
-PTR
-mmalloc (md, size)
-  PTR md;
-  size_t size;
+void *mmalloc(xbt_mheap_t mdp, size_t size) {
+  void *res= mmalloc_no_memset(mdp,size);
+//  fprintf(stderr,"malloc(%zu)~>%p\n",size,res);
+  memset(res,0,size);
+  return res;
+}
+/* Splitting mmalloc this way is mandated by a trick in mrealloc, that gives
+   back the memory of big blocks to the system before reallocating them: we don't
+   want to lose the beginning of the area when this happens */
+void *mmalloc_no_memset(xbt_mheap_t mdp, size_t size)
 {
-  struct mdesc *mdp;
-  PTR result;
+  void *result;
   size_t block, blocks, lastblocks, start;
   register size_t i;
-  struct list *next;
   register size_t log;
+  int it;
+
+  size_t requested_size = size; // The amount of memory requested by user, for real
+
+  /* Work even if the user was stupid enough to ask for a ridiculously small block (even 0-length),
+   * i.e. return a valid block that can be realloced and freed.
+   * glibc malloc does not use this trick but returns a constant pointer; we need to enlist the free fragments later on.
+   */
+  if (size < SMALLEST_POSSIBLE_MALLOC)
+    size = SMALLEST_POSSIBLE_MALLOC;
+
+  //  printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);

-  if (size == 0)
-    {
-      return (NULL);
+  if (!(mdp->flags & MMALLOC_INITIALIZED))
+    initialize(mdp);
+
+  mmalloc_paranoia(mdp);
+
+  /* Determine the allocation policy based on the request size. */
+  if (size <= BLOCKSIZE / 2) {
+    /* Small allocation to receive a fragment of a block.
+       Determine the logarithm to base two of the fragment size. */
+    log = 1;
+    --size;
+    while ((size /= 2) != 0) {
+      ++log;
     }
-  mdp = MD_TO_MDP (md);

+    /* Look in the fragment lists for a free fragment of the desired size. */
+    if (xbt_swag_size(&mdp->fraghead[log])>0) {
+      /* There are free fragments of this size; Get one of them and prepare to return it.
+         Update the block's nfree and if no other free fragment, get out of the swag. */
+
+      /* search a fragment that I could return as a result */
+      malloc_info *candidate_info = xbt_swag_getFirst(&mdp->fraghead[log]);
+      size_t candidate_block = (candidate_info - &(mdp->heapinfo[0]));
+      size_t candidate_frag;
+      for (candidate_frag=0;candidate_frag<(size_t) (BLOCKSIZE >> log);candidate_frag++)
+        if (candidate_info->busy_frag.frag_size[candidate_frag] == -1)
+          break;
+      xbt_assert(candidate_frag < (size_t) (BLOCKSIZE >> log),
+                 "Block %zu was registered as containing free fragments of type %zu, but I can't find any",candidate_block,log);
+
+      result = (void*) (((char*)ADDRESS(candidate_block)) + (candidate_frag << log));
+
+      /* Remove this fragment from the list of free guys */
+      candidate_info->busy_frag.nfree--;
+      if (candidate_info->busy_frag.nfree == 0) {
+        xbt_swag_remove(candidate_info,&mdp->fraghead[log]);
+      }
+
+      /* Update our metadata about this fragment */
+      candidate_info->busy_frag.frag_size[candidate_frag] = requested_size;
+      xbt_backtrace_no_malloc(candidate_info->busy_frag.bt[candidate_frag],XBT_BACKTRACE_SIZE);
+
+      /* Update the statistics. */
+      mdp -> heapstats.chunks_used++;
+      mdp -> heapstats.bytes_used += 1 << log;
+      mdp -> heapstats.chunks_free--;
+      mdp -> heapstats.bytes_free -= 1 << log;
+
+    } else {
+      /* No free fragments of the desired size, so get a new block
+         and break it into fragments, returning the first. */
+
+      result = mmalloc(mdp, BLOCKSIZE); // does not return NULL
+      block = BLOCK(result);
+
+      mdp->heapinfo[block].type = log;
+      /* Link all fragments but the first as free, and add the block to the swag of blocks containing free frags */
+      for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) {
+        mdp->heapinfo[block].busy_frag.frag_size[i] = -1;
+      }
+      mdp->heapinfo[block].busy_frag.nfree = i - 1;
+      mdp->heapinfo[block].freehook.prev = NULL;
+      mdp->heapinfo[block].freehook.next = NULL;
+
+      xbt_swag_insert(&mdp->heapinfo[block], &(mdp->fraghead[log]));
+
+      /* mark the fragment returned as busy */
+      mdp->heapinfo[block].busy_frag.frag_size[0] = requested_size;
+      xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_frag.bt[0],XBT_BACKTRACE_SIZE);
-  if (mdp -> mmalloc_hook != NULL)
-    {
-      return ((*mdp -> mmalloc_hook) (md, size));
+      /* update stats */
+      mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
+      mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
+      mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log);
     }
-
-  if (!(mdp -> flags & MMALLOC_INITIALIZED))
-    {
-      if (!initialize (mdp))
-        {
-          return (NULL);
-        }
+  } else {
+    /* Large allocation to receive one or more blocks.
+       Search the free list in a circle starting at the last place visited.
+       If we loop completely around without finding a large enough
+       space we will have to get more memory from the system.  */
+    blocks = BLOCKIFY(size);
+    start = block = MALLOC_SEARCH_START;
+    while (mdp->heapinfo[block].free_block.size < blocks) {
+      if (mdp->heapinfo[block].type >=0) { // Don't trust xbt_die and friends in malloc-level library, you fool!
+        fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
+        abort();
+      }
+
+      block = mdp->heapinfo[block].free_block.next;
+      if (block == start) {
+        /* Need to get more from the system.  Check to see if
+           the new core will be contiguous with the final free
+           block; if so we don't need to get as much. */
+        block = mdp->heapinfo[0].free_block.prev;
+        lastblocks = mdp->heapinfo[block].free_block.size;
+        if (mdp->heaplimit != 0 &&
+            block + lastblocks == mdp->heaplimit &&
+            mmorecore(mdp, 0) == ADDRESS(block + lastblocks) &&
+            (register_morecore(mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL) {
+          /* Which block we are extending (the `final free
+             block' referred to above) might have changed, if
+             it got combined with a freed info table. */
+          block = mdp->heapinfo[0].free_block.prev;
+
+          mdp->heapinfo[block].free_block.size += (blocks - lastblocks);
+          continue;
+        }
+        result = register_morecore(mdp, blocks * BLOCKSIZE);
+
+        block = BLOCK(result);
+        for (it=0;it<blocks;it++) {
+          mdp->heapinfo[block+it].type = 0;
+        }
+        mdp->heapinfo[block].busy_block.size = blocks;
+        mdp->heapinfo[block].busy_block.busy_size = requested_size;
+        mdp->heapinfo[block].busy_block.bt_size=xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);
+        mdp -> heapstats.chunks_used++;
+        mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+
+        return result;
+      }
+      /* Need large block(s), but found some in the existing heap */
+    }

-  if (size < sizeof (struct list))
-    {
-      size = sizeof (struct list);
+    /* At this point we have found a suitable free list entry.
+       Figure out how to remove what we need from the list. */
+    result = ADDRESS(block);
+    if (mdp->heapinfo[block].free_block.size > blocks) {
+      /* The block we found has a bit left over,
+         so relink the tail end back into the free list. */
+      mdp->heapinfo[block + blocks].free_block.size
+        = mdp->heapinfo[block].free_block.size - blocks;
+      mdp->heapinfo[block + blocks].free_block.next
+        = mdp->heapinfo[block].free_block.next;
+      mdp->heapinfo[block + blocks].free_block.prev
+        = mdp->heapinfo[block].free_block.prev;
+      mdp->heapinfo[mdp->heapinfo[block].free_block.prev].free_block.next
+        = mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev
+        = mdp->heapindex = block + blocks;
+    } else {
+      /* The block exactly matches our requirements,
+         so just remove it from the list. */
+      mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev
+        = mdp->heapinfo[block].free_block.prev;
+      mdp->heapinfo[mdp->heapinfo[block].free_block.prev].free_block.next
+        = mdp->heapindex = mdp->heapinfo[block].free_block.next;
     }

-  /* Determine the allocation policy based on the request size. */
-  if (size <= BLOCKSIZE / 2)
-    {
-      /* Small allocation to receive a fragment of a block.
-         Determine the logarithm to base two of the fragment size. */
-      log = 1;
-      --size;
-      while ((size /= 2) != 0)
-        {
-          ++log;
-        }
-
-      /* Look in the fragment lists for a
-         free fragment of the desired size. */
-      next = mdp -> fraghead[log].next;
-      if (next != NULL)
-        {
-          /* There are free fragments of this size.
-             Pop a fragment out of the fragment list and return it.
-             Update the block's nfree and first counters. */
-          result = (PTR) next;
-          next -> prev -> next = next -> next;
-          if (next -> next != NULL)
-            {
-              next -> next -> prev = next -> prev;
-            }
-          block = BLOCK (result);
-          if (--mdp -> heapinfo[block].busy.info.frag.nfree != 0)
-            {
-              mdp -> heapinfo[block].busy.info.frag.first =
-                RESIDUAL (next -> next, BLOCKSIZE) >> log;
-            }
-
-          /* Update the statistics.  */
-          mdp -> heapstats.chunks_used++;
-          mdp -> heapstats.bytes_used += 1 << log;
-          mdp -> heapstats.chunks_free--;
-          mdp -> heapstats.bytes_free -= 1 << log;
-        }
-      else
-        {
-          /* No free fragments of the desired size, so get a new block
-             and break it into fragments, returning the first. */
-          result = mmalloc (md, BLOCKSIZE);
-          if (result == NULL)
-            {
-              return (NULL);
-            }
-
-          /* Link all fragments but the first into the free list. */
-          for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
-            {
-              next = (struct list *) ((PTR) result + (i << log));
-              next -> next = mdp -> fraghead[log].next;
-              next -> prev = &mdp -> fraghead[log];
-              next -> prev -> next = next;
-              if (next -> next != NULL)
-                {
-                  next -> next -> prev = next;
-                }
-            }
-
-          /* Initialize the nfree and first counters for this block. */
-          block = BLOCK (result);
-          mdp -> heapinfo[block].busy.type = log;
-          mdp -> heapinfo[block].busy.info.frag.nfree = i - 1;
-          mdp -> heapinfo[block].busy.info.frag.first = i - 1;
-
-          mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1;
-          mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log);
-          mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log);
-        }
-    }
-  else
-    {
-      /* Large allocation to receive one or more blocks.
-         Search the free list in a circle starting at the last place visited.
-         If we loop completely around without finding a large enough
-         space we will have to get more memory from the system.  */
-      blocks = BLOCKIFY(size);
-      start = block = MALLOC_SEARCH_START;
-      while (mdp -> heapinfo[block].free.size < blocks)
-        {
-          block = mdp -> heapinfo[block].free.next;
-          if (block == start)
-            {
-              /* Need to get more from the system.  Check to see if
-                 the new core will be contiguous with the final free
-                 block; if so we don't need to get as much. */
-              block = mdp -> heapinfo[0].free.prev;
-              lastblocks = mdp -> heapinfo[block].free.size;
-              if (mdp -> heaplimit != 0 &&
-                  block + lastblocks == mdp -> heaplimit &&
-                  mdp -> morecore (mdp, 0) == ADDRESS(block + lastblocks) &&
-                  (morecore (mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL)
-                {
-                  /* Which block we are extending (the `final free
-                     block' referred to above) might have changed, if
-                     it got combined with a freed info table. */
-                  block = mdp -> heapinfo[0].free.prev;
-
-                  mdp -> heapinfo[block].free.size += (blocks - lastblocks);
-                  mdp -> heapstats.bytes_free +=
-                    (blocks - lastblocks) * BLOCKSIZE;
-                  continue;
-                }
-              result = morecore(mdp, blocks * BLOCKSIZE);
-              if (result == NULL)
-                {
-                  return (NULL);
-                }
-              block = BLOCK (result);
-              mdp -> heapinfo[block].busy.type = 0;
-              mdp -> heapinfo[block].busy.info.size = blocks;
-              mdp -> heapstats.chunks_used++;
-              mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
-              return (result);
-            }
-        }
-
-      /* At this point we have found a suitable free list entry.
-         Figure out how to remove what we need from the list. */
-      result = ADDRESS(block);
-      if (mdp -> heapinfo[block].free.size > blocks)
-        {
-          /* The block we found has a bit left over,
-             so relink the tail end back into the free list. */
-          mdp -> heapinfo[block + blocks].free.size
-            = mdp -> heapinfo[block].free.size - blocks;
-          mdp -> heapinfo[block + blocks].free.next
-            = mdp -> heapinfo[block].free.next;
-          mdp -> heapinfo[block + blocks].free.prev
-            = mdp -> heapinfo[block].free.prev;
-          mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
-            = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
-            = mdp -> heapindex = block + blocks;
-        }
-      else
-        {
-          /* The block exactly matches our requirements,
-             so just remove it from the list. */
-          mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev
-            = mdp -> heapinfo[block].free.prev;
-          mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next
-            = mdp -> heapindex = mdp -> heapinfo[block].free.next;
-          mdp -> heapstats.chunks_free--;
-        }
-
-      mdp -> heapinfo[block].busy.type = 0;
-      mdp -> heapinfo[block].busy.info.size = blocks;
-      mdp -> heapstats.chunks_used++;
-      mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
-      mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;
+    for (it=0;it<blocks;it++) {
+      mdp->heapinfo[block+it].type = 0;
+    }
+    mdp->heapinfo[block].busy_block.size = blocks;
+    mdp->heapinfo[block].busy_block.busy_size = requested_size;
+    //mdp->heapinfo[block].busy_block.bt_size = 0;
+    mdp->heapinfo[block].busy_block.bt_size = xbt_backtrace_no_malloc(mdp->heapinfo[block].busy_block.bt,XBT_BACKTRACE_SIZE);

-  return (result);
-}
+    mdp -> heapstats.chunks_used++;
+    mdp -> heapstats.bytes_used += blocks * BLOCKSIZE;
+    mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE;

-/* When using this package, provide a version of malloc/realloc/free built
-   on top of it, so that if we use the default sbrk() region we will not
-   collide with another malloc package trying to do the same thing, if
-   the application contains any "hidden" calls to malloc/realloc/free (such
-   as inside a system library). */
+  }
+  //printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);

-PTR
-malloc (size)
-  size_t size;
-{
-  PTR result;
-  result = mmalloc ((PTR) NULL, size);
   return (result);
 }
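
As an aside, here is a minimal standalone sketch (not part of the commit above) of the size-class logic that mmalloc_no_memset() applies: requests up to BLOCKSIZE/2 are rounded up to a power-of-two fragment of one block via the `log = 1; --size; while ((size /= 2) != 0) ++log;` loop, while larger requests are rounded up to whole blocks. The BLOCKSIZE value of 4096 and the BLOCKIFY rounding used below are assumptions about mmprivate.h, and the sketch skips the SMALLEST_POSSIBLE_MALLOC clamp that the real code performs first.

/* sketch.c -- illustration only; BLOCKSIZE and BLOCKIFY are assumed values,
 * not taken from the diff above. */
#include <stdio.h>
#include <stddef.h>

#define BLOCKSIZE 4096
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

int main(void)
{
  size_t sizes[] = { 1, 24, 100, 2048, 2049, 10000 };
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
    size_t size = sizes[i];
    if (size <= BLOCKSIZE / 2) {
      /* same loop as in mmalloc_no_memset(): smallest log with 2^log >= size */
      size_t log = 1;
      size_t s = size - 1;
      while ((s /= 2) != 0)
        ++log;
      printf("%6zu bytes -> one %4d-byte fragment (log=%zu)\n",
             size, 1 << log, log);
    } else {
      printf("%6zu bytes -> %zu whole block(s)\n", size, (size_t) BLOCKIFY(size));
    }
  }
  return 0;
}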