From: mquinson
Date: Thu, 6 May 2010 23:43:13 +0000 (+0000)
Subject: Fight for better integration of mmalloc, mc and xbt
X-Git-Tag: SVN~33
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/4c04d8355923f8323a1a3195fe3b73ac08b984ea

Fight for better integration of mmalloc, mc and xbt

I wanted to move the legacy interception (i.e., the redefinition of malloc/free/realloc) from mc to mmalloc, where it belongs. But it broke libgras, with memory corruption symptoms in tesh.

I thought it was a race condition because mmalloc is not reentrant. So I protected the main functions with an xbt_os_mutex (and creating it in the default heap was challenging, since the mutex itself must live in the heap, which is not ready yet). But it didn't help: I still had the same memory corruption symptoms in tesh, unchanged. Maybe some system function gets messed up by the mmalloc stuff?

So I decided to comment out the legacy interception from mmalloc for a while (breaking mc, but who cares at this point?). But the xbt_os_mutexes are not in libsimgrid, since pthreads and ucontexts did not mix well a few years ago. So I changed the corresponding stubs from "die loudly" to "do nothing". That's dangerous (one could believe they are protected by a mutex when they are not), but I need the EPR refactoring to sort it out correctly.

All this for... nothing. The legacy interception will still live in mc. And I guess that mc must go in another library, not the simulation one, if it does not mix well with syscalls. Well, mmalloc is now a bit more thread-friendly; let's stay positive.

git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/simgrid/simgrid/trunk@7711 48e7efb5-ca39-0410-a469-dd3cf9ba447f
---
diff --git a/include/xbt/mmalloc.h b/include/xbt/mmalloc.h index 9ce4cd00db..f813b78016 100644 --- a/include/xbt/mmalloc.h +++ b/include/xbt/mmalloc.h @@ -62,4 +62,10 @@ extern int mmtrace(void); extern void* mmalloc_findbase(int size); +/* To change the heap used when using the legacy version malloc/free/realloc and such */ +void mmalloc_set_current_heap(void *new_heap); +void* mmalloc_get_current_heap(void); + + + #endif /* MMALLOC_H */ diff --git a/src/include/mc/mc.h b/src/include/mc/mc.h index f14aef36b5..064a753a3b 100644 --- a/src/include/mc/mc.h +++ b/src/include/mc/mc.h @@ -29,7 +29,8 @@ XBT_PUBLIC(void) MC_transition_set_comm(mc_transition_t, smx_comm_t); /********************************* Memory *************************************/ XBT_PUBLIC(void) MC_memory_init(void); /* Initialize the memory subsystem */ -XBT_PUBLIC(void) MC_memory_exit(void); /* Finish the memory subsystem */ +XBT_PUBLIC(void) MC_memory_exit(void); + SG_END_DECL() diff --git a/src/mc/mc_memory.c b/src/mc/mc_memory.c index 3f08a5992c..970f00fc00 100644 --- a/src/mc/mc_memory.c +++ b/src/mc/mc_memory.c @@ -17,7 +17,6 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_memory, mc, /* Pointers to each of the heap regions to use */ void *std_heap=NULL; /* memory erased each time the MC stuff rollbacks to the beginning. Almost everything goes here */ void *raw_heap=NULL; /* memory persistent over the MC rollbacks. Only MC stuff should go there */ -void *actual_heap=NULL; /* The heap we are currently using. Either std_heap or raw_heap.
Controlled by macros MC_SET_RAW_MEM/MC_UNSET_RAW_MEM */ /* Pointers to the beginning and end of the .data and .bss segment of libsimgrid */ /* They are initialized once at memory_init */ @@ -82,58 +81,12 @@ void MC_memory_init() } /* Finish the memory subsystem */ -void MC_memory_exit() -{ - mmalloc_detach(std_heap); - mmalloc_detach(raw_heap); - actual_heap = NULL; -} - -void *malloc(size_t n) { - void *ret = mmalloc(actual_heap, n); - - DEBUG2("%zu bytes were allocated at %p",n, ret); - return ret; -} - -void *calloc(size_t nmemb, size_t size) -{ - size_t total_size = nmemb * size; - - void *ret = mmalloc(actual_heap, total_size); - -/* Fill the allocated memory with zeroes to mimic calloc behaviour */ - memset(ret,'\0', total_size); - - DEBUG2("%zu bytes were mallocated and zeroed at %p",total_size, ret); - return ret; +#include "xbt_modinter.h" +void MC_memory_exit(void) { + if (raw_heap) + mmalloc_detach(raw_heap); } - -void *realloc(void *p, size_t s) -{ - void *ret = NULL; - - if (s) { - if (p) - ret = mrealloc(actual_heap, p,s); - else - ret = malloc(s); /* FIXME: shouldn't this be mmalloc? */ - } else { - if (p) { - free(p); - } - } - DEBUG2("%zu bytes were reallocated at %p",s,ret); - return ret; -} - -void free(void *p) -{ - DEBUG1("%p was freed",p); -// xbt_assert(actual_heap != NULL); FIXME: I had to comment this - return mfree(actual_heap, p); -} /* FIXME: Horrible hack! because the mmalloc library doesn't provide yet of */ /* an API to query about the status of a heap, we simply call mmstats and */ diff --git a/src/mc/private.h b/src/mc/private.h index 6ecb3ce451..b6a3762dc4 100644 --- a/src/mc/private.h +++ b/src/mc/private.h @@ -108,14 +108,13 @@ void MC_print_statistics(mc_stats_t); /* Normally the system should operate in std, for switching to raw mode */ /* you must wrap the code between MC_SET_RAW_MODE and MC_UNSET_RAW_MODE */ -extern void *actual_heap; extern void *std_heap; extern void *raw_heap; extern void *libsimgrid_data_addr_start; extern size_t libsimgrid_data_size; -#define MC_SET_RAW_MEM actual_heap = raw_heap -#define MC_UNSET_RAW_MEM actual_heap = std_heap +#define MC_SET_RAW_MEM mmalloc_set_current_heap(raw_heap) +#define MC_UNSET_RAW_MEM mmalloc_set_current_heap(std_heap) /******************************* MEMORY MAPPINGS ***************************/ /* These functions and data structures implements a binary interface for */ diff --git a/src/xbt/mmalloc/attach.c b/src/xbt/mmalloc/attach.c index 31e6a4baed..db06a3c470 100644 --- a/src/xbt/mmalloc/attach.c +++ b/src/xbt/mmalloc/attach.c @@ -68,9 +68,7 @@ static struct mdesc *reuse (int fd); On failure returns NULL. */ -void * -mmalloc_attach (int fd, void *baseaddr) -{ +void * mmalloc_attach (int fd, void *baseaddr) { struct mdesc mtemp; struct mdesc *mdp; void* mbase; @@ -86,10 +84,10 @@ mmalloc_attach (int fd, void *baseaddr) if (fd >= 0) { if (fstat (fd, &sbuf) < 0) - return (NULL); + return (NULL); else if (sbuf.st_size > 0) - return ((void*) reuse (fd)); + return ((void*) reuse (fd)); } /* If the user provided NULL BASEADDR then fail */ @@ -119,18 +117,6 @@ mmalloc_attach (int fd, void *baseaddr) /* If we have not been passed a valid open file descriptor for the file to map to, then open /dev/zero and use that to map to. 
*/ -/* if (mdp -> fd < 0)*/ -/* {*/ -/* if ((mdp -> fd = open ("/dev/zero", O_RDWR)) < 0)*/ -/* {*/ -/* return (NULL);*/ -/* }*/ -/* else*/ -/* {*/ -/* mdp -> flags |= MMALLOC_DEVZERO;*/ -/* }*/ -/* }*/ - /* Now try to map in the first page, copy the malloc descriptor structure there, and arrange to return a pointer to this new copy. If the mapping fails, then close the file descriptor if it was opened by us, and arrange @@ -139,14 +125,21 @@ mmalloc_attach (int fd, void *baseaddr) if ((mbase = mdp -> morecore (mdp, sizeof (mtemp))) != NULL) { memcpy (mbase, mdp, sizeof (mtemp)); -// mdp = (struct mdesc *) mbase; + // mdp = (struct mdesc *) mbase; } else { abort(); -// mdp = NULL; + // mdp = NULL; + } + + { /* create the mutex within that heap */ + void*old_heap=mmalloc_get_current_heap(); + mmalloc_set_current_heap(mbase); + mdp->mutex =xbt_os_mutex_init(); + mmalloc_set_current_heap(old_heap); } - + return ((void*) mbase); } @@ -194,13 +187,22 @@ reuse (int fd) if (__mmalloc_remap_core (&mtemp) == mtemp.base) { mdp = (struct mdesc *) mtemp.base; - mdp -> fd = fd; - mdp -> morecore = __mmalloc_mmap_morecore; - if (mdp -> mfree_hook != NULL) - { - mmcheckf ((void*) mdp, (void (*) (void)) NULL, 1); - } + mdp -> fd = fd; + mdp -> morecore = __mmalloc_mmap_morecore; + mdp->mutex =xbt_os_mutex_init(); + if (mdp -> mfree_hook != NULL) + { + mmcheckf ((void*) mdp, (void (*) (void)) NULL, 1); + } + } + + { /* create the mutex within that heap */ + void*old_heap=mmalloc_get_current_heap(); + mmalloc_set_current_heap(mdp); + mdp->mutex =xbt_os_mutex_init(); + mmalloc_set_current_heap(old_heap); } + return (mdp); } diff --git a/src/xbt/mmalloc/detach.c b/src/xbt/mmalloc/detach.c index 43b8687072..dbf4d34b29 100644 --- a/src/xbt/mmalloc/detach.c +++ b/src/xbt/mmalloc/detach.c @@ -37,6 +37,7 @@ mmalloc_detach (void *md) { mtemp = *(struct mdesc *) md; + xbt_os_mutex_destroy(((struct mdesc*)md)->mutex); /* Now unmap all the pages associated with this region by asking for a negative increment equal to the current size of the region. */ diff --git a/src/xbt/mmalloc/keys.c b/src/xbt/mmalloc/keys.c index a63c80564f..8f168b8d7b 100644 --- a/src/xbt/mmalloc/keys.c +++ b/src/xbt/mmalloc/keys.c @@ -32,11 +32,13 @@ mmalloc_setkey (void *md, int keynum, void *key) struct mdesc *mdp = (struct mdesc *) md; int result = 0; + LOCK(mdp); if ((mdp != NULL) && (keynum >= 0) && (keynum < MMALLOC_KEYS)) { mdp -> keys [keynum] = key; result++; } + UNLOCK(mdp); return (result); } diff --git a/src/xbt/mmalloc/mcalloc.c b/src/xbt/mmalloc/mcalloc.c index 6c26022a04..92f7eb16e3 100644 --- a/src/xbt/mmalloc/mcalloc.c +++ b/src/xbt/mmalloc/mcalloc.c @@ -26,14 +26,3 @@ mcalloc (void *md, register size_t nmemb, register size_t size) } return (result); } - -/* When using this package, provide a version of malloc/realloc/free built - on top of it, so that if we use the default sbrk() region we will not - collide with another malloc package trying to do the same thing, if - the application contains any "hidden" calls to malloc/realloc/free (such - as inside a system library). 
- FIXME: disabled for now */ - -//void* calloc (size_t nmemb, size_t size) { -// return (mcalloc ((void*) NULL, nmemb, size)); -//} diff --git a/src/xbt/mmalloc/mfree.c b/src/xbt/mmalloc/mfree.c index b2bc6496ca..142777b290 100644 --- a/src/xbt/mmalloc/mfree.c +++ b/src/xbt/mmalloc/mfree.c @@ -25,74 +25,80 @@ __mmalloc_free (struct mdesc *mdp, void *ptr) block = BLOCK (ptr); + if ((char*)ptr < (char*)mdp->heapbase || block > mdp->heapsize ) { + printf("Ouch, this pointer is not mine. I refuse to free it.\n"); + return; + } + + type = mdp -> heapinfo[block].busy.type; switch (type) - { - case 0: - /* Get as many statistics as early as we can. */ - mdp -> heapstats.chunks_used--; - mdp -> heapstats.bytes_used -= - mdp -> heapinfo[block].busy.info.size * BLOCKSIZE; - mdp -> heapstats.bytes_free += - mdp -> heapinfo[block].busy.info.size * BLOCKSIZE; - - /* Find the free cluster previous to this one in the free list. + { + case 0: + /* Get as many statistics as early as we can. */ + mdp -> heapstats.chunks_used--; + mdp -> heapstats.bytes_used -= + mdp -> heapinfo[block].busy.info.size * BLOCKSIZE; + mdp -> heapstats.bytes_free += + mdp -> heapinfo[block].busy.info.size * BLOCKSIZE; + + /* Find the free cluster previous to this one in the free list. Start searching at the last block referenced; this may benefit programs with locality of allocation. */ - i = mdp -> heapindex; - if (i > block) - { - while (i > block) - { - i = mdp -> heapinfo[i].free.prev; - } - } - else - { - do - { - i = mdp -> heapinfo[i].free.next; - } - while ((i != 0) && (i < block)); - i = mdp -> heapinfo[i].free.prev; - } - - /* Determine how to link this block into the free list. */ - if (block == i + mdp -> heapinfo[i].free.size) - { - /* Coalesce this block with its predecessor. */ - mdp -> heapinfo[i].free.size += - mdp -> heapinfo[block].busy.info.size; - block = i; - } - else - { - /* Really link this block back into the free list. */ - mdp -> heapinfo[block].free.size = - mdp -> heapinfo[block].busy.info.size; - mdp -> heapinfo[block].free.next = mdp -> heapinfo[i].free.next; - mdp -> heapinfo[block].free.prev = i; - mdp -> heapinfo[i].free.next = block; - mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev = block; - mdp -> heapstats.chunks_free++; - } - - /* Now that the block is linked in, see if we can coalesce it + i = mdp -> heapindex; + if (i > block) + { + while (i > block) + { + i = mdp -> heapinfo[i].free.prev; + } + } + else + { + do + { + i = mdp -> heapinfo[i].free.next; + } + while ((i != 0) && (i < block)); + i = mdp -> heapinfo[i].free.prev; + } + + /* Determine how to link this block into the free list. */ + if (block == i + mdp -> heapinfo[i].free.size) + { + /* Coalesce this block with its predecessor. */ + mdp -> heapinfo[i].free.size += + mdp -> heapinfo[block].busy.info.size; + block = i; + } + else + { + /* Really link this block back into the free list. */ + mdp -> heapinfo[block].free.size = + mdp -> heapinfo[block].busy.info.size; + mdp -> heapinfo[block].free.next = mdp -> heapinfo[i].free.next; + mdp -> heapinfo[block].free.prev = i; + mdp -> heapinfo[i].free.next = block; + mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev = block; + mdp -> heapstats.chunks_free++; + } + + /* Now that the block is linked in, see if we can coalesce it with its successor (by deleting its successor from the list and adding in its size). 
*/ - if (block + mdp -> heapinfo[block].free.size == - mdp -> heapinfo[block].free.next) - { - mdp -> heapinfo[block].free.size - += mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.size; - mdp -> heapinfo[block].free.next - = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.next; - mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev = block; - mdp -> heapstats.chunks_free--; - } + if (block + mdp -> heapinfo[block].free.size == + mdp -> heapinfo[block].free.next) + { + mdp -> heapinfo[block].free.size + += mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.size; + mdp -> heapinfo[block].free.next + = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.next; + mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev = block; + mdp -> heapstats.chunks_free--; + } - /* Now see if we can return stuff to the system. */ - /* blocks = mdp -> heapinfo[block].free.size; + /* Now see if we can return stuff to the system. */ + /* blocks = mdp -> heapinfo[block].free.size; if (blocks >= FINAL_FREE_BLOCKS && block + blocks == mdp -> heaplimit && mdp -> morecore (mdp, 0) == ADDRESS (block + blocks)) { @@ -108,127 +114,106 @@ __mmalloc_free (struct mdesc *mdp, void *ptr) mdp -> heapstats.bytes_free -= bytes; }*/ - /* Set the next search to begin at this block. */ - mdp -> heapindex = block; - break; + /* Set the next search to begin at this block. */ + mdp -> heapindex = block; + break; - default: - /* Do some of the statistics. */ - mdp -> heapstats.chunks_used--; - mdp -> heapstats.bytes_used -= 1 << type; - mdp -> heapstats.chunks_free++; - mdp -> heapstats.bytes_free += 1 << type; + default: + /* Do some of the statistics. */ + mdp -> heapstats.chunks_used--; + mdp -> heapstats.bytes_used -= 1 << type; + mdp -> heapstats.chunks_free++; + mdp -> heapstats.bytes_free += 1 << type; - /* Get the address of the first free fragment in this block. */ - prev = (struct list *) - ((char*) ADDRESS(block) + - ( mdp -> heapinfo[block].busy.info.frag.first << type)); + /* Get the address of the first free fragment in this block. */ + prev = (struct list *) + ((char*) ADDRESS(block) + + ( mdp -> heapinfo[block].busy.info.frag.first << type)); - if (mdp -> heapinfo[block].busy.info.frag.nfree == - (BLOCKSIZE >> type) - 1) - { - /* If all fragments of this block are free, remove them + if (mdp -> heapinfo[block].busy.info.frag.nfree == + (BLOCKSIZE >> type) - 1) + { + /* If all fragments of this block are free, remove them from the fragment list and free the whole block. */ - next = prev; - for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i) - { - next = next -> next; - } - prev -> prev -> next = next; - if (next != NULL) - { - next -> prev = prev -> prev; - } - mdp -> heapinfo[block].busy.type = 0; - mdp -> heapinfo[block].busy.info.size = 1; - - /* Keep the statistics accurate. */ - mdp -> heapstats.chunks_used++; - mdp -> heapstats.bytes_used += BLOCKSIZE; - mdp -> heapstats.chunks_free -= BLOCKSIZE >> type; - mdp -> heapstats.bytes_free -= BLOCKSIZE; - - mfree ((void*) mdp, (void*) ADDRESS(block)); - } - else if (mdp -> heapinfo[block].busy.info.frag.nfree != 0) - { - /* If some fragments of this block are free, link this + next = prev; + for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i) + { + next = next -> next; + } + prev -> prev -> next = next; + if (next != NULL) + { + next -> prev = prev -> prev; + } + mdp -> heapinfo[block].busy.type = 0; + mdp -> heapinfo[block].busy.info.size = 1; + + /* Keep the statistics accurate. 
*/ + mdp -> heapstats.chunks_used++; + mdp -> heapstats.bytes_used += BLOCKSIZE; + mdp -> heapstats.chunks_free -= BLOCKSIZE >> type; + mdp -> heapstats.bytes_free -= BLOCKSIZE; + + mfree ((void*) mdp, (void*) ADDRESS(block)); + } + else if (mdp -> heapinfo[block].busy.info.frag.nfree != 0) + { + /* If some fragments of this block are free, link this fragment into the fragment list after the first free fragment of this block. */ - next = (struct list *) ptr; - next -> next = prev -> next; - next -> prev = prev; - prev -> next = next; - if (next -> next != NULL) - { - next -> next -> prev = next; - } - ++mdp -> heapinfo[block].busy.info.frag.nfree; - } - else - { - /* No fragments of this block are free, so link this + next = (struct list *) ptr; + next -> next = prev -> next; + next -> prev = prev; + prev -> next = next; + if (next -> next != NULL) + { + next -> next -> prev = next; + } + ++mdp -> heapinfo[block].busy.info.frag.nfree; + } + else + { + /* No fragments of this block are free, so link this fragment into the fragment list and announce that it is the first free fragment of this block. */ - prev = (struct list *) ptr; - mdp -> heapinfo[block].busy.info.frag.nfree = 1; - mdp -> heapinfo[block].busy.info.frag.first = - RESIDUAL (ptr, BLOCKSIZE) >> type; - prev -> next = mdp -> fraghead[type].next; - prev -> prev = &mdp -> fraghead[type]; - prev -> prev -> next = prev; - if (prev -> next != NULL) - { - prev -> next -> prev = prev; - } - } - break; + prev = (struct list *) ptr; + mdp -> heapinfo[block].busy.info.frag.nfree = 1; + mdp -> heapinfo[block].busy.info.frag.first = + RESIDUAL (ptr, BLOCKSIZE) >> type; + prev -> next = mdp -> fraghead[type].next; + prev -> prev = &mdp -> fraghead[type]; + prev -> prev -> next = prev; + if (prev -> next != NULL) + { + prev -> next -> prev = prev; + } } + break; + } } /* Return memory to the heap. */ -void -mfree (void *md, void *ptr) -{ +void mfree (void *md, void *ptr) { struct mdesc *mdp; register struct alignlist *l; - if (ptr != NULL) - { - mdp = MD_TO_MDP (md); - for (l = mdp -> aligned_blocks; l != NULL; l = l -> next) - { - if (l -> aligned == ptr) - { - l -> aligned = NULL; /* Mark the slot in the list as free. */ - ptr = l -> exact; - break; - } - } - if (mdp -> mfree_hook != NULL) - { - (*mdp -> mfree_hook) (mdp, ptr); - } - else - { - __mmalloc_free (mdp, ptr); - } + if (ptr != NULL) { + mdp = MD_TO_MDP (md); + LOCK(mdp); + for (l = mdp -> aligned_blocks; l != NULL; l = l -> next) { + if (l -> aligned == ptr) { + l -> aligned = NULL; /* Mark the slot in the list as free. */ + ptr = l -> exact; + break; + } } + if (mdp -> mfree_hook != NULL) { + (*mdp -> mfree_hook) (mdp, ptr); + } else { + __mmalloc_free (mdp, ptr); + } + UNLOCK(mdp); + } } - -/* Useless prototype to make gcc happy */ -void free(void* ptr); - - -/* When using this package, provide a version of malloc/realloc/free built - on top of it, so that if we use the default sbrk() region we will not - collide with another malloc package trying to do the same thing, if - the application contains any "hidden" calls to malloc/realloc/free (such - as inside a system library). 
- FIXME: disabled for now */ - -//void free (void* ptr) { -// mfree ((void*) NULL, ptr); -//} diff --git a/src/xbt/mmalloc/mm.c b/src/xbt/mmalloc/mm.c index 7aeda845be..a096061fbf 100644 --- a/src/xbt/mmalloc/mm.c +++ b/src/xbt/mmalloc/mm.c @@ -29,3 +29,4 @@ #include "detach.c" #include "keys.c" #include "sbrk-sup.c" +#include "mm_legacy.c" diff --git a/src/xbt/mmalloc/mm_legacy.c b/src/xbt/mmalloc/mm_legacy.c new file mode 100644 index 0000000000..d126d185f8 --- /dev/null +++ b/src/xbt/mmalloc/mm_legacy.c @@ -0,0 +1,77 @@ +/* Copyright (c) 2010. The SimGrid Team. + * All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +/* Redefine the classical malloc/free/realloc functions so that they fit well in the mmalloc framework */ + +#include "mmprivate.h" + +static void *__mmalloc_current_heap=NULL; /* The heap we are currently using. */ + +void* mmalloc_get_current_heap(void) { + return __mmalloc_current_heap; +} +void mmalloc_set_current_heap(void *new_heap) { + __mmalloc_current_heap=new_heap; +} + +#ifdef MMALLOC_WANT_OVERIDE_LEGACY +void *malloc(size_t n) { + void *ret = mmalloc(__mmalloc_current_heap, n); + + return ret; +} + +void *calloc(size_t nmemb, size_t size) { + size_t total_size = nmemb * size; + + void *ret = mmalloc(__mmalloc_current_heap, total_size); + + /* Fill the allocated memory with zeroes to mimic calloc behaviour */ + memset(ret,'\0', total_size); + + return ret; +} + +void *realloc(void *p, size_t s) { + void *ret = NULL; + + if (s) { + if (p) + ret = mrealloc(__mmalloc_current_heap, p,s); + else + ret = mmalloc(__mmalloc_current_heap,s); + } else { + if (p) + mfree(__mmalloc_current_heap,p); + } + + return ret; +} + +void free(void *p) { + return mfree(__mmalloc_current_heap, p); +} +#endif + +/* Make sure it works with md==NULL */ + +#define HEAP_OFFSET 40960000 /* Safety gap from the heap's break address */ + +void *mmalloc_get_default_md(void) { + xbt_assert(__mmalloc_default_mdp); + return __mmalloc_default_mdp; +} + +/* Initialize the default malloc descriptor. */ +#include "xbt_modinter.h" +void mmalloc_preinit(void) { + __mmalloc_default_mdp = mmalloc_attach(-1, (char *)sbrk(0) + HEAP_OFFSET); + xbt_assert(__mmalloc_default_mdp != NULL); +} +void mmalloc_postexit(void) { + /* Do not detach the default mdp or ldl won't be able to free the memory it allocated since we're in memory */ + // mmalloc_detach(__mmalloc_default_mdp); +} diff --git a/src/xbt/mmalloc/mmalloc.c b/src/xbt/mmalloc/mmalloc.c index c7d1a07fac..c717fe58e7 100644 --- a/src/xbt/mmalloc/mmalloc.c +++ b/src/xbt/mmalloc/mmalloc.c @@ -22,35 +22,32 @@ static void* align (struct mdesc *mdp, size_t size); /* Aligned allocation. */ -static void* -align (struct mdesc *mdp, size_t size) -{ +static void* align (struct mdesc *mdp, size_t size) { void* result; unsigned long int adj; result = mdp -> morecore (mdp, size); adj = RESIDUAL (result, BLOCKSIZE); if (adj != 0) - { - adj = BLOCKSIZE - adj; - mdp -> morecore (mdp, adj); - result = (char*) result + adj; - } + { + adj = BLOCKSIZE - adj; + mdp -> morecore (mdp, adj); + result = (char*) result + adj; + } return (result); } /* Set everything up and remember that we have. 
*/ -static int -initialize (struct mdesc *mdp) +static int initialize (struct mdesc *mdp) { mdp -> heapsize = HEAP / BLOCKSIZE; mdp -> heapinfo = (malloc_info *) - align (mdp, mdp -> heapsize * sizeof (malloc_info)); + align (mdp, mdp -> heapsize * sizeof (malloc_info)); if (mdp -> heapinfo == NULL) - { - return (0); - } + { + return (0); + } memset ((void*)mdp -> heapinfo, 0, mdp -> heapsize * sizeof (malloc_info)); mdp -> heapinfo[0].free.size = 0; mdp -> heapinfo[0].free.next = mdp -> heapinfo[0].free.prev = 0; @@ -63,8 +60,7 @@ initialize (struct mdesc *mdp) /* Get neatly aligned memory, initializing or growing the heap info table as necessary. */ -static void* -morecore (struct mdesc *mdp, size_t size) +static void* morecore (struct mdesc *mdp, size_t size) { void* result; malloc_info *newinfo, *oldinfo; @@ -72,35 +68,35 @@ morecore (struct mdesc *mdp, size_t size) result = align (mdp, size); if (result == NULL) - { - return (NULL); - } + { + return (NULL); + } /* Check if we need to grow the info table. */ if ((size_t) BLOCK ((char*) result + size) > mdp -> heapsize) + { + newsize = mdp -> heapsize; + while ((size_t) BLOCK ((char*) result + size) > newsize) + { + newsize *= 2; + } + newinfo = (malloc_info *) align (mdp, newsize * sizeof (malloc_info)); + if (newinfo == NULL) { - newsize = mdp -> heapsize; - while ((size_t) BLOCK ((char*) result + size) > newsize) - { - newsize *= 2; - } - newinfo = (malloc_info *) align (mdp, newsize * sizeof (malloc_info)); - if (newinfo == NULL) - { - mdp -> morecore (mdp, -size); - return (NULL); - } - memset ((void*) newinfo, 0, newsize * sizeof (malloc_info)); - memcpy ((void*) newinfo, (void*) mdp -> heapinfo, - mdp -> heapsize * sizeof (malloc_info)); - oldinfo = mdp -> heapinfo; - newinfo[BLOCK (oldinfo)].busy.type = 0; - newinfo[BLOCK (oldinfo)].busy.info.size - = BLOCKIFY (mdp -> heapsize * sizeof (malloc_info)); - mdp -> heapinfo = newinfo; - __mmalloc_free (mdp, (void*)oldinfo); - mdp -> heapsize = newsize; + mdp -> morecore (mdp, -size); + return (NULL); } + memset ((void*) newinfo, 0, newsize * sizeof (malloc_info)); + memcpy ((void*) newinfo, (void*) mdp -> heapinfo, + mdp -> heapsize * sizeof (malloc_info)); + oldinfo = mdp -> heapinfo; + newinfo[BLOCK (oldinfo)].busy.type = 0; + newinfo[BLOCK (oldinfo)].busy.info.size + = BLOCKIFY (mdp -> heapsize * sizeof (malloc_info)); + mdp -> heapinfo = newinfo; + __mmalloc_free (mdp, (void*)oldinfo); + mdp -> heapsize = newsize; + } mdp -> heaplimit = BLOCK ((char*) result + size); return (result); @@ -108,9 +104,7 @@ morecore (struct mdesc *mdp, size_t size) /* Allocate memory from the heap. 
*/ -void* -mmalloc (void *md, size_t size) -{ +void* mmalloc (void *md, size_t size) { struct mdesc *mdp; void* result; size_t block, blocks, lastblocks, start; @@ -119,197 +113,192 @@ mmalloc (void *md, size_t size) register size_t log; if (size == 0) - { - return (NULL); - } + return (NULL); mdp = MD_TO_MDP (md); - - if (mdp -> mmalloc_hook != NULL) - { - return ((*mdp -> mmalloc_hook) (md, size)); - } + LOCK(mdp); + printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout); - if (!(mdp -> flags & MMALLOC_INITIALIZED)) - { - if (!initialize (mdp)) - { - return (NULL); - } - } - if (size < sizeof (struct list)) - { - size = sizeof (struct list); + if (mdp -> mmalloc_hook != NULL) { + void * res = ((*mdp -> mmalloc_hook) (md, size)); + UNLOCK(mdp); + return res; + } + + if (!(mdp -> flags & MMALLOC_INITIALIZED)) { + if (!initialize (mdp)) { + UNLOCK(mdp); + return (NULL); } + } + + if (size < sizeof (struct list)) { + size = sizeof (struct list); + } /* Determine the allocation policy based on the request size. */ if (size <= BLOCKSIZE / 2) - { - /* Small allocation to receive a fragment of a block. + { + /* Small allocation to receive a fragment of a block. Determine the logarithm to base two of the fragment size. */ - log = 1; - --size; - while ((size /= 2) != 0) - { - ++log; - } - - /* Look in the fragment lists for a + log = 1; + --size; + while ((size /= 2) != 0) + { + ++log; + } + + /* Look in the fragment lists for a free fragment of the desired size. */ - next = mdp -> fraghead[log].next; - if (next != NULL) - { - /* There are free fragments of this size. + next = mdp -> fraghead[log].next; + if (next != NULL) + { + /* There are free fragments of this size. Pop a fragment out of the fragment list and return it. Update the block's nfree and first counters. */ - result = (void*) next; - next -> prev -> next = next -> next; - if (next -> next != NULL) - { - next -> next -> prev = next -> prev; - } - block = BLOCK (result); - if (--mdp -> heapinfo[block].busy.info.frag.nfree != 0) - { - mdp -> heapinfo[block].busy.info.frag.first = - RESIDUAL (next -> next, BLOCKSIZE) >> log; - } - - /* Update the statistics. */ - mdp -> heapstats.chunks_used++; - mdp -> heapstats.bytes_used += 1 << log; - mdp -> heapstats.chunks_free--; - mdp -> heapstats.bytes_free -= 1 << log; - } - else - { - /* No free fragments of the desired size, so get a new block + result = (void*) next; + next -> prev -> next = next -> next; + if (next -> next != NULL) + { + next -> next -> prev = next -> prev; + } + block = BLOCK (result); + if (--mdp -> heapinfo[block].busy.info.frag.nfree != 0) + { + mdp -> heapinfo[block].busy.info.frag.first = + RESIDUAL (next -> next, BLOCKSIZE) >> log; + } + + /* Update the statistics. */ + mdp -> heapstats.chunks_used++; + mdp -> heapstats.bytes_used += 1 << log; + mdp -> heapstats.chunks_free--; + mdp -> heapstats.bytes_free -= 1 << log; + } + else + { + /* No free fragments of the desired size, so get a new block and break it into fragments, returning the first. */ - result = mmalloc (md, BLOCKSIZE); - if (result == NULL) - { - return (NULL); - } - - /* Link all fragments but the first into the free list. 
*/ - for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) - { - next = (struct list *) ((char*) result + (i << log)); - next -> next = mdp -> fraghead[log].next; - next -> prev = &mdp -> fraghead[log]; - next -> prev -> next = next; - if (next -> next != NULL) - { - next -> next -> prev = next; - } - } - - /* Initialize the nfree and first counters for this block. */ - block = BLOCK (result); - mdp -> heapinfo[block].busy.type = log; - mdp -> heapinfo[block].busy.info.frag.nfree = i - 1; - mdp -> heapinfo[block].busy.info.frag.first = i - 1; - - mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1; - mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log); - mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log); - } + UNLOCK(mdp); + printf("(%s) No free fragment...",xbt_thread_self_name()); + result = mmalloc (md, BLOCKSIZE); + printf("(%s) Fragment: %p...",xbt_thread_self_name(),result); + LOCK(mdp); + if (result == NULL) + { + UNLOCK(mdp); + return (NULL); + } + + /* Link all fragments but the first into the free list. */ + for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i) + { + next = (struct list *) ((char*) result + (i << log)); + next -> next = mdp -> fraghead[log].next; + next -> prev = &mdp -> fraghead[log]; + next -> prev -> next = next; + if (next -> next != NULL) + { + next -> next -> prev = next; + } + } + + /* Initialize the nfree and first counters for this block. */ + block = BLOCK (result); + mdp -> heapinfo[block].busy.type = log; + mdp -> heapinfo[block].busy.info.frag.nfree = i - 1; + mdp -> heapinfo[block].busy.info.frag.first = i - 1; + + mdp -> heapstats.chunks_free += (BLOCKSIZE >> log) - 1; + mdp -> heapstats.bytes_free += BLOCKSIZE - (1 << log); + mdp -> heapstats.bytes_used -= BLOCKSIZE - (1 << log); } + } else - { - /* Large allocation to receive one or more blocks. + { + /* Large allocation to receive one or more blocks. Search the free list in a circle starting at the last place visited. If we loop completely around without finding a large enough space we will have to get more memory from the system. */ - blocks = BLOCKIFY(size); - start = block = MALLOC_SEARCH_START; - while (mdp -> heapinfo[block].free.size < blocks) - { - block = mdp -> heapinfo[block].free.next; - if (block == start) - { - /* Need to get more from the system. Check to see if + blocks = BLOCKIFY(size); + start = block = MALLOC_SEARCH_START; + while (mdp -> heapinfo[block].free.size < blocks) + { + block = mdp -> heapinfo[block].free.next; + if (block == start) + { + /* Need to get more from the system. Check to see if the new core will be contiguous with the final free block; if so we don't need to get as much. */ - block = mdp -> heapinfo[0].free.prev; - lastblocks = mdp -> heapinfo[block].free.size; - if (mdp -> heaplimit != 0 && - block + lastblocks == mdp -> heaplimit && - mdp -> morecore (mdp, 0) == ADDRESS(block + lastblocks) && - (morecore (mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL) - { - /* Which block we are extending (the `final free + block = mdp -> heapinfo[0].free.prev; + lastblocks = mdp -> heapinfo[block].free.size; + if (mdp -> heaplimit != 0 && + block + lastblocks == mdp -> heaplimit && + mdp -> morecore (mdp, 0) == ADDRESS(block + lastblocks) && + (morecore (mdp, (blocks - lastblocks) * BLOCKSIZE)) != NULL) + { + /* Which block we are extending (the `final free block' referred to above) might have changed, if it got combined with a freed info table. 
*/ - block = mdp -> heapinfo[0].free.prev; - - mdp -> heapinfo[block].free.size += (blocks - lastblocks); - mdp -> heapstats.bytes_free += - (blocks - lastblocks) * BLOCKSIZE; - continue; - } - result = morecore(mdp, blocks * BLOCKSIZE); - if (result == NULL) - { - return (NULL); - } - block = BLOCK (result); - mdp -> heapinfo[block].busy.type = 0; - mdp -> heapinfo[block].busy.info.size = blocks; - mdp -> heapstats.chunks_used++; - mdp -> heapstats.bytes_used += blocks * BLOCKSIZE; - return (result); - } - } - - /* At this point we have found a suitable free list entry. + block = mdp -> heapinfo[0].free.prev; + + mdp -> heapinfo[block].free.size += (blocks - lastblocks); + mdp -> heapstats.bytes_free += + (blocks - lastblocks) * BLOCKSIZE; + continue; + } + result = morecore(mdp, blocks * BLOCKSIZE); + if (result == NULL) + { + UNLOCK(mdp); + return (NULL); + } + block = BLOCK (result); + mdp -> heapinfo[block].busy.type = 0; + mdp -> heapinfo[block].busy.info.size = blocks; + mdp -> heapstats.chunks_used++; + mdp -> heapstats.bytes_used += blocks * BLOCKSIZE; + UNLOCK(mdp); + return (result); + } + } + + /* At this point we have found a suitable free list entry. Figure out how to remove what we need from the list. */ - result = ADDRESS(block); - if (mdp -> heapinfo[block].free.size > blocks) - { - /* The block we found has a bit left over, + result = ADDRESS(block); + if (mdp -> heapinfo[block].free.size > blocks) + { + /* The block we found has a bit left over, so relink the tail end back into the free list. */ - mdp -> heapinfo[block + blocks].free.size - = mdp -> heapinfo[block].free.size - blocks; - mdp -> heapinfo[block + blocks].free.next - = mdp -> heapinfo[block].free.next; - mdp -> heapinfo[block + blocks].free.prev - = mdp -> heapinfo[block].free.prev; - mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next - = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev - = mdp -> heapindex = block + blocks; - } - else - { - /* The block exactly matches our requirements, + mdp -> heapinfo[block + blocks].free.size + = mdp -> heapinfo[block].free.size - blocks; + mdp -> heapinfo[block + blocks].free.next + = mdp -> heapinfo[block].free.next; + mdp -> heapinfo[block + blocks].free.prev + = mdp -> heapinfo[block].free.prev; + mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next + = mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev + = mdp -> heapindex = block + blocks; + } + else + { + /* The block exactly matches our requirements, so just remove it from the list. */ - mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev - = mdp -> heapinfo[block].free.prev; - mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next - = mdp -> heapindex = mdp -> heapinfo[block].free.next; - mdp -> heapstats.chunks_free--; - } - - mdp -> heapinfo[block].busy.type = 0; - mdp -> heapinfo[block].busy.info.size = blocks; - mdp -> heapstats.chunks_used++; - mdp -> heapstats.bytes_used += blocks * BLOCKSIZE; - mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE; + mdp -> heapinfo[mdp -> heapinfo[block].free.next].free.prev + = mdp -> heapinfo[block].free.prev; + mdp -> heapinfo[mdp -> heapinfo[block].free.prev].free.next + = mdp -> heapindex = mdp -> heapinfo[block].free.next; + mdp -> heapstats.chunks_free--; } + mdp -> heapinfo[block].busy.type = 0; + mdp -> heapinfo[block].busy.info.size = blocks; + mdp -> heapstats.chunks_used++; + mdp -> heapstats.bytes_used += blocks * BLOCKSIZE; + mdp -> heapstats.bytes_free -= blocks * BLOCKSIZE; + } + printf("(%s) Done mallocing. 
Result is %p\n",xbt_thread_self_name(),result);fflush(stdout); + UNLOCK(mdp); return (result); } - -/* When using this package, provide a version of malloc/realloc/free built - on top of it, so that if we use the default sbrk() region we will not - collide with another malloc package trying to do the same thing, if - the application contains any "hidden" calls to malloc/realloc/free (such - as inside a system library). - FIXME: disabled for now */ - -//void* malloc (size_t size) { -// void* result; -// result = mmalloc (NULL, size); -// return (result); -//} diff --git a/src/xbt/mmalloc/mmemalign.c b/src/xbt/mmalloc/mmemalign.c index c6296fd3be..690496afad 100644 --- a/src/xbt/mmalloc/mmemalign.c +++ b/src/xbt/mmalloc/mmemalign.c @@ -18,33 +18,33 @@ mmemalign (void *md, size_t alignment, size_t size) struct mdesc *mdp; if ((result = mmalloc (md, size + alignment - 1)) != NULL) + { + adj = RESIDUAL (result, alignment); + if (adj != 0) { - adj = RESIDUAL (result, alignment); - if (adj != 0) - { - mdp = MD_TO_MDP (md); - for (l = mdp -> aligned_blocks; l != NULL; l = l -> next) - { - if (l -> aligned == NULL) - { - /* This slot is free. Use it. */ - break; - } - } - if (l == NULL) - { - l = (struct alignlist *) mmalloc (md, sizeof (struct alignlist)); - if (l == NULL) - { - mfree (md, result); - return (NULL); - } - l -> next = mdp -> aligned_blocks; - mdp -> aligned_blocks = l; - } - l -> exact = result; - result = l -> aligned = (char*) result + alignment - adj; - } + mdp = MD_TO_MDP (md); + for (l = mdp -> aligned_blocks; l != NULL; l = l -> next) + { + if (l -> aligned == NULL) + { + /* This slot is free. Use it. */ + break; + } + } + if (l == NULL) + { + l = (struct alignlist *) mmalloc (md, sizeof (struct alignlist)); + if (l == NULL) + { + mfree (md, result); + return (NULL); + } + l -> next = mdp -> aligned_blocks; + mdp -> aligned_blocks = l; + } + l -> exact = result; + result = l -> aligned = (char*) result + alignment - adj; } + } return (result); } diff --git a/src/xbt/mmalloc/mmprivate.h b/src/xbt/mmalloc/mmprivate.h index 5f25ef673c..55908d212b 100644 --- a/src/xbt/mmalloc/mmprivate.h +++ b/src/xbt/mmalloc/mmprivate.h @@ -13,6 +13,7 @@ #ifndef __MMPRIVATE_H #define __MMPRIVATE_H 1 +#include "xbt/xbt_os_thread.h" #include "xbt/mmalloc.h" #ifdef HAVE_LIMITS_H @@ -76,8 +77,12 @@ #define ADDRESS(B) ((void*) (((ADDR2UINT(B)) - 1) * BLOCKSIZE + (char*) mdp -> heapbase)) -/* Data structure giving per-block information. */ +/* Thread-safety (if the mutex is already created)*/ +#define LOCK(mdp) if (mdp->mutex) xbt_os_mutex_acquire(mdp->mutex) +#define UNLOCK(mdp) if (mdp->mutex) xbt_os_mutex_release(mdp->mutex) +const char *xbt_thread_self_name(void); +/* Data structure giving per-block information. */ typedef union { /* Heap information for a busy block. */ @@ -144,28 +149,23 @@ struct mstats managing, and thus also becomes the file header for the mapped file, if such a file exists. */ -struct mdesc -{ +struct mdesc { + xbt_os_mutex_t mutex; /* The "magic number" for an mmalloc file. */ - char magic[MMALLOC_MAGIC_SIZE]; /* The size in bytes of this structure, used as a sanity check when reusing a previously created mapped file. */ - unsigned int headersize; /* The version number of the mmalloc package that created this file. */ - unsigned char version; /* Some flag bits to keep track of various internal things. */ - unsigned int flags; /* If a system call made by the mmalloc package fails, the errno is preserved for future examination. 
*/ - int saved_errno; /* Pointer to the function that is used to get more core, or return core @@ -177,7 +177,6 @@ struct mdesc FIXME: For mapped regions shared by more than one process, this needs to be maintained on a per-process basis. */ - void* (*morecore) (struct mdesc *mdp, int size); /* Pointer to the function that causes an abort when the memory checking @@ -186,45 +185,37 @@ struct mdesc FIXME: For mapped regions shared by more than one process, this needs to be maintained on a per-process basis. */ - void (*abortfunc) (void); /* Debugging hook for free. FIXME: For mapped regions shared by more than one process, this needs to be maintained on a per-process basis. */ - void (*mfree_hook) (void* mdp, void* ptr); /* Debugging hook for `malloc'. FIXME: For mapped regions shared by more than one process, this needs to be maintained on a per-process basis. */ - void* (*mmalloc_hook) (void* mdp, size_t size); /* Debugging hook for realloc. FIXME: For mapped regions shared by more than one process, this needs to be maintained on a per-process basis. */ - void* (*mrealloc_hook) (void* mdp, void* ptr, size_t size); /* Number of info entries. */ - size_t heapsize; /* Pointer to first block of the heap (base of the first block). */ - void* heapbase; /* Current search index for the heap table. */ /* Search index in the info table. */ - size_t heapindex; /* Limit of valid info table indices. */ - size_t heaplimit; /* Block information table. @@ -314,7 +305,7 @@ extern void* __mmalloc_remap_core (struct mdesc *mdp); #define MD_TO_MDP(md) \ ((md) == NULL \ - ? __mmalloc_default_mdp \ + ? __mmalloc_default_mdp \ : (struct mdesc *) (md)) #endif /* __MMPRIVATE_H */ diff --git a/src/xbt/mmalloc/mrealloc.c b/src/xbt/mmalloc/mrealloc.c index ccd837c969..1b788ec394 100644 --- a/src/xbt/mmalloc/mrealloc.c +++ b/src/xbt/mmalloc/mrealloc.c @@ -19,130 +19,123 @@ new region. This module has incestuous knowledge of the internals of both mfree and mmalloc. */ -void* -mrealloc (void *md, void *ptr, size_t size) -{ +void* mrealloc (void *md, void *ptr, size_t size) { struct mdesc *mdp; void* result; int type; size_t block, blocks, oldlimit; - if (size == 0) - { - mfree (md, ptr); - return (mmalloc (md, 0)); - } - else if (ptr == NULL) - { - return (mmalloc (md, size)); - } + + if (size == 0) { + mfree (md, ptr); + return (mmalloc (md, 0)); + } else if (ptr == NULL) { + return (mmalloc (md, size)); + } mdp = MD_TO_MDP (md); + printf("(%s)realloc %p to %d...",xbt_thread_self_name(),ptr,(int)size); + if ((char*)ptr < (char*)mdp->heapbase || BLOCK(ptr) > mdp->heapsize ) { + + printf("FIXME. Ouch, this pointer is not mine. I will malloc it instead of reallocing it.\n"); + result = mmalloc(md,size); + abort(); + return result; + } + + LOCK(mdp); if (mdp -> mrealloc_hook != NULL) - { - return ((*mdp -> mrealloc_hook) (md, ptr, size)); - } + { + UNLOCK(mdp); + return ((*mdp -> mrealloc_hook) (md, ptr, size)); + } block = BLOCK (ptr); type = mdp -> heapinfo[block].busy.type; switch (type) + { + case 0: + /* Maybe reallocate a large block to a small fragment. */ + if (size <= BLOCKSIZE / 2) { - case 0: - /* Maybe reallocate a large block to a small fragment. 
*/ - if (size <= BLOCKSIZE / 2) - { - result = mmalloc (md, size); - if (result != NULL) - { - memcpy (result, ptr, size); - mfree (md, ptr); - return (result); - } - } - - /* The new size is a large allocation as well; + UNLOCK(mdp); + printf("(%s) alloc large block...",xbt_thread_self_name()); + result = mmalloc (md, size); + if (result != NULL) + { + memcpy (result, ptr, size); + mfree (md, ptr); + return (result); + } + } + + /* The new size is a large allocation as well; see if we can hold it in place. */ - blocks = BLOCKIFY (size); - if (blocks < mdp -> heapinfo[block].busy.info.size) - { - /* The new size is smaller; return excess memory to the free list. */ - mdp -> heapinfo[block + blocks].busy.type = 0; - mdp -> heapinfo[block + blocks].busy.info.size - = mdp -> heapinfo[block].busy.info.size - blocks; - mdp -> heapinfo[block].busy.info.size = blocks; - mfree (md, ADDRESS (block + blocks)); - result = ptr; - } - else if (blocks == mdp -> heapinfo[block].busy.info.size) - { - /* No size change necessary. */ - result = ptr; - } - else - { - /* Won't fit, so allocate a new region that will. + LOCK(mdp); + blocks = BLOCKIFY (size); + if (blocks < mdp -> heapinfo[block].busy.info.size) + { + /* The new size is smaller; return excess memory to the free list. */ + printf("(%s) return excess memory...",xbt_thread_self_name()); + mdp -> heapinfo[block + blocks].busy.type = 0; + mdp -> heapinfo[block + blocks].busy.info.size + = mdp -> heapinfo[block].busy.info.size - blocks; + mdp -> heapinfo[block].busy.info.size = blocks; + mfree (md, ADDRESS (block + blocks)); + result = ptr; + } + else if (blocks == mdp -> heapinfo[block].busy.info.size) + { + /* No size change necessary. */ + result = ptr; + } + else + { + /* Won't fit, so allocate a new region that will. Free the old region first in case there is sufficient adjacent free space to grow without moving. */ - blocks = mdp -> heapinfo[block].busy.info.size; - /* Prevent free from actually returning memory to the system. */ - oldlimit = mdp -> heaplimit; - mdp -> heaplimit = 0; - mfree (md, ptr); - mdp -> heaplimit = oldlimit; - result = mmalloc (md, size); - if (result == NULL) - { - mmalloc (md, blocks * BLOCKSIZE); - return (NULL); - } - if (ptr != result) - { - memmove (result, ptr, blocks * BLOCKSIZE); - } - } - break; - - default: - /* Old size is a fragment; type is logarithm + blocks = mdp -> heapinfo[block].busy.info.size; + /* Prevent free from actually returning memory to the system. */ + oldlimit = mdp -> heaplimit; + mdp -> heaplimit = 0; + mfree (md, ptr); + mdp -> heaplimit = oldlimit; + UNLOCK(mdp); + result = mmalloc (md, size); + if (result == NULL) { + mmalloc (md, blocks * BLOCKSIZE); + return (NULL); + } + if (ptr != result) + memmove (result, ptr, blocks * BLOCKSIZE); + LOCK(mdp); + } + break; + + default: + /* Old size is a fragment; type is logarithm to base two of the fragment size. */ - if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type)) - { - /* The new size is the same kind of fragment. */ - result = ptr; - } - else - { - /* The new size is different; allocate a new space, + if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type)) { + /* The new size is the same kind of fragment. */ + printf("(%s) new size is same kind of fragment...",xbt_thread_self_name()); + result = ptr; + } else { + /* The new size is different; allocate a new space, and copy the lesser of the new size and the old. 
*/ - result = mmalloc (md, size); - if (result == NULL) - { - return (NULL); - } - memcpy (result, ptr, MIN (size, (size_t) 1 << type)); - mfree (md, ptr); - } - break; - } + printf("(%s) new size is different...",xbt_thread_self_name()); + UNLOCK(mdp); + result = mmalloc (md, size); + if (result == NULL) + return (NULL); + + memcpy (result, ptr, MIN (size, (size_t) 1 << type)); + mfree (md, ptr); + } + break; + } + printf("(%s) Done reallocing: %p\n",xbt_thread_self_name(),result);fflush(stdout); return (result); } - -/* Useless prototype to make gcc happy */ -void *realloc (void *ptr, size_t size); - -/* When using this package, provide a version of malloc/realloc/free built - on top of it, so that if we use the default sbrk() region we will not - collide with another malloc package trying to do the same thing, if - the application contains any "hidden" calls to malloc/realloc/free (such - as inside a system library). - FIXME: disabled for now */ - -//void * realloc (void *ptr, size_t size) { -// void* result; - -// result = mrealloc (NULL, ptr, size); -// return (result); -//} diff --git a/src/xbt/mmalloc/sbrk-sup.c b/src/xbt/mmalloc/sbrk-sup.c index 64e249fe15..52c8230bfb 100644 --- a/src/xbt/mmalloc/sbrk-sup.c +++ b/src/xbt/mmalloc/sbrk-sup.c @@ -49,19 +49,3 @@ sbrk_morecore (mdp, size) return (result); } -#define HEAP_OFFSET 20480000 /* Safety gap from the heap's break address */ - -void *mmalloc_get_default_md(void) { - return __mmalloc_default_mdp; -} - -/* Initialize the default malloc descriptor. */ -#include "xbt_modinter.h" -void mmalloc_preinit(void) { - __mmalloc_default_mdp = mmalloc_attach(-1, (char *)sbrk(0) + HEAP_OFFSET); - xbt_assert(__mmalloc_default_mdp != NULL); -} -void mmalloc_postexit(void) { - /* Do not detach the default mdp or ldl won't be able to free the memory it allocated since we're in memory */ - // mmalloc_detach(__mmalloc_default_mdp); -} diff --git a/src/xbt/xbt_sg_stubs.c b/src/xbt/xbt_sg_stubs.c index 6a86c13a9c..b609c109d3 100644 --- a/src/xbt/xbt_sg_stubs.c +++ b/src/xbt/xbt_sg_stubs.c @@ -71,30 +71,39 @@ void xbt_os_thread_yield(void) xbt_os_mutex_t xbt_os_mutex_init(void) { + /* xbt_backtrace_display_current(); xbt_die ("No pthread in SG when compiled against the ucontext (xbt_os_mutex_init)"); + */ + return NULL; } void xbt_os_mutex_acquire(xbt_os_mutex_t mutex) { + /* xbt_backtrace_display_current(); xbt_die ("No pthread in SG when compiled against the ucontext (xbt_os_mutex_acquire)"); + */ } void xbt_os_mutex_release(xbt_os_mutex_t mutex) { + /* xbt_backtrace_display_current(); xbt_die ("No pthread in SG when compiled against the ucontext (xbt_os_mutex_release)"); + */ } void xbt_os_mutex_destroy(xbt_os_mutex_t mutex) { + /* xbt_backtrace_display_current(); xbt_die ("No pthread in SG when compiled against the ucontext (xbt_os_mutex_destroy)"); + */ } xbt_os_cond_t xbt_os_cond_init(void) diff --git a/src/xbt_modinter.h b/src/xbt_modinter.h index 279c741815..425a010c97 100644 --- a/src/xbt_modinter.h +++ b/src/xbt_modinter.h @@ -28,4 +28,5 @@ void xbt_os_thread_mod_postexit(void); void mmalloc_preinit(void); void mmalloc_postexit(void); + #endif /* XBT_MODINTER_H */ diff --git a/tools/tesh/run_context.c b/tools/tesh/run_context.c index c8052d0a68..59f787e187 100644 --- a/tools/tesh/run_context.c +++ b/tools/tesh/run_context.c @@ -353,7 +353,8 @@ static void *thread_reader(void *r) /* let this thread wait for the child so that the main thread can detect the timeout without blocking on the wait */ got_pid = waitpid(rctx->pid, &rctx->status, 0); 
if (got_pid != rctx->pid) { - perror(bprintf("Cannot wait for the child %s", rctx->cmd)); + perror(bprintf("(%s) Cannot wait for the child %s (got pid %d where pid %d were expected;rctx=%p;status=%d)", + xbt_thread_self_name(), rctx->cmd, (int)got_pid, (int)rctx->pid,rctx,rctx->status)); ERROR1("Test suite `%s': NOK (system error)", testsuite_name); rctx_armageddon(rctx, 4); return NULL;
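
The pattern behind the patch above, in a nutshell: the legacy malloc/free wrappers consult a process-wide "current heap" pointer (which MC_SET_RAW_MEM/MC_UNSET_RAW_MEM now switch via mmalloc_set_current_heap), each heap descriptor carries its own mutex, and LOCK/UNLOCK degrade to no-ops until that mutex exists, because the mutex must be allocated from the very heap it will protect. Below is a minimal standalone sketch of that pattern only, not SimGrid code: fake_heap, legacy_malloc and heap_attach_mutex are invented stand-ins, pthread mutexes replace xbt_os_mutex_t, and plain malloc() stands in for mmalloc().

/* Sketch of the current-heap switching and NULL-tolerant locking pattern.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_heap {                     /* stand-in for struct mdesc */
  pthread_mutex_t *mutex;              /* NULL until the heap is bootstrapped */
  const char *name;
};

static struct fake_heap *current_heap; /* stand-in for __mmalloc_current_heap */

static void set_current_heap(struct fake_heap *h) { current_heap = h; }
static struct fake_heap *get_current_heap(void)   { return current_heap; }

/* LOCK/UNLOCK tolerate a missing mutex, precisely because the mutex itself
 * has to be allocated from the heap it will later protect. */
#define LOCK(h)   do { if ((h)->mutex) pthread_mutex_lock((h)->mutex);   } while (0)
#define UNLOCK(h) do { if ((h)->mutex) pthread_mutex_unlock((h)->mutex); } while (0)

/* What the intercepted legacy malloc() boils down to: allocate from whatever
 * heap is currently selected, holding that heap's mutex if it exists yet. */
static void *legacy_malloc(size_t size) {
  struct fake_heap *h = get_current_heap();
  LOCK(h);
  void *res = malloc(size);            /* the real code calls mmalloc(h, size) */
  printf("allocated %zu bytes from heap '%s'\n", size, h->name);
  UNLOCK(h);
  return res;
}

/* Bootstrap trick mirroring mmalloc_attach(): temporarily make 'h' the
 * current heap so the mutex lands inside the heap it will protect, then
 * restore whatever heap was current before. */
static void heap_attach_mutex(struct fake_heap *h) {
  struct fake_heap *old = get_current_heap();
  set_current_heap(h);
  h->mutex = legacy_malloc(sizeof *h->mutex);
  pthread_mutex_init(h->mutex, NULL);
  set_current_heap(old);
}

int main(void) {
  struct fake_heap std_heap = { NULL, "std" };
  struct fake_heap raw_heap = { NULL, "raw" };

  set_current_heap(&std_heap);
  heap_attach_mutex(&std_heap);
  heap_attach_mutex(&raw_heap);

  /* What MC_SET_RAW_MEM / MC_UNSET_RAW_MEM expand to after this commit: */
  set_current_heap(&raw_heap);         /* MC_SET_RAW_MEM   */
  void *p = legacy_malloc(64);         /* lands in the raw heap */
  set_current_heap(&std_heap);         /* MC_UNSET_RAW_MEM */

  free(p);
  pthread_mutex_destroy(std_heap.mutex); free(std_heap.mutex);
  pthread_mutex_destroy(raw_heap.mutex); free(raw_heap.mutex);
  return 0;
}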