/* Return memory to the heap.
Like `mfree' but don't call a mfree_hook if there is one. */
-void __mmalloc_free(struct mdesc *mdp, void *ptr)
+/* Return memory to the heap. */
+void mfree(struct mdesc *mdp, void *ptr)
{
int type;
size_t block;
register size_t i;
struct list *prev, *next;
+ int it;
+
+ if (ptr == NULL)
+ return;
block = BLOCK(ptr);
}
- type = mdp->heapinfo[block].busy.type;
+ type = mdp->heapinfo[block].type;
+ if (type<0)
+ THROWF(arg_error,0,"Asked to free a fragment in a block that is already free. I'm puzzled");
+
switch (type) {
case 0:
/* Find the free cluster previous to this one in the free list.
i = mdp->heapindex;
if (i > block) {
while (i > block) {
- i = mdp->heapinfo[i].free.prev;
+ i = mdp->heapinfo[i].free_block.prev;
}
} else {
do {
- i = mdp->heapinfo[i].free.next;
+ i = mdp->heapinfo[i].free_block.next;
}
while ((i != 0) && (i < block));
- i = mdp->heapinfo[i].free.prev;
+ i = mdp->heapinfo[i].free_block.prev;
}
/* Determine how to link this block into the free list. */
- if (block == i + mdp->heapinfo[i].free.size) {
+ if (block == i + mdp->heapinfo[i].free_block.size) {
+
/* Coalesce this block with its predecessor. */
- mdp->heapinfo[i].free.size += mdp->heapinfo[block].busy.info.block.size;
+ mdp->heapinfo[i].free_block.size += mdp->heapinfo[block].busy_block.size;
+ /* Mark all my ex-blocks as free */
+ for (it=0; it<mdp->heapinfo[block].busy_block.size; it++) {
+ if (mdp->heapinfo[block+it].type <0) {
+ fprintf(stderr,"Internal Error: Asked to free a block already marked as free (block=%lu it=%d type=%lu). Please report this bug.\n",
+ (unsigned long)block,it,(unsigned long)mdp->heapinfo[block].type);
+ abort();
+ }
+ mdp->heapinfo[block+it].type = -1;
+ }
+
block = i;
} else {
+      //fprintf(stderr,"Free block %d to %d (as a new chunk)\n",block,block+mdp->heapinfo[block].busy_block.size);
/* Really link this block back into the free list. */
- mdp->heapinfo[block].free.size = mdp->heapinfo[block].busy.info.block.size;
- mdp->heapinfo[block].free.next = mdp->heapinfo[i].free.next;
- mdp->heapinfo[block].free.prev = i;
- mdp->heapinfo[i].free.next = block;
- mdp->heapinfo[mdp->heapinfo[block].free.next].free.prev = block;
+ mdp->heapinfo[block].free_block.size = mdp->heapinfo[block].busy_block.size;
+ mdp->heapinfo[block].free_block.next = mdp->heapinfo[i].free_block.next;
+ mdp->heapinfo[block].free_block.prev = i;
+ mdp->heapinfo[i].free_block.next = block;
+ mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev = block;
+ /* Mark all my ex-blocks as free */
+ for (it=0; it<mdp->heapinfo[block].free_block.size; it++) {
+ if (mdp->heapinfo[block+it].type <0) {
+ fprintf(stderr,"Internal error: Asked to free a block already marked as free (block=%lu it=%d/%lu type=%lu). Please report this bug.\n",
+ (unsigned long)block,it,(unsigned long)mdp->heapinfo[block].free_block.size,(unsigned long)mdp->heapinfo[block].type);
+ abort();
+ }
+ mdp->heapinfo[block+it].type = -1;
+ }
}
/* Now that the block is linked in, see if we can coalesce it
with its successor (by deleting its successor from the list
and adding in its size). */
- if (block + mdp->heapinfo[block].free.size ==
- mdp->heapinfo[block].free.next) {
- mdp->heapinfo[block].free.size
- += mdp->heapinfo[mdp->heapinfo[block].free.next].free.size;
- mdp->heapinfo[block].free.next
- = mdp->heapinfo[mdp->heapinfo[block].free.next].free.next;
- mdp->heapinfo[mdp->heapinfo[block].free.next].free.prev = block;
+ if (block + mdp->heapinfo[block].free_block.size ==
+ mdp->heapinfo[block].free_block.next) {
+ mdp->heapinfo[block].free_block.size
+ += mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.size;
+ mdp->heapinfo[block].free_block.next
+ = mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.next;
+ mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev = block;
}
/* Now see if we can return stuff to the system. */
/* Get the address of the first free fragment in this block. */
prev = (struct list *)
((char *) ADDRESS(block) +
- (mdp->heapinfo[block].busy.info.frag.first << type));
+ (mdp->heapinfo[block].busy_frag.first << type));
- if (mdp->heapinfo[block].busy.info.frag.nfree ==
+ if (mdp->heapinfo[block].busy_frag.nfree ==
(BLOCKSIZE >> type) - 1) {
/* If all fragments of this block are free, remove them
from the fragment list and free the whole block. */
if (next != NULL) {
next->prev = prev->prev;
}
- mdp->heapinfo[block].busy.type = 0;
- mdp->heapinfo[block].busy.info.block.size = 1;
- mdp->heapinfo[block].busy.info.block.busy_size = 0;
+ /* pretend that this block is used and free it so that it gets properly coalesced with adjacent free blocks */
+ mdp->heapinfo[block].type = 0;
+ mdp->heapinfo[block].busy_block.size = 1;
+ mdp->heapinfo[block].busy_block.busy_size = 0;
mfree((void *) mdp, (void *) ADDRESS(block));
- } else if (mdp->heapinfo[block].busy.info.frag.nfree != 0) {
+ } else if (mdp->heapinfo[block].busy_frag.nfree != 0) {
/* If some fragments of this block are free, link this
fragment into the fragment list after the first free
fragment of this block. */
if (next->next != NULL) {
next->next->prev = next;
}
- ++mdp->heapinfo[block].busy.info.frag.nfree;
+ ++mdp->heapinfo[block].busy_frag.nfree;
} else {
- /* No fragments of this block are free, so link this
- fragment into the fragment list and announce that
+ /* No fragments of this block were free before the one we just released,
+ * so link this fragment into the fragment list and announce that
it is the first free fragment of this block. */
prev = (struct list *) ptr;
- mdp->heapinfo[block].busy.info.frag.nfree = 1;
- mdp->heapinfo[block].busy.info.frag.first =
+ mdp->heapinfo[block].busy_frag.nfree = 1;
+ mdp->heapinfo[block].busy_frag.first =
RESIDUAL(ptr, BLOCKSIZE) >> type;
prev->next = mdp->fraghead[type].next;
prev->prev = &mdp->fraghead[type];
break;
}
}
-
-/* Return memory to the heap. */
-
-void mfree(xbt_mheap_t mdp, void *ptr)
-{
- if (ptr != NULL)
- __mmalloc_free(mdp, ptr);
-}
block_free1 = mdp1->heapindex;
block_free2 = mdp2->heapindex;
- while(mdp1->heapinfo[block_free1].free.prev != 0){
- block_free1 = mdp1->heapinfo[block_free1].free.prev;
+ while(mdp1->heapinfo[block_free1].free_block.prev != 0){
+ block_free1 = mdp1->heapinfo[block_free1].free_block.prev;
}
- while(mdp2->heapinfo[block_free2].free.prev != 0){
- block_free2 = mdp1->heapinfo[block_free2].free.prev;
+ while(mdp2->heapinfo[block_free2].free_block.prev != 0){
+ block_free2 = mdp1->heapinfo[block_free2].free_block.prev;
}
if(block_free1 != block_free2){
first_block_free = block_free1;
- if(mdp1->heapinfo[first_block_free].free.size != mdp2->heapinfo[first_block_free].free.size){
+ if(mdp1->heapinfo[first_block_free].free_block.size != mdp2->heapinfo[first_block_free].free_block.size){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different size (in blocks) of the first free cluster");
errors++;
/* Check busy blocks (circular checking)*/
- i = first_block_free + mdp1->heapinfo[first_block_free].free.size;
+ i = first_block_free + mdp1->heapinfo[first_block_free].free_block.size;
- if(mdp1->heapinfo[first_block_free].free.next != mdp2->heapinfo[first_block_free].free.next){
+ if(mdp1->heapinfo[first_block_free].free_block.next != mdp2->heapinfo[first_block_free].free_block.next){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different next block free");
errors++;
}
block_free = first_block_free;
- next_block_free = mdp1->heapinfo[first_block_free].free.next;
+ next_block_free = mdp1->heapinfo[first_block_free].free_block.next;
if(next_block_free == 0)
next_block_free = mdp1->heaplimit;
while(i<next_block_free){
- if(mdp1->heapinfo[i].busy.type != mdp2->heapinfo[i].busy.type){
+ if(mdp1->heapinfo[i].type != mdp2->heapinfo[i].type){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different type of busy block");
errors++;
addr_block1 = (char *)mdp1 + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE);
addr_block2 = (char *)mdp2 + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE);
- switch(mdp1->heapinfo[i].busy.type){
+ switch(mdp1->heapinfo[i].type){ //FIXME deal with type<0 == free
case 0 :
- if(mdp1->heapinfo[i].busy.info.block.size != mdp2->heapinfo[i].busy.info.block.size){
+ if(mdp1->heapinfo[i].busy_block.size != mdp2->heapinfo[i].busy_block.size){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different size of a large cluster");
errors++;
return 1;
}
}else{
- if(memcmp(addr_block1, addr_block2, (mdp1->heapinfo[i].busy.info.block.size * BLOCKSIZE)) != 0){
+ if(memcmp(addr_block1, addr_block2, (mdp1->heapinfo[i].busy_block.size * BLOCKSIZE)) != 0){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
- XBT_DEBUG("Different data in block %zu (size = %zu) (addr_block1 = %p (current = %p) - addr_block2 = %p)", i, mdp1->heapinfo[i].busy.info.block.size, addr_block1, (char *)std_heap_addr + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE), addr_block2);
+ XBT_DEBUG("Different data in block %zu (size = %zu) (addr_block1 = %p (current = %p) - addr_block2 = %p)", i, mdp1->heapinfo[i].busy_block.size, addr_block1, (char *)std_heap_addr + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE), addr_block2);
errors++;
}else{
return 1;
}
}
}
- i = i+mdp1->heapinfo[i].busy.info.block.size;
+ i = i+mdp1->heapinfo[i].busy_block.size;
break;
default :
- if(mdp1->heapinfo[i].busy.info.frag.nfree != mdp2->heapinfo[i].busy.info.frag.nfree){
+ if(mdp1->heapinfo[i].busy_frag.nfree != mdp2->heapinfo[i].busy_frag.nfree){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different free fragments in the fragmented block %zu", i);
errors++;
return 1;
}
}else{
- if(mdp1->heapinfo[i].busy.info.frag.first != mdp2->heapinfo[i].busy.info.frag.first){
+ if(mdp1->heapinfo[i].busy_frag.first != mdp2->heapinfo[i].busy_frag.first){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different first free fragments in the block %zu", i);
errors++;
return 1;
}
}else{
- frag_size = pow(2,mdp1->heapinfo[i].busy.type);
+ frag_size = pow(2,mdp1->heapinfo[i].type);
for(j=0 ; j< (BLOCKSIZE/frag_size); j++){
if(memcmp((char *)addr_block1 + (j * frag_size), (char *)addr_block2 + (j * frag_size), frag_size) != 0){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
if( i != first_block_free){
- if(mdp1->heapinfo[block_free].free.next != mdp2->heapinfo[block_free].free.next){
+ if(mdp1->heapinfo[block_free].free_block.next != mdp2->heapinfo[block_free].free_block.next){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different next block free");
errors++;
}
}
- block_free = mdp1->heapinfo[block_free].free.next;
- next_block_free = mdp1->heapinfo[block_free].free.next;
+ block_free = mdp1->heapinfo[block_free].free_block.next;
+ next_block_free = mdp1->heapinfo[block_free].free_block.next;
- i = block_free + mdp1->heapinfo[block_free].free.size;
+ i = block_free + mdp1->heapinfo[block_free].free_block.size;
if((next_block_free == 0) && (i != mdp1->heaplimit)){
while(i < mdp1->heaplimit){
- if(mdp1->heapinfo[i].busy.type != mdp2->heapinfo[i].busy.type){
+ if(mdp1->heapinfo[i].type != mdp2->heapinfo[i].type){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different type of busy block");
errors++;
addr_block1 = (char *)mdp1 + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE);
addr_block2 = (char *)mdp2 + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE);
- switch(mdp1->heapinfo[i].busy.type){
+ switch(mdp1->heapinfo[i].type){
case 0 :
- if(mdp1->heapinfo[i].busy.info.block.size != mdp2->heapinfo[i].busy.info.block.size){
+ if(mdp1->heapinfo[i].busy_block.size != mdp2->heapinfo[i].busy_block.size){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different size of a large cluster");
errors++;
return 1;
}
}else{
- if(memcmp(addr_block1, addr_block2, (mdp1->heapinfo[i].busy.info.block.size * BLOCKSIZE)) != 0){
+ if(memcmp(addr_block1, addr_block2, (mdp1->heapinfo[i].busy_block.size * BLOCKSIZE)) != 0){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different data in block %zu (addr_block1 = %p (current = %p) - addr_block2 = %p)", i, addr_block1, (char *)std_heap_addr + sizeof(struct mdesc) + ((i-1) * BLOCKSIZE), addr_block2);
errors++;
}
}
- i = i+mdp1->heapinfo[i].busy.info.block.size;
+ i = i+mdp1->heapinfo[i].busy_block.size;
break;
default :
- if(mdp1->heapinfo[i].busy.info.frag.nfree != mdp2->heapinfo[i].busy.info.frag.nfree){
+ if(mdp1->heapinfo[i].busy_frag.nfree != mdp2->heapinfo[i].busy_frag.nfree){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different free fragments in the fragmented block %zu", i);
errors++;
return 1;
}
}else{
- if(mdp1->heapinfo[i].busy.info.frag.first != mdp2->heapinfo[i].busy.info.frag.first){
+ if(mdp1->heapinfo[i].busy_frag.first != mdp2->heapinfo[i].busy_frag.first){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
XBT_DEBUG("Different first free fragments in the block %zu", i);
errors++;
return 1;
}
}else{
- frag_size = pow(2,mdp1->heapinfo[i].busy.type);
+ frag_size = pow(2,mdp1->heapinfo[i].type);
for(j=0 ; j< (BLOCKSIZE/frag_size); j++){
if(memcmp((char *)addr_block1 + (j * frag_size), (char *)addr_block2 + (j * frag_size), frag_size) != 0){
if(XBT_LOG_ISENABLED(xbt_mm_legacy, xbt_log_priority_debug)){
return (0);
}
memset((void *) mdp->heapinfo, 0, mdp->heapsize * sizeof(malloc_info));
- mdp->heapinfo[0].free.size = 0;
- mdp->heapinfo[0].free.next = mdp->heapinfo[0].free.prev = 0;
+ mdp->heapinfo[0].type=-1;
+ mdp->heapinfo[0].free_block.size = 0;
+ mdp->heapinfo[0].free_block.next = mdp->heapinfo[0].free_block.prev = 0;
mdp->heapindex = 0;
mdp->heapbase = (void *) mdp->heapinfo;
mdp->flags |= MMALLOC_INITIALIZED;
return (NULL);
}
- /* Check if we need to grow the info table. */
+ /* Check if we need to grow the info table (in a multiplicative manner) */
if ((size_t) BLOCK((char *) result + size) > mdp->heapsize) {
+ int it;
+
newsize = mdp->heapsize;
- while ((size_t) BLOCK((char *) result + size) > newsize) {
+ while ((size_t) BLOCK((char *) result + size) > newsize)
newsize *= 2;
- }
- newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
- if (newinfo == NULL) {
- mmorecore(mdp, -size);
- return (NULL);
- }
- memset((void *) newinfo, 0, newsize * sizeof(malloc_info));
- memcpy((void *) newinfo, (void *) mdp->heapinfo,
- mdp->heapsize * sizeof(malloc_info));
+
+ /* Copy old info into new location */
oldinfo = mdp->heapinfo;
- newinfo[BLOCK(oldinfo)].busy.type = 0;
- newinfo[BLOCK(oldinfo)].busy.info.block.size
- = BLOCKIFY(mdp->heapsize * sizeof(malloc_info));
- newinfo[BLOCK(oldinfo)].busy.info.block.busy_size = size;
+ newinfo = (malloc_info *) align(mdp, newsize * sizeof(malloc_info));
+ memset(newinfo, 0, newsize * sizeof(malloc_info));
+ memcpy(newinfo, oldinfo, mdp->heapsize * sizeof(malloc_info));
mdp->heapinfo = newinfo;
- __mmalloc_free(mdp, (void *) oldinfo);
+
+ /* mark the space previously occupied by the block info as free by first marking it
+     * as occupied in the regular way, and then freeing it */
+ for (it=0; it<BLOCKIFY(mdp->heapsize * sizeof(malloc_info)); it++)
+ newinfo[BLOCK(oldinfo)+it].type = 0;
+
+ newinfo[BLOCK(oldinfo)].busy_block.size = BLOCKIFY(mdp->heapsize * sizeof(malloc_info));
+ newinfo[BLOCK(oldinfo)].busy_block.busy_size = size;
+ mfree(mdp, (void *) oldinfo);
mdp->heapsize = newsize;
}
register size_t i;
struct list *next;
register size_t log;
+ int it;
/* Work even if the user was stupid enough to ask a 0-byte block, ie return a valid block that can be realloced or freed
* glibc malloc does not use this trick but return a constant pointer, but my hack is quicker to implement ;)
next->next->prev = next->prev;
}
block = BLOCK(result);
- if (--mdp->heapinfo[block].busy.info.frag.nfree != 0) {
- mdp->heapinfo[block].busy.info.frag.first =
+ if (--mdp->heapinfo[block].busy_frag.nfree != 0) {
+ mdp->heapinfo[block].busy_frag.first =
RESIDUAL(next->next, BLOCKSIZE) >> log;
}
/* Initialize the nfree and first counters for this block. */
block = BLOCK(result);
- mdp->heapinfo[block].busy.type = log;
- mdp->heapinfo[block].busy.info.frag.nfree = i - 1;
- mdp->heapinfo[block].busy.info.frag.first = i - 1;
+ mdp->heapinfo[block].type = log;
+ mdp->heapinfo[block].busy_frag.nfree = i - 1;
+ mdp->heapinfo[block].busy_frag.first = i - 1;
}
} else {
/* Large allocation to receive one or more blocks.
space we will have to get more memory from the system. */
blocks = BLOCKIFY(size);
start = block = MALLOC_SEARCH_START;
- while (mdp->heapinfo[block].free.size < blocks) {
- block = mdp->heapinfo[block].free.next;
+ while (mdp->heapinfo[block].free_block.size < blocks) {
+ if (mdp->heapinfo[block].type >=0) {
+ fprintf(stderr,"Internal error: found a free block not marked as such (block=%lu type=%lu). Please report this bug.\n",(unsigned long)block,(unsigned long)mdp->heapinfo[block].type);
+ abort();
+ }
+
+ block = mdp->heapinfo[block].free_block.next;
if (block == start) {
/* Need to get more from the system. Check to see if
the new core will be contiguous with the final free
block; if so we don't need to get as much. */
- block = mdp->heapinfo[0].free.prev;
- lastblocks = mdp->heapinfo[block].free.size;
+ block = mdp->heapinfo[0].free_block.prev;
+ lastblocks = mdp->heapinfo[block].free_block.size;
if (mdp->heaplimit != 0 &&
block + lastblocks == mdp->heaplimit &&
mmorecore(mdp, 0) == ADDRESS(block + lastblocks) &&
/* Which block we are extending (the `final free
block' referred to above) might have changed, if
it got combined with a freed info table. */
- block = mdp->heapinfo[0].free.prev;
+ block = mdp->heapinfo[0].free_block.prev;
- mdp->heapinfo[block].free.size += (blocks - lastblocks);
+ mdp->heapinfo[block].free_block.size += (blocks - lastblocks);
continue;
}
result = register_morecore(mdp, blocks * BLOCKSIZE);
return (NULL);
}
block = BLOCK(result);
- mdp->heapinfo[block].busy.type = 0;
- mdp->heapinfo[block].busy.info.block.size = blocks;
- mdp->heapinfo[block].busy.info.block.busy_size = size;
+ for (it=0;it<blocks;it++)
+ mdp->heapinfo[block+it].type = 0;
+ mdp->heapinfo[block].busy_block.size = blocks;
+ mdp->heapinfo[block].busy_block.busy_size = size;
return (result);
}
}
/* At this point we have found a suitable free list entry.
Figure out how to remove what we need from the list. */
result = ADDRESS(block);
- if (mdp->heapinfo[block].free.size > blocks) {
+ if (mdp->heapinfo[block].free_block.size > blocks) {
/* The block we found has a bit left over,
so relink the tail end back into the free list. */
- mdp->heapinfo[block + blocks].free.size
- = mdp->heapinfo[block].free.size - blocks;
- mdp->heapinfo[block + blocks].free.next
- = mdp->heapinfo[block].free.next;
- mdp->heapinfo[block + blocks].free.prev
- = mdp->heapinfo[block].free.prev;
- mdp->heapinfo[mdp->heapinfo[block].free.prev].free.next
- = mdp->heapinfo[mdp->heapinfo[block].free.next].free.prev
+ mdp->heapinfo[block + blocks].free_block.size
+ = mdp->heapinfo[block].free_block.size - blocks;
+ mdp->heapinfo[block + blocks].free_block.next
+ = mdp->heapinfo[block].free_block.next;
+ mdp->heapinfo[block + blocks].free_block.prev
+ = mdp->heapinfo[block].free_block.prev;
+ mdp->heapinfo[mdp->heapinfo[block].free_block.prev].free_block.next
+ = mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev
= mdp->heapindex = block + blocks;
} else {
/* The block exactly matches our requirements,
so just remove it from the list. */
- mdp->heapinfo[mdp->heapinfo[block].free.next].free.prev
- = mdp->heapinfo[block].free.prev;
- mdp->heapinfo[mdp->heapinfo[block].free.prev].free.next
- = mdp->heapindex = mdp->heapinfo[block].free.next;
+ mdp->heapinfo[mdp->heapinfo[block].free_block.next].free_block.prev
+ = mdp->heapinfo[block].free_block.prev;
+ mdp->heapinfo[mdp->heapinfo[block].free_block.prev].free_block.next
+ = mdp->heapindex = mdp->heapinfo[block].free_block.next;
}
- mdp->heapinfo[block].busy.type = 0;
- mdp->heapinfo[block].busy.info.block.size = blocks;
- mdp->heapinfo[block].busy.info.block.busy_size = size;
+ for (it=0;it<blocks;it++)
+ mdp->heapinfo[block+it].type = 0;
+ mdp->heapinfo[block].busy_block.size = blocks;
+ mdp->heapinfo[block].busy_block.busy_size = size;
}
//printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);
return (result);
#include <sys/mman.h>
#include "mmprivate.h"
-#include "xbt/ex.h"
/* Cache the pagesize for the current host machine. Note that if the host
does not readily provide a getpagesize() function, we need to emulate it
#include "portable.h"
#include "xbt/xbt_os_thread.h"
#include "xbt/mmalloc.h"
+#include "xbt/ex.h"
#include <semaphore.h>
#ifdef HAVE_LIMITS_H
* When looking for free blocks, we traverse the mdp->heapinfo looking
* for a cluster of free blocks that would be large enough.
*
+ * The size of the cluster is only to be trusted in the first block of the cluster.
+ * If the cluster results from the fusion of several clusters, the blocks that were
+ * previously the first of their own cluster still hold stale (partial) data. The only
+ * information kept consistent over all blocks of a cluster is their type (== -1).
+ *
* Note that there is no way to determine if the block is free or busy by exploring
* this structure only. It wasn't intended to be crawled for comparison and we should fix it (TODO).
*
* I retrieve the first block of my cluster.
*
* TODO:
- * - add an indication of the requested size in the busy.block structure
- * - add the same for each fragments
- * - make room to store the backtrace of where the fragment were malloced, too.
+ * - add an indication of the requested size in each fragment, similarly to busy_block.busy_size
+ * - make room to store the backtrace of where the blocks and fragment were malloced, too.
*/
-typedef union {
+typedef struct {
+ int type; /* 0: busy large block
+ >0: busy fragmented (fragments of size 2^type bytes)
+ <0: free block */
+ union {
/* Heap information for a busy block. */
- struct {
- /* Zero for a large block, or positive giving the
- logarithm to the base two of the fragment size. */
- int type;
- union {
- struct {
- size_t nfree; /* Free fragments in a fragmented block. */
- size_t first; /* First free fragment of the block. */
- } frag;
- struct {
- size_t size; /* Size (in blocks) of a large cluster. */
- size_t busy_size;
- } block;
- } info;
- } busy;
- /* Heap information for a free block (that may be the first of
- a free cluster). */
- struct {
- size_t size; /* Size (in blocks) of a free cluster. */
- size_t next; /* Index of next free cluster. */
- size_t prev; /* Index of previous free cluster. */
- } free;
+ struct {
+ size_t nfree; /* Free fragments in a fragmented block. */
+ size_t first; /* First free fragment of the block. */
+ } busy_frag;
+ struct {
+ size_t size; /* Size (in blocks) of a large cluster. */
+ size_t busy_size; /* Actually used space, in bytes */
+ } busy_block;
+ /* Heap information for a free block (that may be the first of a free cluster). */
+ struct {
+ size_t size; /* Size (in blocks) of a free cluster. */
+ size_t next; /* Index of next free cluster. */
+ size_t prev; /* Index of previous free cluster. */
+ } free_block;
+ };
} malloc_info;
/* Doubly linked lists of free fragments. */
size_t heaplimit;
/* Block information table.
- Allocated with malign/__mmalloc_free (not mmalloc/mfree). */
+ Allocated with malign/mfree (not mmalloc/mfree). */
/* Table indexed by block number giving per-block information. */
malloc_info *heapinfo;
#define MMALLOC_ANONYMOUS (1 << 1) /* Use anonymous mapping */
#define MMALLOC_INITIALIZED (1 << 2) /* Initialized mmalloc */
-/* Internal version of `mfree' used in `morecore'. */
-
-extern void __mmalloc_free(struct mdesc *mdp, void *ptr);
-
/* A default malloc descriptor for the single sbrk() managed region. */
extern struct mdesc *__mmalloc_default_mdp;
int type;
size_t block, blocks, oldlimit;
+ /* Only keep real realloc and hidden malloc and free to the relevant functions */
if (size == 0) {
+ fprintf(stderr,"free from realloc...");
mfree(mdp, ptr);
+ fprintf(stderr,"done\n");
return mmalloc(mdp, 0);
} else if (ptr == NULL) {
return mmalloc(mdp, size);
}
-
//printf("(%s)realloc %p to %d...",xbt_thread_self_name(),ptr,(int)size);
if ((char *) ptr < (char *) mdp->heapbase || BLOCK(ptr) > mdp->heapsize) {
block = BLOCK(ptr);
- type = mdp->heapinfo[block].busy.type;
+ type = mdp->heapinfo[block].type;
+ if (type<0)
+ THROWF(arg_error,0,"Asked realloc a fragment comming from a *free* block. I'm puzzled.");
+
switch (type) {
case 0:
/* Maybe reallocate a large block to a small fragment. */
/* The new size is a large allocation as well;
see if we can hold it in place. */
blocks = BLOCKIFY(size);
- if (blocks < mdp->heapinfo[block].busy.info.block.size) {
+ if (blocks < mdp->heapinfo[block].busy_block.size) {
+ int it;
/* The new size is smaller; return excess memory to the free list. */
//printf("(%s) return excess memory...",xbt_thread_self_name());
- mdp->heapinfo[block + blocks].busy.type = 0;
- mdp->heapinfo[block + blocks].busy.info.block.size
- = mdp->heapinfo[block].busy.info.block.size - blocks;
- mdp->heapinfo[block].busy.info.block.size = blocks;
- mdp->heapinfo[block].busy.info.block.busy_size = size;
+ for (it= block+blocks; it< mdp->heapinfo[block].busy_block.size ; it++)
+ mdp->heapinfo[it].type = 0;
+ mdp->heapinfo[block + blocks].busy_block.size
+ = mdp->heapinfo[block].busy_block.size - blocks;
+ mdp->heapinfo[block].busy_block.size = blocks;
+ mdp->heapinfo[block].busy_block.busy_size = size;
+
mfree(mdp, ADDRESS(block + blocks));
result = ptr;
- } else if (blocks == mdp->heapinfo[block].busy.info.block.size) {
+ } else if (blocks == mdp->heapinfo[block].busy_block.size) {
/* No size change necessary. */
result = ptr;
} else {
/* Won't fit, so allocate a new region that will.
Free the old region first in case there is sufficient
adjacent free space to grow without moving. */
- blocks = mdp->heapinfo[block].busy.info.block.size;
+ blocks = mdp->heapinfo[block].busy_block.size;
/* Prevent free from actually returning memory to the system. */
oldlimit = mdp->heaplimit;
mdp->heaplimit = 0;