X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/196bcabf3c533a02f12b43f040f8fb5cb1e43235..3325badb97bd570d6a0297cec024e26cf0f8a59d:/src/xbt/mmalloc/mmprivate.h

diff --git a/src/xbt/mmalloc/mmprivate.h b/src/xbt/mmalloc/mmprivate.h
index f322b11c32..061a4e7bdd 100644
--- a/src/xbt/mmalloc/mmprivate.h
+++ b/src/xbt/mmalloc/mmprivate.h
@@ -17,6 +17,7 @@
 #include "xbt/xbt_os_thread.h"
 #include "xbt/mmalloc.h"
 #include "xbt/ex.h"
+#include "xbt/dynar.h"
 
 #include
 #include
@@ -28,9 +29,9 @@
 # endif
 #endif
 
-#define MMALLOC_MAGIC "mmalloc" /* Mapped file magic number */
-#define MMALLOC_MAGIC_SIZE 8 /* Size of magic number buf */
-#define MMALLOC_VERSION 2 /* Current mmalloc version */
+#define MMALLOC_MAGIC      "mmalloc"   /* Mapped file magic number */
+#define MMALLOC_MAGIC_SIZE 8           /* Size of magic number buf */
+#define MMALLOC_VERSION    2           /* Current mmalloc version */
 
 /* The allocator divides the heap into blocks of fixed size; large
    requests receive one or more whole blocks, and small requests
@@ -40,10 +41,10 @@
 
    FIXME: we are not targeting 16-bit machines anymore; update values */
 
-#define INT_BIT (CHAR_BIT * sizeof(int))
-#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
-#define BLOCKSIZE ((unsigned int) 1 << BLOCKLOG)
-#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
+#define INT_BIT        (CHAR_BIT * sizeof(int))
+#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
+#define BLOCKSIZE      ((unsigned int) 1 << BLOCKLOG)
+#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
 
 /* We keep fragment-specific meta-data for introspection purposes, and this
  * information is kept in fixed-length arrays. Here is the computation of
@@ -64,25 +65,25 @@
    sign of the result is machine dependent for negative values, so force
    it to be treated as an unsigned int. */
 
-#define ADDR2UINT(addr) ((uintptr_t) ((char*) (addr) - (char*) NULL))
+#define ADDR2UINT(addr)      ((uintptr_t) ((char*) (addr) - (char*) NULL))
 #define RESIDUAL(addr,bsize) ((uintptr_t) (ADDR2UINT (addr) % (bsize)))
 
 /* Determine the amount of memory spanned by the initial heap table
    (not an absolute limit). */
 
-#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
+#define HEAP (INT_BIT > 16 ? 4194304 : 65536)
 
 /* Number of contiguous free blocks allowed to build up at the end of
    memory before they will be returned to the system.
    FIXME: this is not used anymore: we never return memory to the system. */
-#define FINAL_FREE_BLOCKS 8
+#define FINAL_FREE_BLOCKS 8
 
 /* Where to start searching the free list when looking for new memory.
    The two possible values are 0 and heapindex. Starting at 0 seems to
    reduce total memory usage, while starting at heapindex seems to run faster. */
-#define MALLOC_SEARCH_START mdp -> heapindex
+#define MALLOC_SEARCH_START mdp -> heapindex
 
 /* Address to block number and vice versa. */
 
@@ -96,6 +97,28 @@ struct list {
   struct list *prev;
 };
 
+/* Statistics available to the user. */
+struct mstats
+{
+  size_t bytes_total;  /* Total size of the heap. */
+  size_t chunks_used;  /* Chunks allocated by the user. */
+  size_t bytes_used;   /* Byte total of user-allocated chunks. */
+  size_t chunks_free;  /* Chunks in the free list. */
+  size_t bytes_free;   /* Byte total of chunks in the free list. */
+};
+
+typedef struct s_heap_area {
+  int block;
+  int fragment;
+} s_heap_area_t, *heap_area_t;
+
+typedef struct s_heap_area_pair {
+  int block1;
+  int fragment1;
+  int block2;
+  int fragment2;
+} s_heap_area_pair_t, *heap_area_pair_t;
+
 /* Data structure giving per-block information.
  *
  * There is one such structure in the mdp->heapinfo array per block used in that heap,
@@ -119,26 +142,27 @@ struct list {
  * The type field is consistently updated for every block, even within clusters of blocks.
  * You can crawl the array and rely on that value.
  *
- * TODO:
- *   - make room to store the backtrace of where the blocks and fragment were malloced, too.
  */
 typedef struct {
   int type; /* 0: busy large block
-                >0: busy fragmented (fragments of size 2^type bytes)
-                <0: free block */
+               >0: busy fragmented (fragments of size 2^type bytes)
+               <0: free block */
+
   union {
     /* Heap information for a busy block. */
     struct {
       size_t nfree;  /* Free fragments in a fragmented block. */
       size_t first;  /* First free fragment of the block. */
-      unsigned short frag_size[MAX_FRAGMENT_PER_BLOCK];
+      short frag_size[MAX_FRAGMENT_PER_BLOCK];
       void *bt[MAX_FRAGMENT_PER_BLOCK][XBT_BACKTRACE_SIZE]; /* Where it was malloced (or last realloced) */
+      heap_area_t equal_to[MAX_FRAGMENT_PER_BLOCK];
     } busy_frag;
     struct {
       size_t size;      /* Size (in blocks) of a large cluster. */
       size_t busy_size; /* Actually used space, in bytes */
       void *bt[XBT_BACKTRACE_SIZE]; /* Where it was malloced (or last realloced) */
       int bt_size;
+      heap_area_t equal_to;
     } busy_block;
     /* Heap information for a free block (that may be the first of a free cluster). */
     struct {
@@ -222,19 +246,17 @@ struct mdesc {
 
   int fd;
 
-};
-
-int mmalloc_compare_mdesc(struct mdesc *mdp1, struct mdesc *mdp2, void* s_heap, void* r_heap);
+  /* Instrumentation. */
 
-void mmalloc_display_info(void *h);
+  struct mstats heapstats;
 
-const char* get_addr_memory_map(void *addr, void* s_heap, void* r_heap);
+};
 
 /* Bits to look at in the malloc descriptor flags word */
 
-#define MMALLOC_DEVZERO (1 << 0) /* Have mapped to /dev/zero */
+#define MMALLOC_DEVZERO     (1 << 0)   /* Have mapped to /dev/zero */
 #define MMALLOC_ANONYMOUS (1 << 1) /* Use anonymous mapping */
-#define MMALLOC_INITIALIZED (1 << 2) /* Initialized mmalloc */
+#define MMALLOC_INITIALIZED (1 << 2)   /* Initialized mmalloc */
 
 /* A default malloc descriptor for the single sbrk() managed region. */
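
The block-geometry arithmetic in the patch is easy to misread in macro form, so here is a minimal standalone C sketch of it. The four macro bodies are copied verbatim from the patch; the driver and the concrete numbers (a host with 32-bit int, hence 4096-byte blocks) are illustrative assumptions, not part of the patch.

/* block_geometry.c -- standalone sketch of the mmalloc block geometry. */
#include <limits.h>   /* CHAR_BIT */
#include <stdio.h>

/* Copied verbatim from the patch. */
#define INT_BIT        (CHAR_BIT * sizeof(int))
#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE      ((unsigned int) 1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

int main(void)
{
  /* With 32-bit ints, BLOCKLOG is 12, so a block spans 4096 bytes. */
  printf("BLOCKSIZE      = %u\n", BLOCKSIZE);

  /* BLOCKIFY rounds a byte count up to whole blocks:
     5000 bytes -> (5000 + 4095) / 4096 = 2 blocks. */
  printf("BLOCKIFY(5000) = %u\n", (unsigned) BLOCKIFY(5000));
  return 0;
}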
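The three-way convention of the type field (0, >0, <0) is the invariant that the heapinfo comment says you can rely on while crawling mdp->heapinfo. The sketch below decodes it; my_heapinfo_t is a hypothetical, trimmed stand-in for the anonymous typedef in the patch, keeping only the fields this example needs.

/* heapinfo_type.c -- decoding the 'type' convention of the heapinfo array. */
#include <stddef.h>
#include <stdio.h>

typedef struct {      /* hypothetical subset of the patched typedef */
  int type;           /* 0: busy large block
                         >0: busy fragmented (fragments of size 2^type bytes)
                         <0: free block */
  size_t busy_size;   /* stand-in for busy_block.busy_size */
} my_heapinfo_t;

static void describe_block(const my_heapinfo_t *info, int block)
{
  if (info->type < 0)
    printf("block %d: free\n", block);
  else if (info->type == 0)
    printf("block %d: busy large block (%zu bytes in use)\n",
           block, info->busy_size);
  else   /* fragmented: fragments of 2^type bytes each */
    printf("block %d: busy, fragmented into %d-byte fragments\n",
           block, 1 << info->type);
}

int main(void)
{
  my_heapinfo_t sample[] = { { 0, 5000 }, { 9, 0 }, { -1, 0 } };
  for (int i = 0; i < 3; i++)
    describe_block(&sample[i], i);
  return 0;
}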
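Finally, the patch never shows how the new s_heap_area_t, s_heap_area_pair_t, and equal_to fields are used. Given the removed mmalloc_compare_mdesc() prototype, a plausible reading, though only an assumption, is that a heap-comparison pass records which block or fragment of one heap has been matched with which of another. The new_pair() helper below is purely hypothetical under that reading.

/* heap_area_pair.c -- illustrative only; see the assumption stated above. */
#include <stdlib.h>

/* Types copied from the patch. */
typedef struct s_heap_area {
  int block;
  int fragment;
} s_heap_area_t, *heap_area_t;

typedef struct s_heap_area_pair {
  int block1;
  int fragment1;
  int block2;
  int fragment2;
} s_heap_area_pair_t, *heap_area_pair_t;

/* Hypothetical helper: remember that (block1, fragment1) in one heap was
   matched with (block2, fragment2) in the other; -1 as a fragment index
   could denote a whole large block. */
static heap_area_pair_t new_pair(int block1, int fragment1, int block2, int fragment2)
{
  heap_area_pair_t pair = malloc(sizeof(s_heap_area_pair_t));
  if (pair) {
    pair->block1    = block1;
    pair->fragment1 = fragment1;
    pair->block2    = block2;
    pair->fragment2 = fragment2;
  }
  return pair;
}

int main(void)
{
  heap_area_pair_t p = new_pair(42, -1, 57, -1); /* two matched large blocks */
  free(p);
  return 0;
}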