X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/47ac9cef9d5cf9ccf4739355db1e0bb3d90dda33..08d324beef1e0fa723013987a98ed7740d2f8343:/src/xbt/mmalloc/mmprivate.h

diff --git a/src/xbt/mmalloc/mmprivate.h b/src/xbt/mmalloc/mmprivate.h
index a9018c5d98..ccc667dc8f 100644
--- a/src/xbt/mmalloc/mmprivate.h
+++ b/src/xbt/mmalloc/mmprivate.h
@@ -16,7 +16,9 @@
 #include "portable.h"
 #include "xbt/xbt_os_thread.h"
 #include "xbt/mmalloc.h"
+#include "xbt/ex.h"
 #include <semaphore.h>
+#include <stdint.h>
 
 #ifdef HAVE_LIMITS_H
 #  include <limits.h>
@@ -26,24 +28,34 @@
 #  endif
 #endif
 
-#ifndef MIN
-#  define MIN(A, B) ((A) < (B) ? (A) : (B))
-#endif
-
-#define MMALLOC_MAGIC       "mmalloc"   /* Mapped file magic number */
-#define MMALLOC_MAGIC_SIZE  8           /* Size of magic number buf */
-#define MMALLOC_VERSION     1           /* Current mmalloc version */
+#define MMALLOC_MAGIC       "mmalloc"   /* Mapped file magic number */
+#define MMALLOC_MAGIC_SIZE  8           /* Size of magic number buf */
+#define MMALLOC_VERSION     2           /* Current mmalloc version */
 
 /* The allocator divides the heap into blocks of fixed size; large
    requests receive one or more whole blocks, and small requests
    receive a fragment of a block.  Fragment sizes are powers of two,
    and all fragments of a block are the same size.  When all the
-   fragments in a block have been freed, the block itself is freed.  */
+   fragments in a block have been freed, the block itself is freed.
+
+   FIXME: we are not targeting 16-bit machines anymore; update values */
 
-#define INT_BIT        (CHAR_BIT * sizeof(int))
-#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
-#define BLOCKSIZE      ((unsigned int) 1 << BLOCKLOG)
-#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
+#define INT_BIT        (CHAR_BIT * sizeof(int))
+#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
+#define BLOCKSIZE      ((unsigned int) 1 << BLOCKLOG)
+#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
+
+/* We keep fragment-specific meta-data for introspection purposes, and this
+ * information is kept in fixed-length arrays. Here is how their size is
+ * computed.
+ *
+ * Never make SMALLEST_POSSIBLE_MALLOC smaller than sizeof(struct list),
+ * because we need to enlist the free fragments.
+ */
+
+//#define SMALLEST_POSSIBLE_MALLOC (sizeof(struct list))
+#define SMALLEST_POSSIBLE_MALLOC (16*sizeof(struct list))
+#define MAX_FRAGMENT_PER_BLOCK (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC)
 
 /* The difference between two pointers is a signed int.  On machines where
    the data addresses have the high bit set, we need to ensure that the
@@ -52,25 +64,25 @@
    sign of the result is machine dependent for negative values, so force
    it to be treated as an unsigned int. */
 
-#define ADDR2UINT(addr)      ((unsigned int) ((char*) (addr) - (char*) NULL))
-#define RESIDUAL(addr,bsize) ((unsigned int) (ADDR2UINT (addr) % (bsize)))
+#define ADDR2UINT(addr)      ((uintptr_t) ((char*) (addr) - (char*) NULL))
+#define RESIDUAL(addr,bsize) ((uintptr_t) (ADDR2UINT (addr) % (bsize)))
 
 /* Determine the amount of memory spanned by the initial heap table
    (not an absolute limit).  */
 
-#define HEAP    (INT_BIT > 16 ? 4194304 : 65536)
+#define HEAP    (INT_BIT > 16 ? 4194304 : 65536)
 
 /* Number of contiguous free blocks allowed to build up at the end of
-   memory before they will be returned to the system.  */
-
-#define FINAL_FREE_BLOCKS  8
+   memory before they will be returned to the system.
+   FIXME: this is not used anymore: we never return memory to the system. */
+#define FINAL_FREE_BLOCKS  8
 
 /* Where to start searching the free list when looking for new memory.
   The two possible values are 0 and heapindex.
   Starting at 0 seems to reduce total memory usage, while starting at
   heapindex seems to run faster.  */
 
-#define MALLOC_SEARCH_START  mdp -> heapindex
+#define MALLOC_SEARCH_START  mdp -> heapindex
 
 /* Address to block number and vice versa.  */
 
@@ -78,64 +90,75 @@
 #define ADDRESS(B) ((void*) (((ADDR2UINT(B)) - 1) * BLOCKSIZE + (char*) mdp -> heapbase))
 
-const char *xbt_thread_self_name(void);
-
-/* Data structure giving per-block information.  */
-typedef union {
-  /* Heap information for a busy block.  */
-  struct {
-    /* Zero for a large block, or positive giving the
-       logarithm to the base two of the fragment size.  */
-    int type;
-    union {
-      struct {
-        size_t nfree;  /* Free fragments in a fragmented block.  */
-        size_t first;  /* First free fragment of the block.  */
-      } frag;
-      struct {
-        size_t size;   /* Size (in blocks) of a large cluster.  */
-        size_t busy_size;
-      } block;
-    } info;
-  } busy;
-  /* Heap information for a free block (that may be the first of
-     a free cluster).  */
-  struct {
-    size_t size;       /* Size (in blocks) of a free cluster.  */
-    size_t next;       /* Index of next free cluster.  */
-    size_t prev;       /* Index of previous free cluster.  */
-  } free;
-} malloc_info;
-
-/* List of blocks allocated with `mmemalign' (or `mvalloc').  */
-
-struct alignlist {
-  struct alignlist *next;
-  void *aligned;       /* The address that mmemaligned returned.  */
-  void *exact;         /* The address that malloc returned.  */
-};
-
 /* Doubly linked lists of free fragments.  */
-
 struct list {
   struct list *next;
   struct list *prev;
 };
 
-/* Statistics available to the user.
-   FIXME: By design, the internals of the malloc package are no longer
-   exported to the user via an include file, so access to this data needs
-   to be via some other mechanism, such as mmstat_<something> where the
-   return value is the <something> the user is interested in. */
-
-struct mstats {
-  size_t bytes_total;  /* Total size of the heap. */
-  size_t chunks_used;  /* Chunks allocated by the user. */
-  size_t bytes_used;   /* Byte total of user-allocated chunks. */
-  size_t chunks_free;  /* Chunks in the free list. */
-  size_t bytes_free;   /* Byte total of chunks in the free list. */
+/* Statistics available to the user. */
+struct mstats
+{
+  size_t bytes_total;  /* Total size of the heap. */
+  size_t chunks_used;  /* Chunks allocated by the user. */
+  size_t bytes_used;   /* Byte total of user-allocated chunks. */
+  size_t chunks_free;  /* Chunks in the free list. */
+  size_t bytes_free;   /* Byte total of chunks in the free list. */
 };
 
+/* Data structure giving per-block information.
+ *
+ * There is one such structure in the mdp->heapinfo array per block used
+ * in that heap; the array index is the block number.
+ *
+ * There are several types of blocks in memory:
+ *  - full busy blocks: used when we are asked to malloc a block whose
+ *    size is > BLOCKSIZE/2. In this situation, the full block is given
+ *    to the malloc.
+ *
+ *  - fragmented busy blocks: used when asked for smaller amounts of memory.
+ *    Fragment sizes are only powers of 2. When looking for such a free
+ *    fragment, we get one from mdp->fraghead (which contains a linked list
+ *    of blocks fragmented at that size and containing a free fragment),
+ *    or we get a fresh block that we fragment.
+ *
+ *  - free blocks are grouped into clusters that are chained together.
+ *    When looking for free blocks, we traverse mdp->heapinfo looking
+ *    for a cluster of free blocks that would be large enough.
+ *
+ *    The size of the cluster is only to be trusted in the first block
+ *    of the cluster, not in the middle blocks.
+ *
+ * The type field is consistently updated for every block, even within
+ * clusters of blocks. You can crawl the array and rely on that value.
+ *
+ * TODO:
+ *  - make room to store the backtrace of where the blocks and fragments
+ *    were malloced, too.
+ */
+typedef struct {
+  int type; /*  0: busy large block
+               >0: busy fragmented (fragments of size 2^type bytes)
+               <0: free block */
+  union {
+    /* Heap information for a busy block.  */
+    struct {
+      size_t nfree;  /* Free fragments in a fragmented block.  */
+      size_t first;  /* First free fragment of the block.  */
+      unsigned short frag_size[MAX_FRAGMENT_PER_BLOCK];
+      void *bt[MAX_FRAGMENT_PER_BLOCK][XBT_BACKTRACE_SIZE]; /* Where it was malloced (or last realloced) */
+    } busy_frag;
+    struct {
+      size_t size;      /* Size (in blocks) of a large cluster.  */
+      size_t busy_size; /* Actually used space, in bytes */
+      void *bt[XBT_BACKTRACE_SIZE]; /* Where it was malloced (or last realloced) */
+      int bt_size;
+    } busy_block;
+    /* Heap information for a free block (that may be the first of a free cluster).  */
+    struct {
+      size_t size;  /* Size (in blocks) of a free cluster.  */
+      size_t next;  /* Index of next free cluster.  */
+      size_t prev;  /* Index of previous free cluster.  */
+    } free_block;
+  };
+} malloc_info;
+
 /* Internal structure that defines the format of the malloc-descriptor.
    This gets written to the base address of the region that mmalloc is
    managing, and thus also becomes the file header for the mapped file,
@@ -151,7 +174,7 @@ struct mdesc {
 
   /* Chained lists of mdescs */
   struct mdesc *next_mdesc;
-  
+
   /* The "magic number" for an mmalloc file. */
   char magic[MMALLOC_MAGIC_SIZE];
 
@@ -165,47 +188,6 @@ struct mdesc {
   /* Some flag bits to keep track of various internal things. */
   unsigned int flags;
 
-  /* If a system call made by the mmalloc package fails, the errno is
-     preserved for future examination. */
-  int saved_errno;
-
-  /* Pointer to the function that is used to get more core, or return core
-     to the system, for requests using this malloc descriptor.  For memory
-     mapped regions, this is the mmap() based routine.  There may also be
-     a single malloc descriptor that points to an sbrk() based routine
-     for systems without mmap() or for applications that call the mmalloc()
-     package with a NULL malloc descriptor.
-
-     FIXME: For mapped regions shared by more than one process, this
-     needs to be maintained on a per-process basis. */
-  void *(*morecore) (struct mdesc * mdp, int size);
-
-  /* Pointer to the function that causes an abort when the memory checking
-     features are activated.  By default this is set to abort(), but can
-     be set to another function by the application using mmalloc().
-
-     FIXME: For mapped regions shared by more than one process, this
-     needs to be maintained on a per-process basis. */
-  void (*abortfunc) (void);
-
-  /* Debugging hook for free.
-
-     FIXME: For mapped regions shared by more than one process, this
-     needs to be maintained on a per-process basis. */
-  void (*mfree_hook) (void *mdp, void *ptr);
-
-  /* Debugging hook for `malloc'.
-
-     FIXME: For mapped regions shared by more than one process, this
-     needs to be maintained on a per-process basis. */
-  void *(*mmalloc_hook) (void *mdp, size_t size);
-
-  /* Debugging hook for realloc.
-
-     FIXME: For mapped regions shared by more than one process, this
-     needs to be maintained on a per-process basis. */
-  void *(*mrealloc_hook) (void *mdp, void *ptr, size_t size);
-
   /* Number of info entries.  */
   size_t heapsize;
 
@@ -220,24 +202,13 @@ struct mdesc {
   size_t heaplimit;
 
   /* Block information table.
-     Allocated with malign/__mmalloc_free (not mmalloc/mfree). */
+     Allocated with malign/mfree (not mmalloc/mfree). */
   /* Table indexed by block number giving per-block information.  */
   malloc_info *heapinfo;
 
-  /* Instrumentation.  */
-
-  struct mstats heapstats;
-
-  /* Free list headers for each fragment size. */
-  /* Free lists for each fragment size.  */
-
+  /* List of all blocks containing free fragments of this size.
+     The array index is the log2 of the requested size. */
   struct list fraghead[BLOCKLOG];
 
-  /* List of blocks allocated by memalign.  */
-
-  struct alignlist *aligned_blocks;
-
   /* The base address of the memory region for this malloc heap.  This
      is the location where the bookkeeping data for mmap and for malloc
     begins. */
@@ -261,66 +232,44 @@ struct mdesc {
 
   int fd;
 
-};
+  /* Instrumentation.  */
 
-int mmalloc_compare_heap(void *h1, void *h2, void *std_heap_addr);
+  struct mstats heapstats;
+
+};
 
-int mmalloc_compare_mdesc(struct mdesc *mdp1, struct mdesc *mdp2, void *std_heap_addr);
+int mmalloc_compare_mdesc(struct mdesc *mdp1, struct mdesc *mdp2);
 
 void mmalloc_display_info(void *h);
 
+//void *get_end_addr_heap(void *s_heap);
+
 /* Bits to look at in the malloc descriptor flags word */
 
-#define MMALLOC_DEVZERO      (1 << 0)  /* Have mapped to /dev/zero */
+#define MMALLOC_DEVZERO      (1 << 0)  /* Have mapped to /dev/zero */
 #define MMALLOC_ANONYMOUS    (1 << 1)  /* Use anonymous mapping */
-#define MMALLOC_INITIALIZED  (1 << 2)  /* Initialized mmalloc */
-#define MMALLOC_MMCHECK_USED (1 << 3)  /* mmcheckf() called already */
-
-/* Internal version of `mfree' used in `morecore'. */
-
-extern void __mmalloc_free(struct mdesc *mdp, void *ptr);
+#define MMALLOC_INITIALIZED  (1 << 2)  /* Initialized mmalloc */
 
 /* A default malloc descriptor for the single sbrk() managed region. */
 
 extern struct mdesc *__mmalloc_default_mdp;
 
-/* Initialize the first use of the default malloc descriptor, which uses
-   an sbrk() region. */
-
-extern struct mdesc *__mmalloc_create_default_mdp(void);
-
-/* Grow or shrink a contiguous mapped region using mmap().
-   Works much like sbrk(), only faster */
-
-extern void *__mmalloc_mmap_morecore(struct mdesc *mdp, int size);
-
-
 /* Remap a mmalloc region that was previously mapped. */
 
-extern void *__mmalloc_remap_core(struct mdesc *mdp);
-
-/* Macro to convert from a user supplied malloc descriptor to pointer to the
-   internal malloc descriptor.  If the user supplied descriptor is NULL, then
-   use the default internal version, initializing it if necessary.  Otherwise
-   just cast the user supplied version (which is void *) to the proper type
-   (struct mdesc *). */
-
-#define MD_TO_MDP(md) \
-  ((md) == NULL \
-   ? __mmalloc_default_mdp \
-   : (struct mdesc *) (md))
-
-/* Thread-safety (if the sem is already created)*/
-#define LOCK(md) \
-  do {\
-    struct mdesc *lock_local_mdp = MD_TO_MDP(md); \
-    sem_wait(&lock_local_mdp->sem); \
-  } while (0)
-
-#define UNLOCK(md) \
-  do { \
-    struct mdesc *unlock_local_mdp = MD_TO_MDP(md); \
-    sem_post(&unlock_local_mdp->sem); \
-  } while (0)
+extern void *__mmalloc_remap_core(xbt_mheap_t mdp);
+
+/* Get core for the memory region specified by MDP, using SIZE as the
+   amount to either add to or subtract from the existing region.  Works
+   like sbrk(), but using mmap().  */
+extern void *mmorecore(struct mdesc *mdp, int size);
+
+/* Thread-safety (if the sem is already created)
+ *
+ * This is mandatory in the case where the user runs a parallel simulation
+ * in a model-checking enabled tree. Without this protection, our malloc
+ * implementation will not like multi-threading AT ALL.
+ */
+#define LOCK(mdp) sem_wait(&mdp->sem)
+#define UNLOCK(mdp) sem_post(&mdp->sem)
 
 #endif                          /* __MMPRIVATE_H */
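
To make the block geometry in this patch concrete, here is a minimal,
self-contained sketch (not part of the patch) of what BLOCKSIZE, BLOCKIFY and
MAX_FRAGMENT_PER_BLOCK evaluate to on a typical 32/64-bit host. The struct
list stand-in mirrors the two-pointer free-list node declared in the header;
the rest is the macros' own arithmetic.

#include <stdio.h>
#include <limits.h>

#define INT_BIT        (CHAR_BIT * sizeof(int))
#define BLOCKLOG       (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE      ((unsigned int) 1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

struct list { struct list *next; struct list *prev; };

#define SMALLEST_POSSIBLE_MALLOC (16 * sizeof(struct list))
#define MAX_FRAGMENT_PER_BLOCK   (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC)

int main(void)
{
  /* INT_BIT is 32 here, so BLOCKLOG is 12 and blocks are 4096 bytes. */
  printf("BLOCKSIZE              = %u\n", BLOCKSIZE);

  /* BLOCKIFY rounds a byte count up to whole blocks: 5000 bytes span 2. */
  printf("BLOCKIFY(5000)         = %u\n", (unsigned) BLOCKIFY(5000));

  /* With 16-byte list nodes (64-bit pointers), the smallest fragment is
     256 bytes, hence at most 16 fragments fit in one block. */
  printf("MAX_FRAGMENT_PER_BLOCK = %u\n", (unsigned) MAX_FRAGMENT_PER_BLOCK);
  return 0;
}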
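
The new malloc_info doc-comment guarantees that the type field is valid in
every heapinfo entry, while cluster sizes are only to be trusted in a
cluster's first block. A hypothetical crawl built on that invariant could
look as follows; mini_info is a cut-down stand-in for malloc_info (the
frag_size and backtrace arrays are omitted), and all names here are
illustrative, not SimGrid API.

#include <stdio.h>
#include <stddef.h>

typedef struct {
  int type;                  /* 0: busy large, >0: log2 of fragment size, <0: free */
  union {
    struct { size_t nfree; } busy_frag;
    struct { size_t size; size_t busy_size; } busy_block;
    struct { size_t size; } free_block;
  };
} mini_info;

/* Walk a heapinfo-style array: skip over whole clusters using the size
   stored in their first block, and trust `type` in every entry. */
static void walk(const mini_info *info, size_t nblocks)
{
  size_t i = 1;                        /* block numbers start at 1 */
  while (i < nblocks) {
    if (info[i].type < 0) {            /* first block of a free cluster */
      printf("block %zu: free cluster of %zu block(s)\n", i, info[i].free_block.size);
      i += info[i].free_block.size;
    } else if (info[i].type == 0) {    /* busy large block (cluster) */
      printf("block %zu: busy, %zu byte(s) used\n", i, info[i].busy_block.busy_size);
      i += info[i].busy_block.size;
    } else {                           /* fragmented busy block */
      printf("block %zu: fragments of 2^%d bytes, %zu free\n",
             i, info[i].type, info[i].busy_frag.nfree);
      i++;
    }
  }
}

int main(void)
{
  static mini_info demo[5];            /* zero-initialized demo heap table */
  demo[1].type = 0;  demo[1].busy_block.size = 2; demo[1].busy_block.busy_size = 5000;
  demo[3].type = 4;  demo[3].busy_frag.nfree = 3;   /* 16-byte fragments */
  demo[4].type = -1; demo[4].free_block.size = 1;
  walk(demo, 5);
  return 0;
}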
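
Finally, the simplified LOCK/UNLOCK macros only work if the descriptor's
semaphore was created beforehand, as the header's comment warns. A sketch of
the intended usage pattern, with a hypothetical mdesc_stub and guarded_alloc
standing in for the real descriptor and entry points:

#include <semaphore.h>
#include <stddef.h>

struct mdesc_stub { sem_t sem; };      /* only the semaphore matters here */

#define LOCK(mdp)   sem_wait(&mdp->sem)
#define UNLOCK(mdp) sem_post(&mdp->sem)

/* Take the per-heap semaphore around the critical section, as every
   allocating entry point must. */
static void *guarded_alloc(struct mdesc_stub *mdp, size_t size)
{
  void *result;
  LOCK(mdp);
  result = NULL;                       /* ... actual allocation work ... */
  (void) size;
  UNLOCK(mdp);
  return result;
}

int main(void)
{
  struct mdesc_stub md;
  sem_init(&md.sem, 0, 1);             /* the sem must exist before the first LOCK */
  (void) guarded_alloc(&md, 100);
  sem_destroy(&md.sem);
  return 0;
}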