X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/00255adf47970918999b2b4495ce8a69b036e68a..78e4c652321b99db9784f18451e2ab5b818db009:/src/xbt/mmalloc/mmprivate.h diff --git a/src/xbt/mmalloc/mmprivate.h b/src/xbt/mmalloc/mmprivate.h index 8352e13379..5e6609d9c1 100644 --- a/src/xbt/mmalloc/mmprivate.h +++ b/src/xbt/mmalloc/mmprivate.h @@ -1,23 +1,30 @@ -/* Declarations for `mmalloc' and friends. - Copyright 1990, 1991, 1992 Free Software Foundation +/* Declarations for `mmalloc' and friends. */ - Written May 1989 by Mike Haertel. - Heavily modified Mar 1992 by Fred Fish. (fnf@cygnus.com) */ - -/* Copyright (c) 2010. The SimGrid Team. - * All rights reserved. */ +/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */ /* This program is free software; you can redistribute it and/or modify it * under the terms of the license (GNU LGPL) which comes with this package. */ -#ifndef __MMPRIVATE_H -#define __MMPRIVATE_H 1 +/* Copyright 1990, 1991, 1992 Free Software Foundation + + Written May 1989 by Mike Haertel. + Heavily modified Mar 1992 by Fred Fish. (fnf@cygnus.com) */ + +#ifndef XBT_MMPRIVATE_H +#define XBT_MMPRIVATE_H 1 + +#include +#include -#include "portable.h" +#include "src/internal_config.h" #include "xbt/xbt_os_thread.h" #include "xbt/mmalloc.h" #include "xbt/ex.h" -#include +#include "xbt/dynar.h" +#include "xbt/swag.h" + +#include +#include #ifdef HAVE_LIMITS_H # include @@ -27,20 +34,34 @@ # endif #endif -#define MMALLOC_MAGIC "mmalloc" /* Mapped file magic number */ -#define MMALLOC_MAGIC_SIZE 8 /* Size of magic number buf */ -#define MMALLOC_VERSION 1 /* Current mmalloc version */ +#define MMALLOC_MAGIC "mmalloc" /* Mapped file magic number */ +#define MMALLOC_MAGIC_SIZE 8 /* Size of magic number buf */ +#define MMALLOC_VERSION 2 /* Current mmalloc version */ /* The allocator divides the heap into blocks of fixed size; large requests receive one or more whole blocks, and small requests receive a fragment of a block. Fragment sizes are powers of two, and all fragments of a block are the same size. When all the - fragments in a block have been freed, the block itself is freed. */ + fragments in a block have been freed, the block itself is freed. + + FIXME: we are not targeting 16bits machines anymore; update values */ -#define INT_BIT (CHAR_BIT * sizeof(int)) -#define BLOCKLOG (INT_BIT > 16 ? 12 : 9) -#define BLOCKSIZE ((unsigned int) 1 << BLOCKLOG) -#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE) +#define INT_BIT (CHAR_BIT * sizeof(int)) +#define BLOCKLOG (INT_BIT > 16 ? 12 : 9) +#define BLOCKSIZE ((unsigned int) 1 << BLOCKLOG) +#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE) + +/* We keep fragment-specific meta-data for introspection purposes, and these + * information are kept in fixed lenght arrays. Here is the computation of + * that size. + * + * Never make SMALLEST_POSSIBLE_MALLOC smaller than sizeof(list) because we + * need to enlist the free fragments. + */ + +//#define SMALLEST_POSSIBLE_MALLOC (sizeof(struct list)) +#define SMALLEST_POSSIBLE_MALLOC (16*sizeof(struct list)) +#define MAX_FRAGMENT_PER_BLOCK (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC) /* The difference between two pointers is a signed int. On machines where the data addresses have the high bit set, we need to ensure that the @@ -49,25 +70,25 @@ sign of the result is machine dependent for negative values, so force it to be treated as an unsigned int. 
*/ -#define ADDR2UINT(addr) ((unsigned int) ((char*) (addr) - (char*) NULL)) -#define RESIDUAL(addr,bsize) ((unsigned int) (ADDR2UINT (addr) % (bsize))) +#define ADDR2UINT(addr) ((uintptr_t) ((char*) (addr) - (char*) NULL)) +#define RESIDUAL(addr,bsize) ((uintptr_t) (ADDR2UINT (addr) % (bsize))) /* Determine the amount of memory spanned by the initial heap table (not an absolute limit). */ -#define HEAP (INT_BIT > 16 ? 4194304 : 65536) +#define HEAP (INT_BIT > 16 ? 4194304 : 65536) /* Number of contiguous free blocks allowed to build up at the end of - memory before they will be returned to the system. */ - -#define FINAL_FREE_BLOCKS 8 + memory before they will be returned to the system. + FIXME: this is not used anymore: we never return memory to the system. */ +#define FINAL_FREE_BLOCKS 8 /* Where to start searching the free list when looking for new memory. The two possible values are 0 and heapindex. Starting at 0 seems to reduce total memory usage, while starting at heapindex seems to run faster. */ -#define MALLOC_SEARCH_START mdp -> heapindex +#define MALLOC_SEARCH_START mdp -> heapindex /* Address to block number and vice versa. */ @@ -75,12 +96,33 @@ #define ADDRESS(B) ((void*) (((ADDR2UINT(B)) - 1) * BLOCKSIZE + (char*) mdp -> heapbase)) -const char *xbt_thread_self_name(void); +SG_BEGIN_DECL() + +/* Doubly linked lists of free fragments. */ +struct list { + struct list *next; + struct list *prev; +}; + +/* Statistics available to the user. */ +struct mstats +{ + size_t bytes_total; /* Total size of the heap. */ + size_t chunks_used; /* Chunks allocated by the user. */ + size_t bytes_used; /* Byte total of user-allocated chunks. */ + size_t chunks_free; /* Chunks in the free list. */ + size_t bytes_free; /* Byte total of chunks in the free list. */ +}; + +#define MMALLOC_TYPE_HEAPINFO (-2) +#define MMALLOC_TYPE_FREE (-1) +#define MMALLOC_TYPE_UNFRAGMENTED 0 +/* >0 values are fragmented blocks */ /* Data structure giving per-block information. * - * There is one such structure in the mdp->heapinfo array, - * that is addressed by block number. + * There is one such structure in the mdp->heapinfo array per block used in that heap, + * the array index is the block number. * * There is several types of blocks in memory: * - full busy blocks: used when we are asked to malloc a block which size is > BLOCKSIZE/2 @@ -95,154 +137,186 @@ const char *xbt_thread_self_name(void); * When looking for free blocks, we traverse the mdp->heapinfo looking * for a cluster of free blocks that would be large enough. * - * The size of the cluster is only to be trusted in the first block of the cluster. - * If the cluster results of the fusion of several clusters, the previously first - * block of their cluster will have partial data. The only information kept consistent over - * all blocks of the clusters is their type (== -1). + * The size of the cluster is only to be trusted in the first block of the cluster, not in the middle blocks. * - * Note that there is no way to determine if the block is free or busy by exploring - * this structure only. It wasn't intended to be crawled for comparison and we should fix it (TODO). + * The type field is consistently updated for every blocks, even within clusters of blocks. + * You can crawl the array and rely on that value. * - * TODO: understand whether the information are written in each blocks of a cluster (be it - * free or busy) or only in the first block of the cluster. And in the latter case, how can - * I retrieve the first block of my cluster. 
- * - * TODO: - * - add an indication of the requested size in each fragment, similarly to busy_block.busy_size - * - make room to store the backtrace of where the blocks and fragment were malloced, too. */ typedef struct { - int type; /* 0: busy large block - >0: busy fragmented (fragments of size 2^type bytes) - <0: free block */ - union { - /* Heap information for a busy block. */ - struct { - size_t nfree; /* Free fragments in a fragmented block. */ - size_t first; /* First free fragment of the block. */ - } busy_frag; - struct { - size_t size; /* Size (in blocks) of a large cluster. */ - size_t busy_size; /* Actually used space, in bytes */ - } busy_block; - /* Heap information for a free block (that may be the first of a free cluster). */ - struct { - size_t size; /* Size (in blocks) of a free cluster. */ - size_t next; /* Index of next free cluster. */ - size_t prev; /* Index of previous free cluster. */ - } free_block; - }; + s_xbt_swag_hookup_t freehook; /* to register this block as having empty frags when needed */ + int type; /* 0: busy large block + >0: busy fragmented (fragments of size 2^type bytes) + <0: free block */ + + union { + /* Heap information for a busy block. */ + struct { + size_t nfree; /* Free fragments in a fragmented block. */ + ssize_t frag_size[MAX_FRAGMENT_PER_BLOCK]; + //void *bt[MAX_FRAGMENT_PER_BLOCK][XBT_BACKTRACE_SIZE]; /* Where it was malloced (or realloced lastly) */ + int ignore[MAX_FRAGMENT_PER_BLOCK]; + } busy_frag; + struct { + size_t size; /* Size (in blocks) of a large cluster. */ + size_t busy_size; /* Actually used space, in bytes */ + //void *bt[XBT_BACKTRACE_SIZE]; /* Where it was malloced (or realloced lastly) */ + //int bt_size; + int ignore; + } busy_block; + /* Heap information for a free block (that may be the first of a free cluster). */ + struct { + size_t size; /* Size (in blocks) of a free cluster. */ + size_t next; /* Index of next free cluster. */ + size_t prev; /* Index of previous free cluster. */ + } free_block; + }; } malloc_info; -/* Doubly linked lists of free fragments. */ -struct list { - struct list *next; - struct list *prev; -}; - -/* Internal structure that defines the format of the malloc-descriptor. - This gets written to the base address of the region that mmalloc is - managing, and thus also becomes the file header for the mapped file, - if such a file exists. */ - +/** @brief Descriptor of a mmalloc area + * + * Internal structure that defines the format of the malloc-descriptor. + * This gets written to the base address of the region that mmalloc is + * managing, and thus also becomes the file header for the mapped file, + * if such a file exists. + * */ struct mdesc { - /* Semaphore locking the access to the heap */ - sem_t sem; + /** @brief Mutex locking the access to the heap */ + pthread_mutex_t mutex; + + /** @brief Number of processes that attached the heap */ + unsigned int refcount; + + /** @brief Chained lists of mdescs */ + struct mdesc *next_mdesc; + + /** @brief The "magic number" for an mmalloc file. */ + char magic[MMALLOC_MAGIC_SIZE]; + + /** @brief The size in bytes of this structure + * + * Used as a sanity check when reusing a previously created mapped file. + * */ + unsigned int headersize; + + /** @brief Version number of the mmalloc package that created this file. */ + unsigned char version; + + unsigned int options; + + /** @brief Some flag bits to keep track of various internal things. */ + unsigned int flags; + + /** @brief Number of info entries. 
*/ + size_t heapsize; + + /** @brief Pointer to first block of the heap (base of the first block). */ + void *heapbase; + + /** @brief Current search index for the heap table. + * + * Search index in the info table. + */ + size_t heapindex; + + /** @brief Limit of valid info table indices. */ + size_t heaplimit; + + /** @brief Block information table. + * + * Table indexed by block number giving per-block information. + */ + malloc_info *heapinfo; + + /* @brief List of all blocks containing free fragments of a given size. + * + * The array indice is the log2 of requested size. + * Actually only the sizes 8->11 seem to be used, but who cares? */ + s_xbt_swag_t fraghead[BLOCKLOG]; + + /* @brief Base address of the memory region for this malloc heap + * + * This is the location where the bookkeeping data for mmap and + * for malloc begins. + */ + void *base; + + /** @brief End of memory in use + * + * Some memory might be already mapped by the OS but not used + * by the heap. + * */ + void *breakval; + + /** @brief End of the current memory region for this malloc heap. + * + * This is the first location past the end of mapped memory. + * + * Compared to breakval, this value is rounded to the next memory page. + */ + void *top; + + /** @brief Open file descriptor for the file to which this malloc heap is mapped + * + * If this value is negative, MAP_ANONYMOUS memory is used. + * + * Also note that it may change each time the region is mapped and unmapped. */ + int fd; + + /* @brief Instrumentation */ + struct mstats heapstats; - /* Number of processes that attached the heap */ - unsigned int refcount; - - /* Chained lists of mdescs */ - struct mdesc *next_mdesc; - - /* The "magic number" for an mmalloc file. */ - char magic[MMALLOC_MAGIC_SIZE]; - - /* The size in bytes of this structure, used as a sanity check when reusing - a previously created mapped file. */ - unsigned int headersize; - - /* The version number of the mmalloc package that created this file. */ - unsigned char version; - - /* Some flag bits to keep track of various internal things. */ - unsigned int flags; - - /* Number of info entries. */ - size_t heapsize; - - /* Pointer to first block of the heap (base of the first block). */ - void *heapbase; - - /* Current search index for the heap table. */ - /* Search index in the info table. */ - size_t heapindex; - - /* Limit of valid info table indices. */ - size_t heaplimit; - - /* Block information table. - Allocated with malign/mfree (not mmalloc/mfree). */ - /* Table indexed by block number giving per-block information. */ - malloc_info *heapinfo; - - /* List of all blocks containing free fragments of this size. The array indice is the log2 of requested size */ - struct list fraghead[BLOCKLOG]; - - /* The base address of the memory region for this malloc heap. This - is the location where the bookkeeping data for mmap and for malloc - begins. */ - - void *base; - - /* The current location in the memory region for this malloc heap which - represents the end of memory in use. */ - - void *breakval; +}; - /* The end of the current memory region for this malloc heap. This is - the first location past the end of mapped memory. */ +/* Bits to look at in the malloc descriptor flags word */ - void *top; +#define MMALLOC_DEVZERO (1 << 0) /* Have mapped to /dev/zero */ +#define MMALLOC_ANONYMOUS (1 << 1) /* Use anonymous mapping */ +#define MMALLOC_INITIALIZED (1 << 2) /* Initialized mmalloc */ - /* Open file descriptor for the file to which this malloc heap is mapped. 
- This will always be a valid file descriptor, since /dev/zero is used - by default if no open file is supplied by the client. Also note that - it may change each time the region is mapped and unmapped. */ +/* A default malloc descriptor for the single sbrk() managed region. */ - int fd; +XBT_PUBLIC_DATA( struct mdesc ) *__mmalloc_default_mdp; -}; +/* Remap a mmalloc region that was previously mapped. */ -int mmalloc_compare_mdesc(struct mdesc *mdp1, struct mdesc *mdp2, void *std_heap_addr); +XBT_PUBLIC( void *)__mmalloc_remap_core(xbt_mheap_t mdp); -void mmalloc_display_info(void *h); +XBT_PUBLIC( void *)mmorecore(struct mdesc *mdp, ssize_t size); -/* Bits to look at in the malloc descriptor flags word */ +/** Thread-safety (if the mutex is already created) + * + * This is mandatory in the case where the user runs a parallel simulation + * in a model-checking enabled tree. Without this protection, our malloc + * implementation will not like multi-threading AT ALL. + */ +#define LOCK(mdp) pthread_mutex_lock(&mdp->mutex) +#define UNLOCK(mdp) pthread_mutex_unlock(&mdp->mutex) -#define MMALLOC_DEVZERO (1 << 0) /* Have mapped to /dev/zero */ -#define MMALLOC_ANONYMOUS (1 << 1) /* Use anonymous mapping */ -#define MMALLOC_INITIALIZED (1 << 2) /* Initialized mmalloc */ +static inline void mmalloc_paranoia(struct mdesc *mdp){ -/* A default malloc descriptor for the single sbrk() managed region. */ + /* nothing to fear for no */ -extern struct mdesc *__mmalloc_default_mdp; +} -/* Remap a mmalloc region that was previously mapped. */ +static inline int mmalloc_get_increment(malloc_info* heapinfo) { + if (heapinfo->type < 0) { + return heapinfo->free_block.size; + } else if (heapinfo->type == 0) { + return heapinfo->busy_block.size; + } else { + return 1; + } +} -extern void *__mmalloc_remap_core(xbt_mheap_t mdp); +XBT_PRIVATE int malloc_use_mmalloc(void); -/* Get core for the memory region specified by MDP, using SIZE as the - amount to either add to or subtract from the existing region. Works - like sbrk(), but using mmap(). */ -extern void *mmorecore(struct mdesc *mdp, int size); +XBT_PRIVATE int mmalloc_exec_using_mm(int argc, const char** argv); +XBT_PRIVATE void mmalloc_ensure_using_mm(int argc, const char** argv); -/* Thread-safety (if the sem is already created) FIXME: KILLIT*/ -#define LOCK(mdp) \ - sem_wait(&mdp->sem) +XBT_PRIVATE size_t mmalloc_get_bytes_used_remote(size_t heaplimit, const malloc_info* heapinfo); -#define UNLOCK(mdp) \ - sem_post(&mdp->sem) +SG_END_DECL() -#endif /* __MMPRIVATE_H */ +#endif
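
To make the block/fragment arithmetic at the top of the header concrete, here is a minimal standalone sketch of the size macros. It assumes INT_BIT > 16, so BLOCKLOG is 12 and BLOCKSIZE is 4096; the struct list stand-in mirrors the header's two-pointer definition, so the printed values depend on the host's pointer size.

#include <limits.h>
#include <stdio.h>

struct list { struct list *next; struct list *prev; };   /* free-fragment links, as in the header */

#define INT_BIT                  (CHAR_BIT * sizeof(int))
#define BLOCKLOG                 (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE                ((unsigned int) 1 << BLOCKLOG)
#define BLOCKIFY(SIZE)           (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
#define SMALLEST_POSSIBLE_MALLOC (16 * sizeof(struct list))
#define MAX_FRAGMENT_PER_BLOCK   (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC)

int main(void)
{
  /* Large requests (> BLOCKSIZE/2) are rounded up to whole blocks. */
  printf("BLOCKSIZE                = %u bytes\n", BLOCKSIZE);
  printf("BLOCKIFY(5000)           = %zu block(s)\n", (size_t) BLOCKIFY(5000));  /* 2 with 4 KiB blocks */

  /* Small requests share a block; the fixed-length per-fragment metadata
   * arrays in malloc_info are dimensioned by this worst case. */
  printf("SMALLEST_POSSIBLE_MALLOC = %zu bytes\n", (size_t) SMALLEST_POSSIBLE_MALLOC);
  printf("MAX_FRAGMENT_PER_BLOCK   = %zu\n", (size_t) MAX_FRAGMENT_PER_BLOCK);
  return 0;
}

On a 64-bit host this prints MAX_FRAGMENT_PER_BLOCK = 16 (32 on a 32-bit host): a block can hold at most that many of the smallest fragments, which is why busy_frag keeps fixed-length arrays of that size.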
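
The "address to block number and vice versa" macros boil down to 1-based indexing from mdp->heapbase: ADDRESS(B) resolves to heapbase + (B - 1) * BLOCKSIZE. Only ADDRESS is visible in this hunk, so the inverse helper below (block_of) is an illustrative reconstruction rather than the header's macro, and the explicit heapbase parameter and fixed 4096-byte block size are assumptions of the sketch.

#include <stddef.h>
#include <stdio.h>

#define BLOCKSIZE 4096u   /* (1 << BLOCKLOG) when BLOCKLOG == 12 */

/* What ADDRESS(B) computes, with heapbase passed explicitly. */
static void *address_of(void *heapbase, size_t block)
{
  return (char *) heapbase + (block - 1) * BLOCKSIZE;
}

/* Illustrative inverse: which block does an address fall into? */
static size_t block_of(void *heapbase, void *addr)
{
  return (size_t) ((char *) addr - (char *) heapbase) / BLOCKSIZE + 1;
}

int main(void)
{
  static char heap[10 * BLOCKSIZE];              /* stands in for mdp->heapbase */
  void *p = address_of(heap, 3);                 /* start of block 3 */
  printf("block 3 starts %td bytes into the heap\n", (char *) p - heap);   /* 8192 */
  printf("that address belongs to block %zu\n", block_of(heap, p));        /* 3 */
  return 0;
}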
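
Since the type field is now kept consistent for every block, the heapinfo array can be crawled directly, and mmalloc_get_increment() above tells how many blocks each entry accounts for. Below is a sketch of such a crawl over a trimmed-down malloc_info (the real structure also carries the swag hookup, the per-fragment size/ignore arrays and the commented-out backtraces); the synthetic table in main() is invented for the example and contains no MMALLOC_TYPE_HEAPINFO entry.

#include <stddef.h>
#include <stdio.h>

#define MMALLOC_TYPE_FREE         (-1)
#define MMALLOC_TYPE_UNFRAGMENTED   0
/* type > 0: block split into 2^type-byte fragments */

typedef struct {
  int type;
  union {
    struct { size_t nfree; } busy_frag;
    struct { size_t size; size_t busy_size; } busy_block;
    struct { size_t size; size_t next; size_t prev; } free_block;
  };
} mini_malloc_info;

/* Same decision as mmalloc_get_increment(): number of blocks described by
 * this entry (cluster size for free/unfragmented, 1 for fragmented). */
static size_t increment_of(const mini_malloc_info *hi)
{
  if (hi->type < 0)
    return hi->free_block.size;
  if (hi->type == MMALLOC_TYPE_UNFRAGMENTED)
    return hi->busy_block.size;
  return 1;
}

static void crawl(const mini_malloc_info *heapinfo, size_t heaplimit)
{
  size_t block = 1;                               /* block numbering is 1-based */
  while (block < heaplimit) {
    const mini_malloc_info *hi = &heapinfo[block];
    if (hi->type == MMALLOC_TYPE_FREE)
      printf("block %zu: free cluster of %zu block(s)\n", block, hi->free_block.size);
    else if (hi->type == MMALLOC_TYPE_UNFRAGMENTED)
      printf("block %zu: busy cluster of %zu block(s), %zu bytes in use\n",
             block, hi->busy_block.size, hi->busy_block.busy_size);
    else
      printf("block %zu: fragmented into %d-byte fragments, %zu free\n",
             block, 1 << hi->type, hi->busy_frag.nfree);
    block += increment_of(hi);                    /* only the first block of a cluster is visited */
  }
}

int main(void)
{
  /* Synthetic table: a busy cluster of 2 blocks, one block split into
   * 2^9 = 512-byte fragments, then a free cluster of 3 blocks. */
  mini_malloc_info heapinfo[7] = {{0}};
  heapinfo[1].type = MMALLOC_TYPE_UNFRAGMENTED;
  heapinfo[1].busy_block.size      = 2;
  heapinfo[1].busy_block.busy_size = 5000;
  heapinfo[3].type = 9;
  heapinfo[3].busy_frag.nfree = 3;
  heapinfo[4].type = MMALLOC_TYPE_FREE;
  heapinfo[4].free_block.size = 3;
  crawl(heapinfo, 7);
  return 0;
}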
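
Finally, the LOCK/UNLOCK macros replace the old semaphore with a pthread mutex stored in the descriptor itself. The sketch below shows only the intended discipline around code that touches the heap structures; mini_mdesc and heap_op are placeholders for this example, since the real mutex lives in struct mdesc and is set up when the heap is created or attached.

#include <pthread.h>

struct mini_mdesc { pthread_mutex_t mutex; /* ...rest of the malloc descriptor... */ };

#define LOCK(mdp)   pthread_mutex_lock(&(mdp)->mutex)
#define UNLOCK(mdp) pthread_mutex_unlock(&(mdp)->mutex)

/* Placeholder for any operation that reads or updates heapinfo/fraghead. */
static void heap_op(struct mini_mdesc *mdp)
{
  LOCK(mdp);
  /* ... allocate, free, or inspect the heap here ... */
  UNLOCK(mdp);
}

int main(void)
{
  struct mini_mdesc md;
  pthread_mutex_init(&md.mutex, NULL);
  heap_op(&md);
  pthread_mutex_destroy(&md.mutex);
  return 0;
}

As the header notes, this protection matters when a parallel simulation runs under model checking: several simulation threads may enter the allocator at once, and the per-descriptor mutex is what keeps them from corrupting the heap metadata.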