X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/ab9a2d2c7dca2b08750d70ef5769fc2c14350ab6..fe9f13ea487593ec11c6af50d8150a6743c84114:/src/xbt/mmalloc/mmprivate.h

diff --git a/src/xbt/mmalloc/mmprivate.h b/src/xbt/mmalloc/mmprivate.h
index ce97df49df..46fe161fcc 100644
--- a/src/xbt/mmalloc/mmprivate.h
+++ b/src/xbt/mmalloc/mmprivate.h
@@ -1,6 +1,6 @@
 /* Declarations for `mmalloc' and friends. */
 
-/* Copyright (c) 2010-2017. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2010-2019. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -16,12 +16,11 @@
 #include
 #include
 
+#include "swag.h"
 #include "src/internal_config.h"
-#include "xbt/xbt_os_thread.h"
 #include "xbt/mmalloc.h"
 #include "xbt/ex.h"
 #include "xbt/dynar.h"
-#include "xbt/swag.h"
 
 #include
 #include
@@ -59,7 +58,6 @@
  * need to enlist the free fragments.
  */
 
-//#define SMALLEST_POSSIBLE_MALLOC (sizeof(struct list))
 #define SMALLEST_POSSIBLE_MALLOC (16*sizeof(struct list))
 #define MAX_FRAGMENT_PER_BLOCK (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC)
 
@@ -148,20 +146,17 @@ typedef struct {
   int type; /*  0: busy large block
                >0: busy fragmented (fragments of size 2^type bytes)
                <0: free block */
-  
+
   union {
     /* Heap information for a busy block.  */
     struct {
       size_t nfree;           /* Free fragments in a fragmented block.  */
       ssize_t frag_size[MAX_FRAGMENT_PER_BLOCK];
-      //void *bt[MAX_FRAGMENT_PER_BLOCK][XBT_BACKTRACE_SIZE]; /* Where it was malloced (or realloced lastly) */
       int ignore[MAX_FRAGMENT_PER_BLOCK];
     } busy_frag;
     struct {
       size_t size;      /* Size (in blocks) of a large cluster.  */
       size_t busy_size; /* Actually used space, in bytes */
-      //void *bt[XBT_BACKTRACE_SIZE]; /* Where it was malloced (or realloced lastly) */
-      //int bt_size;
       int ignore;
     } busy_block;
     /* Heap information for a free block (that may be the first of a free cluster). */
@@ -277,13 +272,13 @@ struct mdesc {
 
 /* A default malloc descriptor for the single sbrk() managed region. */
 
-XBT_PUBLIC_DATA( struct mdesc ) *__mmalloc_default_mdp;
+XBT_PUBLIC_DATA struct mdesc* __mmalloc_default_mdp;
 
 /* Remap a mmalloc region that was previously mapped. */
 
-XBT_PUBLIC( void *)__mmalloc_remap_core(xbt_mheap_t mdp);
+XBT_PUBLIC void* __mmalloc_remap_core(xbt_mheap_t mdp);
 
-XBT_PUBLIC( void *)mmorecore(struct mdesc *mdp, ssize_t size);
+XBT_PUBLIC void* mmorecore(struct mdesc* mdp, ssize_t size);
 
 /** Thread-safety (if the mutex is already created)
  *
@@ -291,32 +286,11 @@ XBT_PUBLIC( void *)mmorecore(struct mdesc *mdp, ssize_t size);
  * in a model-checking enabled tree. Without this protection, our malloc
  * implementation will not like multi-threading AT ALL.
  */
-#define LOCK(mdp) pthread_mutex_lock(&mdp->mutex)
-#define UNLOCK(mdp) pthread_mutex_unlock(&mdp->mutex)
-
-static inline void mmalloc_paranoia(struct mdesc *mdp){
-
-  /* nothing to fear for no */
-
-}
-
-static inline int mmalloc_get_increment(malloc_info* heapinfo) {
-  if (heapinfo->type < 0) {
-    return heapinfo->free_block.size;
-  } else if (heapinfo->type == 0) {
-    return heapinfo->busy_block.size;
-  } else {
-    return 1;
-  }
-}
-
-XBT_PRIVATE void mmcheck(xbt_mheap_t heap);
+#define LOCK(mdp) pthread_mutex_lock(&(mdp)->mutex)
+#define UNLOCK(mdp) pthread_mutex_unlock(&(mdp)->mutex)
 
 XBT_PRIVATE int malloc_use_mmalloc(void);
 
-XBT_PRIVATE int mmalloc_exec_using_mm(int argc, const char** argv);
-XBT_PRIVATE void mmalloc_ensure_using_mm(int argc, const char** argv);
-
 XBT_PRIVATE size_t mmalloc_get_bytes_used_remote(size_t heaplimit, const malloc_info* heapinfo);
 
 SG_END_DECL()
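Note on the LOCK/UNLOCK change above: the only functional difference is the extra pair of parentheses around the macro argument before the -> dereference. The following is a minimal sketch of why that matters; the struct mdesc here is a trimmed stand-in that only keeps the mutex field, and the array-based caller is hypothetical, not SimGrid code.

#include <pthread.h>
#include <stdio.h>

/* Trimmed stand-in for struct mdesc: only the mutex matters here. */
struct mdesc {
  pthread_mutex_t mutex;
};

/* Old form: LOCK(heaps + 1) would expand to
 *   pthread_mutex_lock(&heaps + 1->mutex)
 * which does not compile, because -> binds tighter than +,
 * leaving the nonsensical 1->mutex.                                  */
/* #define LOCK(mdp) pthread_mutex_lock(&mdp->mutex) */

/* New form: the argument is parenthesized before ->, so any pointer
 * expression is accepted.                                            */
#define LOCK(mdp) pthread_mutex_lock(&(mdp)->mutex)
#define UNLOCK(mdp) pthread_mutex_unlock(&(mdp)->mutex)

int main(void)
{
  struct mdesc heaps[2] = {{PTHREAD_MUTEX_INITIALIZER}, {PTHREAD_MUTEX_INITIALIZER}};

  LOCK(heaps + 1); /* only compiles with the parenthesized macro */
  puts("second heap locked");
  UNLOCK(heaps + 1);
  return 0;
}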
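The removed mmalloc_get_increment() helper also documents how the type field of malloc_info is meant to be read. Below is a minimal sketch of a heap-info walker built on the same logic, assuming a trimmed malloc_info that keeps only the fields used here; block_increment and count_clusters are hypothetical names, not part of the SimGrid API.

#include <stddef.h>

/* Trimmed stand-in for malloc_info: only the type tag and the two
 * cluster sizes used by the increment computation are kept.          */
typedef struct {
  int type; /*  0: busy large block
               >0: busy fragmented (fragments of size 2^type bytes)
               <0: free block */
  union {
    struct { size_t size; } busy_block; /* size (in blocks) of a large cluster */
    struct { size_t size; } free_block; /* size (in blocks) of a free cluster  */
  };
} malloc_info;

/* Same logic as the removed mmalloc_get_increment(): free and busy
 * large clusters span several heapinfo entries, while a fragmented
 * block always spans exactly one.                                    */
size_t block_increment(const malloc_info* heapinfo)
{
  if (heapinfo->type < 0)
    return heapinfo->free_block.size;  /* free cluster */
  if (heapinfo->type == 0)
    return heapinfo->busy_block.size;  /* busy large cluster */
  return 1;                            /* busy fragmented block */
}

/* Walk a heapinfo table cluster by cluster, e.g. to count how many
 * distinct clusters it currently holds.                               */
size_t count_clusters(const malloc_info* heapinfo, size_t heaplimit)
{
  size_t clusters = 0;
  for (size_t i = 1; i < heaplimit; i += block_increment(&heapinfo[i]))
    clusters++;
  return clusters;
}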