/* Declarations for `mmalloc' and friends. */
-/* Copyright (c) 2010-2014. The SimGrid Team.
- * All rights reserved. */
+/* Copyright (c) 2010-2023. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
/* Copyright 1990, 1991, 1992 Free Software Foundation

   Written May 1989 by Mike Haertel.
   Heavily modified Mar 1992 by Fred Fish. (fnf@cygnus.com) */
-#ifndef __MMPRIVATE_H
-#define __MMPRIVATE_H 1
+#ifndef XBT_MMPRIVATE_H
+#define XBT_MMPRIVATE_H 1
-#include <xbt/misc.h>
+#include "src/internal_config.h"
+#include "src/xbt/mmalloc/mmalloc.h"
+#include "swag.h"
-#include "portable.h"
-#include "xbt/xbt_os_thread.h"
-#include "xbt/mmalloc.h"
-#include "xbt/ex.h"
-#include "xbt/dynar.h"
-#include "xbt/swag.h"
-
-#include <pthread.h>
+#include <limits.h>
#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
-#ifdef HAVE_LIMITS_H
-# include <limits.h>
-#else
-# ifndef CHAR_BIT
-# define CHAR_BIT 8
-# endif
-#endif
+// This macro is very similar to xbt_assert, but with no dependency on XBT
+#define mmalloc_assert(cond, ...) \
+ do { \
+ if (!(cond)) { \
+ fprintf(stderr, __VA_ARGS__); \
+ abort(); \
+ } \
+ } while (0)
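+
+/* Typical use, mirroring xbt_assert (the condition and message here are
+ * illustrative, not taken from the implementation):
+ *   mmalloc_assert(size <= heaplimit, "Request of %zu bytes exceeds the heap\n", size);
+ * When the condition is false, the message goes to stderr and abort() is called. */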
+
+XBT_PRIVATE xbt_mheap_t mmalloc_preinit(void);
#define MMALLOC_MAGIC "mmalloc" /* Mapped file magic number */
#define MMALLOC_MAGIC_SIZE 8 /* Size of magic number buf */
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
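+
+/* BLOCKIFY rounds a byte count up to a whole number of blocks. For instance,
+ * assuming a 4096-byte BLOCKSIZE: BLOCKIFY(1) == 1 and BLOCKIFY(4097) == 2. */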
/* We keep fragment-specific meta-data for introspection purposes, and these
- * information are kept in fixed lenght arrays. Here is the computation of
+ * pieces of information are kept in fixed length arrays. Here is the computation of
* that size.
*
- * Never make SMALLEST_POSSIBLE_MALLOC smaller than sizeof(list) because we
- * need to enlist the free fragments.
+ * Never make SMALLEST_POSSIBLE_MALLOC too small because we need to enlist
+ * the free fragments.
+ *
+ * FIXME: what's the correct size, actually? The value used here is a guess.
*/
-//#define SMALLEST_POSSIBLE_MALLOC (sizeof(struct list))
-#define SMALLEST_POSSIBLE_MALLOC (16*sizeof(struct list))
+#define SMALLEST_POSSIBLE_MALLOC (32 * sizeof(void*))
#define MAX_FRAGMENT_PER_BLOCK (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC)
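+
+/* Worked example, assuming 8-byte pointers and a 4096-byte BLOCKSIZE:
+ * SMALLEST_POSSIBLE_MALLOC = 32 * 8 = 256 bytes, so
+ * MAX_FRAGMENT_PER_BLOCK = 4096 / 256 = 16 fragments per block. */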
/* The difference between two pointers is a signed int, and the sign of the
   result of the modulo operation below is machine dependent for negative
   values, so force the address to be treated as an unsigned int. */
-#define ADDR2UINT(addr) ((uintptr_t) ((char*) (addr) - (char*) NULL))
+#define ADDR2UINT(addr) ((uintptr_t) (addr))
#define RESIDUAL(addr,bsize) ((uintptr_t) (ADDR2UINT (addr) % (bsize)))
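+
+/* Why the cast matters: in C99 the result of % takes the sign of the
+ * dividend, e.g. (-5) % 4 == -1. Going through uintptr_t guarantees that
+ * RESIDUAL(addr, bsize) always lands in [0, bsize). */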
/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit). */
/* Address to block number and vice versa. */
-#define BLOCK(A) (((char*) (A) - (char*) mdp -> heapbase) / BLOCKSIZE + 1)
+#define BLOCK(A) ((size_t)(((char*)(A) - (char*)mdp->heapbase) / BLOCKSIZE + 1))
#define ADDRESS(B) ((void*) (((ADDR2UINT(B)) - 1) * BLOCKSIZE + (char*) mdp -> heapbase))
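+
+/* These two macros are inverses: for any block number B >= 1,
+ * BLOCK(ADDRESS(B)) == B. Numbering starts at 1, so ADDRESS(1) is
+ * mdp->heapbase itself. */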
-SG_BEGIN_DECL()
-
-/* Doubly linked lists of free fragments. */
-struct list {
- struct list *next;
- struct list *prev;
-};
+SG_BEGIN_DECL
/* Statistics available to the user. */
struct mstats
size_t bytes_free; /* Byte total of chunks in the free list. */
};
-typedef struct s_heap_area{
- int valid;
- int block;
- int fragment;
-}s_heap_area_t, *heap_area_t;
-
-typedef struct s_heap_area_pair{
- int block1;
- int fragment1;
- int block2;
- int fragment2;
-}s_heap_area_pair_t, *heap_area_pair_t;
-
#define MMALLOC_TYPE_HEAPINFO (-2)
#define MMALLOC_TYPE_FREE (-1)
#define MMALLOC_TYPE_UNFRAGMENTED 0
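+
+/* Illustration of the encoding: a block carved into 256-byte fragments has
+ * type 8 (fragments of size 2^8), a block that is part of a large allocation
+ * has type MMALLOC_TYPE_UNFRAGMENTED, and an unused block has type
+ * MMALLOC_TYPE_FREE. */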
int type; /* 0: busy large block
>0: busy fragmented (fragments of size 2^type bytes)
<0: free block */
union {
/* Heap information for a busy block. */
struct {
size_t nfree; /* Free fragments in a fragmented block. */
ssize_t frag_size[MAX_FRAGMENT_PER_BLOCK];
- //void *bt[MAX_FRAGMENT_PER_BLOCK][XBT_BACKTRACE_SIZE]; /* Where it was malloced (or realloced lastly) */
int ignore[MAX_FRAGMENT_PER_BLOCK];
} busy_frag;
struct {
size_t size; /* Size (in blocks) of a large cluster. */
size_t busy_size; /* Actually used space, in bytes */
- //void *bt[XBT_BACKTRACE_SIZE]; /* Where it was malloced (or realloced lastly) */
- //int bt_size;
int ignore;
} busy_block;
/* Heap information for a free block (that may be the first of a free cluster). */
* if such a file exists.
* */
struct mdesc {
-
- /** @brief Mutex locking the access to the heap */
- pthread_mutex_t mutex;
-
- /** @brief Number of processes that attached the heap */
- unsigned int refcount;
-
/** @brief Chained lists of mdescs */
struct mdesc *next_mdesc;
/* @brief List of all blocks containing free fragments of a given size.
*
- * The array indice is the log2 of requested size.
+ * The array index is the log2 of requested size.
* Actually only the sizes 8->11 seem to be used, but who cares? */
s_xbt_swag_t fraghead[BLOCKLOG];
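+
+/* Example: a 100-byte request is served from a 256-byte fragment (the
+ * smallest kind, 2^8) listed in fraghead[8], while a 1000-byte request is
+ * rounded up to 1024 = 2^10 and served through fraghead[10]. */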
*/
void *top;
- /** @brief Open file descriptor for the file to which this malloc heap is mapped
- *
- * If this value is negative, MAP_ANONYMOUS memory is used.
- *
- * Also note that it may change each time the region is mapped and unmapped. */
- int fd;
-
/* @brief Instrumentation */
struct mstats heapstats;
-
};
/* Bits to look at in the malloc descriptor flags word */
#define MMALLOC_DEVZERO (1 << 0) /* Have mapped to /dev/zero */
-#define MMALLOC_ANONYMOUS (1 << 1) /* Use anonymous mapping */
-#define MMALLOC_INITIALIZED (1 << 2) /* Initialized mmalloc */
+#define MMALLOC_INITIALIZED (1 << 1) /* Initialized mmalloc */
/* A default malloc descriptor for the single sbrk() managed region. */
-XBT_PUBLIC( struct mdesc ) *__mmalloc_default_mdp;
+XBT_PUBLIC_DATA struct mdesc* __mmalloc_default_mdp;
-/* Remap a mmalloc region that was previously mapped. */
+XBT_PUBLIC void* mmorecore(struct mdesc* mdp, ssize_t size);
-XBT_PUBLIC( void *)__mmalloc_remap_core(xbt_mheap_t mdp);
+XBT_PRIVATE size_t mmalloc_get_bytes_used_remote(size_t heaplimit, const malloc_info* heapinfo);
-XBT_PUBLIC( void *)mmorecore(struct mdesc *mdp, ssize_t size);
-
-/** Thread-safety (if the mutex is already created)
- *
- * This is mandatory in the case where the user runs a parallel simulation
- * in a model-checking enabled tree. Without this protection, our malloc
- * implementation will not like multi-threading AT ALL.
+/* We call dlsym during mmalloc initialization, but dlsym uses malloc.
+ * So during mmalloc initialization, any call to malloc is diverted to a private static buffer.
*/
-#define LOCK(mdp) pthread_mutex_lock(&mdp->mutex)
-#define UNLOCK(mdp) pthread_mutex_unlock(&mdp->mutex)
-
-static XBT_INLINE void mmalloc_paranoia(struct mdesc *mdp){
-
- /* nothing to fear for no */
-
-}
-
-static inline int mmalloc_get_increment(malloc_info* heapinfo) {
- if (heapinfo->type < 0) {
- return heapinfo->free_block.size;
- } else if (heapinfo->type == 0) {
- return heapinfo->busy_block.size;
- } else {
- return 1;
- }
-}
-
-void mmcheck(xbt_mheap_t heap);
-
-int malloc_use_mmalloc(void);
-
-int mmalloc_exec_using_mm(int argc, const char** argv);
-void mmalloc_ensure_using_mm(int argc, const char** argv);
-
-size_t mmalloc_get_bytes_used_remote(size_t heaplimit, const malloc_info* heapinfo);
+extern uint64_t* mmalloc_preinit_buffer;
+#ifdef __FreeBSD__ /* FreeBSD requires more memory; others might too */
+#define mmalloc_preinit_buffer_size 256
+#else /* Valid on: Linux */
+#define mmalloc_preinit_buffer_size 32
+#endif
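+
+/* A minimal sketch of the diversion, assuming a malloc() wrapper along these
+ * lines in the implementation (the `mmalloc_initialized` flag and the exact
+ * bookkeeping are illustrative, and the buffer size is taken to be counted
+ * in uint64_t words):
+ *
+ *   void* malloc(size_t n)
+ *   {
+ *     static size_t used = 0;
+ *     if (!mmalloc_initialized) {               // dlsym() is calling us back
+ *       void* res = &mmalloc_preinit_buffer[used];
+ *       used += (n + sizeof(uint64_t) - 1) / sizeof(uint64_t);
+ *       mmalloc_assert(used <= mmalloc_preinit_buffer_size,
+ *                      "Internal error: the preinit buffer is exhausted\n");
+ *       return res;
+ *     }
+ *     return mmalloc(__mmalloc_default_mdp, n);
+ *   }
+ */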
-SG_END_DECL()
+SG_END_DECL
-#endif /* __MMPRIVATE_H */
+#endif