/* Declarations for `mmalloc' and friends. */
/* Copyright (c) 2010-2019. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "swag.h"
#include "src/internal_config.h"
-#include "xbt/xbt_os_thread.h"
#include "xbt/mmalloc.h"
#include "xbt/ex.h"
#include "xbt/dynar.h"
#include <pthread.h>
#include <stdint.h>
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
#ifdef HAVE_LIMITS_H
# include <limits.h>
#else
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
/* We keep fragment-specific meta-data for introspection purposes, and this
 * information is kept in fixed-length arrays. Here is the computation of
* that size.
*
* Never make SMALLEST_POSSIBLE_MALLOC smaller than sizeof(list) because we
* need to enlist the free fragments.
*/
-//#define SMALLEST_POSSIBLE_MALLOC (sizeof(struct list))
#define SMALLEST_POSSIBLE_MALLOC (16*sizeof(struct list))
#define MAX_FRAGMENT_PER_BLOCK (BLOCKSIZE / SMALLEST_POSSIBLE_MALLOC)
/* Meta-data kept for a block that is split into same-sized fragments.
 * (Resolved leftover diff marker: the commented-out backtrace array slated
 * for removal is dropped.) */
struct {
  size_t nfree;                              /* Free fragments in a fragmented block. */
  ssize_t frag_size[MAX_FRAGMENT_PER_BLOCK]; /* One slot per fragment; per-fragment size bookkeeping
                                              * — presumably the requested size, TODO confirm in mmalloc.c. */
  int ignore[MAX_FRAGMENT_PER_BLOCK];        /* Per-fragment "ignore" flag used by introspection/MC
                                              * — NOTE(review): confirm exact semantics against callers. */
} busy_frag;
/* Meta-data kept for a large block (one or more whole blocks in a cluster).
 * (Resolved leftover diff markers: the commented-out backtrace fields slated
 * for removal are dropped.) */
struct {
  size_t size;      /* Size (in blocks) of a large cluster. */
  size_t busy_size; /* Actually used space, in bytes. */
  int ignore;       /* "Ignore" flag used by introspection/MC
                     * — NOTE(review): confirm exact semantics against callers. */
} busy_block;
/* Heap information for a free block (that may be the first of a free cluster). */
/* @brief List of all blocks containing free fragments of a given size.
*
 * The array index is the log2 of the requested size.
* Actually only the sizes 8->11 seem to be used, but who cares? */
s_xbt_swag_t fraghead[BLOCKLOG];
* in a model-checking enabled tree. Without this protection, our malloc
* implementation will not like multi-threading AT ALL.
*/
/* Lock/unlock the mutex embedded in the malloc descriptor.
 * The macro argument is fully parenthesized so that LOCK/UNLOCK stay correct
 * when called with an expression (e.g. LOCK(get_mdp())) rather than a plain
 * identifier. (Resolved leftover diff markers: kept the parenthesized
 * versions, dropped the superseded unparenthesized definitions.) */
#define LOCK(mdp) pthread_mutex_lock(&(mdp)->mutex)
#define UNLOCK(mdp) pthread_mutex_unlock(&(mdp)->mutex)
XBT_PRIVATE int malloc_use_mmalloc(void);