* Correctly initialize and destroy the mdp mutex.
* Simplify locking logic by pushing it up in {m,c,re}alloc/free
in "mm_legacy.c".
* Lock the default_mdp mutex around fork calls
(introduce xbt_os_thread_atfork for this purpose).
* Increase HEAP_OFFSET to avoid strange errors with valgrind
(got unexplained memory corruption in tesh with several threads).
git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/simgrid/simgrid/trunk@8506 48e7efb5-ca39-0410-a469-dd3cf9ba447f
/** \brief Thread data type (opaque structure) */
typedef struct xbt_os_thread_ *xbt_os_thread_t;
/** \brief Thread data type (opaque structure) */
typedef struct xbt_os_thread_ *xbt_os_thread_t;
+/* Calls pthread_atfork() when the pthread implementation is in use; the
+ * other implementations have no fork() to guard against, so their stubs do
+ * nothing and return 0.  Returns 0 on success, or an error code from
+ * pthread_atfork() on failure.
+ * The only known user of this wrapper is mmalloc_preinit().
+ */
+XBT_PUBLIC(int) xbt_os_thread_atfork(void (*prepare)(void),
+ void (*parent)(void),
+ void (*child)(void));
+
XBT_PUBLIC(xbt_os_thread_t) xbt_os_thread_create(const char *name,
pvoid_f_pvoid_t
start_routine,
XBT_PUBLIC(xbt_os_thread_t) xbt_os_thread_create(const char *name,
pvoid_f_pvoid_t
start_routine,
if ((mbase = mdp->morecore(mdp, sizeof(mtemp))) != NULL) {
memcpy(mbase, mdp, sizeof(mtemp));
if ((mbase = mdp->morecore(mdp, sizeof(mtemp))) != NULL) {
memcpy(mbase, mdp, sizeof(mtemp));
- // mdp = (struct mdesc *) mbase;
+ mdp = (struct mdesc *) mbase;
{ /* create the mutex within that heap */
void *old_heap = mmalloc_get_current_heap();
{ /* create the mutex within that heap */
void *old_heap = mmalloc_get_current_heap();
region we are about to unmap, so we first make a local copy of it on the
stack and use the copy. */
region we are about to unmap, so we first make a local copy of it on the
stack and use the copy. */
+/* Destroy the mutex of the given malloc descriptor.  Called by
+ * mmalloc_detach() before the heap region is unmapped, and also directly on
+ * the default mdp, which is never fully detached (see the finalization
+ * code: detaching it would break dl's own deallocations). */
+void mmalloc_pre_detach(void *md)
+{
+ struct mdesc *mdp = md;
+ xbt_os_mutex_t mutex = mdp->mutex;
+ /* Reset the pointer before destroying, so that LOCK/UNLOCK (which test
+  * mdp->mutex for NULL) become no-ops from now on. */
+ mdp->mutex = NULL;
+ xbt_os_mutex_destroy(mutex);
+}
+
+
void *mmalloc_detach(void *md)
{
struct mdesc mtemp;
if (md != NULL) {
void *mmalloc_detach(void *md)
{
struct mdesc mtemp;
if (md != NULL) {
+ mmalloc_pre_detach(md);
mtemp = *(struct mdesc *) md;
mtemp = *(struct mdesc *) md;
- xbt_os_mutex_destroy(((struct mdesc *) md)->mutex);
/* Now unmap all the pages associated with this region by asking for a
negative increment equal to the current size of the region. */
/* Now unmap all the pages associated with this region by asking for a
negative increment equal to the current size of the region. */
if (ptr != NULL) {
mdp = MD_TO_MDP(md);
if (ptr != NULL) {
mdp = MD_TO_MDP(md);
for (l = mdp->aligned_blocks; l != NULL; l = l->next) {
if (l->aligned == ptr) {
l->aligned = NULL; /* Mark the slot in the list as free. */
for (l = mdp->aligned_blocks; l != NULL; l = l->next) {
if (l->aligned == ptr) {
l->aligned = NULL; /* Mark the slot in the list as free. */
} else {
__mmalloc_free(mdp, ptr);
}
} else {
__mmalloc_free(mdp, ptr);
}
__mmalloc_current_heap = new_heap;
}
__mmalloc_current_heap = new_heap;
}
-#ifdef MMALLOC_WANT_OVERIDE_LEGACY
+/* Keep the override of the libc malloc/free behind its build flag.
+ * NOTE(review): the hunk originally changed this to "#if 1//def ...", which
+ * forces the override on unconditionally — that looks like a debug leftover
+ * and is restored here; confirm whether the unconditional override was
+ * actually intended. */
+#ifdef MMALLOC_WANT_OVERIDE_LEGACY
void *malloc(size_t n)
{
#ifdef HAVE_MMAP
if (!__mmalloc_current_heap)
mmalloc_preinit();
#endif
void *malloc(size_t n)
{
#ifdef HAVE_MMAP
if (!__mmalloc_current_heap)
mmalloc_preinit();
#endif
+ LOCK(__mmalloc_current_heap);
void *ret = mmalloc(__mmalloc_current_heap, n);
void *ret = mmalloc(__mmalloc_current_heap, n);
+ UNLOCK(__mmalloc_current_heap);
if (!__mmalloc_current_heap)
mmalloc_preinit();
#endif
if (!__mmalloc_current_heap)
mmalloc_preinit();
#endif
+ LOCK(__mmalloc_current_heap);
void *ret = mmalloc(__mmalloc_current_heap, total_size);
void *ret = mmalloc(__mmalloc_current_heap, total_size);
+ UNLOCK(__mmalloc_current_heap);
/* Fill the allocated memory with zeroes to mimic calloc behaviour */
memset(ret, '\0', total_size);
/* Fill the allocated memory with zeroes to mimic calloc behaviour */
memset(ret, '\0', total_size);
if (!__mmalloc_current_heap)
mmalloc_preinit();
#endif
if (!__mmalloc_current_heap)
mmalloc_preinit();
#endif
+ LOCK(__mmalloc_current_heap);
if (s) {
if (p)
ret = mrealloc(__mmalloc_current_heap, p, s);
if (s) {
if (p)
ret = mrealloc(__mmalloc_current_heap, p, s);
if (p)
mfree(__mmalloc_current_heap, p);
}
if (p)
mfree(__mmalloc_current_heap, p);
}
+ UNLOCK(__mmalloc_current_heap);
return ret;
}
void free(void *p)
{
return ret;
}
void free(void *p)
{
- return mfree(__mmalloc_current_heap, p);
+ LOCK(__mmalloc_current_heap);
+ mfree(__mmalloc_current_heap, p);
+ UNLOCK(__mmalloc_current_heap);
}
#endif
/* Make sure it works with md==NULL */
}
#endif
/* Make sure it works with md==NULL */
-#define HEAP_OFFSET 40960000 /* Safety gap from the heap's break address */
+#define HEAP_OFFSET (128<<20) /* Safety gap from the heap's break address.
+ * Try to increase this first if you experience
+ * strange errors under valgrind. */
void *mmalloc_get_default_md(void)
{
void *mmalloc_get_default_md(void)
{
return __mmalloc_default_mdp;
}
return __mmalloc_default_mdp;
}
+/* atfork prepare handler: take the default mdp's lock before fork(), so the
+ * allocator mutex is in a known (held-by-us) state when the process forks.
+ * Registered by mmalloc_preinit() via xbt_os_thread_atfork(). */
+static void mmalloc_fork_prepare(void)
+{
+ if (__mmalloc_default_mdp)
+ LOCK(__mmalloc_default_mdp);
+}
+
+/* atfork parent/child handler: release the lock taken by
+ * mmalloc_fork_prepare() once fork() has completed (used for both the
+ * parent and the child side). */
+static void mmalloc_fork_finish(void)
+{
+ if (__mmalloc_default_mdp)
+ UNLOCK(__mmalloc_default_mdp);
+}
+
/* Initialize the default malloc descriptor. */
void mmalloc_preinit(void)
{
/* Initialize the default malloc descriptor. */
void mmalloc_preinit(void)
{
- if (!__mmalloc_default_mdp)
+ if (!__mmalloc_default_mdp) {
__mmalloc_default_mdp =
mmalloc_attach(-1, (char *) sbrk(0) + HEAP_OFFSET);
__mmalloc_default_mdp =
mmalloc_attach(-1, (char *) sbrk(0) + HEAP_OFFSET);
+    /* FIXME: only the default mdp is protected against forks */
+ if (xbt_os_thread_atfork(mmalloc_fork_prepare,
+ mmalloc_fork_finish, mmalloc_fork_finish) != 0)
+ abort();
+ }
xbt_assert(__mmalloc_default_mdp != NULL);
}
xbt_assert(__mmalloc_default_mdp != NULL);
}
{
/* Do not detach the default mdp or ldl won't be able to free the memory it allocated since we're in memory */
// mmalloc_detach(__mmalloc_default_mdp);
{
/* Do not detach the default mdp or ldl won't be able to free the memory it allocated since we're in memory */
// mmalloc_detach(__mmalloc_default_mdp);
+ mmalloc_pre_detach(__mmalloc_default_mdp);
size = 1;
mdp = MD_TO_MDP(md);
size = 1;
mdp = MD_TO_MDP(md);
// printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);
// printf("(%s) Mallocing %d bytes on %p (default: %p)...",xbt_thread_self_name(),size,mdp,__mmalloc_default_mdp);fflush(stdout);
if (mdp->mmalloc_hook != NULL) {
if (mdp->mmalloc_hook != NULL) {
- void *res = ((*mdp->mmalloc_hook) (md, size));
- UNLOCK(mdp);
- return res;
+ return (*mdp->mmalloc_hook) (md, size);
}
if (!(mdp->flags & MMALLOC_INITIALIZED)) {
if (!initialize(mdp)) {
}
if (!(mdp->flags & MMALLOC_INITIALIZED)) {
if (!initialize(mdp)) {
} else {
/* No free fragments of the desired size, so get a new block
and break it into fragments, returning the first. */
} else {
/* No free fragments of the desired size, so get a new block
and break it into fragments, returning the first. */
//printf("(%s) No free fragment...",xbt_thread_self_name());
result = mmalloc(md, BLOCKSIZE);
//printf("(%s) Fragment: %p...",xbt_thread_self_name(),result);
//printf("(%s) No free fragment...",xbt_thread_self_name());
result = mmalloc(md, BLOCKSIZE);
//printf("(%s) Fragment: %p...",xbt_thread_self_name(),result);
}
result = morecore(mdp, blocks * BLOCKSIZE);
if (result == NULL) {
}
result = morecore(mdp, blocks * BLOCKSIZE);
if (result == NULL) {
return (NULL);
}
block = BLOCK(result);
return (NULL);
}
block = BLOCK(result);
mdp->heapinfo[block].busy.info.size = blocks;
mdp->heapstats.chunks_used++;
mdp->heapstats.bytes_used += blocks * BLOCKSIZE;
mdp->heapinfo[block].busy.info.size = blocks;
mdp->heapstats.chunks_used++;
mdp->heapstats.bytes_used += blocks * BLOCKSIZE;
mdp->heapstats.bytes_free -= blocks * BLOCKSIZE;
}
//printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);
mdp->heapstats.bytes_free -= blocks * BLOCKSIZE;
}
//printf("(%s) Done mallocing. Result is %p\n",xbt_thread_self_name(),result);fflush(stdout);
#define ADDRESS(B) ((void*) (((ADDR2UINT(B)) - 1) * BLOCKSIZE + (char*) mdp -> heapbase))
#define ADDRESS(B) ((void*) (((ADDR2UINT(B)) - 1) * BLOCKSIZE + (char*) mdp -> heapbase))
-/* Thread-safety (if the mutex is already created)*/
-#define LOCK(mdp) if (mdp->mutex) xbt_os_mutex_acquire(mdp->mutex)
-#define UNLOCK(mdp) if (mdp->mutex) xbt_os_mutex_release(mdp->mutex)
const char *xbt_thread_self_name(void);
/* Data structure giving per-block information. */
const char *xbt_thread_self_name(void);
/* Data structure giving per-block information. */
? __mmalloc_default_mdp \
: (struct mdesc *) (md))
? __mmalloc_default_mdp \
: (struct mdesc *) (md))
+/* Thread-safety helpers: acquire/release the mutex of a malloc descriptor.
+ * The argument goes through MD_TO_MDP() first (presumably mapping NULL to
+ * the default mdp — the condition of MD_TO_MDP is outside this hunk).
+ * Both are no-ops while the mutex does not exist: either not created yet
+ * (bootstrap) or already gone (mmalloc_pre_detach() reset it to NULL). */
+#define LOCK(md) \
+ do { \
+ struct mdesc *lock_local_mdp = MD_TO_MDP(md); \
+ if (lock_local_mdp->mutex) \
+ xbt_os_mutex_acquire(lock_local_mdp->mutex); \
+ } while (0)
+
+#define UNLOCK(md) \
+ do { \
+ struct mdesc *unlock_local_mdp = MD_TO_MDP(md); \
+ if (unlock_local_mdp->mutex) \
+ xbt_os_mutex_release(unlock_local_mdp->mutex); \
+ } while (0)
+
+
#endif /* __MMPRIVATE_H */
#endif /* __MMPRIVATE_H */
int type;
size_t block, blocks, oldlimit;
int type;
size_t block, blocks, oldlimit;
if (size == 0) {
mfree(md, ptr);
return (mmalloc(md, 0));
if (size == 0) {
mfree(md, ptr);
return (mmalloc(md, 0));
if (mdp->mrealloc_hook != NULL) {
if (mdp->mrealloc_hook != NULL) {
return ((*mdp->mrealloc_hook) (md, ptr, size));
}
return ((*mdp->mrealloc_hook) (md, ptr, size));
}
case 0:
/* Maybe reallocate a large block to a small fragment. */
if (size <= BLOCKSIZE / 2) {
case 0:
/* Maybe reallocate a large block to a small fragment. */
if (size <= BLOCKSIZE / 2) {
//printf("(%s) alloc large block...",xbt_thread_self_name());
result = mmalloc(md, size);
if (result != NULL) {
//printf("(%s) alloc large block...",xbt_thread_self_name());
result = mmalloc(md, size);
if (result != NULL) {
/* The new size is a large allocation as well;
see if we can hold it in place. */
/* The new size is a large allocation as well;
see if we can hold it in place. */
blocks = BLOCKIFY(size);
if (blocks < mdp->heapinfo[block].busy.info.size) {
/* The new size is smaller; return excess memory to the free list. */
blocks = BLOCKIFY(size);
if (blocks < mdp->heapinfo[block].busy.info.size) {
/* The new size is smaller; return excess memory to the free list. */
mdp->heaplimit = 0;
mfree(md, ptr);
mdp->heaplimit = oldlimit;
mdp->heaplimit = 0;
mfree(md, ptr);
mdp->heaplimit = oldlimit;
result = mmalloc(md, size);
if (result == NULL) {
mmalloc(md, blocks * BLOCKSIZE);
result = mmalloc(md, size);
if (result == NULL) {
mmalloc(md, blocks * BLOCKSIZE);
}
if (ptr != result)
memmove(result, ptr, blocks * BLOCKSIZE);
}
if (ptr != result)
memmove(result, ptr, blocks * BLOCKSIZE);
and copy the lesser of the new size and the old. */
//printf("(%s) new size is different...",xbt_thread_self_name());
and copy the lesser of the new size and the old. */
//printf("(%s) new size is different...",xbt_thread_self_name());
result = mmalloc(md, size);
if (result == NULL)
return (NULL);
result = mmalloc(md, size);
if (result == NULL)
return (NULL);
__xbt_ex_terminate = &__xbt_ex_terminate_default;
}
__xbt_ex_terminate = &__xbt_ex_terminate_default;
}
+/* pthread implementation: thin wrapper forwarding the three handlers to
+ * pthread_atfork().  Returns its result directly (0 on success). */
+int xbt_os_thread_atfork(void (*prepare)(void),
+ void (*parent)(void), void (*child)(void))
+{
+ return pthread_atfork(prepare, parent, child);
+}
+
static void *wrapper_start_routine(void *s)
{
xbt_os_thread_t t = s;
static void *wrapper_start_routine(void *s)
{
xbt_os_thread_t t = s;
"TlsFree() failed to cleanup the thread submodule");
}
"TlsFree() failed to cleanup the thread submodule");
}
+/* Native Windows thread implementation: there is no fork() here, so atfork
+ * handlers are never needed; ignore the arguments and report success. */
+int xbt_os_thread_atfork(void (*prepare)(void),
+ void (*parent)(void), void (*child)(void))
+{
+ return 0;
+}
+
static DWORD WINAPI wrapper_start_routine(void *s)
{
xbt_os_thread_t t = (xbt_os_thread_t) s;
static DWORD WINAPI wrapper_start_routine(void *s)
{
xbt_os_thread_t t = (xbt_os_thread_t) s;
* The decision (and the loading) is made in xbt/context.c.
*/
* The decision (and the loading) is made in xbt/context.c.
*/
+/* Stub for builds without pthread_atfork() support: does nothing and
+ * reports success, so callers need not special-case this configuration. */
+int xbt_os_thread_atfork(void (*prepare)(void),
+ void (*parent)(void), void (*child)(void))
+{
+ return 0;
+}
+
/* Mod_init/exit mecanism */
void xbt_os_thread_mod_preinit(void)
{
/* Mod_init/exit mecanism */
void xbt_os_thread_mod_preinit(void)
{