-/* Initialization for access to a mmap'd malloc managed region.
- Copyright 1992, 2000 Free Software Foundation, Inc.
+/* Initialization for access to a mmap'd malloc managed region. */
+
+/* Copyright (c) 2012-2014. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+/* Copyright 1992, 2000 Free Software Foundation, Inc.
Contributed by Fred Fish at Cygnus Support. fnf@cygnus.com
-This file is part of the GNU C Library.
+ This file is part of the GNU C Library.
-The GNU C Library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Library General Public License as
-published by the Free Software Foundation; either version 2 of the
-License, or (at your option) any later version.
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
-The GNU C Library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Library General Public License for more details.
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
-You should have received a copy of the GNU Library General Public
-License along with the GNU C Library; see the file COPYING.LIB. If
-not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+#include "src/internal_config.h"
#include <sys/types.h>
#include <fcntl.h> /* After sys/types.h, at least for dpx/2. */
#include <sys/stat.h>
#include <string.h>
-#ifdef HAVE_UNISTD_H
+#if HAVE_UNISTD_H
#include <unistd.h> /* Prototypes for lseek */
#endif
#include "mmprivate.h"
#include "xbt/ex.h"
-#include "xbt_modinter.h" /* declarations of mmalloc_preinit and friends that live here */
+#include "src/xbt_modinter.h" /* declarations of mmalloc_preinit and friends that live here */
#ifndef SEEK_SET
#define SEEK_SET 0
On failure returns NULL. */
xbt_mheap_t xbt_mheap_new(int fd, void *baseaddr)
+{
+  /* Backward-compatible constructor: equivalent to xbt_mheap_new_options()
+   * with no option flags set. */
+  return xbt_mheap_new_options(fd, baseaddr, 0);
+}
+
+xbt_mheap_t xbt_mheap_new_options(int fd, void *baseaddr, int options)
{
struct mdesc mtemp;
xbt_mheap_t mdp;
else if (sbuf.st_size > 0) {
/* We were given an valid file descriptor on an open file, so try to remap
- it into the current process at the same address to which it was previously
- mapped. It naturally have to pass some sanity checks for that.
+ it into the current process at the same address to which it was previously
+     mapped. It naturally has to pass some sanity checks for that.
- Note that we have to update the file descriptor number in the malloc-
- descriptor read from the file to match the current valid one, before
- trying to map the file in, and again after a successful mapping and
- after we've switched over to using the mapped in malloc descriptor
- rather than the temporary one on the stack.
+ Note that we have to update the file descriptor number in the malloc-
+ descriptor read from the file to match the current valid one, before
+ trying to map the file in, and again after a successful mapping and
+ after we've switched over to using the mapped in malloc descriptor
+ rather than the temporary one on the stack.
- Once we've switched over to using the mapped in malloc descriptor, we
- have to update the pointer to the morecore function, since it almost
- certainly will be at a different address if the process reusing the
- mapped region is from a different executable.
+ Once we've switched over to using the mapped in malloc descriptor, we
+ have to update the pointer to the morecore function, since it almost
+ certainly will be at a different address if the process reusing the
+ mapped region is from a different executable.
- Also note that if the heap being remapped previously used the mmcheckf()
- routines, we need to update the hooks since their target functions
- will have certainly moved if the executable has changed in any way.
- We do this by calling mmcheckf() internally.
+ Also note that if the heap being remapped previously used the mmcheckf()
+ routines, we need to update the hooks since their target functions
+ will have certainly moved if the executable has changed in any way.
+ We do this by calling mmcheckf() internally.
- Returns a pointer to the malloc descriptor if successful, or NULL if
- unsuccessful for some reason. */
+ Returns a pointer to the malloc descriptor if successful, or NULL if
+ unsuccessful for some reason. */
struct mdesc newmd;
struct mdesc *mdptr = NULL, *mdptemp = NULL;
if (lseek(fd, 0L, SEEK_SET) != 0)
- return NULL;
+ return NULL;
if (read(fd, (char *) &newmd, sizeof(newmd)) != sizeof(newmd))
- return NULL;
+ return NULL;
if (newmd.headersize != sizeof(newmd))
- return NULL;
+ return NULL;
if (strcmp(newmd.magic, MMALLOC_MAGIC) != 0)
- return NULL;
+ return NULL;
if (newmd.version > MMALLOC_VERSION)
- return NULL;
+ return NULL;
newmd.fd = fd;
if (__mmalloc_remap_core(&newmd) == newmd.base) {
- mdptr = (struct mdesc *) newmd.base;
- mdptr->fd = fd;
- if(!mdptr->refcount){
- sem_init(&mdptr->sem, 0, 1);
- mdptr->refcount++;
- }
+ mdptr = (struct mdesc *) newmd.base;
+ mdptr->fd = fd;
+ if(!mdptr->refcount){
+ pthread_mutex_init(&mdptr->mutex, NULL);
+ mdptr->refcount++;
+ }
}
/* Add the new heap to the linked list of heaps attached by mmalloc */
mdptemp = __mmalloc_default_mdp;
while(mdptemp->next_mdesc)
- mdptemp = mdptemp->next_mdesc;
+ mdptemp = mdptemp->next_mdesc;
LOCK(mdptemp);
mdptemp->next_mdesc = mdptr;
mdp->base = mdp->breakval = mdp->top = baseaddr;
mdp->next_mdesc = NULL;
mdp->refcount = 1;
+ mdp->options = options;
/* If we have not been passed a valid open file descriptor for the file
to map to, then we go for an anonymous map */
if (mdp->fd < 0){
mdp->flags |= MMALLOC_ANONYMOUS;
}
- sem_init(&mdp->sem, 0, 1);
-
+ pthread_mutex_init(&mdp->mutex, NULL);
/* If we have not been passed a valid open file descriptor for the file
to map to, then open /dev/zero and use that to map to. */
mdp = mdp->next_mdesc;
LOCK(mdp);
- mdp->next_mdesc = (struct mdesc *)mbase;
+ mdp->next_mdesc = (struct mdesc *)mbase;
UNLOCK(mdp);
}
struct mdesc *mdp = md;
if(--mdp->refcount == 0){
- LOCK(mdp) ;
- sem_destroy(&mdp->sem);
+ pthread_mutex_destroy(&mdp->mutex);
}
}
/** Terminate access to a mmalloc managed region by unmapping all memory pages
- associated with the region, and closing the file descriptor if it is one
- that we opened.
+ associated with the region, and closing the file descriptor if it is one
+ that we opened.
- Returns NULL on success.
+ Returns NULL on success.
- Returns the malloc descriptor on failure, which can subsequently be used
- for further action, such as obtaining more information about the nature of
- the failure.
+ Returns the malloc descriptor on failure, which can subsequently be used
+ for further action, such as obtaining more information about the nature of
+ the failure.
- Note that the malloc descriptor that we are using is currently located in
- region we are about to unmap, so we first make a local copy of it on the
- stack and use the copy. */
+ Note that the malloc descriptor that we are using is currently located in
+ region we are about to unmap, so we first make a local copy of it on the
+ stack and use the copy. */
void *xbt_mheap_destroy(xbt_mheap_t mdp)
{
}
}
-
-
/* Initialize the default malloc descriptor. */
void *mmalloc_preinit(void)
{
int res;
if (__mmalloc_default_mdp == NULL) {
- unsigned long mask = ~((unsigned long)getpagesize() - 1);
+ if(!xbt_pagesize)
+ xbt_pagesize = getpagesize();
+ unsigned long mask = ~((unsigned long)xbt_pagesize - 1);
void *addr = (void*)(((unsigned long)sbrk(0) + HEAP_OFFSET) & mask);
- __mmalloc_default_mdp = xbt_mheap_new(-1, addr);
+ __mmalloc_default_mdp = xbt_mheap_new_options(-1, addr, XBT_MHEAP_OPTION_MEMSET);
/* Fixme? only the default mdp in protected against forks */
- res = xbt_os_thread_atfork(mmalloc_fork_prepare,
- mmalloc_fork_parent, mmalloc_fork_child);
+ // This is mandated to protect the mmalloced areas through forks. Think of tesh.
+ // Nah, removing the mutex isn't a good idea either for tesh
+ res = xbt_os_thread_atfork(mmalloc_fork_prepare,
+ mmalloc_fork_parent, mmalloc_fork_child);
if (res != 0)
THROWF(system_error,0,"xbt_os_thread_atfork() failed: return value %d",res);
}
void mmalloc_postexit(void)
{
+  /* Teardown hook paired with mmalloc_preinit(): intentionally a no-op, for
+   * the reason given in the comment below. */
-  /* Do not detach the default mdp or ldl won't be able to free the memory it allocated since we're in memory */
-  // mmalloc_detach(__mmalloc_default_mdp);
-  xbt_mheap_destroy_no_free(__mmalloc_default_mdp);
+  /* Do not destroy the default mdp or ldl won't be able to free the memory it
+   * allocated since we're in memory */
+  // xbt_mheap_destroy_no_free(__mmalloc_default_mdp);
+}
+
+// This is the underlying implementation of mmalloc_get_bytes_used.
+// It is used directly in order to evaluate the bytes used from a different
+// process.
+size_t mmalloc_get_bytes_used_remote(size_t heaplimit, const malloc_info* heapinfo)
+{
+  /* Sum the busy byte counts recorded in the HEAPINFO table (heaplimit
+   * entries). The accumulator is size_t, matching the return type: an int
+   * accumulator could overflow (or sign-convert) on heaps larger than 2GB. */
+  size_t bytes = 0;
+  for (size_t i=0; i < heaplimit; ++i){
+    if (heapinfo[i].type == MMALLOC_TYPE_UNFRAGMENTED){
+      /* Unfragmented block: one allocation, busy_size is its byte count. */
+      if (heapinfo[i].busy_block.busy_size > 0)
+        bytes += heapinfo[i].busy_block.busy_size;
+    } else if (heapinfo[i].type > 0) {
+      /* Fragmented block: it holds (BLOCKSIZE >> type) fragment slots; add
+       * the size of every busy fragment. */
+      for (size_t j=0; j < (size_t) (BLOCKSIZE >> heapinfo[i].type); j++){
+        if(heapinfo[i].busy_frag.frag_size[j] > 0)
+          bytes += heapinfo[i].busy_frag.frag_size[j];
+      }
+    }
+  }
+  return bytes;
+}
+
+/* Convenience wrapper for the local process: read heaplimit/heapinfo out of
+ * HEAP's own descriptor and delegate to the table-walking implementation. */
+size_t mmalloc_get_bytes_used(const xbt_mheap_t heap){
+  const struct mdesc* mdp = (const struct mdesc *) heap;
+  return mmalloc_get_bytes_used_remote(mdp->heaplimit, mdp->heapinfo);
+}
+
+/* Return the busy size recorded for the allocation containing PTR in HEAP,
+ * or -1 when the enclosing block descriptor is not a busy one. */
+ssize_t mmalloc_get_busy_size(xbt_mheap_t heap, void *ptr){
+  /* Index of the block holding ptr (heapinfo is 1-based from heapbase). */
+  ssize_t blk = ((char*)ptr - (char*)(heap->heapbase)) / BLOCKSIZE + 1;
+  if (heap->heapinfo[blk].type < 0)
+    return -1;
+  if (heap->heapinfo[blk].type == MMALLOC_TYPE_UNFRAGMENTED)
+    return heap->heapinfo[blk].busy_block.busy_size;
+  /* Fragmented block: the offset inside the block, shifted by type, selects
+   * the fragment slot. */
+  ssize_t frag = ((uintptr_t) (ADDR2UINT (ptr) % (BLOCKSIZE))) >> heap->heapinfo[blk].type;
+  return heap->heapinfo[blk].busy_frag.frag_size[frag];
+}
+
+/* Consistency check over HEAP's block descriptors; xbt_die()s on the first
+ * inconsistency found.
+ * NOTE: the whole check is currently disabled by the early return below; it
+ * is kept compiled so it can be re-enabled for debugging. */
+void mmcheck(xbt_mheap_t heap) {
+  return; /* Checks intentionally disabled; delete this line to re-enable. */
+  if (!heap->heapinfo)
+    return;
+  malloc_info* heapinfo = NULL;
+  for (size_t i=1; i < heap->heaplimit; i += mmalloc_get_increment(heapinfo)) {
+    heapinfo = heap->heapinfo + i;
+    switch (heapinfo->type) {
+    case MMALLOC_TYPE_HEAPINFO:
+    case MMALLOC_TYPE_FREE:
+      if (heapinfo->free_block.size==0) {
+        xbt_die("Block size == 0");
+      }
+      break;
+    case MMALLOC_TYPE_UNFRAGMENTED:
+      if (heapinfo->busy_block.size==0) {
+        xbt_die("Block size == 0");
+      }
+      if (heapinfo->busy_block.busy_size==0 && heapinfo->busy_block.size!=0) {
+        xbt_die("Empty busy block");
+      }
+      break;
+    default:
+      /* Positive types are fragmented blocks (valid); only negative unknown
+       * values are fatal. */
+      if (heapinfo->type<0) {
+        xbt_die("Unknown mmalloc block type.");
+      }
+    }
+  }
+}