/* Initialization for access to a mmap'd malloc managed region. */
3 /* Copyright (c) 2012-2014. The SimGrid Team.
4 * All rights reserved. */
6 /* This program is free software; you can redistribute it and/or modify it
7 * under the terms of the license (GNU LGPL) which comes with this package. */
9 /* Copyright 1992, 2000 Free Software Foundation, Inc.
11 Contributed by Fred Fish at Cygnus Support. fnf@cygnus.com
13 This file is part of the GNU C Library.
15 The GNU C Library is free software; you can redistribute it and/or
16 modify it under the terms of the GNU Library General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
20 The GNU C Library is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 Library General Public License for more details.
25 You should have received a copy of the GNU Library General Public
26 License along with the GNU C Library; see the file COPYING.LIB. If
27 not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
28 Boston, MA 02111-1307, USA. */
30 #include "src/internal_config.h"
31 #include <sys/types.h>
32 #include <fcntl.h> /* After sys/types.h, at least for dpx/2. */
36 #include <unistd.h> /* Prototypes for lseek */
38 #include "mmprivate.h"
40 #include "src/xbt_modinter.h" /* declarations of mmalloc_preinit and friends that live here */
46 /* Initialize access to a mmalloc managed region.
48 If FD is a valid file descriptor for an open file then data for the
49 mmalloc managed region is mapped to that file, otherwise an anonymous
50 map is used if supported by the underlying OS. In case of running in
51 an OS without support of anonymous mappings then "/dev/zero" is used
52 and in both cases the data will not exist in any filesystem object.
54 If the open file corresponding to FD is from a previous use of
55 mmalloc and passes some basic sanity checks to ensure that it is
56 compatible with the current mmalloc package, then its data is
57 mapped in and is immediately accessible at the same addresses in
58 the current process as the process that created the file (ignoring
59 the BASEADDR parameter).
61 For non valid FDs or empty files ones the mapping is established
62 starting at the specified address BASEADDR in the process address
65 The provided BASEADDR should be choosed carefully in order to avoid
66 bumping into existing mapped regions or future mapped regions.
68 On success, returns a "malloc descriptor" which is used in subsequent
69 calls to other mmalloc package functions. It is explicitly "void *"
70 so that users of the package don't have to worry about the actual
71 implementation details.
73 On failure returns NULL. */
75 xbt_mheap_t xbt_mheap_new(int fd, void *baseaddr)
77 return xbt_mheap_new_options(fd, baseaddr, 0);
80 xbt_mheap_t xbt_mheap_new_options(int fd, void *baseaddr, int options)
87 /* First check to see if FD is a valid file descriptor, and if so, see
88 if the file has any current contents (size > 0). If it does, then
89 attempt to reuse the file. If we can't reuse the file, either
90 because it isn't a valid mmalloc produced file, was produced by an
91 obsolete version, or any other reason, then we fail to attach to
95 if (fstat(fd, &sbuf) < 0)
98 else if (sbuf.st_size > 0) {
99 /* We were given an valid file descriptor on an open file, so try to remap
100 it into the current process at the same address to which it was previously
101 mapped. It naturally have to pass some sanity checks for that.
103 Note that we have to update the file descriptor number in the malloc-
104 descriptor read from the file to match the current valid one, before
105 trying to map the file in, and again after a successful mapping and
106 after we've switched over to using the mapped in malloc descriptor
107 rather than the temporary one on the stack.
109 Once we've switched over to using the mapped in malloc descriptor, we
110 have to update the pointer to the morecore function, since it almost
111 certainly will be at a different address if the process reusing the
112 mapped region is from a different executable.
114 Also note that if the heap being remapped previously used the mmcheckf()
115 routines, we need to update the hooks since their target functions
116 will have certainly moved if the executable has changed in any way.
117 We do this by calling mmcheckf() internally.
119 Returns a pointer to the malloc descriptor if successful, or NULL if
120 unsuccessful for some reason. */
123 struct mdesc *mdptr = NULL, *mdptemp = NULL;
125 if (lseek(fd, 0L, SEEK_SET) != 0)
127 if (read(fd, (char *) &newmd, sizeof(newmd)) != sizeof(newmd))
129 if (newmd.headersize != sizeof(newmd))
131 if (strcmp(newmd.magic, MMALLOC_MAGIC) != 0)
133 if (newmd.version > MMALLOC_VERSION)
137 if (__mmalloc_remap_core(&newmd) == newmd.base) {
138 mdptr = (struct mdesc *) newmd.base;
140 if(!mdptr->refcount){
141 pthread_mutex_init(&mdptr->mutex, NULL);
146 /* Add the new heap to the linked list of heaps attached by mmalloc */
147 mdptemp = __mmalloc_default_mdp;
148 while(mdptemp->next_mdesc)
149 mdptemp = mdptemp->next_mdesc;
152 mdptemp->next_mdesc = mdptr;
159 /* NULL is not a valid baseaddr as we cannot map anything there.
160 C'mon, user. Think! */
161 if (baseaddr == NULL)
164 /* We start off with the malloc descriptor allocated on the stack, until
165 we build it up enough to call _mmalloc_mmap_morecore() to allocate the
166 first page of the region and copy it there. Ensure that it is zero'd and
167 then initialize the fields that we know values for. */
170 memset((char *) mdp, 0, sizeof(mtemp));
171 strncpy(mdp->magic, MMALLOC_MAGIC, MMALLOC_MAGIC_SIZE);
172 mdp->headersize = sizeof(mtemp);
173 mdp->version = MMALLOC_VERSION;
175 mdp->base = mdp->breakval = mdp->top = baseaddr;
176 mdp->next_mdesc = NULL;
178 mdp->options = options;
180 /* If we have not been passed a valid open file descriptor for the file
181 to map to, then we go for an anonymous map */
184 mdp->flags |= MMALLOC_ANONYMOUS;
186 pthread_mutex_init(&mdp->mutex, NULL);
187 /* If we have not been passed a valid open file descriptor for the file
188 to map to, then open /dev/zero and use that to map to. */
190 /* Now try to map in the first page, copy the malloc descriptor structure
191 there, and arrange to return a pointer to this new copy. If the mapping
192 fails, then close the file descriptor if it was opened by us, and arrange
195 if ((mbase = mmorecore(mdp, sizeof(mtemp))) != NULL) {
196 memcpy(mbase, mdp, sizeof(mtemp));
198 fprintf(stderr, "morecore failed to get some more memory!\n");
202 /* Add the new heap to the linked list of heaps attached by mmalloc */
203 if(__mmalloc_default_mdp){
204 mdp = __mmalloc_default_mdp;
205 while(mdp->next_mdesc)
206 mdp = mdp->next_mdesc;
209 mdp->next_mdesc = (struct mdesc *)mbase;
218 /** Terminate access to a mmalloc managed region, but do not free its content.
220 * This is for example useful for the base region where ldl stores its data
221 * because it leaves the place after us.
223 void xbt_mheap_destroy_no_free(xbt_mheap_t md)
225 struct mdesc *mdp = md;
227 if(--mdp->refcount == 0){
228 pthread_mutex_destroy(&mdp->mutex);
232 /** Terminate access to a mmalloc managed region by unmapping all memory pages
233 associated with the region, and closing the file descriptor if it is one
236 Returns NULL on success.
238 Returns the malloc descriptor on failure, which can subsequently be used
239 for further action, such as obtaining more information about the nature of
242 Note that the malloc descriptor that we are using is currently located in
243 region we are about to unmap, so we first make a local copy of it on the
244 stack and use the copy. */
246 void *xbt_mheap_destroy(xbt_mheap_t mdp)
248 struct mdesc mtemp, *mdptemp;
251 /* Remove the heap from the linked list of heaps attached by mmalloc */
252 mdptemp = __mmalloc_default_mdp;
253 while(mdptemp->next_mdesc != mdp )
254 mdptemp = mdptemp->next_mdesc;
256 mdptemp->next_mdesc = mdp->next_mdesc;
258 xbt_mheap_destroy_no_free(mdp);
261 /* Now unmap all the pages associated with this region by asking for a
262 negative increment equal to the current size of the region. */
264 if (mmorecore(&mtemp, (char *)mtemp.base - (char *)mtemp.breakval) == NULL) {
265 /* Deallocating failed. Update the original malloc descriptor
269 if (mtemp.flags & MMALLOC_DEVZERO) {
279 /* Safety gap from the heap's break address.
280 * Try to increase this first if you experience strange errors under
282 #define HEAP_OFFSET (128UL<<20)
284 xbt_mheap_t mmalloc_get_default_md(void)
286 xbt_assert(__mmalloc_default_mdp);
287 return __mmalloc_default_mdp;
290 static void mmalloc_fork_prepare(void)
292 xbt_mheap_t mdp = NULL;
293 if ((mdp =__mmalloc_default_mdp)){
299 mdp = mdp->next_mdesc;
304 static void mmalloc_fork_parent(void)
306 xbt_mheap_t mdp = NULL;
307 if ((mdp =__mmalloc_default_mdp)){
311 mdp = mdp->next_mdesc;
316 static void mmalloc_fork_child(void)
318 struct mdesc* mdp = NULL;
319 if ((mdp =__mmalloc_default_mdp)){
322 mdp = mdp->next_mdesc;
327 /* Initialize the default malloc descriptor. */
328 void *mmalloc_preinit(void)
331 if (__mmalloc_default_mdp == NULL) {
333 xbt_pagesize = getpagesize();
334 unsigned long mask = ~((unsigned long)xbt_pagesize - 1);
335 void *addr = (void*)(((unsigned long)sbrk(0) + HEAP_OFFSET) & mask);
336 __mmalloc_default_mdp = xbt_mheap_new_options(-1, addr, XBT_MHEAP_OPTION_MEMSET);
337 /* Fixme? only the default mdp in protected against forks */
338 // This is mandated to protect the mmalloced areas through forks. Think of tesh.
339 // Nah, removing the mutex isn't a good idea either for tesh
340 res = xbt_os_thread_atfork(mmalloc_fork_prepare,
341 mmalloc_fork_parent, mmalloc_fork_child);
343 THROWF(system_error,0,"xbt_os_thread_atfork() failed: return value %d",res);
345 xbt_assert(__mmalloc_default_mdp != NULL);
347 return __mmalloc_default_mdp;
/* Module teardown hook: intentionally a no-op. */
void mmalloc_postexit(void)
{
  /* Do not destroy the default mdp or ldl won't be able to free the memory it
   * allocated since we're in memory */
  // xbt_mheap_destroy_no_free(__mmalloc_default_mdp);
}
357 // This is the underlying implementation of mmalloc_get_bytes_used_remote.
358 // Is it used directly in order to evaluate the bytes used from a different
360 size_t mmalloc_get_bytes_used_remote(size_t heaplimit, const malloc_info* heapinfo)
363 for (size_t i=0; i < heaplimit; ++i){
364 if (heapinfo[i].type == MMALLOC_TYPE_UNFRAGMENTED){
365 if (heapinfo[i].busy_block.busy_size > 0)
366 bytes += heapinfo[i].busy_block.busy_size;
367 } else if (heapinfo[i].type > 0) {
368 for (size_t j=0; j < (size_t) (BLOCKSIZE >> heapinfo[i].type); j++){
369 if(heapinfo[i].busy_frag.frag_size[j] > 0)
370 bytes += heapinfo[i].busy_frag.frag_size[j];
377 size_t mmalloc_get_bytes_used(const xbt_mheap_t heap){
378 const struct mdesc* heap_data = (const struct mdesc *) heap;
379 return mmalloc_get_bytes_used_remote(heap_data->heaplimit, heap_data->heapinfo);
382 ssize_t mmalloc_get_busy_size(xbt_mheap_t heap, void *ptr){
384 ssize_t block = ((char*)ptr - (char*)(heap->heapbase)) / BLOCKSIZE + 1;
385 if(heap->heapinfo[block].type < 0)
387 else if(heap->heapinfo[block].type == MMALLOC_TYPE_UNFRAGMENTED)
388 return heap->heapinfo[block].busy_block.busy_size;
390 ssize_t frag = ((uintptr_t) (ADDR2UINT (ptr) % (BLOCKSIZE))) >> heap->heapinfo[block].type;
391 return heap->heapinfo[block].busy_frag.frag_size[frag];
396 void mmcheck(xbt_mheap_t heap) {return;
399 malloc_info* heapinfo = NULL;
400 for (size_t i=1; i < heap->heaplimit; i += mmalloc_get_increment(heapinfo)) {
401 heapinfo = heap->heapinfo + i;
402 switch (heapinfo->type) {
403 case MMALLOC_TYPE_HEAPINFO:
404 case MMALLOC_TYPE_FREE:
405 if (heapinfo->free_block.size==0) {
406 xbt_die("Block size == 0");
409 case MMALLOC_TYPE_UNFRAGMENTED:
410 if (heapinfo->busy_block.size==0) {
411 xbt_die("Block size == 0");
413 if (heapinfo->busy_block.busy_size==0 && heapinfo->busy_block.size!=0) {
414 xbt_die("Empty busy block");
418 if (heapinfo->type<0) {
419 xbt_die("Unkown mmalloc block type.");