1 /* Initialization for access to a mmap'd malloc managed region.
2 Copyright 1992, 2000 Free Software Foundation, Inc.
4 Contributed by Fred Fish at Cygnus Support. fnf@cygnus.com
6 This file is part of the GNU C Library.
8 The GNU C Library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Library General Public License as
10 published by the Free Software Foundation; either version 2 of the
11 License, or (at your option) any later version.
13 The GNU C Library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Library General Public License for more details.
18 You should have received a copy of the GNU Library General Public
19 License along with the GNU C Library; see the file COPYING.LIB. If
20 not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
23 /* Copyright (c) 2012-2014. The SimGrid Team.
24 * All rights reserved. */
26 /* This program is free software; you can redistribute it and/or modify it
27 * under the terms of the license (GNU LGPL) which comes with this package. */
29 #include <sys/types.h>
30 #include <fcntl.h> /* After sys/types.h, at least for dpx/2. */
34 #include <unistd.h> /* Prototypes for lseek */
36 #include "mmprivate.h"
38 #include "xbt_modinter.h" /* declarations of mmalloc_preinit and friends that live here */
44 /* Initialize access to a mmalloc managed region.
46 If FD is a valid file descriptor for an open file then data for the
47 mmalloc managed region is mapped to that file, otherwise an anonymous
48 map is used if supported by the underlying OS. In case of running in
49 an OS without support of anonymous mappings then "/dev/zero" is used
50 and in both cases the data will not exist in any filesystem object.
52 If the open file corresponding to FD is from a previous use of
53 mmalloc and passes some basic sanity checks to ensure that it is
54 compatible with the current mmalloc package, then its data is
55 mapped in and is immediately accessible at the same addresses in
56 the current process as the process that created the file (ignoring
57 the BASEADDR parameter).
59 For non-valid FDs or empty files the mapping is established
60 starting at the specified address BASEADDR in the process address space.
63 The provided BASEADDR should be chosen carefully in order to avoid
64 bumping into existing mapped regions or future mapped regions.
66 On success, returns a "malloc descriptor" which is used in subsequent
67 calls to other mmalloc package functions. It is explicitly "void *"
68 so that users of the package don't have to worry about the actual
69 implementation details.
71 On failure returns NULL. */
73 xbt_mheap_t xbt_mheap_new(int fd, void *baseaddr)
80 /* First check to see if FD is a valid file descriptor, and if so, see
81 if the file has any current contents (size > 0). If it does, then
82 attempt to reuse the file. If we can't reuse the file, either
83 because it isn't a valid mmalloc produced file, was produced by an
84 obsolete version, or any other reason, then we fail to attach to
88 if (fstat(fd, &sbuf) < 0)
91 else if (sbuf.st_size > 0) {
92 /* We were given an valid file descriptor on an open file, so try to remap
93 it into the current process at the same address to which it was previously
94 mapped. It naturally have to pass some sanity checks for that.
96 Note that we have to update the file descriptor number in the malloc-
97 descriptor read from the file to match the current valid one, before
98 trying to map the file in, and again after a successful mapping and
99 after we've switched over to using the mapped in malloc descriptor
100 rather than the temporary one on the stack.
102 Once we've switched over to using the mapped in malloc descriptor, we
103 have to update the pointer to the morecore function, since it almost
104 certainly will be at a different address if the process reusing the
105 mapped region is from a different executable.
107 Also note that if the heap being remapped previously used the mmcheckf()
108 routines, we need to update the hooks since their target functions
109 will have certainly moved if the executable has changed in any way.
110 We do this by calling mmcheckf() internally.
112 Returns a pointer to the malloc descriptor if successful, or NULL if
113 unsuccessful for some reason. */
116 struct mdesc *mdptr = NULL, *mdptemp = NULL;
118 if (lseek(fd, 0L, SEEK_SET) != 0)
120 if (read(fd, (char *) &newmd, sizeof(newmd)) != sizeof(newmd))
122 if (newmd.headersize != sizeof(newmd))
124 if (strcmp(newmd.magic, MMALLOC_MAGIC) != 0)
126 if (newmd.version > MMALLOC_VERSION)
130 if (__mmalloc_remap_core(&newmd) == newmd.base) {
131 mdptr = (struct mdesc *) newmd.base;
133 if(!mdptr->refcount){
134 sem_init(&mdptr->sem, 0, 1);
139 /* Add the new heap to the linked list of heaps attached by mmalloc */
140 mdptemp = __mmalloc_default_mdp;
141 while(mdptemp->next_mdesc)
142 mdptemp = mdptemp->next_mdesc;
145 mdptemp->next_mdesc = mdptr;
152 /* NULL is not a valid baseaddr as we cannot map anything there.
153 C'mon, user. Think! */
154 if (baseaddr == NULL)
157 /* We start off with the malloc descriptor allocated on the stack, until
158 we build it up enough to call _mmalloc_mmap_morecore() to allocate the
159 first page of the region and copy it there. Ensure that it is zero'd and
160 then initialize the fields that we know values for. */
163 memset((char *) mdp, 0, sizeof(mtemp));
164 strncpy(mdp->magic, MMALLOC_MAGIC, MMALLOC_MAGIC_SIZE);
165 mdp->headersize = sizeof(mtemp);
166 mdp->version = MMALLOC_VERSION;
168 mdp->base = mdp->breakval = mdp->top = baseaddr;
169 mdp->next_mdesc = NULL;
172 /* If we have not been passed a valid open file descriptor for the file
173 to map to, then we go for an anonymous map */
176 mdp->flags |= MMALLOC_ANONYMOUS;
178 sem_init(&mdp->sem, 0, 1);
180 /* If we have not been passed a valid open file descriptor for the file
181 to map to, then open /dev/zero and use that to map to. */
183 /* Now try to map in the first page, copy the malloc descriptor structure
184 there, and arrange to return a pointer to this new copy. If the mapping
185 fails, then close the file descriptor if it was opened by us, and arrange
188 if ((mbase = mmorecore(mdp, sizeof(mtemp))) != NULL) {
189 memcpy(mbase, mdp, sizeof(mtemp));
191 fprintf(stderr, "morecore failed to get some more memory!\n");
195 /* Add the new heap to the linked list of heaps attached by mmalloc */
196 if(__mmalloc_default_mdp){
197 mdp = __mmalloc_default_mdp;
198 while(mdp->next_mdesc)
199 mdp = mdp->next_mdesc;
202 mdp->next_mdesc = (struct mdesc *)mbase;
211 /** Terminate access to a mmalloc managed region, but do not free its content.
213 * This is for example useful for the base region where ldl stores its data
214 * because it leaves the place after us.
216 void xbt_mheap_destroy_no_free(xbt_mheap_t md)
218 struct mdesc *mdp = md;
220 if(--mdp->refcount == 0){
222 sem_destroy(&mdp->sem);
226 /** Terminate access to a mmalloc managed region by unmapping all memory pages
227 associated with the region, and closing the file descriptor if it is one
230 Returns NULL on success.
232 Returns the malloc descriptor on failure, which can subsequently be used
233 for further action, such as obtaining more information about the nature of
236 Note that the malloc descriptor that we are using is currently located in
237 region we are about to unmap, so we first make a local copy of it on the
238 stack and use the copy. */
240 void *xbt_mheap_destroy(xbt_mheap_t mdp)
242 struct mdesc mtemp, *mdptemp;
245 /* Remove the heap from the linked list of heaps attached by mmalloc */
246 mdptemp = __mmalloc_default_mdp;
247 while(mdptemp->next_mdesc != mdp )
248 mdptemp = mdptemp->next_mdesc;
250 mdptemp->next_mdesc = mdp->next_mdesc;
252 xbt_mheap_destroy_no_free(mdp);
255 /* Now unmap all the pages associated with this region by asking for a
256 negative increment equal to the current size of the region. */
258 if (mmorecore(&mtemp, (char *)mtemp.base - (char *)mtemp.breakval) == NULL) {
259 /* Deallocating failed. Update the original malloc descriptor
263 if (mtemp.flags & MMALLOC_DEVZERO) {
273 /* Safety gap from the heap's break address.
274 * Try to increase this first if you experience strange errors under
 * valgrind. */
276 #define HEAP_OFFSET (128UL<<20)
278 xbt_mheap_t mmalloc_get_default_md(void)
280 xbt_assert(__mmalloc_default_mdp);
281 return __mmalloc_default_mdp;
284 static void mmalloc_fork_prepare(void)
286 xbt_mheap_t mdp = NULL;
287 if ((mdp =__mmalloc_default_mdp)){
293 mdp = mdp->next_mdesc;
298 static void mmalloc_fork_parent(void)
300 xbt_mheap_t mdp = NULL;
301 if ((mdp =__mmalloc_default_mdp)){
305 mdp = mdp->next_mdesc;
310 static void mmalloc_fork_child(void)
312 struct mdesc* mdp = NULL;
313 if ((mdp =__mmalloc_default_mdp)){
316 mdp = mdp->next_mdesc;
321 /* Initialize the default malloc descriptor. */
322 void *mmalloc_preinit(void)
325 if (__mmalloc_default_mdp == NULL) {
326 unsigned long mask = ~((unsigned long)getpagesize() - 1);
327 void *addr = (void*)(((unsigned long)sbrk(0) + HEAP_OFFSET) & mask);
328 __mmalloc_default_mdp = xbt_mheap_new(-1, addr);
329 /* Fixme? only the default mdp in protected against forks */
330 // This is mandated to protect the mmalloced areas through forks. Think of tesh.
331 // Nah, removing the mutex isn't a good idea either for tesh
332 res = xbt_os_thread_atfork(mmalloc_fork_prepare,
333 mmalloc_fork_parent, mmalloc_fork_child);
335 THROWF(system_error,0,"xbt_os_thread_atfork() failed: return value %d",res);
337 xbt_assert(__mmalloc_default_mdp != NULL);
339 #if defined(HAVE_GNU_LD) && defined(MMALLOC_WANT_OVERRIDE_LEGACY)
340 mm_gnuld_legacy_init();
343 return __mmalloc_default_mdp;
346 void mmalloc_postexit(void)
348 /* Do not destroy the default mdp or ldl won't be able to free the memory it
349 * allocated since we're in memory */
350 // xbt_mheap_destroy_no_free(__mmalloc_default_mdp);
353 size_t mmalloc_get_bytes_used(xbt_mheap_t heap){
357 while(i<=((struct mdesc *)heap)->heaplimit){
358 if(((struct mdesc *)heap)->heapinfo[i].type == 0){
359 if(((struct mdesc *)heap)->heapinfo[i].busy_block.busy_size > 0)
360 bytes += ((struct mdesc *)heap)->heapinfo[i].busy_block.busy_size;
362 }else if(((struct mdesc *)heap)->heapinfo[i].type > 0){
363 for(j=0; j < (size_t) (BLOCKSIZE >> ((struct mdesc *)heap)->heapinfo[i].type); j++){
364 if(((struct mdesc *)heap)->heapinfo[i].busy_frag.frag_size[j] > 0)
365 bytes += ((struct mdesc *)heap)->heapinfo[i].busy_frag.frag_size[j];
374 ssize_t mmalloc_get_busy_size(xbt_mheap_t heap, void *ptr){
376 ssize_t block = ((char*)ptr - (char*)(heap->heapbase)) / BLOCKSIZE + 1;
377 if(heap->heapinfo[block].type == -1)
379 else if(heap->heapinfo[block].type == 0)
380 return heap->heapinfo[block].busy_block.busy_size;
382 ssize_t frag = ((uintptr_t) (ADDR2UINT (ptr) % (BLOCKSIZE))) >> heap->heapinfo[block].type;
383 return heap->heapinfo[block].busy_frag.frag_size[frag];