From bf71c3c42ec41053cf17711276bfd40d9b3a93c1 Mon Sep 17 00:00:00 2001 From: Frederic Suter Date: Mon, 26 Jun 2017 11:07:11 +0200 Subject: [PATCH] just a couple of smells --- src/smpi/smpi_shared.cpp | 12 ++++---- src/xbt/mmalloc/mmorecore.c | 59 ++++++++++++++++--------------------- src/xbt/mmalloc/mrealloc.c | 21 ++++++------- 3 files changed, 41 insertions(+), 51 deletions(-) diff --git a/src/smpi/smpi_shared.cpp b/src/smpi/smpi_shared.cpp index e5f24d6c42..cd4b82093e 100644 --- a/src/smpi/smpi_shared.cpp +++ b/src/smpi/smpi_shared.cpp @@ -214,12 +214,13 @@ static void *smpi_shared_malloc_local(size_t size, const char *file, int line) #define HUGE_PAGE_SIZE 1<<21 -/* - * Similar to smpi_shared_malloc, but only sharing the blocks described by shared_block_offsets. +/* Similar to smpi_shared_malloc, but only sharing the blocks described by shared_block_offsets. * This array contains the offsets (in bytes) of the block to share. * Even indices are the start offsets (included), odd indices are the stop offsets (excluded). - * For instance, if shared_block_offsets == {27, 42}, then the elements mem[27], mem[28], ..., mem[41] are shared. The others are not. + * For instance, if shared_block_offsets == {27, 42}, then the elements mem[27], mem[28], ..., mem[41] are shared. + * The others are not. */ + void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks) { char *huge_page_mount_point = xbt_cfg_get_string("smpi/shared-malloc-hugepage"); @@ -230,7 +231,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int use_huge_page = 0; #endif smpi_shared_malloc_blocksize = static_cast(xbt_cfg_get_double("smpi/shared-malloc-blocksize")); - void *mem, *allocated_ptr; + void* mem; size_t allocated_size; if(use_huge_page) { xbt_assert(smpi_shared_malloc_blocksize == HUGE_PAGE_SIZE, "the block size of shared malloc should be equal to the size of a huge page."); @@ -243,8 +244,7 @@ void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int /* First reserve memory area */ - allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - + void* allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); xbt_assert(allocated_ptr != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root " "to allow big allocations.\n", diff --git a/src/xbt/mmalloc/mmorecore.c b/src/xbt/mmalloc/mmorecore.c index e876dd2ba5..354e40b405 100644 --- a/src/xbt/mmalloc/mmorecore.c +++ b/src/xbt/mmalloc/mmorecore.c @@ -64,48 +64,43 @@ void *mmorecore(struct mdesc *mdp, ssize_t size) { ssize_t test = 0; - void *result; // please keep it uninitialized to track issues + void* result; // please keep it uninitialized to track issues off_t foffset; /* File offset at which new mapping will start */ size_t mapbytes; /* Number of bytes to map */ - void *moveto; /* Address where we wish to move "break value" to */ - void *mapto; /* Address we actually mapped to */ + void* moveto; /* Address where we wish to move "break value" to */ + void* mapto; /* Address we actually mapped to */ char buf = 0; /* Single byte to write to extend mapped file */ if (size == 0) { /* Just return the current "break" value. */ - result = mdp->breakval; - - } else if (size < 0) { - /* We are deallocating memory. If the amount requested would cause - us to try to deallocate back past the base of the mmap'd region - then die verbosely. 
Otherwise, deallocate the memory and return - the old break value. */ - if (((char *) mdp->breakval) + size >= (char *) mdp->base) { - result = (void *) mdp->breakval; - mdp->breakval = (char *) mdp->breakval + size; + return mdp->breakval; + } + + if (size < 0) { + /* We are deallocating memory. If the amount requested would cause us to try to deallocate back past the base of + * the mmap'd region then die verbosely. Otherwise, deallocate the memory and return the old break value. */ + if (((char*)mdp->breakval) + size >= (char*)mdp->base) { + result = (void*)mdp->breakval; + mdp->breakval = (char*)mdp->breakval + size; moveto = PAGE_ALIGN(mdp->breakval); - munmap(moveto, - (size_t) (((char *) mdp->top) - ((char *) moveto)) - 1); + munmap(moveto, (size_t)(((char*)mdp->top) - ((char*)moveto)) - 1); mdp->top = moveto; } else { fprintf(stderr,"Internal error: mmap was asked to deallocate more memory than it previously allocated. Bailling out now!\n"); abort(); } } else { - /* We are allocating memory. Make sure we have an open file - descriptor if not working with anonymous memory. */ + /* We are allocating memory. Make sure we have an open file descriptor if not working with anonymous memory. */ if (!(mdp->flags & MMALLOC_ANONYMOUS) && mdp->fd < 0) { fprintf(stderr,"Internal error: mmap file descriptor <0 (%d), without MMALLOC_ANONYMOUS being in the flags.\n",mdp->fd); abort(); - } else if ((char *) mdp->breakval + size > (char *) mdp->top) { - /* The request would move us past the end of the currently - mapped memory, so map in enough more memory to satisfy - the request. This means we also have to grow the mapped-to - file by an appropriate amount, since mmap cannot be used - to extend a file. */ - moveto = PAGE_ALIGN((char *) mdp->breakval + size); - mapbytes = (char *) moveto - (char *) mdp->top; - foffset = (char *) mdp->top - (char *) mdp->base; + } else if ((char*)mdp->breakval + size > (char*)mdp->top) { + /* The request would move us past the end of the currently mapped memory, so map in enough more memory to satisfy + the request. This means we also have to grow the mapped-to file by an appropriate amount, since mmap cannot + be used to extend a file. */ + moveto = PAGE_ALIGN((char*)mdp->breakval + size); + mapbytes = (char*)moveto - (char*)mdp->top; + foffset = (char*)mdp->top - (char*)mdp->base; if (mdp->fd > 0) { /* FIXME: Test results of lseek() */ @@ -117,10 +112,8 @@ void *mmorecore(struct mdesc *mdp, ssize_t size) } } - /* Let's call mmap. Note that it is possible that mdp->top - is 0. In this case mmap will choose the address for us. - This call might very well overwrite an already existing memory mapping - (leading to weird bugs). + /* Let's call mmap. Note that it is possible that mdp->top is 0. In this case mmap will choose the address for us. + This call might very well overwrite an already existing memory mapping (leading to weird bugs). 
*/ mapto = mmap(mdp->top, mapbytes, PROT_READ | PROT_WRITE, MAP_PRIVATE_OR_SHARED(mdp) | MAP_IS_ANONYMOUS(mdp) | @@ -140,13 +133,13 @@ void *mmorecore(struct mdesc *mdp, ssize_t size) if (mdp->top == 0) mdp->base = mdp->breakval = mapto; - mdp->top = PAGE_ALIGN((char *) mdp->breakval + size); + mdp->top = PAGE_ALIGN((char*)mdp->breakval + size); result = (void *) mdp->breakval; - mdp->breakval = (char *) mdp->breakval + size; + mdp->breakval = (char*)mdp->breakval + size; } else { /* Memory is already mapped, we only need to increase the breakval: */ result = (void *) mdp->breakval; - mdp->breakval = (char *) mdp->breakval + size; + mdp->breakval = (char*)mdp->breakval + size; } } return (result); diff --git a/src/xbt/mmalloc/mrealloc.c b/src/xbt/mmalloc/mrealloc.c index d0739609e1..589e412012 100644 --- a/src/xbt/mmalloc/mrealloc.c +++ b/src/xbt/mmalloc/mrealloc.c @@ -14,18 +14,15 @@ #include "mmprivate.h" -/* Resize the given region to the new size, returning a pointer - to the (possibly moved) region. This is optimized for speed; - some benchmarks seem to indicate that greater compactness is - achieved by unconditionally allocating and copying to a - new region. This module has incestuous knowledge of the - internals of both mfree and mmalloc. */ +/* Resize the given region to the new size, returning a pointer to the (possibly moved) region. This is optimized for + * speed; some benchmarks seem to indicate that greater compactness is achieved by unconditionally allocating and + * copying to a new region. This module has incestuous knowledge of the internals of both mfree and mmalloc. */ void *mrealloc(xbt_mheap_t mdp, void *ptr, size_t size) { void *result; - int type; - size_t block, blocks, oldlimit; + size_t blocks; + size_t oldlimit; /* Only keep real realloc, and reroute hidden malloc and free to the relevant functions */ if (size == 0) { @@ -38,8 +35,8 @@ void *mrealloc(xbt_mheap_t mdp, void *ptr, size_t size) //printf("(%s)realloc %p to %d...",xbt_thread_self_name(),ptr,(int)size); if ((char *) ptr < (char *) mdp->heapbase || BLOCK(ptr) > mdp->heapsize) { - printf - ("FIXME. Ouch, this pointer is not mine, refusing to proceed (another solution would be to malloc it instead of reallocing it, see source code)\n"); + printf("FIXME. Ouch, this pointer is not mine, refusing to proceed (another solution would be to malloc " + "it instead of reallocing it, see source code)\n"); result = mmalloc(mdp, size); abort(); return result; @@ -54,9 +51,9 @@ void *mrealloc(xbt_mheap_t mdp, void *ptr, size_t size) if (size < SMALLEST_POSSIBLE_MALLOC) size = SMALLEST_POSSIBLE_MALLOC; - block = BLOCK(ptr); + size_t block = BLOCK(ptr); - type = mdp->heapinfo[block].type; + int type = mdp->heapinfo[block].type; switch (type) { case MMALLOC_TYPE_HEAPINFO: -- 2.20.1
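
Note on the shared_block_offsets convention documented in the smpi_shared.cpp hunk above: the array is read in (start, stop) pairs, even indices included, odd indices excluded. The sketch below is a minimal, hypothetical caller written only to illustrate that layout; it assumes nb_shared_blocks counts the (start, stop) pairs and that the internal smpi_shared_malloc_partial() declaration is visible to the caller, which is not how the function is normally reached inside SMPI.

    #include <stddef.h>

    /* Declaration copied from the patch above; calling the SMPI-internal
     * helper directly is an assumption made only for this sketch. */
    void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks);

    /* Share two regions of a 1 MiB buffer. Even indices are start offsets
     * (included), odd indices are stop offsets (excluded), so this shares
     * mem[0..65535] and mem[524288..1048575]; the rest stays private. */
    void* allocate_partially_shared(void)
    {
      static size_t offsets[] = {0,      65536,     /* first shared block  */
                                 524288, 1048576};  /* second shared block */
      return smpi_shared_malloc_partial(1048576, offsets, 2 /* pairs */);
    }

The offset values are arbitrary illustrative choices; the code in the patch only documents the even/odd ordering.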
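The mmorecore() changes are easier to review with its sbrk-like bookkeeping spelled out. The following standalone model covers only the positive-size path (return the old break, advance it, and note how many extra bytes would have to be mapped when the request passes the current top). struct region, PAGE_SIZE and grow_break are illustrative names invented for this sketch, not the mmalloc internals, and the real function additionally mmaps the missing pages and grows the backing file when one is used.

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096  /* illustrative; mmalloc uses the real page size */
    #define PAGE_ALIGN(p) ((char*)((((uintptr_t)(p)) + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1)))

    struct region {    /* simplified stand-in for struct mdesc */
      char* base;      /* start of the mmap'd region           */
      char* breakval;  /* current "break": end of used memory  */
      char* top;       /* end of the currently mapped pages    */
    };

    /* Advance the break by 'size' bytes and report how many extra bytes the
     * mapping would have to grow (0 if the current pages already cover it). */
    static size_t grow_break(struct region* r, size_t size, void** old_break)
    {
      size_t mapbytes = 0;
      if (r->breakval + size > r->top) {            /* request passes the mapped top */
        char* moveto = PAGE_ALIGN(r->breakval + size);
        mapbytes     = (size_t)(moveto - r->top);   /* what mmorecore would mmap */
        r->top       = moveto;
      }
      *old_break   = r->breakval;                   /* mmorecore returns the old break */
      r->breakval += size;
      return mapbytes;
    }

The negative-size path in the patch is the mirror image: move the break back, page-align it, and munmap what lies between the aligned new break and the old top.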