X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/053352ba13737467be4fc66c9a1c92bd84118bee..64561039d3dec9e50b4eaf1b78b3edef71898383:/src/xbt/mallocator.c

diff --git a/src/xbt/mallocator.c b/src/xbt/mallocator.c
index 93e6a5ba5f..bf0d2c8e84 100644
--- a/src/xbt/mallocator.c
+++ b/src/xbt/mallocator.c
@@ -9,10 +9,10 @@
 #include "xbt/mallocator.h"
 #include "xbt/asserts.h"
 #include "xbt/sysdep.h"
+#include "mc/mc.h" /* kill mallocators when model-checking is enabled */
 #include "mallocator_private.h"
 
 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(xbt_mallocator, xbt, "Mallocators");
-extern int _surf_do_model_check; /* kill mallocators when this is true */
 
 /**
  * \brief Constructor
@@ -44,21 +44,21 @@ xbt_mallocator_t xbt_mallocator_new(int size,
   xbt_assert0(new_f != NULL && free_f != NULL && reset_f != NULL,
               "invalid parameter");
 
-  /* Let's force 0 size mallocator! (Dirty hack, blame Martin :) )*/
+  /* Let's force 0 size mallocator! (Dirty hack, blame Martin :) ) */
 
   /* mallocators and memory mess introduced by model-checking do not mix well together:
    * The mallocator will give standard memory when we are using raw memory (so these blocks are killed on restore)
    * and the contrary (so these blocks will leak accross restores)
    */
-  if (_surf_do_model_check)
+  if (MC_IS_ENABLED)
     size = 0;
-  
+
   m = xbt_new0(s_xbt_mallocator_t, 1);
   VERB1("Create mallocator %p", m);
   if (XBT_LOG_ISENABLED(xbt_mallocator, xbt_log_priority_verbose))
     xbt_backtrace_display_current();
 
-  m->objects = xbt_new0(void *, _surf_do_model_check?1:size);
+  m->objects = xbt_new0(void *, MC_IS_ENABLED ? 1 : size);
   m->max_size = size;
   m->current_size = 0;
   m->new_f = new_f;
@@ -82,7 +82,8 @@ void xbt_mallocator_free(xbt_mallocator_t m)
   int i;
   xbt_assert0(m != NULL, "Invalid parameter");
 
-  VERB3("Frees mallocator %p (size:%d/%d)", m, m->current_size, m->max_size);
+  VERB3("Frees mallocator %p (size:%d/%d)", m, m->current_size,
+        m->max_size);
   for (i = 0; i < m->current_size; i++) {
     (*(m->free_f)) (m->objects[i]);
   }
@@ -109,17 +110,16 @@ void xbt_mallocator_free(xbt_mallocator_t m)
 void *xbt_mallocator_get(xbt_mallocator_t m)
 {
   void *object;
-
   xbt_assert0(m != NULL, "Invalid parameter");
   if (m->current_size > 0) {
     /* there is at least an available object */
-    DEBUG3("Reuse an old object for mallocator %p (size:%d/%d)", m,
-           m->current_size, m->max_size);
+    /* DEBUG3("Reuse an old object for mallocator %p (size:%d/%d)", m,
+       m->current_size, m->max_size); */
    object = m->objects[--m->current_size];
   } else {
     /* otherwise we must allocate a new object */
-    DEBUG3("Create a new object for mallocator %p (size:%d/%d)", m,
-           m->current_size, m->max_size);
+    /* DEBUG3("Create a new object for mallocator %p (size:%d/%d)", m,
+       m->current_size, m->max_size); */
     object = (*(m->new_f)) ();
   }
   (*(m->reset_f)) (object);
@@ -141,18 +141,16 @@ void *xbt_mallocator_get(xbt_mallocator_t m)
  */
 void xbt_mallocator_release(xbt_mallocator_t m, void *object)
 {
-  xbt_assert0(m != NULL && object != NULL, "Invalid parameter");
-
   if (m->current_size < m->max_size) {
     /* there is enough place to push the object */
-    DEBUG3
-        ("Store deleted object in mallocator %p for further use (size:%d/%d)",
-         m, m->current_size, m->max_size);
+    /* DEBUG3
+       ("Store deleted object in mallocator %p for further use (size:%d/%d)",
+       m, m->current_size, m->max_size); */
     m->objects[m->current_size++] = object;
   } else {
     /* otherwise we don't have a choice, we must free the object */
-    DEBUG3("Free deleted object: mallocator %p is full (size:%d/%d)", m,
-           m->current_size, m->max_size);
+    /* DEBUG3("Free deleted object: mallocator %p is full (size:%d/%d)", m,
+       m->current_size, m->max_size); */
     (*(m->free_f)) (object);
   }
 }
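
For readers skimming this patch, here is a minimal usage sketch of the xbt_mallocator API the diff touches. It is not part of the commit: the element type (double), the callback names (my_new, my_free, my_reset) and the use of xbt_new0/xbt_free from xbt/sysdep.h are illustrative assumptions; only the xbt_mallocator_* calls themselves appear in the patched file. new_f allocates a fresh object, free_f destroys one, and reset_f re-initializes an object handed back by xbt_mallocator_get().

/* Hypothetical example, not from the patch: a small pool of doubles. */
#include "xbt/mallocator.h"
#include "xbt/sysdep.h"

static void *my_new(void)        /* called when the pool is empty */
{
  return xbt_new0(double, 1);
}

static void my_free(void *obj)   /* called when the pool is full, or by xbt_mallocator_free() */
{
  xbt_free(obj);
}

static void my_reset(void *obj)  /* called on every xbt_mallocator_get() */
{
  *(double *) obj = 0.0;
}

static void example(void)
{
  xbt_mallocator_t pool = xbt_mallocator_new(64, my_new, my_free, my_reset);

  double *d = xbt_mallocator_get(pool);   /* recycled or freshly allocated, then reset */
  *d = 3.14;
  xbt_mallocator_release(pool, d);        /* cached for reuse, or freed if the pool is full */

  xbt_mallocator_free(pool);              /* frees all cached objects and the pool itself */
}

With model checking enabled, the patched constructor forces a zero-size pool (if (MC_IS_ENABLED) size = 0;), so xbt_mallocator_get() always calls new_f and xbt_mallocator_release() always calls free_f, which is the behaviour the comment about raw versus standard memory asks for.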