summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--include/mimalloc-internal.h23
-rw-r--r--include/mimalloc-types.h35
-rw-r--r--src/alloc.c52
-rw-r--r--src/init.c8
-rw-r--r--src/memory.c13
-rw-r--r--src/options.c6
-rw-r--r--src/os.c4
-rw-r--r--src/page.c32
-rw-r--r--src/stats.c36
-rw-r--r--test/main-override-static.c80
-rw-r--r--test/test-stress.c9
11 files changed, 215 insertions, 83 deletions
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index 8d6cc29..f4822b1 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -415,7 +415,7 @@ static inline bool mi_is_in_same_segment(const void* p, const void* q) {
}
static inline mi_block_t* mi_block_nextx( uintptr_t cookie, const mi_block_t* block ) {
- #if MI_SECURE
+ #ifdef MI_ENCODE_FREELIST
return (mi_block_t*)(block->next ^ cookie);
#else
UNUSED(cookie);
@@ -424,7 +424,7 @@ static inline mi_block_t* mi_block_nextx( uintptr_t cookie, const mi_block_t* bl
}
static inline void mi_block_set_nextx(uintptr_t cookie, mi_block_t* block, const mi_block_t* next) {
- #if MI_SECURE
+ #ifdef MI_ENCODE_FREELIST
block->next = (mi_encoded_t)next ^ cookie;
#else
UNUSED(cookie);
@@ -433,16 +433,15 @@ static inline void mi_block_set_nextx(uintptr_t cookie, mi_block_t* block, const
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
- #if MI_SECURE
+ #ifdef MI_ENCODE_FREELIST
mi_block_t* next = mi_block_nextx(page->cookie,block);
- #if MI_SECURE >= 4
- // check if next is at least in our segment range
- // TODO: it is better to check if it is actually inside our page but that is more expensive
- // to calculate. Perhaps with a relative free list this becomes feasible?
- if (next!=NULL && !mi_is_in_same_segment(block, next)) {
- _mi_fatal_error("corrupted free list entry at %p: %zx\n", block, (uintptr_t)next);
- }
- #endif
+ // check for free list corruption: is `next` at least in our segment range?
+ // TODO: it is better to check if it is actually inside our page but that is more expensive
+ // to calculate. Perhaps with a relative free list this becomes feasible?
+ if (next!=NULL && !mi_is_in_same_segment(block, next)) {
+ _mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", page->block_size, block, (uintptr_t)next);
+ next = NULL;
+ }
return next;
#else
UNUSED(page);
@@ -451,7 +450,7 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
}
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
- #if MI_SECURE
+ #ifdef MI_ENCODE_FREELIST
mi_block_set_nextx(page->cookie,block,next);
#else
UNUSED(page);
diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index e49893b..91a6824 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -25,24 +25,30 @@ terms of the MIT license. A copy of the license can be found in the file
// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1 // guard page around metadata
// #define MI_SECURE 2 // guard page around each mimalloc page
-// #define MI_SECURE 3 // encode free lists
-// #define MI_SECURE 4 // all security enabled (checks for double free, corrupted free list and invalid pointer free)
+// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
+// #define MI_SECURE 4 // experimental, may be more expensive: checks for double free.
#if !defined(MI_SECURE)
-#define MI_SECURE 0
+#define MI_SECURE 4
#endif
-// Define MI_DEBUG as 1 for basic assert checks and statistics
-// set it to 2 to do internal asserts,
-// and to 3 to do extensive invariant checking.
+// Define MI_DEBUG for debug mode
+// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
+// #define MI_DEBUG 2 // + internal assertion checks
+// #define MI_DEBUG 3 // + extensive internal invariant checking
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
-#define MI_DEBUG 1
+#define MI_DEBUG 2
#else
#define MI_DEBUG 0
#endif
#endif
+// Encoded free lists allow detection of corrupted free lists
+// and can detect buffer overflows and double `free`s.
+#if (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_ENCODE_FREELIST 1
+#endif
// ------------------------------------------------------
// Platform specific values
@@ -119,6 +125,8 @@ terms of the MIT license. A copy of the license can be found in the file
// Maximum slice offset (7)
#define MI_MAX_SLICE_OFFSET ((MI_MEDIUM_PAGE_SIZE / MI_SEGMENT_SLICE_SIZE) - 1)
+// The free lists use encoded next fields
+// (Only actually encodes when MI_ENCODE_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;
// free lists contain blocks
@@ -127,6 +135,7 @@ typedef struct mi_block_s {
} mi_block_t;
+// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
MI_NO_DELAYED_FREE = 0,
MI_USE_DELAYED_FREE = 1,
@@ -182,7 +191,7 @@ typedef struct mi_page_s {
bool is_zero; // `true` if the blocks in the free list are zero initialized
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
- #if MI_SECURE
+ #ifdef MI_ENCODE_FREELIST
uintptr_t cookie; // random cookie to encode the free lists
#endif
size_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
@@ -198,8 +207,8 @@ typedef struct mi_page_s {
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
// improve page index calculation
- // without padding: 11 words on 64-bit, 13 on 32-bit. Secure adds one word
- #if (MI_SECURE==0)
+ // without padding: 11 words on 64-bit, 13 on 32-bit.
+ #ifndef MI_ENCODE_FREELIST
void* padding[1]; // 12 words on 64-bit, 14 words on 32-bit
#endif
} mi_page_t;
@@ -355,14 +364,14 @@ typedef struct mi_stats_s {
mi_stat_count_t page_committed;
mi_stat_count_t segments_abandoned;
mi_stat_count_t pages_abandoned;
- mi_stat_count_t pages_extended;
- mi_stat_count_t mmap_calls;
- mi_stat_count_t commit_calls;
mi_stat_count_t threads;
mi_stat_count_t huge;
mi_stat_count_t large;
mi_stat_count_t malloc;
mi_stat_count_t segments_cache;
+ mi_stat_counter_t pages_extended;
+ mi_stat_counter_t mmap_calls;
+ mi_stat_counter_t commit_calls;
mi_stat_counter_t page_no_retire;
mi_stat_counter_t searches;
mi_stat_counter_t huge_count;
diff --git a/src/alloc.c b/src/alloc.c
index 4f5e5b4..1b46ee6 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -32,10 +32,10 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page,block);
page->used++;
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
-#if (MI_DEBUG)
+#if (MI_DEBUG!=0)
if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
-#elif (MI_SECURE)
- block->next = 0;
+#elif (MI_SECURE!=0)
+ block->next = 0; // don't leak internal data
#endif
#if (MI_STAT>1)
if(size <= MI_LARGE_OBJ_SIZE_MAX) {
@@ -125,10 +125,12 @@ mi_decl_allocator void* mi_zalloc(size_t size) mi_attr_noexcept {
// ------------------------------------------------------
-// Check for double free in secure mode
+// Check for double free in secure and debug mode
+// This is somewhat expensive so only enabled for secure mode 4
// ------------------------------------------------------
-#if MI_SECURE>=4
+#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0))
+// linear check if the free list contains a specific element
static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) {
while (list != NULL) {
if (elem==list) return true;
@@ -137,15 +139,15 @@ static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, cons
return false;
}
-static mi_decl_noinline bool mi_check_double_freex(const mi_page_t* page, const mi_block_t* block, const mi_block_t* n) {
+static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block, const mi_block_t* n) {
size_t psize;
uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
if (n == NULL || ((uint8_t*)n >= pstart && (uint8_t*)n < (pstart + psize))) {
// Suspicious: the decoded value is in the same page (or NULL).
- // Walk the free lists to see if it is already freed
+ // Walk the free lists to verify positively if it is already freed
if (mi_list_contains(page, page->free, block) ||
- mi_list_contains(page, page->local_free, block) ||
- mi_list_contains(page, (const mi_block_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*,&page->thread_free)), block))
+ mi_list_contains(page, page->local_free, block) ||
+ mi_list_contains(page, (const mi_block_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*,&page->thread_free)), block))
{
_mi_fatal_error("double free detected of block %p with size %zu\n", block, page->block_size);
return true;
@@ -154,16 +156,23 @@ static mi_decl_noinline bool mi_check_double_freex(const mi_page_t* page, const
return false;
}
-static inline bool mi_check_double_free(const mi_page_t* page, const mi_block_t* block) {
- mi_block_t* n = (mi_block_t*)(block->next ^ page->cookie);
- if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check
- (n==NULL || mi_is_in_same_segment(block, n)))
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+ mi_block_t* n = mi_block_nextx(page->cookie, block); // pretend it is freed, and get the decoded first field
+ if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
+ (n==NULL || mi_is_in_same_segment(block, n))) // quick check: in same segment or NULL?
{
    // Suspicious: decoded value in block is in the same segment (or NULL) -- maybe a double free?
- return mi_check_double_freex(page, block, n);
+ // (continue in separate function to improve code generation)
+ return mi_check_is_double_freex(page, block, n);
}
return false;
}
+#else
+static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+ UNUSED(page);
+ UNUSED(block);
+ return false;
+}
#endif
@@ -171,7 +180,6 @@ static inline bool mi_check_double_free(const mi_page_t* page, const mi_block_t*
// Free
// ------------------------------------------------------
-
// multi-threaded free
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
@@ -192,7 +200,10 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
page->free = block;
page->used--;
page->is_zero = false;
- _mi_segment_page_free(page,true,&heap->tld->segments);
+ mi_assert(page->used == 0);
+ mi_tld_t* tld = heap->tld;
+ _mi_stat_decrease(&tld->stats.huge, page->block_size);
+ _mi_segment_page_free(page,true,&tld->segments);
}
return;
}
@@ -250,6 +261,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
// and push it on the free list
if (mi_likely(local)) {
// owning thread can free a block directly
+ if (mi_check_is_double_free(page, block)) return;
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
@@ -293,7 +305,7 @@ void mi_free(void* p) mi_attr_noexcept
const mi_segment_t* const segment = _mi_ptr_segment(p);
if (mi_unlikely(segment == NULL)) return; // checks for (p==NULL)
-#if (MI_DEBUG>0)
+#if (MI_DEBUG!=0)
if (mi_unlikely(!mi_is_in_heap_region(p))) {
_mi_warning_message("possibly trying to free a pointer that does not point to a valid heap region: 0x%p\n"
"(this may still be a valid very large allocation (over 64MiB))\n", p);
@@ -302,7 +314,7 @@ void mi_free(void* p) mi_attr_noexcept
}
}
#endif
-#if (MI_DEBUG>0 || MI_SECURE>=4)
+#if (MI_DEBUG!=0 || MI_SECURE>=4)
if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
_mi_error_message("trying to free a pointer that does not point to a valid heap space: %p\n", p);
return;
@@ -324,9 +336,7 @@ void mi_free(void* p) mi_attr_noexcept
if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
mi_block_t* block = (mi_block_t*)p;
- #if MI_SECURE>=4
- if (mi_check_double_free(page,block)) return;
- #endif
+ if (mi_check_is_double_free(page,block)) return;
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
diff --git a/src/init.c b/src/init.c
index 4da6c5a..14426d9 100644
--- a/src/init.c
+++ b/src/init.c
@@ -15,14 +15,14 @@ const mi_page_t _mi_page_empty = {
0, false, false, false, false, 0, 0,
{ 0 }, false,
NULL, // free
- #if MI_SECURE
+ #if MI_ENCODE_FREELIST
0,
#endif
0, // used
NULL,
ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(0),
0, NULL, NULL, NULL
- #if (MI_SECURE==0)
+ #ifndef MI_ENCODE_FREELIST
, { NULL } // padding
#endif
};
@@ -64,8 +64,8 @@ const mi_page_t _mi_page_empty = {
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
- MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
- MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
+ MI_STAT_COUNT_NULL(), \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, \
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
MI_STAT_COUNT_END_NULL()
diff --git a/src/memory.c b/src/memory.c
index 0f9f5de..80351ed 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -461,11 +461,20 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
// reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large
if (!is_large) {
if (mi_option_is_enabled(mi_option_segment_reset)) {
- _mi_os_reset(p, size, stats); //
- // _mi_os_decommit(p,size,stats); // if !is_eager_committed (and clear dirty bits)
+ if (!is_eager_committed && // cannot reset large pages
+ (mi_option_is_enabled(mi_option_eager_commit) || // cannot reset halfway committed segments, use `option_page_reset` instead
+ mi_option_is_enabled(mi_option_reset_decommits))) // but we can decommit halfway committed segments
+ {
+ _mi_os_reset(p, size, stats);
+ //_mi_os_decommit(p, size, stats); // todo: and clear dirty bits?
+ }
}
    // else { _mi_os_reset(p,size,stats); }
  }
if (!is_eager_committed) {
// adjust commit statistics as we commit again when re-using the same slot
_mi_stat_decrease(&stats->committed, mi_good_commit_size(size));
diff --git a/src/options.c b/src/options.c
index 9b0c87a..99e90c4 100644
--- a/src/options.c
+++ b/src/options.c
@@ -65,7 +65,7 @@ static mi_option_desc_t options[_mi_option_last] =
{ 0, UNINIT, MI_OPTION(cache_reset) },
{ 0, UNINIT, MI_OPTION(reset_decommits) }, // note: cannot enable this if secure is on
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
- { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free
+ { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
{ 100, UNINIT, MI_OPTION(os_tag) } // only apple specific for now but might serve more or less related purpose
};
@@ -290,7 +290,9 @@ mi_attr_noreturn void _mi_fatal_error(const char* fmt, ...) {
va_start(args, fmt);
mi_vfprintf(NULL, "mimalloc: fatal: ", fmt, args);
va_end(args);
- exit(99);
+ #if (MI_SECURE>=0)
+ abort();
+ #endif
}
// --------------------------------------------------------
diff --git a/src/os.c b/src/os.c
index 1c6093d..5e595f9 100644
--- a/src/os.c
+++ b/src/os.c
@@ -476,7 +476,7 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, boo
int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
#endif
- _mi_stat_increase(&stats->mmap_calls, 1);
+ mi_stat_counter_increase(stats->mmap_calls, 1);
if (p != NULL) {
_mi_stat_increase(&stats->reserved, size);
if (commit) { _mi_stat_increase(&stats->committed, size); }
@@ -631,7 +631,7 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
int err = 0;
if (commit) {
_mi_stat_increase(&stats->committed, csize);
- _mi_stat_increase(&stats->commit_calls, 1);
+ _mi_stat_counter_increase(&stats->commit_calls, 1);
}
else {
_mi_stat_decrease(&stats->committed, csize);
diff --git a/src/page.c b/src/page.c
index 21ecce8..20c09a4 100644
--- a/src/page.c
+++ b/src/page.c
@@ -160,14 +160,21 @@ static void _mi_page_thread_free_collect(mi_page_t* page)
// return if the list is empty
if (head == NULL) return;
- // find the tail
+ // find the tail -- also to get a proper count (without data races)
+ uintptr_t max_count = page->capacity; // cannot collect more than capacity
uintptr_t count = 1;
mi_block_t* tail = head;
mi_block_t* next;
- while ((next = mi_block_next(page,tail)) != NULL) {
+ while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) {
count++;
tail = next;
}
+ // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free)
+ if (count > max_count) {
+ _mi_fatal_error("corrupted thread-free list\n");
+ return; // the thread-free items cannot be freed
+ }
+
// and append the current local free list
mi_block_set_next(page,tail, page->local_free);
page->local_free = head;
@@ -377,7 +384,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_stat_decrease(&page->heap->tld->stats.huge, page->block_size);
}
}
-
+
// remove from the page list
// (no need to do _mi_heap_delayed_free first as all blocks are already free)
mi_segments_tld_t* segments_tld = &page->heap->tld->segments;
@@ -405,16 +412,17 @@ void _mi_page_retire(mi_page_t* page) {
// (or we end up retiring and re-allocating most of the time)
// NOTE: refine this more: we should not retire if this
// is the only page left with free blocks. It is not clear
- // how to check this efficiently though... for now we just check
- // if its neighbours are almost fully used.
+ // how to check this efficiently though...
+ // for now, we don't retire if it is the only page left of this size class.
+ mi_page_queue_t* pq = mi_page_queue_of(page);
if (mi_likely(page->block_size <= (MI_SMALL_SIZE_MAX/4))) {
- if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
+ // if (mi_page_mostly_used(page->prev) && mi_page_mostly_used(page->next)) {
+ if (pq->last==page && pq->first==page) {
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
      return; // don't retire after all
}
}
-
- _mi_page_free(page, mi_page_queue_of(page), false);
+ _mi_page_free(page, pq, false);
}
@@ -508,7 +516,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* page, size_t e
----------------------------------------------------------- */
#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well.
-#if MI_SECURE
+#if (MI_SECURE>0)
#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
#else
#define MI_MIN_EXTEND (1)
@@ -529,7 +537,7 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_stats_t* st
size_t page_size;
_mi_page_start(_mi_page_segment(page), page, &page_size);
- mi_stat_increase(stats->pages_extended, 1);
+ mi_stat_counter_increase(stats->pages_extended, 1);
// calculate the extend count
size_t extend = page->reserved - page->capacity;
@@ -577,7 +585,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
- #if MI_SECURE
+ #ifdef MI_ENCODE_FREELIST
page->cookie = _mi_heap_random(heap) | 1;
#endif
page->is_zero = page->is_zero_init;
@@ -590,7 +598,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page->next == NULL);
mi_assert_internal(page->prev == NULL);
mi_assert_internal(!mi_page_has_aligned(page));
- #if MI_SECURE
+ #if (MI_ENCODE_FREELIST)
mi_assert_internal(page->cookie != 0);
#endif
mi_assert_expensive(mi_page_is_valid_init(page));
diff --git a/src/stats.c b/src/stats.c
index 3a738c1..f62fc5a 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -95,15 +95,17 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1);
mi_stat_add(&stats->segments_abandoned, &src->segments_abandoned, 1);
- mi_stat_add(&stats->mmap_calls, &src->mmap_calls, 1);
- mi_stat_add(&stats->commit_calls, &src->commit_calls, 1);
mi_stat_add(&stats->threads, &src->threads, 1);
- mi_stat_add(&stats->pages_extended, &src->pages_extended, 1);
mi_stat_add(&stats->malloc, &src->malloc, 1);
mi_stat_add(&stats->segments_cache, &src->segments_cache, 1);
mi_stat_add(&stats->huge, &src->huge, 1);
mi_stat_add(&stats->large, &src->large, 1);
+
+ mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1);
+ mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1);
+ mi_stat_counter_add(&stats->commit_calls, &src->commit_calls, 1);
+
mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1);
mi_stat_counter_add(&stats->searches, &src->searches, 1);
mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1);
@@ -121,6 +123,9 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
Display statistics
----------------------------------------------------------- */
+// unit > 0 : size in binary bytes
+// unit == 0: count as decimal
+// unit < 0 : count in binary
static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, const char* fmt) {
char buf[32];
int len = 32;
@@ -165,17 +170,24 @@ static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t
_mi_fprintf(out, " ok\n");
}
else if (unit<0) {
- mi_print_amount(stat->peak, 1, out);
- mi_print_amount(stat->allocated, 1, out);
- mi_print_amount(stat->freed, 1, out);
- mi_print_amount(-unit, 1, out);
- mi_print_count((stat->allocated / -unit), 0, out);
+ mi_print_amount(stat->peak, -1, out);
+ mi_print_amount(stat->allocated, -1, out);
+ mi_print_amount(stat->freed, -1, out);
+ if (unit==-1) {
+ _mi_fprintf(out, "%22s", "");
+ }
+ else {
+ mi_print_amount(-unit, 1, out);
+ mi_print_count((stat->allocated / -unit), 0, out);
+ }
if (stat->allocated > stat->freed)
_mi_fprintf(out, " not all freed!\n");
else
_mi_fprintf(out, " ok\n");
}
else {
+ mi_print_amount(stat->peak, 1, out);
+ mi_print_amount(stat->allocated, 1, out);
_mi_fprintf(out, "\n");
}
}
@@ -247,11 +259,11 @@ static void _mi_stats_print(mi_stats_t* stats, double secs, mi_output_fun* out)
mi_stat_print(&stats->segments_cache, "-cached", -1, out);
mi_stat_print(&stats->pages, "pages", -1, out);
mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out);
- mi_stat_print(&stats->pages_extended, "-extended", 0, out);
+ mi_stat_counter_print(&stats->pages_extended, "-extended", out);
mi_stat_counter_print(&stats->page_no_retire, "-noretire", out);
- mi_stat_print(&stats->mmap_calls, "mmaps", 0, out);
- mi_stat_print(&stats->commit_calls, "commits", 0, out);
- mi_stat_print(&stats->threads, "threads", 0, out);
+ mi_stat_counter_print(&stats->mmap_calls, "mmaps", out);
+ mi_stat_counter_print(&stats->commit_calls, "commits", out);
+ mi_stat_print(&stats->threads, "threads", -1, out);
mi_stat_counter_print_avg(&stats->searches, "searches", out);
if (secs >= 0.0) _mi_fprintf(out, "%10s: %9.3f s\n", "elapsed", secs);
diff --git a/test/main-override-static.c b/test/main-override-static.c
index de6d455..4310f1c 100644
--- a/test/main-override-static.c
+++ b/test/main-override-static.c
@@ -7,6 +7,7 @@
#include <mimalloc.h>
#include <mimalloc-override.h> // redefines malloc etc.
+
#include <stdint.h>
#include <stdbool.h>
@@ -170,9 +171,19 @@ void mi_bins() {
}
}
+static void double_free1();
+static void double_free2();
+static void corrupt_free();
+
+
int main() {
mi_version();
- mi_bins();
+ // mi_bins();
+
+ // detect double frees and heap corruption
+ //double_free1();
+ //double_free2();
+ //corrupt_free();
void* p1 = malloc(78);
void* p2 = malloc(24);
@@ -197,3 +208,70 @@ int main() {
return 0;
}
+
+// The double free samples come from ArcHeap [1] by Insu Yun (issue #161)
+// [1]: https://arxiv.org/pdf/1903.00503.pdf
+
+static void double_free1() {
+ void* p[256];
+ //uintptr_t buf[256];
+
+ p[0] = mi_malloc(622616);
+ p[1] = mi_malloc(655362);
+ p[2] = mi_malloc(786432);
+ mi_free(p[2]);
+ // [VULN] Double free
+ mi_free(p[2]);
+ p[3] = mi_malloc(786456);
+ // [BUG] Found overlap
+ // p[3]=0x429b2ea2000 (size=917504), p[1]=0x429b2e42000 (size=786432)
+ fprintf(stderr, "p3: %p-%p, p1: %p-%p, p2: %p\n", p[3], (uint8_t*)(p[3]) + 786456, p[1], (uint8_t*)(p[1]) + 655362, p[2]);
+}
+
+static void double_free2() {
+ void* p[256];
+ //uintptr_t buf[256];
+ // [INFO] Command buffer: 0x327b2000
+ // [INFO] Input size: 182
+ p[0] = malloc(712352);
+ p[1] = malloc(786432);
+ free(p[0]);
+ // [VULN] Double free
+ free(p[0]);
+ p[2] = malloc(786440);
+ p[3] = malloc(917504);
+ p[4] = malloc(786440);
+ // [BUG] Found overlap
+ // p[4]=0x433f1402000 (size=917504), p[1]=0x433f14c2000 (size=786432)
+ fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432);
+}
+
+
+// Try to corrupt the heap through buffer overflow
+#define N 256
+#define SZ 64
+
+static void corrupt_free() {
+ void* p[N];
+ // allocate
+ for (int i = 0; i < N; i++) {
+ p[i] = malloc(SZ);
+ }
+ // free some
+ for (int i = 0; i < N; i += (N/10)) {
+ free(p[i]);
+ p[i] = NULL;
+ }
+ // try to corrupt the free list
+ for (int i = 0; i < N; i++) {
+ if (p[i] != NULL) {
+ memset(p[i], 0, SZ+8);
+ }
+ }
+ // allocate more.. trying to trigger an allocation from a corrupted entry
+ // this may need many allocations to get there (if at all)
+ for (int i = 0; i < 4096; i++) {
+ malloc(SZ);
+ }
+}
+
diff --git a/test/test-stress.c b/test/test-stress.c
index 9501fe8..e3b0f7a 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -17,7 +17,7 @@ terms of the MIT license.
#include <mimalloc.h>
// argument defaults
-static int THREADS = 32; // more repeatable if THREADS <= #processors
+static int THREADS = 1; // more repeatable if THREADS <= #processors
static int N = 20; // scaling factor
// static int THREADS = 8; // more repeatable if THREADS <= #processors
@@ -162,6 +162,7 @@ int main(int argc, char** argv) {
//printf("(reserve huge: %i\n)", res);
//bench_start_program();
+ mi_stats_reset();
memset((void*)transfer, 0, TRANSFERS*sizeof(void*));
run_os_threads(THREADS);
for (int i = 0; i < TRANSFERS; i++) {
@@ -169,7 +170,6 @@ int main(int argc, char** argv) {
}
#ifndef NDEBUG
mi_collect(false);
- mi_collect(true);
#endif
mi_stats_print(NULL);
//bench_end_program();
@@ -195,6 +195,11 @@ static void run_os_threads(size_t nthreads) {
for (size_t i = 0; i < nthreads; i++) {
WaitForSingleObject(thandles[i], INFINITE);
}
+ for (size_t i = 0; i < nthreads; i++) {
+ CloseHandle(thandles[i]);
+ }
+ free(tids);
+ free(thandles);
}
static void* atomic_exchange_ptr(volatile void** p, void* newval) {