Diffstat (limited to 'src/alloc.c')
-rw-r--r--  src/alloc.c  36
1 file changed, 21 insertions, 15 deletions
diff --git a/src/alloc.c b/src/alloc.c
index 5f150f2..9199772 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -45,7 +45,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
#if (MI_STAT>0)
const size_t bsize = mi_page_usable_block_size(page);
- if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_increase(heap, normal, bsize);
mi_heap_stat_counter_increase(heap, normal_count, 1);
#if (MI_STAT>1)
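
The hunk above narrows the "normal" heap statistics to blocks no larger than the medium object size rather than the large one. A minimal sketch of exercising those counters from user code, assuming a mimalloc build with MI_STAT enabled (the allocation sizes are arbitrary examples):

#include <mimalloc.h>

int main(void) {
  void* small = mi_malloc(64);               // small block: tracked under the "normal" stats
  void* big   = mi_malloc(4 * 1024 * 1024);  // well above the medium threshold: not "normal"
  mi_free(small);
  mi_free(big);
  mi_stats_print(NULL);                      // dump the accumulated statistics
  return 0;
}
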
@@ -297,20 +297,26 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
// only maintain stats for smaller objects if requested
#if (MI_STAT>0)
static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
-#if (MI_STAT < 2)
+ #if (MI_STAT < 2)
MI_UNUSED(block);
-#endif
+ #endif
mi_heap_t* const heap = mi_heap_get_default();
- const size_t bsize = mi_page_usable_block_size(page);
-#if (MI_STAT>1)
+ const size_t bsize = mi_page_usable_block_size(page);
+ #if (MI_STAT>1)
const size_t usize = mi_page_usable_size_of(page, block);
mi_heap_stat_decrease(heap, malloc, usize);
-#endif
- if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ #endif
+ if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
mi_heap_stat_decrease(heap, normal, bsize);
-#if (MI_STAT > 1)
+ #if (MI_STAT > 1)
mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
-#endif
+ #endif
+ }
+ else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
+ }
+ else {
+ mi_heap_stat_decrease(heap, huge, bsize);
}
}
#else
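
The rewritten mi_stat_free above classifies a freed block into one of three statistic buckets by its usable size: normal, large, or huge. A standalone sketch of that classification, using placeholder thresholds rather than mimalloc's actual MI_MEDIUM_OBJ_SIZE_MAX / MI_LARGE_OBJ_SIZE_MAX definitions:

#include <stddef.h>

enum stat_bucket { BUCKET_NORMAL, BUCKET_LARGE, BUCKET_HUGE };

/* Placeholder thresholds for illustration only; mimalloc derives the real
   MI_MEDIUM_OBJ_SIZE_MAX and MI_LARGE_OBJ_SIZE_MAX from its page and
   segment sizes. */
#define EX_MEDIUM_OBJ_SIZE_MAX  ((size_t)(128 * 1024))
#define EX_LARGE_OBJ_SIZE_MAX   ((size_t)(2 * 1024 * 1024))

static enum stat_bucket stat_bucket_of(size_t bsize) {
  if (bsize <= EX_MEDIUM_OBJ_SIZE_MAX) return BUCKET_NORMAL;   // per-bin "normal" stats
  else if (bsize <= EX_LARGE_OBJ_SIZE_MAX) return BUCKET_LARGE;
  else return BUCKET_HUGE;
}
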
@@ -324,11 +330,11 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
static void mi_stat_huge_free(const mi_page_t* page) {
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
- if (bsize <= MI_HUGE_OBJ_SIZE_MAX) {
- mi_heap_stat_decrease(heap, huge, bsize);
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
}
else {
- mi_heap_stat_decrease(heap, giant, bsize);
+ mi_heap_stat_decrease(heap, huge, bsize);
}
}
#else
@@ -353,8 +359,8 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
#endif
// huge page segments are always abandoned and can be freed immediately
- mi_segment_t* const segment = _mi_page_segment(page);
- if (segment->page_kind==MI_PAGE_HUGE) {
+ mi_segment_t* segment = _mi_page_segment(page);
+ if (segment->kind==MI_SEGMENT_HUGE) {
mi_stat_huge_free(page);
_mi_segment_huge_page_free(segment, page, block);
return;
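
The hunk above checks the segment kind rather than the page kind when a block is freed from another thread: huge-page segments hold a single page and are always abandoned, so they can be released immediately. A sketch of user code that reaches this cross-thread free path, assuming C11 threads are available (the allocation size is an arbitrary example chosen to be large enough for a huge segment):

#include <mimalloc.h>
#include <threads.h>

static int free_in_other_thread(void* p) {
  mi_free(p);   // the freeing thread differs from the allocating thread
  return 0;
}

int main(void) {
  void* huge = mi_malloc((size_t)64 * 1024 * 1024);  // big enough to live in a huge segment
  thrd_t t;
  thrd_create(&t, free_in_other_thread, huge);
  thrd_join(t, NULL);
  return 0;
}
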
@@ -484,10 +490,10 @@ void mi_free(void* p) mi_attr_noexcept
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
- mi_block_t* const block = (mi_block_t*)p;
if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
+ mi_block_t* block = (mi_block_t*)(p);
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
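
In the final hunk the block pointer is computed only on the fast path, taken when the freeing thread owns the page and the page is neither full nor holds aligned blocks. mi_likely/mi_unlikely are branch-prediction hints (built on __builtin_expect where the compiler supports it); a minimal sketch of the same fast/slow split in isolation, where the dispose function and its ownership flag are hypothetical:

#include <stdbool.h>

#if defined(__GNUC__) || defined(__clang__)
  #define likely(x)    __builtin_expect(!!(x), 1)
  #define unlikely(x)  __builtin_expect(!!(x), 0)
#else
  #define likely(x)    (x)
  #define unlikely(x)  (x)
#endif

void dispose(void* p, bool owned_by_this_thread) {
  if (likely(owned_by_this_thread)) {
    // fast path placeholder: a thread-local free needs no atomic operations
    (void)p;
  }
  else {
    // slow path placeholder: a real allocator would hand the block back to
    // the owning thread, e.g. by pushing it onto an atomic free list
    (void)p;
  }
}
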