Diffstat (limited to 'src/alloc.c')
-rw-r--r--  src/alloc.c  |  28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/src/alloc.c b/src/alloc.c
index 5905733..fe744f3 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -38,8 +38,8 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
- // allow use internally
- MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page));
+ // allow use of the block internally
+ mi_track_mem_undefined(block,mi_page_block_size(page));
// zero the block? note: we need to zero the full block size (issue #63)
if mi_unlikely(zero) {
@@ -77,7 +77,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
#endif
- MI_TRACK_MEM_NOACCESS(block,mi_page_block_size(page));
+ mi_track_mem_noaccess(block,mi_page_block_size(page));
return block;
}
@@ -99,7 +99,7 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
- MI_TRACK_ZALLOC(p,size,zero);
+ mi_track_malloc(p,size,zero);
return p;
}
@@ -128,7 +128,7 @@ extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
- MI_TRACK_ZALLOC(p,size,zero);
+ mi_track_malloc(p,size,zero);
return p;
}
}
@@ -183,7 +183,7 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
return false;
}
-#define MI_TRACK_PAGE(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); MI_TRACK_MEM_##access( pstart, psize); }
+#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
bool is_double_free = false;
@@ -213,10 +213,10 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
*bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
- MI_TRACK_MEM_DEFINED(padding,sizeof(*padding));
+ mi_track_mem_defined(padding,sizeof(*padding));
*delta = padding->delta;
bool ok = ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
- MI_TRACK_MEM_NOACCESS(padding,sizeof(*padding));
+ mi_track_mem_noaccess(padding,sizeof(*padding));
return ok;
}
@@ -239,7 +239,7 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
*size = bsize - delta;
uint8_t* fill = (uint8_t*)block + bsize - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
- MI_TRACK_MEM_DEFINED(fill,maxpad);
+ mi_track_mem_defined(fill,maxpad);
for (size_t i = 0; i < maxpad; i++) {
if (fill[i] != MI_DEBUG_PADDING) {
*wrong = bsize - delta + i;
@@ -247,7 +247,7 @@ static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, si
break;
}
}
- MI_TRACK_MEM_NOACCESS(fill,maxpad);
+ mi_track_mem_noaccess(fill,maxpad);
return ok;
}
@@ -481,7 +481,7 @@ void mi_free(void* p) mi_attr_noexcept
{
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
if mi_unlikely(segment == NULL) return;
- MI_TRACK_FREE(p);
+ mi_track_free(p);
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
@@ -493,7 +493,7 @@ void mi_free(void* p) mi_attr_noexcept
mi_check_padding(page, block);
mi_stat_free(page, block);
#if (MI_DEBUG!=0)
- MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page));
+ mi_track_mem_undefined(block,mi_page_block_size(page));
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
mi_block_set_next(page, block, page->local_free);
@@ -505,10 +505,10 @@ void mi_free(void* p) mi_attr_noexcept
else {
// non-local, aligned blocks, or a full page; use the more generic path
// note: recalc page in generic to improve code generation
- MI_TRACK_MEM_UNDEFINED(block,mi_page_block_size(page));
+ mi_track_mem_undefined(block,mi_page_block_size(page));
mi_free_generic(segment, tid == segment->thread_id, p);
}
- MI_TRACK_MEM_NOACCESS(block,mi_page_block_size(page));
+ mi_track_mem_noaccess(block,mi_page_block_size(page));
}
bool _mi_free_delayed_block(mi_block_t* block) {
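
The hunks above replace the uppercase MI_TRACK_* macros with lowercase mi_track_* hooks around block allocation, padding verification, and free. As a minimal sketch only (not mimalloc's actual track header; the guard name and exact definitions are assumptions for illustration), such hooks can collapse to no-ops when no external memory tracker such as Valgrind is enabled:

/* Sketch: plausible no-op fallbacks for the lowercase tracking hooks used
 * in the diff when no memory tracker is configured. The real definitions
 * live in mimalloc's track header and may differ. */
#ifndef MI_TRACK_ENABLED                 /* assumed guard name, for illustration */
#define mi_track_malloc(p,size,zero)     /* record an allocation of `size` bytes at `p` */
#define mi_track_free(p)                 /* record release of the block at `p` */
#define mi_track_mem_defined(p,size)     /* mark memory as readable and defined */
#define mi_track_mem_undefined(p,size)   /* mark memory as writable but undefined */
#define mi_track_mem_noaccess(p,size)    /* mark memory as inaccessible */
#endif

With no-op definitions like these, the calls inserted by this diff compile away entirely in a default build, while a tracker-enabled build can map them to the tracker's client-request macros.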