-rw-r--r--  CMakeLists.txt                                 |    2
-rw-r--r--  azure-pipelines.yml                            |    7
-rw-r--r--  cmake/mimalloc-config-version.cmake            |    4
-rw-r--r--  doc/mimalloc-doc.h                             |    2
-rw-r--r--  ide/vs2017/mimalloc-override.vcxproj           |    2
-rw-r--r--  ide/vs2017/mimalloc-override.vcxproj.filters   |    6
-rw-r--r--  ide/vs2017/mimalloc.vcxproj                    |    2
-rw-r--r--  ide/vs2017/mimalloc.vcxproj.filters            |    7
-rw-r--r--  ide/vs2019/mimalloc-override.vcxproj           |    2
-rw-r--r--  ide/vs2019/mimalloc-override.vcxproj.filters   |    8
-rw-r--r--  ide/vs2019/mimalloc.vcxproj                    |    2
-rw-r--r--  ide/vs2019/mimalloc.vcxproj.filters            |    6
-rw-r--r--  include/mimalloc-internal.h                    |  160
-rw-r--r--  include/mimalloc-types.h                       |  180
-rw-r--r--  include/mimalloc.h                             |   32
-rw-r--r--  src/alloc-aligned.c                            |    2
-rw-r--r--  src/alloc.c                                    |   12
-rw-r--r--  src/arena.c                                    |  151
-rw-r--r--  src/bitmap.c                                   |   14
-rw-r--r--  src/bitmap.h                                   |    7
-rw-r--r--  src/heap.c                                     |   41
-rw-r--r--  src/init.c                                     |   45
-rw-r--r--  src/options.c                                  |   68
-rw-r--r--  src/os.c                                       |   50
-rw-r--r--  src/page-queue.c                               |   21
-rw-r--r--  src/page.c                                     |   92
-rw-r--r--  src/segment-cache.c                            |  360
-rw-r--r--  src/segment.c                                  | 1737
-rw-r--r--  src/static.c                                   |    2
-rw-r--r--  src/stats.c                                    |    8
-rw-r--r--  test/CMakeLists.txt                            |    2
-rw-r--r--  test/main-override-static.c                    |  185
-rw-r--r--  test/main-override.cpp                         |   70
-rw-r--r--  test/test-stress.c                             |   28
34 files changed, 2218 insertions, 1099 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1207bb..686ff17 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -37,7 +37,7 @@ set(mi_sources
src/os.c
src/bitmap.c
src/arena.c
- src/region.c
+ src/segment-cache.c
src/segment.c
src/page.c
src/alloc.c
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index cfaf187..c6efc87 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -134,9 +134,16 @@ jobs:
cmakeArgs: .. $(cmakeExtraArgs)
- script: make -j$(sysctl -n hw.ncpu) -C $(BuildType)
displayName: Make
+ # - script: MIMALLOC_VERBOSE=1 ./mimalloc-test-api
+ # workingDirectory: $(BuildType)
+ # displayName: TestAPI
+ # - script: MIMALLOC_VERBOSE=1 ./mimalloc-test-stress
+ # workingDirectory: $(BuildType)
+ # displayName: TestStress
- script: ctest --verbose --timeout 120
workingDirectory: $(BuildType)
displayName: CTest
+
# - upload: $(Build.SourcesDirectory)/$(BuildType)
# artifact: mimalloc-macos-$(BuildType)
diff --git a/cmake/mimalloc-config-version.cmake b/cmake/mimalloc-config-version.cmake
index 485c8e1..8063afe 100644
--- a/cmake/mimalloc-config-version.cmake
+++ b/cmake/mimalloc-config-version.cmake
@@ -1,5 +1,5 @@
-set(mi_version_major 1)
-set(mi_version_minor 7)
+set(mi_version_major 2)
+set(mi_version_minor 0)
set(mi_version_patch 6)
set(mi_version ${mi_version_major}.${mi_version_minor})
diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h
index ea2a1ad..4de6085 100644
--- a/doc/mimalloc-doc.h
+++ b/doc/mimalloc-doc.h
@@ -1080,7 +1080,7 @@ or via environment variables.
- `MIMALLOC_PAGE_RESET=0`: by default, mimalloc will reset (or purge) OS pages when not in use to signal to the OS
that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server)
programs. By setting it to `0` no such page resets will be done which can improve performance for programs that are not long
- running. As an alternative, the `MIMALLOC_RESET_DELAY=`<msecs> can be set higher (100ms by default) to make the page
+ running. As an alternative, the `MIMALLOC_DECOMMIT_DELAY=`<msecs> can be set higher (100ms by default) to make the page
reset occur less frequently instead of turning it off completely.
- `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly
improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs
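
// Usage sketch for the options documented above (illustrative values, using the
// public options API from mimalloc.h): the environment variables MIMALLOC_PAGE_RESET
// and MIMALLOC_DECOMMIT_DELAY have programmatic equivalents that can be set early at startup.
#include <mimalloc.h>

static void configure_decommit_sketch(void) {
  mi_option_set_enabled(mi_option_page_reset, false);  // same effect as MIMALLOC_PAGE_RESET=0
  mi_option_set(mi_option_decommit_delay, 250);        // same effect as MIMALLOC_DECOMMIT_DELAY=250 (msecs)
}
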
diff --git a/ide/vs2017/mimalloc-override.vcxproj b/ide/vs2017/mimalloc-override.vcxproj
index a1266dc..a87b69a 100644
--- a/ide/vs2017/mimalloc-override.vcxproj
+++ b/ide/vs2017/mimalloc-override.vcxproj
@@ -236,7 +236,6 @@
<ClCompile Include="..\..\src\bitmap.c" />
<ClCompile Include="..\..\src\heap.c" />
<ClCompile Include="..\..\src\init.c" />
- <ClCompile Include="..\..\src\region.c" />
<ClCompile Include="..\..\src\options.c" />
<ClCompile Include="..\..\src\os.c" />
<ClCompile Include="..\..\src\page-queue.c">
@@ -247,6 +246,7 @@
</ClCompile>
<ClCompile Include="..\..\src\page.c" />
<ClCompile Include="..\..\src\random.c" />
+ <ClCompile Include="..\..\src\segment-cache.c" />
<ClCompile Include="..\..\src\segment.c" />
<ClCompile Include="..\..\src\stats.c" />
</ItemGroup>
diff --git a/ide/vs2017/mimalloc-override.vcxproj.filters b/ide/vs2017/mimalloc-override.vcxproj.filters
index e045ed8..d01f931 100644
--- a/ide/vs2017/mimalloc-override.vcxproj.filters
+++ b/ide/vs2017/mimalloc-override.vcxproj.filters
@@ -64,9 +64,6 @@
<ClCompile Include="..\..\src\init.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\src\region.c">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\src\alloc-override.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -82,5 +79,8 @@
<ClCompile Include="..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\src\segment-cache.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
</Project> \ No newline at end of file
diff --git a/ide/vs2017/mimalloc.vcxproj b/ide/vs2017/mimalloc.vcxproj
index 8102b9f..41fb77c 100644
--- a/ide/vs2017/mimalloc.vcxproj
+++ b/ide/vs2017/mimalloc.vcxproj
@@ -233,7 +233,6 @@
<ClCompile Include="..\..\src\bitmap.c" />
<ClCompile Include="..\..\src\heap.c" />
<ClCompile Include="..\..\src\init.c" />
- <ClCompile Include="..\..\src\region.c" />
<ClCompile Include="..\..\src\options.c" />
<ClCompile Include="..\..\src\page-queue.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
@@ -243,6 +242,7 @@
</ClCompile>
<ClCompile Include="..\..\src\page.c" />
<ClCompile Include="..\..\src\random.c" />
+ <ClCompile Include="..\..\src\segment-cache.c" />
<ClCompile Include="..\..\src\segment.c" />
<ClCompile Include="..\..\src\os.c" />
<ClCompile Include="..\..\src\stats.c" />
diff --git a/ide/vs2017/mimalloc.vcxproj.filters b/ide/vs2017/mimalloc.vcxproj.filters
index 500292c..0541764 100644
--- a/ide/vs2017/mimalloc.vcxproj.filters
+++ b/ide/vs2017/mimalloc.vcxproj.filters
@@ -47,10 +47,10 @@
<ClCompile Include="..\..\src\init.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\src\region.c">
+ <ClCompile Include="..\..\src\alloc-posix.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\src\alloc-posix.c">
+ <ClCompile Include="..\..\src\arena.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\src\arena.c">
@@ -62,6 +62,9 @@
<ClCompile Include="..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\src\segment-cache.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(ProjectDir)..\..\include\mimalloc.h">
diff --git a/ide/vs2019/mimalloc-override.vcxproj b/ide/vs2019/mimalloc-override.vcxproj
index 182ddab..4136e57 100644
--- a/ide/vs2019/mimalloc-override.vcxproj
+++ b/ide/vs2019/mimalloc-override.vcxproj
@@ -236,7 +236,6 @@
<ClCompile Include="..\..\src\bitmap.c" />
<ClCompile Include="..\..\src\heap.c" />
<ClCompile Include="..\..\src\init.c" />
- <ClCompile Include="..\..\src\region.c" />
<ClCompile Include="..\..\src\options.c" />
<ClCompile Include="..\..\src\os.c" />
<ClCompile Include="..\..\src\page-queue.c">
@@ -247,6 +246,7 @@
</ClCompile>
<ClCompile Include="..\..\src\page.c" />
<ClCompile Include="..\..\src\random.c" />
+ <ClCompile Include="..\..\src\segment-cache.c" />
<ClCompile Include="..\..\src\segment.c" />
<ClCompile Include="..\..\src\stats.c" />
</ItemGroup>
diff --git a/ide/vs2019/mimalloc-override.vcxproj.filters b/ide/vs2019/mimalloc-override.vcxproj.filters
index c06fd1d..d6b7b5a 100644
--- a/ide/vs2019/mimalloc-override.vcxproj.filters
+++ b/ide/vs2019/mimalloc-override.vcxproj.filters
@@ -19,9 +19,6 @@
<ClCompile Include="..\..\src\init.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\src\region.c">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\src\os.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -49,6 +46,9 @@
<ClCompile Include="..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\src\segment-cache.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(ProjectDir)..\..\include\mimalloc.h">
@@ -70,7 +70,7 @@
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="..\..\src\bitmap.h">
- <Filter>Source Files</Filter>
+ <Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj
index c12955c..9f967d9 100644
--- a/ide/vs2019/mimalloc.vcxproj
+++ b/ide/vs2019/mimalloc.vcxproj
@@ -225,7 +225,6 @@
</ClCompile>
<ClCompile Include="..\..\src\heap.c" />
<ClCompile Include="..\..\src\init.c" />
- <ClCompile Include="..\..\src\region.c" />
<ClCompile Include="..\..\src\options.c" />
<ClCompile Include="..\..\src\page-queue.c">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
@@ -235,6 +234,7 @@
</ClCompile>
<ClCompile Include="..\..\src\page.c" />
<ClCompile Include="..\..\src\random.c" />
+ <ClCompile Include="..\..\src\segment-cache.c" />
<ClCompile Include="..\..\src\segment.c" />
<ClCompile Include="..\..\src\os.c" />
<ClCompile Include="..\..\src\stats.c" />
diff --git a/ide/vs2019/mimalloc.vcxproj.filters b/ide/vs2019/mimalloc.vcxproj.filters
index 4cd0eb2..92be7cb 100644
--- a/ide/vs2019/mimalloc.vcxproj.filters
+++ b/ide/vs2019/mimalloc.vcxproj.filters
@@ -22,9 +22,6 @@
<ClCompile Include="..\..\src\init.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="..\..\src\region.c">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="..\..\src\options.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -52,6 +49,9 @@
<ClCompile Include="..\..\src\bitmap.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\src\segment-cache.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="$(ProjectDir)..\..\include\mimalloc.h">
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index 16be125..1d0dc53 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -19,6 +19,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_CACHE_LINE 64
#if defined(_MSC_VER)
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
+#pragma warning(disable:26812) // unscoped enum warning
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
@@ -76,31 +77,40 @@ size_t _mi_os_page_size(void);
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocate thread local data
void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
+
+bool _mi_os_protect(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats);
+bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
+bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
+// bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
-// memory.c
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* id, mi_os_tld_t* tld);
-void _mi_mem_free(void* p, size_t size, size_t id, bool fully_committed, bool any_reset, mi_os_tld_t* tld);
-
-bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld);
-bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
-bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
-bool _mi_mem_protect(void* addr, size_t size);
-bool _mi_mem_unprotect(void* addr, size_t size);
+// arena.c
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+void _mi_arena_free(void* p, size_t size, size_t memid, bool is_committed, mi_os_tld_t* tld);
-void _mi_mem_collect(mi_os_tld_t* tld);
+// "segment-cache.c"
+void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld);
+bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
+void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
+void _mi_segment_map_allocated_at(const mi_segment_t* segment);
+void _mi_segment_map_freed_at(const mi_segment_t* segment);
// "segment.c"
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_wsize, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size); // page start for any page
+bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
+void _mi_segment_thread_collect(mi_segments_tld_t* tld);
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
-void _mi_segment_thread_collect(mi_segments_tld_t* tld);
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(void);
+void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
@@ -226,6 +236,18 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
}
}
+// Align downwards
+static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ uintptr_t mask = alignment - 1;
+ if ((alignment & mask) == 0) { // power of two?
+ return (sz & ~mask);
+ }
+ else {
+ return ((sz / alignment) * alignment);
+ }
+}
+
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
mi_assert_internal(divider != 0);
@@ -240,6 +262,7 @@ static inline bool mi_mem_is_zero(void* p, size_t size) {
return true;
}
+
// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
@@ -411,35 +434,47 @@ static inline mi_segment_t* _mi_ptr_segment(const void* p) {
return (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);
}
+static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
+ mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0);
+ return (mi_page_t*)(s);
+}
+
+static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
+ mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0);
+ return (mi_slice_t*)(p);
+}
+
// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
- mi_segment_t* segment = _mi_ptr_segment(page);
- mi_assert_internal(segment == NULL || page == &segment->pages[page->segment_idx]);
+ mi_segment_t* segment = _mi_ptr_segment(page);
+ mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
return segment;
}
-// used internally
-static inline size_t _mi_segment_page_idx_of(const mi_segment_t* segment, const void* p) {
- // if (segment->page_size > MI_SEGMENT_SIZE) return &segment->pages[0]; // huge pages
- ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
- mi_assert_internal(diff >= 0 && (size_t)diff < MI_SEGMENT_SIZE);
- size_t idx = (size_t)diff >> segment->page_shift;
- mi_assert_internal(idx < segment->capacity);
- mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || idx == 0);
- return idx;
+static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
+ mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
+ mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
+ mi_assert_internal(start->slice_offset == 0);
+ mi_assert_internal(start + start->slice_count > slice);
+ return start;
}
// Get the page containing the pointer
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
- size_t idx = _mi_segment_page_idx_of(segment, p);
- return &((mi_segment_t*)segment)->pages[idx];
+ ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
+ mi_assert_internal(diff >= 0 && diff < (ptrdiff_t)MI_SEGMENT_SIZE);
+ size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
+ mi_assert_internal(idx < segment->slice_entries);
+ mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
+ mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
+ mi_assert_internal(slice->slice_offset == 0);
+ mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
+ return mi_slice_to_page(slice);
}
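
// Worked example (sketch, 64-bit defaults): resolving the owning page of a pointer `p`
// with the slice machinery above is constant time:
//   segment = (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);      // segment start (64MiB aligned)
//   idx     = ((uint8_t*)p - (uint8_t*)segment) >> MI_SEGMENT_SLICE_SHIFT;  // which 64KiB slice
//   slice   = &segment->slices[idx];                                 // possibly an interior slice
//   page    = (mi_page_t*)((uint8_t*)slice - slice->slice_offset);   // back up to the page slice
// i.e. a mask, a shift, and one subtraction -- no search over pages is needed in `mi_free`.
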
// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
- const size_t bsize = page->xblock_size;
- mi_assert_internal(bsize > 0 && (bsize%sizeof(void*)) == 0);
- return _mi_segment_page_start(segment, page, bsize, page_size, NULL);
+ return _mi_segment_page_start(segment, page, page_size);
}
// Get the page containing the pointer
@@ -456,7 +491,7 @@ static inline size_t mi_page_block_size(const mi_page_t* page) {
}
else {
size_t psize;
- _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
+ _mi_segment_page_start(_mi_page_segment(page), page, &psize);
return psize;
}
}
@@ -467,6 +502,14 @@ static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
return mi_page_block_size(page) - MI_PADDING_SIZE;
}
+// size of a segment
+static inline size_t mi_segment_size(mi_segment_t* segment) {
+ return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
+}
+
+static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
+ return (uint8_t*)segment + mi_segment_size(segment);
+}
// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
@@ -586,12 +629,13 @@ static inline bool mi_is_in_same_segment(const void* p, const void* q) {
}
static inline bool mi_is_in_same_page(const void* p, const void* q) {
- mi_segment_t* segmentp = _mi_ptr_segment(p);
- mi_segment_t* segmentq = _mi_ptr_segment(q);
- if (segmentp != segmentq) return false;
- size_t idxp = _mi_segment_page_idx_of(segmentp, p);
- size_t idxq = _mi_segment_page_idx_of(segmentq, q);
- return (idxp == idxq);
+ mi_segment_t* segment = _mi_ptr_segment(p);
+ if (_mi_ptr_segment(q) != segment) return false;
+ // assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
+ mi_page_t* page = _mi_segment_page_of(segment, p);
+ size_t psize;
+ uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+ return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
@@ -656,6 +700,52 @@ static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, c
#endif
}
+
+// -------------------------------------------------------------------
+// commit mask
+// -------------------------------------------------------------------
+
+static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ cm->mask[i] = 0;
+ }
+}
+
+static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ cm->mask[i] = ~((size_t)0);
+ }
+}
+
+static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if (cm->mask[i] != 0) return false;
+ }
+ return true;
+}
+
+static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if (cm->mask[i] != ~((size_t)0)) return false;
+ }
+ return true;
+}
+
+// defined in `segment.c`:
+size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
+size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
+
+#define mi_commit_mask_foreach(cm,idx,count) \
+ idx = 0; \
+ while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
+
+#define mi_commit_mask_foreach_end() \
+ idx += count; \
+ }
+
+
+
+
// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------
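
// Usage sketch for the commit-mask helpers above (the function name is illustrative;
// the real accounting lives in segment.c): walk the runs of set bits and sum the
// committed bytes, where each bit covers one MI_COMMIT_SIZE chunk of the segment.
static size_t mi_commit_mask_committed_size_sketch(const mi_commit_mask_t* cm) {
  size_t total = 0;
  size_t idx;
  size_t count;
  mi_commit_mask_foreach(cm, idx, count) {
    // `idx` is the first bit of this run of 1s, `count` its length in bits
    total += count * MI_COMMIT_SIZE;
  }
  mi_commit_mask_foreach_end();
  return total;
}
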
diff --git a/include/mimalloc-types.h b/include/mimalloc-types.h
index be3cf50..310fb92 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc-types.h
@@ -128,44 +128,59 @@ typedef int32_t mi_ssize_t;
// ------------------------------------------------------
// Main tuning parameters for segment and page sizes
-// Sizes for 64-bit, divide by two for 32-bit
-#define MI_SMALL_PAGE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB
-#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
-#define MI_LARGE_PAGE_SHIFT ( 3 + MI_MEDIUM_PAGE_SHIFT) // 4MiB
-#define MI_SEGMENT_SHIFT ( MI_LARGE_PAGE_SHIFT) // 4MiB
+// Sizes for 64-bit (usually divide by two for 32-bit)
+#define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
+
+#if MI_INTPTR_SIZE > 4
+#define MI_SEGMENT_SHIFT (10 + MI_SEGMENT_SLICE_SHIFT) // 64MiB
+#else
+#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
+#endif
+
+#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB
+#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
+
// Derived constants
#define MI_SEGMENT_SIZE (MI_ZU(1)<<MI_SEGMENT_SHIFT)
+#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK (MI_SEGMENT_SIZE - 1)
+#define MI_SEGMENT_SLICE_SIZE (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
+#define MI_SLICES_PER_SEGMENT (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
#define MI_SMALL_PAGE_SIZE (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
-#define MI_LARGE_PAGE_SIZE (MI_ZU(1)<<MI_LARGE_PAGE_SHIFT)
-
-#define MI_SMALL_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_SMALL_PAGE_SIZE)
-#define MI_MEDIUM_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_MEDIUM_PAGE_SIZE)
-#define MI_LARGE_PAGES_PER_SEGMENT (MI_SEGMENT_SIZE/MI_LARGE_PAGE_SIZE)
-// The max object size are checked to not waste more than 12.5% internally over the page sizes.
-// (Except for large pages since huge objects are allocated in 4MiB chunks)
-#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 16KiB
-#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128KiB
-#define MI_LARGE_OBJ_SIZE_MAX (MI_LARGE_PAGE_SIZE/2) // 2MiB
+#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 8KiB on 64-bit
+#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128KiB on 64-bit
+#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
+#define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/2) // 32MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_HUGE_OBJ_SIZE_MAX (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE) // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE (73U)
-#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
+#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif
#if (MI_ALIGNMENT_MAX > MI_SEGMENT_SIZE/2)
#error "mimalloc internal: the max aligned boundary is too large for the segment size"
#endif
+#if (MI_ALIGNED_MAX % MI_SEGMENT_SLICE_SIZE != 0)
+#error "mimalloc internal: the max aligned boundary must be an integral multiple of the segment slice size"
+#endif
+
+// Maximum slice offset (15)
+#define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
// Used as a special value to encode block sizes in 32 bits.
-#define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
+#define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
+
+// blocks up to this size are always allocated aligned
+#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
+
+
// ------------------------------------------------------
@@ -253,18 +268,18 @@ typedef uintptr_t mi_thread_free_t;
// will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
// "owned" by the segment
- uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
- uint8_t segment_in_use:1; // `true` if the segment allocated this page
- uint8_t is_reset:1; // `true` if the page memory was reset
- uint8_t is_committed:1; // `true` if the page virtual memory is committed
- uint8_t is_zero_init:1; // `true` if the page was zero initialized
+ uint32_t slice_count; // slices in this page (0 if not a page)
+ uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
+ uint8_t is_reset : 1; // `true` if the page memory was reset
+ uint8_t is_committed : 1; // `true` if the page virtual memory is committed
+ uint8_t is_zero_init : 1; // `true` if the page was zero initialized
// layout like this to optimize access in `mi_malloc` and `mi_free`
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
uint16_t reserved; // number of blocks reserved in memory
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
- uint8_t is_zero:1; // `true` if the blocks in the free list are zero initialized
- uint8_t retire_expire:7; // expiration count for retired blocks
+ uint8_t is_zero : 1; // `true` if the blocks in the free list are zero initialized
+ uint8_t retire_expire : 7; // expiration count for retired blocks
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
#ifdef MI_ENCODE_FREELIST
@@ -273,51 +288,95 @@ typedef struct mi_page_s {
uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
uint32_t xblock_size; // size available in each block (always `>0`)
- mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
+ mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
_Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
_Atomic(uintptr_t) xheap;
-
- struct mi_page_s* next; // next page owned by this thread with the same `block_size`
- struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
+
+ struct mi_page_s* next; // next page owned by this thread with the same `block_size`
+ struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
+
+ // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
+ #if MI_INTPTR_SIZE==8
+ uintptr_t padding[1];
+ #endif
} mi_page_t;
typedef enum mi_page_kind_e {
MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
- MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages inside a segment
- MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment
- MI_PAGE_HUGE // huge blocks (>512KiB) are put into a single page in a segment of the exact size (but still 2MiB aligned)
+ MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment
+ MI_PAGE_LARGE, // larger blocks go into a page of just one block
+ MI_PAGE_HUGE, // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;
-// Segments are large allocated memory blocks (2MiB on 64 bit) from
+typedef enum mi_segment_kind_e {
+ MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
+ MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
+} mi_segment_kind_t;
+
+// ------------------------------------------------------
+// A segment holds a commit mask where a bit is set if
+// the corresponding MI_COMMIT_SIZE area is committed.
+// The MI_COMMIT_SIZE must be a multiple of the slice
+// size. If it is equal we have the most fine grained
+// decommit (but setting it higher can be more efficient).
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
+// be committed in one go which can be set higher than
+// MI_COMMIT_SIZE for efficiency (while the decommit mask
+// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
+// ------------------------------------------------------
+
+#define MI_MINIMAL_COMMIT_SIZE (2*MI_MiB)
+#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
+#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
+#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
+
+#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
+#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
+#endif
+
+typedef struct mi_commit_mask_s {
+ size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
+} mi_commit_mask_t;
+
+typedef mi_page_t mi_slice_t;
+typedef int64_t mi_msecs_t;
+
+
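
// Worked sizing example (64-bit, with the defaults above): MI_SEGMENT_SIZE is 64MiB
// and MI_COMMIT_SIZE is one 64KiB slice, so
//   MI_COMMIT_MASK_BITS        = 64MiB / 64KiB = 1024 bits
//   MI_COMMIT_MASK_FIELD_COUNT = 1024 / 64     = 16 size_t words per mask
// and the 2MiB MI_MINIMAL_COMMIT_SIZE corresponds to committing 32 mask bits in one go.
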
+// Segments are large allocated memory blocks (64MiB on 64-bit) from
// the OS. Inside segments we allocate fixed-size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
- // memory fields
- size_t memid; // id for the os-level memory manager
- bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
- bool mem_is_committed; // `true` if the whole segment is eagerly committed
+ size_t memid; // memory id for arena allocation
+ bool mem_is_pinned; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
+ bool mem_is_large; // in large/huge os pages?
+ bool mem_is_committed; // `true` if the whole segment is eagerly committed
+
+ bool allow_decommit;
+ mi_msecs_t decommit_expire;
+ mi_commit_mask_t decommit_mask;
+ mi_commit_mask_t commit_mask;
- // segment fields
_Atomic(struct mi_segment_s*) abandoned_next;
- struct mi_segment_s* next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
- struct mi_segment_s* prev;
- size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
- size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
+ // from here is zero initialized
+ struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
+
+ size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+ size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
+ size_t used; // count of pages in use
+ uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
- size_t used; // count of pages in use (`used <= capacity`)
- size_t capacity; // count of available pages (`#free + used`)
- size_t segment_size; // for huge pages this may be different from `MI_SEGMENT_SIZE`
- size_t segment_info_size;// space we are using from the first page for segment meta-data and possible guard pages.
- uintptr_t cookie; // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
+ size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
+ size_t segment_info_slices; // initial slices we are using for segment info and possible guard pages.
// layout like this to optimize access in `mi_free`
- size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
+ mi_segment_kind_t kind;
_Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
- mi_page_kind_t page_kind; // kind of pages: small, medium, large, or huge
- mi_page_t pages[1]; // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
+ size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
+ mi_slice_t slices[MI_SLICES_PER_SEGMENT];
} mi_segment_t;
@@ -459,7 +518,7 @@ typedef struct mi_stats_s {
mi_stat_count_t threads;
mi_stat_count_t normal;
mi_stat_count_t huge;
- mi_stat_count_t giant;
+ mi_stat_count_t large;
mi_stat_count_t malloc;
mi_stat_count_t segments_cache;
mi_stat_counter_t pages_extended;
@@ -469,7 +528,7 @@ typedef struct mi_stats_s {
mi_stat_counter_t searches;
mi_stat_counter_t normal_count;
mi_stat_counter_t huge_count;
- mi_stat_counter_t giant_count;
+ mi_stat_counter_t large_count;
#if MI_STAT>1
mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
@@ -498,13 +557,15 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
// Thread Local data
// ------------------------------------------------------
-typedef int64_t mi_msecs_t;
+// A "span" is is an available range of slices. The span queues keep
+// track of slice spans of at most the given `slice_count` (but more than the previous size class).
+typedef struct mi_span_queue_s {
+ mi_slice_t* first;
+ mi_slice_t* last;
+ size_t slice_count;
+} mi_span_queue_t;
-// Queue of segments
-typedef struct mi_segment_queue_s {
- mi_segment_t* first;
- mi_segment_t* last;
-} mi_segment_queue_t;
+#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
// OS thread local data
typedef struct mi_os_tld_s {
@@ -512,11 +573,10 @@ typedef struct mi_os_tld_s {
mi_stats_t* stats; // points to tld stats
} mi_os_tld_t;
+
// Segments thread local data
typedef struct mi_segments_tld_s {
- mi_segment_queue_t small_free; // queue of segments with free small pages
- mi_segment_queue_t medium_free; // queue of segments with free medium pages
- mi_page_queue_t pages_reset; // queue of freed pages that can be reset
+ mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
size_t count; // current number of segments;
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments
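
// Sketch of how the span queues above are selected (illustrative only; the real code
// in segment.c uses a logarithmic bin function rather than a scan): a free span of
// `slice_count` slices belongs in the first queue whose `slice_count` is at least as
// large, i.e. the queue holding spans of at most that many slices.
static mi_span_queue_t* mi_span_queue_for_sketch(size_t slice_count, mi_segments_tld_t* tld) {
  size_t bin = 0;
  while (bin < MI_SEGMENT_BIN_MAX && tld->spans[bin].slice_count < slice_count) {
    bin++;
  }
  return &tld->spans[bin];  // very large spans end up in the last (MI_SEGMENT_BIN_MAX) queue
}
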
diff --git a/include/mimalloc.h b/include/mimalloc.h
index 91ad352..5e4c374 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
-#define MI_MALLOC_VERSION 176 // major + 2 digits minor
+#define MI_MALLOC_VERSION 206 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@@ -272,6 +272,8 @@ mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size
mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
+mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;
+
// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
@@ -301,28 +303,32 @@ mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size
typedef enum mi_option_e {
// stable options
- mi_option_show_errors, // print error messages
- mi_option_show_stats, // print statistics on termination
- mi_option_verbose, // print verbose messages
- // the following options are experimental (see src/options.h)
- mi_option_eager_commit,
- mi_option_eager_region_commit,
- mi_option_reset_decommits,
+ mi_option_show_errors,
+ mi_option_show_stats,
+ mi_option_verbose,
+ // some of the following options are experimental
+ // (deprecated options are kept for binary backward compatibility with v1.x versions)
+ mi_option_eager_commit,
+ mi_option_deprecated_eager_region_commit,
+ mi_option_deprecated_reset_decommits,
mi_option_large_os_pages, // use large (2MiB) OS pages, implies eager commit
mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB) at startup
mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
mi_option_reserve_os_memory, // reserve specified amount of OS memory at startup
- mi_option_segment_cache,
- mi_option_page_reset,
- mi_option_abandoned_page_reset,
- mi_option_segment_reset,
+ mi_option_segment_cache,
+ mi_option_page_reset,
+ mi_option_abandoned_page_decommit,
+ mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay,
- mi_option_reset_delay,
+ mi_option_decommit_delay,
mi_option_use_numa_nodes, // 0 = use available numa nodes, otherwise use at most N nodes.
mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only reserved arenas)
mi_option_os_tag,
mi_option_max_errors,
mi_option_max_warnings,
+ mi_option_allow_decommit,
+ mi_option_segment_decommit_delay,
+ mi_option_decommit_extend_delay,
_mi_option_last
} mi_option_t;
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index 6b15e65..fce0fd7 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -24,7 +24,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
const size_t padsize = size + MI_PADDING_SIZE;
// use regular allocation if it is guaranteed to fit the alignment constraints
- if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
+ if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
return p;
diff --git a/src/alloc.c b/src/alloc.c
index cd4afa1..8cf7242 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -324,11 +324,11 @@ static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
static void mi_stat_huge_free(const mi_page_t* page) {
mi_heap_t* const heap = mi_heap_get_default();
const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
- if (bsize <= MI_HUGE_OBJ_SIZE_MAX) {
- mi_heap_stat_decrease(heap, huge, bsize);
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
}
else {
- mi_heap_stat_decrease(heap, giant, bsize);
+ mi_heap_stat_decrease(heap, huge, bsize);
}
}
#else
@@ -353,8 +353,8 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
#endif
// huge page segments are always abandoned and can be freed immediately
- mi_segment_t* const segment = _mi_page_segment(page);
- if (segment->page_kind==MI_PAGE_HUGE) {
+ mi_segment_t* segment = _mi_page_segment(page);
+ if (segment->kind==MI_SEGMENT_HUGE) {
mi_stat_huge_free(page);
_mi_segment_huge_page_free(segment, page, block);
return;
@@ -483,10 +483,10 @@ void mi_free(void* p) mi_attr_noexcept
mi_threadid_t tid = _mi_thread_id();
mi_page_t* const page = _mi_segment_page_of(segment, p);
- mi_block_t* const block = (mi_block_t*)p;
if (mi_likely(tid == mi_atomic_load_relaxed(&segment->thread_id) && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
// local, and not full or aligned
+ mi_block_t* block = (mi_block_t*)(p);
if (mi_unlikely(mi_check_is_double_free(page,block))) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
diff --git a/src/arena.c b/src/arena.c
index 133f154..6b1e951 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -7,23 +7,18 @@ terms of the MIT license. A copy of the license can be found in the file
/* ----------------------------------------------------------------------------
"Arenas" are fixed area's of OS memory from which we can allocate
-large blocks (>= MI_ARENA_BLOCK_SIZE, 32MiB).
+large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
In contrast to the rest of mimalloc, the arenas are shared between
threads and need to be accessed using atomic operations.
Currently arenas are only used for huge OS page (1GiB) reservations,
-otherwise it delegates to direct allocation from the OS.
+or direct OS memory reservations -- otherwise it delegates to direct allocation from the OS.
In the future, we can expose an API to manually add more kinds of arenas
which is sometimes needed for embedded devices or shared memory for example.
(We can also employ this with WASI or `sbrk` systems to reserve large arenas
on demand and be able to reuse them efficiently).
-The arena allocation needs to be thread safe and we use an atomic
-bitmap to allocate. The current implementation of the bitmap can
-only do this within a field (`size_t`) so we can allocate at most
-blocks of 2GiB (64*32MiB) and no object can cross the boundary. This
-can lead to fragmentation but fortunately most objects will be regions
-of 256MiB in practice.
+The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
#include "mimalloc-internal.h"
@@ -38,7 +33,6 @@ of 256MiB in practice.
// os.c
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* stats);
void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats);
void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
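
// Usage sketch for the arena layer (public API from mimalloc.h; the size is illustrative):
// reserve a block of OS memory up front so that later segment allocations can be served
// from the arena bitmap instead of going to the OS each time.
#include <mimalloc.h>

static int reserve_arena_sketch(void) {
  // reserve 256MiB, eagerly committed, allowing large OS pages if available
  if (mi_reserve_os_memory((size_t)256 * 1024 * 1024, true /* commit */, true /* allow_large */) != 0) {
    return -1;
  }
  mi_debug_show_arenas();  // with MIMALLOC_VERBOSE=1, dumps the per-arena in-use block bitmaps
  return 0;
}
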
@@ -46,13 +40,17 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+
/* -----------------------------------------------------------
Arena allocation
----------------------------------------------------------- */
-#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
-#define MI_ARENA_BLOCK_SIZE (4*MI_SEGMENT_ALIGN) // 32MiB
-#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 16MiB
+
+// Block info: bit 0 contains the `in_use` bit, the upper bits the
+// size in count of arena blocks.
+typedef uintptr_t mi_block_info_t;
+#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB on 64-bit (must be at least MI_SEGMENT_ALIGN)
+#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB on 64-bit
#define MI_MAX_ARENAS (64) // not more than 256 (since we use 8 bits in the memid)
// A memory arena descriptor
@@ -105,9 +103,9 @@ static size_t mi_block_count_of_size(size_t size) {
----------------------------------------------------------- */
static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
- size_t idx = mi_atomic_load_acquire(&arena->search_idx); // start from last search
+ size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
- mi_atomic_store_release(&arena->search_idx, idx); // start search from here next time
+ mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
return true;
};
return false;
@@ -118,8 +116,8 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
Arena Allocation
----------------------------------------------------------- */
-static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
- bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+static mi_decl_noinline void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
+ bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
{
mi_bitmap_index_t bitmap_index;
if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
@@ -151,6 +149,48 @@ static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t n
return p;
}
+static mi_decl_noinline void* mi_arena_allocate(int numa_node, size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+{
+ MI_UNUSED_RELEASE(alignment);
+ mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ const size_t bcount = mi_block_count_of_size(size);
+ if (mi_likely(max_arena == 0)) return NULL;
+ mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
+
+ // try numa affine allocation
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+ if (arena==NULL) break; // end reached
+ if ((arena->numa_node<0 || arena->numa_node==numa_node) && // numa local?
+ (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
+ {
+ void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
+ mi_assert_internal((uintptr_t)p % alignment == 0);
+ if (p != NULL) {
+ return p;
+ }
+ }
+ }
+
+ // try from another numa node instead..
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+ if (arena==NULL) break; // end reached
+ if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
+ (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
+ {
+ void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
+ mi_assert_internal((uintptr_t)p % alignment == 0);
+ if (p != NULL) {
+ return p;
+ }
+ }
+ }
+ return NULL;
+}
+
+
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_pinned, bool* is_zero,
size_t* memid, mi_os_tld_t* tld)
{
@@ -160,40 +200,14 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool*
*is_zero = false;
*is_pinned = false;
- // try to allocate in an arena if the alignment is small enough
- // and the object is not too large or too small.
- if (alignment <= MI_SEGMENT_ALIGN &&
- size >= MI_ARENA_MIN_OBJ_SIZE &&
- mi_atomic_load_relaxed(&mi_arena_count) > 0)
- {
- const size_t bcount = mi_block_count_of_size(size);
- const int numa_node = _mi_os_numa_node(tld); // current numa node
-
- mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
- // try numa affine allocation
- for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
- mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
- if (arena==NULL) break; // end reached
- if ((arena->numa_node<0 || arena->numa_node==numa_node) && // numa local?
- (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
- {
- void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
- mi_assert_internal((uintptr_t)p % alignment == 0);
- if (p != NULL) return p;
- }
- }
- // try from another numa node instead..
- for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
- mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
- if (arena==NULL) break; // end reached
- if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
- (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
- {
- void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_pinned, is_zero, memid, tld);
- mi_assert_internal((uintptr_t)p % alignment == 0);
- if (p != NULL) return p;
- }
- }
+ bool default_large = false;
+ if (large==NULL) large = &default_large; // ensure `large != NULL`
+ const int numa_node = _mi_os_numa_node(tld); // current numa node
+
+ // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN) {
+ void* p = mi_arena_allocate(numa_node, size, alignment, commit, large, is_pinned, is_zero, memid, tld);
+ if (p != NULL) return p;
}
// finally, fall back to the OS
@@ -217,13 +231,14 @@ void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, b
Arena free
----------------------------------------------------------- */
-void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats) {
- mi_assert_internal(size > 0 && stats != NULL);
+void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_os_tld_t* tld) {
+ mi_assert_internal(size > 0 && tld->stats != NULL);
if (p==NULL) return;
if (size==0) return;
+
if (memid == MI_MEMID_OS) {
// was a direct OS allocation, pass through
- _mi_os_free_ex(p, size, all_committed, stats);
+ _mi_os_free_ex(p, size, all_committed, tld->stats);
}
else {
// allocated in an arena
@@ -250,8 +265,7 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
}
else {
mi_assert_internal(arena->blocks_committed != NULL);
- _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, stats); // ok if this fails
- // todo: use reset instead of decommit on windows?
+ _mi_os_decommit(p, blocks * MI_ARENA_BLOCK_SIZE, tld->stats); // ok if this fails
_mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
}
// and make it available to others again
@@ -341,6 +355,33 @@ int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noe
return 0;
}
+static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) {
+ size_t inuse_count = 0;
+ for (size_t i = 0; i < field_count; i++) {
+ char buf[MI_BITMAP_FIELD_BITS + 1];
+ uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
+ for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) {
+ bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
+ if (inuse) inuse_count++;
+ buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.');
+ }
+ buf[MI_BITMAP_FIELD_BITS] = 0;
+ _mi_verbose_message("%s%s\n", prefix, buf);
+ }
+ return inuse_count;
+}
+
+void mi_debug_show_arenas(void) mi_attr_noexcept {
+ size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
+ for (size_t i = 0; i < max_arenas; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+ if (arena == NULL) break;
+ size_t inuse_count = 0;
+ _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count);
+ inuse_count += mi_debug_show_bitmap(" ", arena->blocks_inuse, arena->field_count);
+ _mi_verbose_message(" blocks in use ('x'): %zu\n", inuse_count);
+ }
+}
/* -----------------------------------------------------------
Reserve a huge page arena.
diff --git a/src/bitmap.c b/src/bitmap.c
index 51926bb..af6de0a 100644
--- a/src/bitmap.c
+++ b/src/bitmap.c
@@ -35,17 +35,17 @@ static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
}
-
/* -----------------------------------------------------------
Claim a bit sequence atomically
----------------------------------------------------------- */
// Try to atomically claim a sequence of `count` bits in a single
// field at `idx` in `bitmap`. Returns `true` on success.
-bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
+inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
{
mi_assert_internal(bitmap_idx != NULL);
mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
+ mi_assert_internal(count > 0);
mi_bitmap_field_t* field = &bitmap[idx];
size_t map = mi_atomic_load_relaxed(field);
if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
@@ -94,9 +94,9 @@ bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_
return false;
}
-
+// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
-// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
+// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
size_t idx = start_field_idx;
for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
@@ -118,7 +118,7 @@ bool _mi_bitmap_try_find_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, c
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
-bool mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
const size_t idx = mi_bitmap_index_field(bitmap_idx);
const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
const size_t mask = mi_bitmap_mask_(count, bitidx);
@@ -215,7 +215,7 @@ static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bit
// intermediate fields
while (++field < final_field) {
- newmap = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
+ newmap = MI_BITMAP_FIELD_FULL;
map = 0;
if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
}
@@ -236,7 +236,7 @@ rollback:
// roll back intermediate fields
while (--field > initial_field) {
newmap = 0;
- map = mi_bitmap_mask_(MI_BITMAP_FIELD_BITS, 0);
+ map = MI_BITMAP_FIELD_FULL;
mi_assert_internal(mi_atomic_load_relaxed(field) == map);
mi_atomic_store_release(field, newmap);
}
diff --git a/src/bitmap.h b/src/bitmap.h
index 39ca55b..7bd3106 100644
--- a/src/bitmap.h
+++ b/src/bitmap.h
@@ -40,6 +40,11 @@ static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx
return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
}
+// Create a bit index.
+static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
+ return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
+}
+
// Get the field index from a bit index.
static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
return (bitmap_idx / MI_BITMAP_FIELD_BITS);
@@ -69,7 +74,7 @@ bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fiel
// Set `count` bits at `bitmap_idx` to 0 atomically
// Returns `true` if all `count` bits were 1 previously.
-bool mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
// Set `count` bits at `bitmap_idx` to 1 atomically
// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
diff --git a/src/heap.c b/src/heap.c
index d560fbc..4fdfb0b 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -115,17 +115,20 @@ static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
if (heap==NULL || !mi_heap_is_initialized(heap)) return;
- _mi_deferred_free(heap, collect >= MI_FORCE);
+
+ const bool force = collect >= MI_FORCE;
+ _mi_deferred_free(heap, force);
// note: never reclaim on collect but leave it to threads that need storage to reclaim
- if (
- #ifdef NDEBUG
+ const bool force_main =
+ #ifdef NDEBUG
collect == MI_FORCE
- #else
+ #else
collect >= MI_FORCE
- #endif
- && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim)
- {
+ #endif
+ && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim;
+
+ if (force_main) {
// the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
// if all memory is freed by now, all segments should be freed.
_mi_abandoned_reclaim_all(heap, &heap->tld->segments);
@@ -141,20 +144,28 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
_mi_heap_delayed_free(heap);
// collect retired pages
- _mi_heap_collect_retired(heap, collect >= MI_FORCE);
+ _mi_heap_collect_retired(heap, force);
// collect all pages owned by this thread
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
- // collect segment caches
- if (collect >= MI_FORCE) {
+ // collect abandoned segments (in particular, decommit expired parts of segments in the abandoned segment list)
+ // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+ _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);
+
+ // collect segment local caches
+ if (force) {
_mi_segment_thread_collect(&heap->tld->segments);
}
+ // decommit in global segment caches
+ // note: forced decommit can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+ _mi_segment_cache_collect( collect == MI_FORCE, &heap->tld->os);
+
// collect regions on program-exit (or shared library unload)
- if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
- _mi_mem_collect(&heap->tld->os);
+ if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
+ //_mi_mem_collect(&heap->tld->os);
}
}
@@ -272,9 +283,9 @@ static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_
// stats
const size_t bsize = mi_page_block_size(page);
- if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
- if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
- mi_heap_stat_decrease(heap, giant, bsize);
+ if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
}
else {
mi_heap_stat_decrease(heap, huge, bsize);
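
The reworked `mi_heap_collect_ex` above now also walks abandoned segments and the global segment cache, and only forces decommits on an explicit `MI_FORCE` collect. From application code this path is reached through the public `mi_collect`; a brief usage sketch (the behavioral notes are taken from the hunk above, not guarantees of the API):

#include <stdbool.h>
#include <mimalloc.h>

int main(void) {
  void* blocks[1000];
  for (int i = 0; i < 1000; i++) blocks[i] = mi_malloc(16 * 1024);
  for (int i = 0; i < 1000; i++) mi_free(blocks[i]);

  // non-forced collect: deferred frees, retired pages, expired decommits
  mi_collect(false);

  // forced collect (e.g. before a long idle period): also collects the
  // thread-local and global segment caches so memory can go back to the OS
  mi_collect(true);
  return 0;
}
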
diff --git a/src/init.c b/src/init.c
index 854a222..4ab4652 100644
--- a/src/init.c
+++ b/src/init.c
@@ -28,6 +28,9 @@ const mi_page_t _mi_page_empty = {
ATOMIC_VAR_INIT(0), // xthread_free
ATOMIC_VAR_INIT(0), // xheap
NULL, NULL
+ #if MI_INTPTR_SIZE==8
+ , { 0 } // padding
+ #endif
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
@@ -54,8 +57,8 @@ const mi_page_t _mi_page_empty = {
QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
- QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
- QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ }
+ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
+ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }
#define MI_STAT_COUNT_NULL() {0,0,0,0}
@@ -78,6 +81,18 @@ const mi_page_t _mi_page_empty = {
{ 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
MI_STAT_COUNT_END_NULL()
+
+// Empty slice span queues for every bin
+#define SQNULL(sz) { NULL, NULL, sz }
+#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
+ { SQNULL(1), \
+ SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \
+ SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \
+ SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \
+ SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \
+ SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ }
+
+
// --------------------------------------------------------
// Statically allocate an empty heap as the initial
// thread local value for the default heap,
@@ -102,6 +117,17 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
false
};
+#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
+#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os)))
+
+mi_decl_cache_align static const mi_tld_t tld_empty = {
+ 0,
+ false,
+ NULL, NULL,
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, 0, NULL, tld_empty_stats, tld_empty_os }, // segments
+ { 0, tld_empty_stats }, // os
+ { MI_STATS_NULL } // stats
+};
// the thread-local default heap for allocation
mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
@@ -110,11 +136,8 @@ extern mi_heap_t _mi_heap_main;
static mi_tld_t tld_main = {
0, false,
- &_mi_heap_main, &_mi_heap_main,
- { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
- 0, 0, 0, 0, 0, 0, NULL,
- &tld_main.stats, &tld_main.os
- }, // segments
+ &_mi_heap_main, & _mi_heap_main,
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, 0, NULL, &tld_main.stats, &tld_main.os }, // segments
{ 0, &tld_main.stats }, // os
{ MI_STATS_NULL } // stats
};
@@ -190,6 +213,7 @@ static bool _mi_heap_init(void) {
// OS allocated so already zero initialized
mi_tld_t* tld = &td->tld;
mi_heap_t* heap = &td->heap;
+ _mi_memcpy_aligned(tld, &tld_empty, sizeof(*tld));
_mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap));
heap->thread_id = _mi_thread_id();
_mi_random_init(&heap->random);
@@ -241,7 +265,10 @@ static bool _mi_heap_done(mi_heap_t* heap) {
// free if not the main thread
if (heap != &_mi_heap_main) {
- mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
+    // the following assertion does not always hold for huge segments as those are always treated
+    // as abandoned: one may be allocated in one thread but deallocated in another, in which case
+    // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
+ // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
_mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main);
}
#if 0
@@ -517,7 +544,7 @@ void mi_process_init(void) mi_attr_noexcept {
if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
long ksize = mi_option_get(mi_option_reserve_os_memory);
if (ksize > 0) {
- mi_reserve_os_memory((size_t)ksize*MI_KiB, true, true);
+ mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
}
}
}
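
The last hunk above clarifies the units of `mi_option_reserve_os_memory`: the value is given in KiB and forwarded to `mi_reserve_os_memory` with commit and large-page hints. A hedged sketch of calling the public function directly (it returns 0 on success in the current headers; the sizes are illustrative only):

#include <stdio.h>
#include <stdbool.h>
#include <mimalloc.h>

int main(void) {
  // reserve 64 MiB up front, committed, allowing large OS pages --
  // roughly what MIMALLOC_RESERVE_OS_MEMORY=65536 (KiB) would request
  if (mi_reserve_os_memory(64 * 1024 * 1024, true /* commit */, true /* allow large */) != 0) {
    fprintf(stderr, "OS memory reservation failed\n");
  }
  return 0;
}
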
diff --git a/src/options.c b/src/options.c
index 2022d96..537b2e6 100644
--- a/src/options.c
+++ b/src/options.c
@@ -49,51 +49,51 @@ typedef struct mi_option_desc_s {
mi_init_t init; // is it initialized yet? (from the environment)
mi_option_t option; // for debugging: the option index should match the option
const char* name; // option name without `mimalloc_` prefix
+ const char* legacy_name; // potential legacy v1.x option name
} mi_option_desc_t;
-#define MI_OPTION(opt) mi_option_##opt, #opt
-#define MI_OPTION_DESC(opt) {0, UNINIT, MI_OPTION(opt) }
+#define MI_OPTION(opt) mi_option_##opt, #opt, NULL
+#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy
static mi_option_desc_t options[_mi_option_last] =
{
// stable options
-#if MI_DEBUG || defined(MI_SHOW_ERRORS)
+ #if MI_DEBUG || defined(MI_SHOW_ERRORS)
{ 1, UNINIT, MI_OPTION(show_errors) },
-#else
+ #else
{ 0, UNINIT, MI_OPTION(show_errors) },
-#endif
+ #endif
{ 0, UNINIT, MI_OPTION(show_stats) },
{ 0, UNINIT, MI_OPTION(verbose) },
- // the following options are experimental and not all combinations make sense.
- { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
- #if defined(_WIN32) || (MI_INTPTR_SIZE <= 4) // and other OS's without overcommit?
- { 0, UNINIT, MI_OPTION(eager_region_commit) },
- { 1, UNINIT, MI_OPTION(reset_decommits) }, // reset decommits memory
- #else
- { 1, UNINIT, MI_OPTION(eager_region_commit) },
- { 0, UNINIT, MI_OPTION(reset_decommits) }, // reset uses MADV_FREE/MADV_DONTNEED
- #endif
+ // Some of the following options are experimental and not all combinations are valid. Use with care.
+ { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (8MiB) (but see also `eager_commit_delay`)
+ { 0, UNINIT, MI_OPTION(deprecated_eager_region_commit) },
+ { 0, UNINIT, MI_OPTION(deprecated_reset_decommits) },
{ 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
{ 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
{ -1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
{ 0, UNINIT, MI_OPTION(reserve_os_memory) },
{ 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
- { 1, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
- { 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
- { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
-#if defined(__NetBSD__)
+ { 0, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
+ { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_decommit, abandoned_page_reset) },// decommit free page memory when a thread terminates
+ { 0, UNINIT, MI_OPTION(deprecated_segment_reset) },
+ #if defined(__NetBSD__)
{ 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
-#else
+ #elif defined(_WIN32)
+ { 4, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
+ #else
{ 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
-#endif
- { 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
- { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
- { 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
- { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
- { 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
- { 16, UNINIT, MI_OPTION(max_warnings) } // maximum warnings that are output
-
+ #endif
+ { 25, UNINIT, MI_OPTION_LEGACY(decommit_delay, reset_delay) }, // page decommit delay in milli-seconds
+ { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
+ { 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
+ { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
+ { 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
+ { 16, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
+ { 1, UNINIT, MI_OPTION(allow_decommit) }, // decommit slices when no longer used (after decommit_delay milli-seconds)
+ { 500, UNINIT, MI_OPTION(segment_decommit_delay) }, // decommit delay in milli-seconds for freed segments
+ { 2, UNINIT, MI_OPTION(decommit_extend_delay) }
};
static void mi_option_init(mi_option_desc_t* desc);
@@ -538,11 +538,21 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
static void mi_option_init(mi_option_desc_t* desc) {
// Read option value from the environment
+ char s[64+1];
char buf[64+1];
mi_strlcpy(buf, "mimalloc_", sizeof(buf));
mi_strlcat(buf, desc->name, sizeof(buf));
- char s[64+1];
- if (mi_getenv(buf, s, sizeof(s))) {
+ bool found = mi_getenv(buf,s,sizeof(s));
+ if (!found && desc->legacy_name != NULL) {
+ mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ mi_strlcat(buf, desc->legacy_name, sizeof(buf));
+ found = mi_getenv(buf,s,sizeof(s));
+ if (found) {
+ _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name );
+ }
+ }
+
+ if (found) {
size_t len = strlen(s);
if (len >= sizeof(buf)) len = sizeof(buf) - 1;
for (size_t i = 0; i < len; i++) {
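
With the `legacy_name` fallback above, an environment variable such as `MIMALLOC_RESET_DELAY` keeps working but triggers a deprecation warning and maps onto `decommit_delay`. Since the table entries are generated from the enum names (`MI_OPTION(opt)` expands to `mi_option_##opt`), the renamed options can also be reached through the regular option API; a small sketch, assuming the enum constants match the table above:

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // read the current page decommit delay (25 ms by default per the table above)
  long delay = mi_option_get(mi_option_decommit_delay);
  printf("decommit delay: %ld ms\n", delay);

  // raise it for workloads that free and reallocate in quick bursts
  mi_option_set(mi_option_decommit_delay, 100);
  return 0;
}
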
diff --git a/src/os.c b/src/os.c
index bedf44c..f0a26c6 100644
--- a/src/os.c
+++ b/src/os.c
@@ -74,17 +74,6 @@ static void* mi_align_up_ptr(void* p, size_t alignment) {
return (void*)_mi_align_up((uintptr_t)p, alignment);
}
-static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
- mi_assert_internal(alignment != 0);
- uintptr_t mask = alignment - 1;
- if ((alignment & mask) == 0) { // power of two?
- return (sz & ~mask);
- }
- else {
- return ((sz / alignment) * alignment);
- }
-}
-
static void* mi_align_down_ptr(void* p, size_t alignment) {
return (void*)_mi_align_down((uintptr_t)p, alignment);
}
@@ -335,6 +324,9 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size);
-------------------------------------------------------------- */
#ifdef _WIN32
+
+#define MEM_COMMIT_RESERVE (MEM_COMMIT|MEM_RESERVE)
+
static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
#if (MI_INTPTR_SIZE >= 8)
// on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
@@ -528,6 +520,17 @@ static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int pr
return NULL;
}
+static int mi_unix_mmap_fd(void) {
+#if defined(VM_MAKE_TAG)
+  // macOS: tag anonymous pages with a specific ID. (All IDs up to 98 are taken officially but LLVM sanitizers had taken 99)
+ int os_tag = (int)mi_option_get(mi_option_os_tag);
+ if (os_tag < 100 || os_tag > 255) os_tag = 100;
+ return VM_MAKE_TAG(os_tag);
+#else
+ return -1;
+#endif
+}
+
static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
void* p = NULL;
#if !defined(MAP_ANONYMOUS)
@@ -536,20 +539,14 @@ static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int pro
#if !defined(MAP_NORESERVE)
#define MAP_NORESERVE 0
#endif
+ const int fd = mi_unix_mmap_fd();
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
- int fd = -1;
if (_mi_os_has_overcommit()) {
flags |= MAP_NORESERVE;
}
#if defined(PROT_MAX)
protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
- #endif
- #if defined(VM_MAKE_TAG)
- // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
- int os_tag = (int)mi_option_get(mi_option_os_tag);
- if (os_tag < 100 || os_tag > 255) { os_tag = 100; }
- fd = VM_MAKE_TAG(os_tag);
- #endif
+ #endif
// huge page allocation
if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
static _Atomic(size_t) large_page_try_ok; // = 0;
@@ -954,9 +951,12 @@ bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats);
}
+/*
static bool mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
- return mi_os_commitx(addr, size, true, true /* conservative */, is_zero, stats);
+ return mi_os_commitx(addr, size, true, true // conservative
+ , is_zero, stats);
}
+*/
// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
@@ -1019,14 +1019,10 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
- if (mi_option_is_enabled(mi_option_reset_decommits)) {
- return _mi_os_decommit(addr, size, stats);
- }
- else {
- return mi_os_resetx(addr, size, true, stats);
- }
+ return mi_os_resetx(addr, size, true, stats);
}
+/*
bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
MI_UNUSED(tld_stats);
mi_stats_t* stats = &_mi_stats_main;
@@ -1038,7 +1034,7 @@ bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stat
return mi_os_resetx(addr, size, false, stats);
}
}
-
+*/
// Protect a region in memory to be not accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
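
The new `mi_unix_mmap_fd` helper isolates the macOS-specific tagging: on Mach kernels the value produced by `VM_MAKE_TAG` is passed as the `fd` argument of an anonymous `mmap` so tools such as vmmap can attribute the memory, while other platforms keep the usual -1. A standalone sketch of the same idea (the fixed tag 100 mirrors the default `os_tag` option; error handling is minimal):

#include <stdio.h>
#include <sys/mman.h>
#if defined(__APPLE__)
#include <mach/vm_statistics.h>   /* VM_MAKE_TAG */
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

static int anon_mmap_fd(void) {
#if defined(VM_MAKE_TAG)
  return VM_MAKE_TAG(100);        /* application tags 100..255 are free to use */
#else
  return -1;                      /* anonymous mappings use fd == -1 elsewhere */
#endif
}

int main(void) {
  const size_t size = 1 << 20;
  void* p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, anon_mmap_fd(), 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  munmap(p, size);
  return 0;
}
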
diff --git a/src/page-queue.c b/src/page-queue.c
index 26b1080..92f933c 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -34,15 +34,15 @@ terms of the MIT license. A copy of the license can be found in the file
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
- return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+sizeof(uintptr_t)));
+ return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}
static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
- return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
+ return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}
static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
- return (pq->block_size > MI_LARGE_OBJ_SIZE_MAX);
+ return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
/* -----------------------------------------------------------
@@ -72,11 +72,11 @@ static inline uint8_t mi_bin(size_t size) {
bin = (uint8_t)wsize;
}
#endif
- else if (wsize > MI_LARGE_OBJ_WSIZE_MAX) {
+ else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
bin = MI_BIN_HUGE;
}
else {
- #if defined(MI_ALIGN4W)
+ #if defined(MI_ALIGN4W)
if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
#endif
wsize--;
@@ -108,7 +108,7 @@ size_t _mi_bin_size(uint8_t bin) {
// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
- if (size <= MI_LARGE_OBJ_SIZE_MAX) {
+ if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
return _mi_bin_size(mi_bin(size));
}
else {
@@ -206,8 +206,9 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(queue, page));
- mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+ mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_heap_t* heap = mi_page_heap(page);
+
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == queue->last) queue->last = page->prev;
@@ -228,9 +229,10 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(!mi_page_queue_contains(queue, page));
- mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
mi_assert_internal(page->xblock_size == queue->block_size ||
- (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
+ (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -256,6 +258,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(from, page));
mi_assert_expensive(!mi_page_queue_contains(to, page));
+
mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
(page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
(page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
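
With the queue boundary moved from `MI_LARGE_OBJ_SIZE_MAX` to `MI_MEDIUM_OBJ_SIZE_MAX`, `mi_good_size` now rounds small and medium requests up to their bin size and anything larger to an OS-friendly allocation size. A short usage sketch (the printed values depend on the build's size-class constants, so none are asserted here):

#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // mi_good_size reports the usable size a request of n bytes would receive,
  // which lets callers grow buffers to the bin boundary instead of wasting slack
  size_t sizes[] = { 24, 100, 1000, 100000 };
  for (size_t i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
    printf("request %zu -> good size %zu\n", sizes[i], mi_good_size(sizes[i]));
  }
  return 0;
}
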
diff --git a/src/page.c b/src/page.c
index 386898f..94fc707 100644
--- a/src/page.c
+++ b/src/page.c
@@ -74,10 +74,10 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_assert_internal(page->used <= page->capacity);
mi_assert_internal(page->capacity <= page->reserved);
- const size_t bsize = mi_page_block_size(page);
mi_segment_t* segment = _mi_page_segment(page);
uint8_t* start = _mi_page_start(segment,page,NULL);
- mi_assert_internal(start == _mi_segment_page_start(segment,page,bsize,NULL,NULL));
+ mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
+ //const size_t bsize = mi_page_block_size(page);
//mi_assert_internal(start + page->capacity*page->block_size == page->top);
mi_assert_internal(mi_page_list_is_valid(page,page->free));
@@ -110,11 +110,12 @@ bool _mi_page_is_valid(mi_page_t* page) {
#endif
if (mi_page_heap(page)!=NULL) {
mi_segment_t* segment = _mi_page_segment(page);
- mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0);
- if (segment->page_kind != MI_PAGE_HUGE) {
+
+ mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
+ if (segment->kind != MI_SEGMENT_HUGE) {
mi_page_queue_t* pq = mi_page_queue_of(page);
mi_assert_internal(mi_page_queue_contains(pq, page));
- mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+ mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
}
}
@@ -230,9 +231,10 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_expensive(mi_page_is_valid_init(page));
+
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
- mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
mi_assert_internal(!page->is_reset);
// TODO: push on full queue immediately if it is full?
mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
@@ -243,14 +245,12 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
// allocate a fresh page from a segment
static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
- mi_assert_internal(pq==NULL||block_size == pq->block_size);
mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL;
}
- // a fresh page was found, initialize it
- mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+ mi_assert_internal(pq==NULL || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
mi_page_init(heap, page, block_size, heap->tld);
mi_heap_stat_increase(heap, pages, 1);
if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
@@ -367,9 +367,22 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
// no more aligned blocks in here
mi_page_set_has_aligned(page, false);
+ mi_heap_t* heap = mi_page_heap(page);
+ const size_t bsize = mi_page_block_size(page);
+ if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ _mi_stat_decrease(&heap->tld->stats.large, bsize);
+ }
+ else {
+ // not strictly necessary as we never get here for a huge page
+ mi_assert_internal(false);
+ _mi_stat_decrease(&heap->tld->stats.huge, bsize);
+ }
+ }
+
// remove from the page list
// (no need to do _mi_heap_delayed_free first as all blocks are already free)
- mi_segments_tld_t* segments_tld = &mi_page_heap(page)->tld->segments;
+ mi_segments_tld_t* segments_tld = &heap->tld->segments;
mi_page_queue_remove(pq, page);
// and free it
@@ -377,7 +390,8 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_segment_page_free(page, force, segments_tld);
}
-#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
+// Retire parameters
+#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX
#define MI_RETIRE_CYCLES (8)
// Retire a page with no more used blocks
@@ -390,7 +404,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
mi_assert_internal(page != NULL);
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(mi_page_all_free(page));
-
+
mi_page_set_has_aligned(page, false);
// don't retire too often..
@@ -403,7 +417,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
- page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+ page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
mi_heap_t* heap = mi_page_heap(page);
mi_assert_internal(pq >= heap->pages);
const size_t index = pq - heap->pages;
@@ -414,7 +428,6 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       return; // don't free after all
}
}
-
_mi_page_free(page, pq, false);
}
@@ -558,6 +571,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
+ MI_UNUSED(tld);
mi_assert_expensive(mi_page_is_valid_init(page));
#if (MI_SECURE<=2)
mi_assert(page->free == NULL);
@@ -567,7 +581,6 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
if (page->capacity >= page->reserved) return;
size_t page_size;
- //uint8_t* page_start =
_mi_page_start(_mi_page_segment(page), page, &page_size);
mi_stat_counter_increase(tld->stats.pages_extended, 1);
@@ -615,9 +628,11 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(block_size > 0);
// set fields
mi_page_set_heap(page, heap);
+ page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
size_t page_size;
- _mi_segment_page_start(segment, page, block_size, &page_size, NULL);
- page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE);
+ _mi_segment_page_start(segment, page, &page_size);
+ mi_assert_internal(mi_page_block_size(page) <= page_size);
+ mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
#ifdef MI_ENCODE_FREELIST
@@ -630,6 +645,8 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
page->is_zero = page->is_zero_init;
#endif
+ mi_assert_internal(page->is_committed);
+ mi_assert_internal(!page->is_reset);
mi_assert_internal(page->capacity == 0);
mi_assert_internal(page->free == NULL);
mi_assert_internal(page->used == 0);
@@ -691,7 +708,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
mi_heap_stat_counter_increase(heap, searches, count);
if (page == NULL) {
- _mi_heap_collect_retired(heap, false); // perhaps make a page available
+ _mi_heap_collect_retired(heap, false); // perhaps make a page available?
page = mi_page_fresh(heap, pq);
if (page == NULL && first_try) {
// out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
@@ -762,26 +779,35 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
General allocation
----------------------------------------------------------- */
-// A huge page is allocated directly without being in a queue.
+// Large and huge page allocation.
+// Huge pages are allocated directly without being in a queue.
// Because huge pages contain just one block, and the segment contains
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
-static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
+static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
size_t block_size = _mi_os_good_alloc_size(size);
mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE);
- mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
+ bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX);
+ mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
+ mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size);
if (page != NULL) {
- const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding already
- mi_assert_internal(bsize >= size);
+ const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding
mi_assert_internal(mi_page_immediate_available(page));
- mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
- mi_assert_internal(_mi_page_segment(page)->used==1);
- mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
- mi_page_set_heap(page, NULL);
-
- if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
- mi_heap_stat_increase(heap, giant, bsize);
- mi_heap_stat_counter_increase(heap, giant_count, 1);
+ mi_assert_internal(bsize >= size);
+
+ if (pq == NULL) {
+ // huge pages are directly abandoned
+ mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+ mi_assert_internal(_mi_page_segment(page)->used==1);
+ mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
+ mi_page_set_heap(page, NULL);
+ }
+ else {
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ }
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_increase(heap, large, bsize);
+ mi_heap_stat_counter_increase(heap, large_count, 1);
}
else {
mi_heap_stat_increase(heap, huge, bsize);
@@ -797,13 +823,13 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
- if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) {
+ if (mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) {
if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL;
}
else {
- return mi_huge_page_alloc(heap,size);
+ return mi_large_huge_page_alloc(heap,size);
}
}
else {
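
The renamed `mi_large_huge_page_alloc` above splits oversized requests in two: large blocks (up to `MI_LARGE_OBJ_SIZE_MAX`) still go through a regular page queue, while only truly huge blocks get a dedicated, directly-abandoned segment with `pq == NULL`. A minimal sketch of that classification; the limits below are placeholders, the real values come from mimalloc-types.h:

#include <stdio.h>
#include <stddef.h>

#define MEDIUM_MAX  (128  * 1024)   /* stands in for MI_MEDIUM_OBJ_SIZE_MAX */
#define LARGE_MAX   (2048 * 1024)   /* stands in for MI_LARGE_OBJ_SIZE_MAX  */

typedef enum { ALLOC_QUEUED, ALLOC_LARGE, ALLOC_HUGE } alloc_kind_t;

static alloc_kind_t classify(size_t block_size) {
  if (block_size <= MEDIUM_MAX) return ALLOC_QUEUED;  /* normal page queues */
  if (block_size <= LARGE_MAX)  return ALLOC_LARGE;   /* large page, still queued */
  return ALLOC_HUGE;                                  /* huge segment, abandoned at allocation */
}

int main(void) {
  printf("%d %d %d\n", classify(64 * 1024), classify(512 * 1024), classify(8 * 1024 * 1024));
  return 0;
}
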
diff --git a/src/segment-cache.c b/src/segment-cache.c
new file mode 100644
index 0000000..93908c8
--- /dev/null
+++ b/src/segment-cache.c
@@ -0,0 +1,360 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2020, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* ----------------------------------------------------------------------------
+ Implements a cache of segments to avoid expensive OS calls and to reuse
+ the commit_mask to optimize the commit/decommit calls.
+ The full memory map of all segments is also implemented here.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc-internal.h"
+#include "mimalloc-atomic.h"
+
+#include "bitmap.h" // atomic bitmap
+
+//#define MI_CACHE_DISABLE 1 // define to completely disable the segment cache
+
+#define MI_CACHE_FIELDS (16)
+#define MI_CACHE_MAX (MI_BITMAP_FIELD_BITS*MI_CACHE_FIELDS) // 1024 on 64-bit
+
+#define BITS_SET() ATOMIC_VAR_INIT(UINTPTR_MAX)
+#define MI_CACHE_BITS_SET MI_INIT16(BITS_SET) // note: update if MI_CACHE_FIELDS changes
+
+typedef struct mi_cache_slot_s {
+ void* p;
+ size_t memid;
+ bool is_pinned;
+ mi_commit_mask_t commit_mask;
+ mi_commit_mask_t decommit_mask;
+ _Atomic(mi_msecs_t) expire;
+} mi_cache_slot_t;
+
+static mi_decl_cache_align mi_cache_slot_t cache[MI_CACHE_MAX]; // = 0
+
+static mi_decl_cache_align mi_bitmap_field_t cache_available[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; // zero bit = available!
+static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET };
+static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS]; // zero bit = free
+
+
+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+{
+#ifdef MI_CACHE_DISABLE
+ return NULL;
+#else
+
+ // only segment blocks
+ if (size != MI_SEGMENT_SIZE) return NULL;
+
+ // numa node determines start field
+ const int numa_node = _mi_os_numa_node(tld);
+ size_t start_field = 0;
+ if (numa_node > 0) {
+ start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node;
+ if (start_field >= MI_CACHE_FIELDS) start_field = 0;
+ }
+
+ // find an available slot
+ mi_bitmap_index_t bitidx = 0;
+ bool claimed = false;
+ if (*large) { // large allowed?
+ claimed = _mi_bitmap_try_find_from_claim(cache_available_large, MI_CACHE_FIELDS, start_field, 1, &bitidx);
+ if (claimed) *large = true;
+ }
+ if (!claimed) {
+ claimed = _mi_bitmap_try_find_from_claim(cache_available, MI_CACHE_FIELDS, start_field, 1, &bitidx);
+ if (claimed) *large = false;
+ }
+
+ if (!claimed) return NULL;
+
+ // found a slot
+ mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)];
+ void* p = slot->p;
+ *memid = slot->memid;
+ *is_pinned = slot->is_pinned;
+ *is_zero = false;
+ *commit_mask = slot->commit_mask;
+ *decommit_mask = slot->decommit_mask;
+ slot->p = NULL;
+ mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0);
+
+ // mark the slot as free again
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx));
+ _mi_bitmap_unclaim(cache_inuse, MI_CACHE_FIELDS, 1, bitidx);
+ return p;
+#endif
+}
+
+static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats)
+{
+ if (mi_commit_mask_is_empty(cmask)) {
+ // nothing
+ }
+ else if (mi_commit_mask_is_full(cmask)) {
+ _mi_os_decommit(p, total, stats);
+ }
+ else {
+    // todo: one call to decommit the whole range at once?
+ mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
+ size_t part = total/MI_COMMIT_MASK_BITS;
+ size_t idx;
+ size_t count;
+ mi_commit_mask_foreach(cmask, idx, count) {
+ void* start = (uint8_t*)p + (idx*part);
+ size_t size = count*part;
+ _mi_os_decommit(start, size, stats);
+ }
+ mi_commit_mask_foreach_end()
+ }
+ mi_commit_mask_create_empty(cmask);
+}
+
+#define MI_MAX_PURGE_PER_PUSH (4)
+
+static mi_decl_noinline void mi_segment_cache_purge(bool force, mi_os_tld_t* tld)
+{
+ MI_UNUSED(tld);
+ if (!mi_option_is_enabled(mi_option_allow_decommit)) return;
+ mi_msecs_t now = _mi_clock_now();
+ size_t purged = 0;
+ const size_t max_visits = (force ? MI_CACHE_MAX /* visit all */ : MI_CACHE_FIELDS /* probe at most N (=16) slots */);
+ size_t idx = (force ? 0 : _mi_random_shuffle((uintptr_t)now) % MI_CACHE_MAX /* random start */ );
+ for (size_t visited = 0; visited < max_visits; visited++,idx++) { // visit N slots
+ if (idx >= MI_CACHE_MAX) idx = 0; // wrap
+ mi_cache_slot_t* slot = &cache[idx];
+ mi_msecs_t expire = mi_atomic_loadi64_relaxed(&slot->expire);
+ if (expire != 0 && (force || now >= expire)) { // racy read
+ // seems expired, first claim it from available
+ purged++;
+ mi_bitmap_index_t bitidx = mi_bitmap_index_create_from_bit(idx);
+ if (_mi_bitmap_claim(cache_available, MI_CACHE_FIELDS, 1, bitidx, NULL)) {
+ // was available, we claimed it
+ expire = mi_atomic_loadi64_acquire(&slot->expire);
+ if (expire != 0 && (force || now >= expire)) { // safe read
+ // still expired, decommit it
+ mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
+ mi_assert_internal(!mi_commit_mask_is_empty(&slot->commit_mask) && _mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
+ _mi_abandoned_await_readers(); // wait until safe to decommit
+ // decommit committed parts
+ // TODO: instead of decommit, we could also free to the OS?
+ mi_commit_mask_decommit(&slot->commit_mask, slot->p, MI_SEGMENT_SIZE, tld->stats);
+ mi_commit_mask_create_empty(&slot->decommit_mask);
+ }
+ _mi_bitmap_unclaim(cache_available, MI_CACHE_FIELDS, 1, bitidx); // make it available again for a pop
+ }
+ if (!force && purged > MI_MAX_PURGE_PER_PUSH) break; // bound to no more than N purge tries per push
+ }
+ }
+}
+
+void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld) {
+ mi_segment_cache_purge(force, tld );
+}
+
+mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
+{
+#ifdef MI_CACHE_DISABLE
+ return false;
+#else
+
+ // only for normal segment blocks
+ if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false;
+
+ // numa node determines start field
+ int numa_node = _mi_os_numa_node(NULL);
+ size_t start_field = 0;
+ if (numa_node > 0) {
+ start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node;
+ if (start_field >= MI_CACHE_FIELDS) start_field = 0;
+ }
+
+ // purge expired entries
+ mi_segment_cache_purge(false /* force? */, tld);
+
+ // find an available slot
+ mi_bitmap_index_t bitidx;
+ bool claimed = _mi_bitmap_try_find_from_claim(cache_inuse, MI_CACHE_FIELDS, start_field, 1, &bitidx);
+ if (!claimed) return false;
+
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_available, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
+#if MI_DEBUG>1
+ if (is_pinned || is_large) {
+ mi_assert_internal(mi_commit_mask_is_full(commit_mask));
+ }
+#endif
+
+ // set the slot
+ mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)];
+ slot->p = start;
+ slot->memid = memid;
+ slot->is_pinned = is_pinned;
+ mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
+ slot->commit_mask = *commit_mask;
+ slot->decommit_mask = *decommit_mask;
+ if (!mi_commit_mask_is_empty(commit_mask) && !is_large && !is_pinned && mi_option_is_enabled(mi_option_allow_decommit)) {
+ long delay = mi_option_get(mi_option_segment_decommit_delay);
+ if (delay == 0) {
+ _mi_abandoned_await_readers(); // wait until safe to decommit
+ mi_commit_mask_decommit(&slot->commit_mask, start, MI_SEGMENT_SIZE, tld->stats);
+ mi_commit_mask_create_empty(&slot->decommit_mask);
+ }
+ else {
+ mi_atomic_storei64_release(&slot->expire, _mi_clock_now() + delay);
+ }
+ }
+
+ // make it available
+ _mi_bitmap_unclaim((is_large ? cache_available_large : cache_available), MI_CACHE_FIELDS, 1, bitidx);
+ return true;
+#endif
+}
+
+
+/* -----------------------------------------------------------
+ The following functions are to reliably find the segment or
+ block that encompasses any pointer p (or NULL if it is not
+ in any of our segments).
+  We maintain a bitmap of all memory with one bit per MI_SEGMENT_SIZE (64MiB),
+  set to 1 if that range contains the segment meta data.
+----------------------------------------------------------- */
+
+
+#if (MI_INTPTR_SIZE==8)
+#define MI_MAX_ADDRESS ((size_t)20 << 40) // 20TB
+#else
+#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb
+#endif
+
+#define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE)
+#define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8)
+#define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE)
+
+static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments
+
+static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) {
+ mi_assert_internal(_mi_ptr_segment(segment) == segment); // is it aligned on MI_SEGMENT_SIZE?
+ if ((uintptr_t)segment >= MI_MAX_ADDRESS) {
+ *bitidx = 0;
+ return MI_SEGMENT_MAP_WSIZE;
+ }
+ else {
+ const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE;
+ *bitidx = segindex % MI_INTPTR_BITS;
+ const size_t mapindex = segindex / MI_INTPTR_BITS;
+ mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE);
+ return mapindex;
+ }
+}
+
+void _mi_segment_map_allocated_at(const mi_segment_t* segment) {
+ size_t bitidx;
+ size_t index = mi_segment_map_index_of(segment, &bitidx);
+ mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE);
+ if (index==MI_SEGMENT_MAP_WSIZE) return;
+ uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
+ uintptr_t newmask;
+ do {
+ newmask = (mask | ((uintptr_t)1 << bitidx));
+ } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask));
+}
+
+void _mi_segment_map_freed_at(const mi_segment_t* segment) {
+ size_t bitidx;
+ size_t index = mi_segment_map_index_of(segment, &bitidx);
+ mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE);
+ if (index == MI_SEGMENT_MAP_WSIZE) return;
+ uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
+ uintptr_t newmask;
+ do {
+ newmask = (mask & ~((uintptr_t)1 << bitidx));
+ } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask));
+}
+
+// Determine the segment belonging to a pointer or NULL if it is not in a valid segment.
+static mi_segment_t* _mi_segment_of(const void* p) {
+ mi_segment_t* segment = _mi_ptr_segment(p);
+ if (segment == NULL) return NULL;
+ size_t bitidx;
+ size_t index = mi_segment_map_index_of(segment, &bitidx);
+ // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge
+ const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]);
+ if (mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0)) {
+ return segment; // yes, allocated by us
+ }
+ if (index==MI_SEGMENT_MAP_WSIZE) return NULL;
+
+  // TODO: maintain the max/min allocated range for more efficient rejection of invalid pointers?
+
+ // search downwards for the first segment in case it is an interior pointer
+  // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps through
+ // valid huge objects
+ // note: we could maintain a lowest index to speed up the path for invalid pointers?
+ size_t lobitidx;
+ size_t loindex;
+ uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1);
+ if (lobits != 0) {
+ loindex = index;
+ lobitidx = mi_bsr(lobits); // lobits != 0
+ }
+ else if (index == 0) {
+ return NULL;
+ }
+ else {
+ mi_assert_internal(index > 0);
+ uintptr_t lomask = mask;
+ loindex = index;
+ do {
+ loindex--;
+ lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]);
+ } while (lomask != 0 && loindex > 0);
+ if (lomask == 0) return NULL;
+ lobitidx = mi_bsr(lomask); // lomask != 0
+ }
+ mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE);
+ // take difference as the addresses could be larger than the MAX_ADDRESS space.
+ size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE;
+ segment = (mi_segment_t*)((uint8_t*)segment - diff);
+
+ if (segment == NULL) return NULL;
+ mi_assert_internal((void*)segment < p);
+ bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie);
+ mi_assert_internal(cookie_ok);
+ if (mi_unlikely(!cookie_ok)) return NULL;
+ if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range
+ mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment));
+ return segment;
+}
+
+// Is this a valid pointer in our heap?
+static bool mi_is_valid_pointer(const void* p) {
+ return (_mi_segment_of(p) != NULL);
+}
+
+mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept {
+ return mi_is_valid_pointer(p);
+}
+
+/*
+// Return the full segment range belonging to a pointer
+static void* mi_segment_range_of(const void* p, size_t* size) {
+ mi_segment_t* segment = _mi_segment_of(p);
+ if (segment == NULL) {
+ if (size != NULL) *size = 0;
+ return NULL;
+ }
+ else {
+ if (size != NULL) *size = segment->segment_size;
+ return segment;
+ }
+ mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
+ mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
+ mi_reset_delayed(tld);
+ mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
+ return page;
+}
+*/
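
The segment map above spends one bit per MI_SEGMENT_SIZE-aligned block of address space, so locating the bit for a segment is a pair of divisions. A standalone sketch of that index math under the assumptions stated in the file (64MiB segments, 64-bit words; the overflow case above MI_MAX_ADDRESS is ignored here):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define SEGMENT_SIZE  ((uintptr_t)64 << 20)   /* 64 MiB, as in the comment above */
#define WORD_BITS     64                      /* MI_INTPTR_BITS on 64-bit */

/* split a segment address into a word index and a bit index,
   following the shape of mi_segment_map_index_of */
static void map_index_of(uintptr_t segment, size_t* word, size_t* bit) {
  uintptr_t segindex = segment / SEGMENT_SIZE;
  *bit  = (size_t)(segindex % WORD_BITS);
  *word = (size_t)(segindex / WORD_BITS);
}

int main(void) {
  size_t word, bit;
  // a segment starting at 640 MiB is the 10th segment: word 0, bit 10
  map_index_of((uintptr_t)640 << 20, &word, &bit);
  assert(word == 0 && bit == 10);
  return 0;
}
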
diff --git a/src/segment.c b/src/segment.c
index 8a83cee..8d3eebe 100644
--- a/src/segment.c
+++ b/src/segment.c
@@ -13,437 +13,343 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_PAGE_HUGE_ALIGN (256*1024)
-static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size);
+static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats);
-/* --------------------------------------------------------------------------------
- Segment allocation
- We allocate pages inside bigger "segments" (4MiB on 64-bit). This is to avoid
- splitting VMA's on Linux and reduce fragmentation on other OS's.
- Each thread owns its own segments.
-
- Currently we have:
- - small pages (64KiB), 64 in one segment
- - medium pages (512KiB), 8 in one segment
- - large pages (4MiB), 1 in one segment
- - huge blocks > MI_LARGE_OBJ_SIZE_MAX become large segment with 1 page
- In any case the memory for a segment is virtual and usually committed on demand.
- (i.e. we are careful to not touch the memory until we actually allocate a block there)
-
- If a thread ends, it "abandons" pages with used blocks
- and there is an abandoned segment list whose segments can
- be reclaimed by still running threads, much like work-stealing.
--------------------------------------------------------------------------------- */
-
-
-/* -----------------------------------------------------------
- Queue of segments containing free pages
------------------------------------------------------------ */
+// -------------------------------------------------------------------
+// commit mask
+// -------------------------------------------------------------------
-#if (MI_DEBUG>=3)
-static bool mi_segment_queue_contains(const mi_segment_queue_t* queue, const mi_segment_t* segment) {
- mi_assert_internal(segment != NULL);
- mi_segment_t* list = queue->first;
- while (list != NULL) {
- if (list == segment) break;
- mi_assert_internal(list->next==NULL || list->next->prev == list);
- mi_assert_internal(list->prev==NULL || list->prev->next == list);
- list = list->next;
+static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false;
}
- return (list == segment);
-}
-#endif
-
-static bool mi_segment_queue_is_empty(const mi_segment_queue_t* queue) {
- return (queue->first == NULL);
-}
-
-static void mi_segment_queue_remove(mi_segment_queue_t* queue, mi_segment_t* segment) {
- mi_assert_expensive(mi_segment_queue_contains(queue, segment));
- if (segment->prev != NULL) segment->prev->next = segment->next;
- if (segment->next != NULL) segment->next->prev = segment->prev;
- if (segment == queue->first) queue->first = segment->next;
- if (segment == queue->last) queue->last = segment->prev;
- segment->next = NULL;
- segment->prev = NULL;
+ return true;
}
-static void mi_segment_enqueue(mi_segment_queue_t* queue, mi_segment_t* segment) {
- mi_assert_expensive(!mi_segment_queue_contains(queue, segment));
- segment->next = NULL;
- segment->prev = queue->last;
- if (queue->last != NULL) {
- mi_assert_internal(queue->last->next == NULL);
- queue->last->next = segment;
- queue->last = segment;
+static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if ((commit->mask[i] & cm->mask[i]) != 0) return true;
}
- else {
- queue->last = queue->first = segment;
- }
-}
-
-static mi_segment_queue_t* mi_segment_free_queue_of_kind(mi_page_kind_t kind, mi_segments_tld_t* tld) {
- if (kind == MI_PAGE_SMALL) return &tld->small_free;
- else if (kind == MI_PAGE_MEDIUM) return &tld->medium_free;
- else return NULL;
-}
-
-static mi_segment_queue_t* mi_segment_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
- return mi_segment_free_queue_of_kind(segment->page_kind, tld);
+ return false;
}
-// remove from free queue if it is in one
-static void mi_segment_remove_from_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld); // may be NULL
- bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
- if (in_queue) {
- mi_segment_queue_remove(queue, segment);
+static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ res->mask[i] = (commit->mask[i] & cm->mask[i]);
}
}
-static void mi_segment_insert_in_free_queue(mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_segment_enqueue(mi_segment_free_queue(segment, tld), segment);
+static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ res->mask[i] &= ~(cm->mask[i]);
+ }
}
-
-/* -----------------------------------------------------------
- Invariant checking
------------------------------------------------------------ */
-
-#if (MI_DEBUG>=2)
-static bool mi_segment_is_in_free_queue(const mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_segment_queue_t* queue = mi_segment_free_queue(segment, tld);
- bool in_queue = (queue!=NULL && (segment->next != NULL || segment->prev != NULL || queue->first == segment));
- if (in_queue) {
- mi_assert_expensive(mi_segment_queue_contains(queue, segment));
+static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ res->mask[i] |= cm->mask[i];
}
- return in_queue;
}
-#endif
-static size_t mi_segment_page_size(const mi_segment_t* segment) {
- if (segment->capacity > 1) {
- mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM);
- return ((size_t)1 << segment->page_shift);
+static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) {
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
+ if (bitcount == MI_COMMIT_MASK_BITS) {
+ mi_assert_internal(bitidx==0);
+ mi_commit_mask_create_full(cm);
}
- else {
- mi_assert_internal(segment->page_kind >= MI_PAGE_LARGE);
- return segment->segment_size;
+ else if (bitcount == 0) {
+ mi_commit_mask_create_empty(cm);
}
-}
-
-
-#if (MI_DEBUG>=2)
-static bool mi_pages_reset_contains(const mi_page_t* page, mi_segments_tld_t* tld) {
- mi_page_t* p = tld->pages_reset.first;
- while (p != NULL) {
- if (p == page) return true;
- p = p->next;
+ else {
+ mi_commit_mask_create_empty(cm);
+ size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS;
+ size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS;
+ while (bitcount > 0) {
+ mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT);
+ size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs;
+ size_t count = (bitcount > avail ? avail : bitcount);
+ size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? ~((size_t)0) : (((size_t)1 << count) - 1) << ofs);
+ cm->mask[i] = mask;
+ bitcount -= count;
+ ofs = 0;
+ i++;
+ }
}
- return false;
}
-#endif
-#if (MI_DEBUG>=3)
-static bool mi_segment_is_valid(const mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_assert_internal(segment != NULL);
- mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
- mi_assert_internal(segment->used <= segment->capacity);
- mi_assert_internal(segment->abandoned <= segment->used);
- size_t nfree = 0;
- for (size_t i = 0; i < segment->capacity; i++) {
- const mi_page_t* const page = &segment->pages[i];
- if (!page->segment_in_use) {
- nfree++;
+size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) {
+ mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
+ size_t count = 0;
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ size_t mask = cm->mask[i];
+ if (~mask == 0) {
+ count += MI_COMMIT_MASK_FIELD_BITS;
}
- if (page->segment_in_use || page->is_reset) {
- mi_assert_expensive(!mi_pages_reset_contains(page, tld));
+ else {
+ for (; mask != 0; mask >>= 1) { // todo: use popcount
+ if ((mask&1)!=0) count++;
+ }
}
}
- mi_assert_internal(nfree + segment->used == segment->capacity);
- // mi_assert_internal(segment->thread_id == _mi_thread_id() || (segment->thread_id==0)); // or 0
- mi_assert_internal(segment->page_kind == MI_PAGE_HUGE ||
- (mi_segment_page_size(segment) * segment->capacity == segment->segment_size));
- return true;
+ // we use total since for huge segments each commit bit may represent a larger size
+ return ((total / MI_COMMIT_MASK_BITS) * count);
}
-#endif
-static bool mi_page_not_in_queue(const mi_page_t* page, mi_segments_tld_t* tld) {
- mi_assert_internal(page != NULL);
- if (page->next != NULL || page->prev != NULL) {
- mi_assert_internal(mi_pages_reset_contains(page, tld));
- return false;
+
+size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) {
+ size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS;
+ size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS;
+ size_t mask = 0;
+ // find first ones
+ while (i < MI_COMMIT_MASK_FIELD_COUNT) {
+ mask = cm->mask[i];
+ mask >>= ofs;
+ if (mask != 0) {
+ while ((mask&1) == 0) {
+ mask >>= 1;
+ ofs++;
+ }
+ break;
+ }
+ i++;
+ ofs = 0;
+ }
+ if (i >= MI_COMMIT_MASK_FIELD_COUNT) {
+ // not found
+ *idx = MI_COMMIT_MASK_BITS;
+ return 0;
}
else {
- // both next and prev are NULL, check for singleton list
- return (tld->pages_reset.first != page && tld->pages_reset.last != page);
+ // found, count ones
+ size_t count = 0;
+ *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs;
+ do {
+ mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1);
+ do {
+ count++;
+ mask >>= 1;
+ } while ((mask&1) == 1);
+ if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) {
+ i++;
+ if (i >= MI_COMMIT_MASK_FIELD_COUNT) break;
+ mask = cm->mask[i];
+ ofs = 0;
+ }
+ } while ((mask&1) == 1);
+ mi_assert_internal(count > 0);
+ return count;
}
}
+/* --------------------------------------------------------------------------------
+ Segment allocation
+
+ If a thread ends, it "abandons" pages with used blocks
+ and there is an abandoned segment list whose segments can
+ be reclaimed by still running threads, much like work-stealing.
+-------------------------------------------------------------------------------- */
+
+
/* -----------------------------------------------------------
- Guard pages
+ Slices
----------------------------------------------------------- */
-static void mi_segment_protect_range(void* p, size_t size, bool protect) {
- if (protect) {
- _mi_mem_protect(p, size);
- }
- else {
- _mi_mem_unprotect(p, size);
- }
-}
-
-static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t* tld) {
- // add/remove guard pages
- if (MI_SECURE != 0) {
- // in secure mode, we set up a protected page in between the segment info and the page data
- const size_t os_psize = _mi_os_page_size();
- mi_assert_internal((segment->segment_info_size - os_psize) >= (sizeof(mi_segment_t) + ((segment->capacity - 1) * sizeof(mi_page_t))));
- mi_assert_internal(((uintptr_t)segment + segment->segment_info_size) % os_psize == 0);
- mi_segment_protect_range((uint8_t*)segment + segment->segment_info_size - os_psize, os_psize, protect);
- if (MI_SECURE <= 1 || segment->capacity == 1) {
- // and protect the last (or only) page too
- mi_assert_internal(MI_SECURE <= 1 || segment->page_kind >= MI_PAGE_LARGE);
- uint8_t* start = (uint8_t*)segment + segment->segment_size - os_psize;
- if (protect && !segment->mem_is_committed) {
- if (protect) {
- // ensure secure page is committed
- if (_mi_mem_commit(start, os_psize, NULL, tld)) { // if this fails that is ok (as it is an unaccessible page)
- mi_segment_protect_range(start, os_psize, protect);
- }
- }
- }
- else {
- mi_segment_protect_range(start, os_psize, protect);
- }
- }
- else {
- // or protect every page
- const size_t page_size = mi_segment_page_size(segment);
- for (size_t i = 0; i < segment->capacity; i++) {
- if (segment->pages[i].is_committed) {
- mi_segment_protect_range((uint8_t*)segment + (i+1)*page_size - os_psize, os_psize, protect);
- }
- }
- }
- }
+
+static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) {
+ return &segment->slices[segment->slice_entries];
+}
+
+static uint8_t* mi_slice_start(const mi_slice_t* slice) {
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment));
+ return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE));
}
+
/* -----------------------------------------------------------
- Page reset
+ Bins
----------------------------------------------------------- */
+// Use bit scan forward to quickly find the first zero bit if it is available
+
+static inline size_t mi_slice_bin8(size_t slice_count) {
+ if (slice_count<=1) return slice_count;
+ mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT);
+ slice_count--;
+ size_t s = mi_bsr(slice_count); // slice_count > 1
+ if (s <= 2) return slice_count + 1;
+ size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4;
+ return bin;
+}
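// A small standalone model of the binning above, assuming a portable bsr
// (index of the highest set bit): small counts map to their own bin, and
// beyond that each power of two is split into 4 sub-bins, so the number of
// span queues grows only logarithmically with the slice count.
#include <stddef.h>
#include <stdio.h>

static size_t bsr(size_t x) {            // highest set bit index; requires x > 0
  size_t r = 0;
  while (x >>= 1) r++;
  return r;
}

static size_t slice_bin8(size_t slice_count) {
  if (slice_count <= 1) return slice_count;
  slice_count--;
  size_t s = bsr(slice_count);
  if (s <= 2) return slice_count + 1;
  return ((s << 2) | ((slice_count >> (s - 2)) & 0x03)) - 4;
}

int main(void) {
  size_t samples[] = { 1, 2, 8, 9, 16, 64, 512, 1024 };
  for (size_t i = 0; i < sizeof(samples)/sizeof(samples[0]); i++) {
    printf("slice_count %4zu -> bin %zu\n", samples[i], slice_bin8(samples[i]));
  }
  return 0;
}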
-static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) {
- mi_assert_internal(page->is_committed);
- if (!mi_option_is_enabled(mi_option_page_reset)) return;
- if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
- size_t psize;
- void* start = mi_segment_raw_page_start(segment, page, &psize);
- page->is_reset = true;
- mi_assert_internal(size <= psize);
- size_t reset_size = ((size == 0 || size > psize) ? psize : size);
- if (reset_size > 0) _mi_mem_reset(start, reset_size, tld->os);
+static inline size_t mi_slice_bin(size_t slice_count) {
+ mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE);
+ mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX);
+ size_t bin = mi_slice_bin8(slice_count);
+ mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX);
+ return bin;
}
-static bool mi_page_unreset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld)
-{
- mi_assert_internal(page->is_reset);
- mi_assert_internal(page->is_committed);
- mi_assert_internal(!segment->mem_is_pinned);
- if (segment->mem_is_pinned || !page->is_committed || !page->is_reset) return true;
- page->is_reset = false;
- size_t psize;
- uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
- size_t unreset_size = (size == 0 || size > psize ? psize : size);
- bool is_zero = false;
- bool ok = true;
- if (unreset_size > 0) {
- ok = _mi_mem_unreset(start, unreset_size, &is_zero, tld->os);
- }
- if (is_zero) page->is_zero_init = true;
- return ok;
+static inline size_t mi_slice_index(const mi_slice_t* slice) {
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ ptrdiff_t index = slice - segment->slices;
+ mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries);
+ return index;
}
/* -----------------------------------------------------------
- The free page queue
+ Slice span queues
----------------------------------------------------------- */
-// we re-use the `used` field for the expiration counter. Since this is a
-// a 32-bit field while the clock is always 64-bit we need to guard
-// against overflow, we use substraction to check for expiry which work
-// as long as the reset delay is under (2^30 - 1) milliseconds (~12 days)
-static void mi_page_reset_set_expire(mi_page_t* page) {
- uint32_t expire = (uint32_t)_mi_clock_now() + mi_option_get(mi_option_reset_delay);
- page->used = expire;
+static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) {
+ // todo: or push to the end?
+ mi_assert_internal(slice->prev == NULL && slice->next==NULL);
+ slice->prev = NULL; // paranoia
+ slice->next = sq->first;
+ sq->first = slice;
+ if (slice->next != NULL) slice->next->prev = slice;
+ else sq->last = slice;
+ slice->xblock_size = 0; // free
}
-static bool mi_page_reset_is_expired(mi_page_t* page, mi_msecs_t now) {
- int32_t expire = (int32_t)(page->used);
- return (((int32_t)now - expire) >= 0);
+static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) {
+ size_t bin = mi_slice_bin(slice_count);
+ mi_span_queue_t* sq = &tld->spans[bin];
+ mi_assert_internal(sq->slice_count >= slice_count);
+ return sq;
}
-static void mi_pages_reset_add(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
- mi_assert_internal(!page->segment_in_use || !page->is_committed);
- mi_assert_internal(mi_page_not_in_queue(page,tld));
- mi_assert_expensive(!mi_pages_reset_contains(page, tld));
- mi_assert_internal(_mi_page_segment(page)==segment);
- if (!mi_option_is_enabled(mi_option_page_reset)) return;
- if (segment->mem_is_pinned || page->segment_in_use || !page->is_committed || page->is_reset) return;
+static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) {
+ mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
+  // this also works if the queue does not contain the slice (which can happen during reclaim)
+ if (slice->prev != NULL) slice->prev->next = slice->next;
+ if (slice == sq->first) sq->first = slice->next;
+ if (slice->next != NULL) slice->next->prev = slice->prev;
+ if (slice == sq->last) sq->last = slice->prev;
+ slice->prev = NULL;
+ slice->next = NULL;
+ slice->xblock_size = 1; // no more free
+}
- if (mi_option_get(mi_option_reset_delay) == 0) {
- // reset immediately?
- mi_page_reset(segment, page, 0, tld);
- }
- else {
- // otherwise push on the delayed page reset queue
- mi_page_queue_t* pq = &tld->pages_reset;
- // push on top
- mi_page_reset_set_expire(page);
- page->next = pq->first;
- page->prev = NULL;
- if (pq->first == NULL) {
- mi_assert_internal(pq->last == NULL);
- pq->first = pq->last = page;
- }
- else {
- pq->first->prev = page;
- pq->first = page;
- }
- }
+
+/* -----------------------------------------------------------
+ Invariant checking
+----------------------------------------------------------- */
+
+static bool mi_slice_is_used(const mi_slice_t* slice) {
+ return (slice->xblock_size > 0);
}
-static void mi_pages_reset_remove(mi_page_t* page, mi_segments_tld_t* tld) {
- if (mi_page_not_in_queue(page,tld)) return;
- mi_page_queue_t* pq = &tld->pages_reset;
- mi_assert_internal(pq!=NULL);
- mi_assert_internal(!page->segment_in_use);
- mi_assert_internal(mi_pages_reset_contains(page, tld));
- if (page->prev != NULL) page->prev->next = page->next;
- if (page->next != NULL) page->next->prev = page->prev;
- if (page == pq->last) pq->last = page->prev;
- if (page == pq->first) pq->first = page->next;
- page->next = page->prev = NULL;
- page->used = 0;
+#if (MI_DEBUG>=3)
+static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) {
+ for (mi_slice_t* s = sq->first; s != NULL; s = s->next) {
+ if (s==slice) return true;
+ }
+ return false;
}
-static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool force_reset, mi_segments_tld_t* tld) {
- if (segment->mem_is_pinned) return; // never reset in huge OS pages
- for (size_t i = 0; i < segment->capacity; i++) {
- mi_page_t* page = &segment->pages[i];
- if (!page->segment_in_use && page->is_committed && !page->is_reset) {
- mi_pages_reset_remove(page, tld);
- if (force_reset) {
- mi_page_reset(segment, page, 0, tld);
+static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) {
+ mi_assert_internal(segment != NULL);
+ mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
+ mi_assert_internal(segment->abandoned <= segment->used);
+ mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id());
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask)); // can only decommit committed blocks
+ //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0);
+ mi_slice_t* slice = &segment->slices[0];
+ const mi_slice_t* end = mi_segment_slices_end(segment);
+ size_t used_count = 0;
+ mi_span_queue_t* sq;
+ while(slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ size_t index = mi_slice_index(slice);
+ size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1;
+ if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET valid back offsets
+ used_count++;
+ for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET && index + i <= maxindex; i++) {
+ mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t));
+ mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0);
+ mi_assert_internal(i==0 || segment->slices[index + i].xblock_size == 1);
+ }
+ // and the last entry as well (for coalescing)
+ const mi_slice_t* last = slice + slice->slice_count - 1;
+ if (last > slice && last < mi_segment_slices_end(segment)) {
+ mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t));
+ mi_assert_internal(last->slice_count == 0);
+ mi_assert_internal(last->xblock_size == 1);
}
}
- else {
- mi_assert_internal(mi_page_not_in_queue(page,tld));
+ else { // free range of slices; only last slice needs a valid back offset
+ mi_slice_t* last = &segment->slices[maxindex];
+ if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) {
+ mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset);
+ }
+ mi_assert_internal(slice == last || last->slice_count == 0 );
+ mi_assert_internal(last->xblock_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->xblock_size==1));
+ if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned
+ sq = mi_span_queue_for(slice->slice_count,tld);
+ mi_assert_internal(mi_span_queue_contains(sq,slice));
+ }
}
+ slice = &segment->slices[maxindex+1];
}
+ mi_assert_internal(slice == end);
+ mi_assert_internal(used_count == segment->used + 1);
+ return true;
}
-
-static void mi_reset_delayed(mi_segments_tld_t* tld) {
- if (!mi_option_is_enabled(mi_option_page_reset)) return;
- mi_msecs_t now = _mi_clock_now();
- mi_page_queue_t* pq = &tld->pages_reset;
- // from oldest up to the first that has not expired yet
- mi_page_t* page = pq->last;
- while (page != NULL && mi_page_reset_is_expired(page,now)) {
- mi_page_t* const prev = page->prev; // save previous field
- mi_page_reset(_mi_page_segment(page), page, 0, tld);
- page->used = 0;
- page->prev = page->next = NULL;
- page = prev;
- }
- // discard the reset pages from the queue
- pq->last = page;
- if (page != NULL){
- page->next = NULL;
- }
- else {
- pq->first = NULL;
- }
-}
-
+#endif
/* -----------------------------------------------------------
Segment size calculations
----------------------------------------------------------- */
-// Raw start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
-// The raw start is not taking aligned block allocation into consideration.
-static uint8_t* mi_segment_raw_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
- size_t psize = (segment->page_kind == MI_PAGE_HUGE ? segment->segment_size : (size_t)1 << segment->page_shift);
- uint8_t* p = (uint8_t*)segment + page->segment_idx * psize;
-
- if (page->segment_idx == 0) {
- // the first page starts after the segment info (and possible guard page)
- p += segment->segment_info_size;
- psize -= segment->segment_info_size;
- }
-
-#if (MI_SECURE > 1) // every page has an os guard page
- psize -= _mi_os_page_size();
-#elif (MI_SECURE==1) // the last page has an os guard page at the end
- if (page->segment_idx == segment->capacity - 1) {
- psize -= _mi_os_page_size();
- }
-#endif
-
- if (page_size != NULL) *page_size = psize;
- mi_assert_internal(page->xblock_size == 0 || _mi_ptr_page(p) == page);
- mi_assert_internal(_mi_ptr_segment(p) == segment);
- return p;
+static size_t mi_segment_info_size(mi_segment_t* segment) {
+ return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE;
}
-// Start of the page available memory; can be used on uninitialized pages (only `segment_idx` must be set)
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size)
+static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t xblock_size, size_t* page_size)
{
- size_t psize;
- uint8_t* p = mi_segment_raw_page_start(segment, page, &psize);
- if (pre_size != NULL) *pre_size = 0;
- if (page->segment_idx == 0 && block_size > 0 && segment->page_kind <= MI_PAGE_MEDIUM) {
- // for small and medium objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore)
- size_t adjust = block_size - ((uintptr_t)p % block_size);
- if (adjust < block_size) {
- p += adjust;
- psize -= adjust;
- if (pre_size != NULL) *pre_size = adjust;
- }
- mi_assert_internal((uintptr_t)p % block_size == 0);
- }
+ ptrdiff_t idx = slice - segment->slices;
+ size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE;
+ // make the start not OS page aligned for smaller blocks to avoid page/cache effects
+ size_t start_offset = (xblock_size >= MI_INTPTR_SIZE && xblock_size <= 1024 ? MI_MAX_ALIGN_GUARANTEE : 0);
+ if (page_size != NULL) { *page_size = psize - start_offset; }
+ return (uint8_t*)segment + ((idx*MI_SEGMENT_SLICE_SIZE) + start_offset);
+}
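// A standalone sketch of the start computation above, using illustrative
// constants (a 64 KiB slice and a 128-byte stand-in for MI_MAX_ALIGN_GUARANTEE):
// pages holding small blocks start slightly past the slice boundary so that
// page starts are not all OS-page aligned, avoiding page/cache aliasing.
#include <stddef.h>
#include <stdio.h>

#define SLICE_SIZE      (64*1024)
#define ALIGN_GUARANTEE 128       /* stand-in for MI_MAX_ALIGN_GUARANTEE */

static size_t page_start_offset(size_t slice_index, size_t block_size) {
  size_t ofs = (block_size >= sizeof(void*) && block_size <= 1024) ? ALIGN_GUARANTEE : 0;
  return slice_index * SLICE_SIZE + ofs;
}

int main(void) {
  printf("64-byte blocks: page starts at segment + %zu\n", page_start_offset(1, 64));
  printf("8 KiB blocks:   page starts at segment + %zu\n", page_start_offset(1, 8192));
  return 0;
}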
- if (page_size != NULL) *page_size = psize;
- mi_assert_internal(page->xblock_size==0 || _mi_ptr_page(p) == page);
+// Start of the page available memory; can be used on uninitialized pages
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size)
+{
+ const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page);
+ uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, page->xblock_size, page_size);
+ mi_assert_internal(page->xblock_size > 0 || _mi_ptr_page(p) == page);
mi_assert_internal(_mi_ptr_segment(p) == segment);
return p;
}
-static size_t mi_segment_size(size_t capacity, size_t required, size_t* pre_size, size_t* info_size)
-{
- const size_t minsize = sizeof(mi_segment_t) + ((capacity - 1) * sizeof(mi_page_t)) + 16 /* padding */;
+
+static size_t mi_segment_calculate_slices(size_t required, size_t* pre_size, size_t* info_slices) {
+ size_t page_size = _mi_os_page_size();
+ size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size);
size_t guardsize = 0;
- size_t isize = 0;
- if (MI_SECURE == 0) {
- // normally no guard pages
- isize = _mi_align_up(minsize, 16 * MI_MAX_ALIGN_SIZE);
- }
- else {
+ if (MI_SECURE>0) {
// in secure mode, we set up a protected page in between the segment info
// and the page data (and one at the end of the segment)
- const size_t page_size = _mi_os_page_size();
- isize = _mi_align_up(minsize, page_size);
- guardsize = page_size;
- required = _mi_align_up(required, page_size);
+ guardsize = page_size;
+ required = _mi_align_up(required, page_size);
}
- if (info_size != NULL) *info_size = isize;
- if (pre_size != NULL) *pre_size = isize + guardsize;
- return (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + 2*guardsize, MI_PAGE_HUGE_ALIGN) );
+ if (pre_size != NULL) *pre_size = isize;
+ isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE);
+ if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE;
+ size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) );
+ mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0);
+ return (segment_size / MI_SEGMENT_SLICE_SIZE);
}
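// A standalone walk-through of the slice calculation above, with illustrative
// constants (64 KiB slices, 4 KiB OS pages, an 8 MiB segment, and a
// hypothetical 8 KiB segment header); the real values live in mimalloc-types.h.
#include <stddef.h>
#include <stdio.h>

#define SLICE_SIZE   (64*1024)
#define OS_PAGE_SIZE (4*1024)
#define SEGMENT_SIZE (8*1024*1024)

static size_t align_up(size_t n, size_t a) { return ((n + a - 1) / a) * a; }

int main(void) {
  size_t required  = 0;                                 // 0 selects a normal (non-huge) segment
  size_t header    = 8*1024;                            // hypothetical segment header incl. slice array
  size_t isize     = align_up(header, OS_PAGE_SIZE);    // pre_size: header rounded to an OS page
  size_t guardsize = 0;                                 // no MI_SECURE guard page in this sketch
  isize = align_up(isize + guardsize, SLICE_SIZE);      // info area rounded to whole slices
  size_t info_slices  = isize / SLICE_SIZE;
  size_t segment_size = (required == 0 ? SEGMENT_SIZE
                        : align_up(required + isize + guardsize, SLICE_SIZE));
  printf("info slices: %zu, segment slices: %zu\n", info_slices, segment_size / SLICE_SIZE);
  return 0;
}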
@@ -462,25 +368,30 @@ static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) {
if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size;
}
-static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_segments_tld_t* tld) {
+static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
segment->thread_id = 0;
- mi_segments_track_size(-((long)segment_size),tld);
- if (MI_SECURE != 0) {
- mi_assert_internal(!segment->mem_is_pinned);
- mi_segment_protect(segment, false, tld->os); // ensure no more guard pages are set
+ _mi_segment_map_freed_at(segment);
+ mi_segments_track_size(-((long)mi_segment_size(segment)),tld);
+ if (MI_SECURE>0) {
+ // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set
+ // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted
+ size_t os_pagesize = _mi_os_page_size();
+ _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
+ uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
+ _mi_os_unprotect(end, os_pagesize);
+ }
+
+ // purge delayed decommits now? (no, leave it to the cache)
+ // mi_segment_delayed_decommit(segment,true,tld->stats);
+
+ // _mi_os_free(segment, mi_segment_size(segment), /*segment->memid,*/ tld->stats);
+ const size_t size = mi_segment_size(segment);
+ if (size != MI_SEGMENT_SIZE || !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) {
+ const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
+ if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
+ _mi_abandoned_await_readers(); // wait until safe to free
+ _mi_arena_free(segment, mi_segment_size(segment), segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->os);
}
-
- bool any_reset = false;
- bool fully_committed = true;
- for (size_t i = 0; i < segment->capacity; i++) {
- mi_page_t* page = &segment->pages[i];
- if (!page->is_committed) { fully_committed = false; }
- if (page->is_reset) { any_reset = true; }
- }
- if (any_reset && mi_option_is_enabled(mi_option_reset_decommits)) {
- fully_committed = false;
- }
- _mi_mem_free(segment, segment_size, segment->memid, fully_committed, any_reset, tld->os);
}
@@ -488,14 +399,14 @@ static void mi_segment_os_free(mi_segment_t* segment, size_t segment_size, mi_se
#define MI_SEGMENT_CACHE_FRACTION (8)
// note: returned segment may be partially reset
-static mi_segment_t* mi_segment_cache_pop(size_t segment_size, mi_segments_tld_t* tld) {
- if (segment_size != 0 && segment_size != MI_SEGMENT_SIZE) return NULL;
+static mi_segment_t* mi_segment_cache_pop(size_t segment_slices, mi_segments_tld_t* tld) {
+ if (segment_slices != 0 && segment_slices != MI_SLICES_PER_SEGMENT) return NULL;
mi_segment_t* segment = tld->cache;
if (segment == NULL) return NULL;
tld->cache_count--;
tld->cache = segment->next;
segment->next = NULL;
- mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
+ mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
_mi_stat_decrease(&tld->stats->segments_cache, 1);
return segment;
}
@@ -514,18 +425,19 @@ static bool mi_segment_cache_full(mi_segments_tld_t* tld)
while (tld->cache_count > max_cache) { //(1 + (tld->peak_count / MI_SEGMENT_CACHE_FRACTION))) {
mi_segment_t* segment = mi_segment_cache_pop(0,tld);
mi_assert_internal(segment != NULL);
- if (segment != NULL) mi_segment_os_free(segment, segment->segment_size, tld);
+ if (segment != NULL) mi_segment_os_free(segment, tld);
}
return true;
}
static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_assert_internal(!mi_segment_is_in_free_queue(segment, tld));
mi_assert_internal(segment->next == NULL);
- if (segment->segment_size != MI_SEGMENT_SIZE || mi_segment_cache_full(tld)) {
+ if (segment->segment_slices != MI_SLICES_PER_SEGMENT || mi_segment_cache_full(tld)) {
return false;
}
- mi_assert_internal(segment->segment_size == MI_SEGMENT_SIZE);
+ // mi_segment_delayed_decommit(segment, true, tld->stats);
+ mi_assert_internal(segment->segment_slices == MI_SLICES_PER_SEGMENT);
+ mi_assert_internal(segment->next == NULL);
segment->next = tld->cache;
tld->cache = segment;
tld->cache_count++;
@@ -537,315 +449,616 @@ static bool mi_segment_cache_push(mi_segment_t* segment, mi_segments_tld_t* tld)
void _mi_segment_thread_collect(mi_segments_tld_t* tld) {
mi_segment_t* segment;
while ((segment = mi_segment_cache_pop(0,tld)) != NULL) {
- mi_segment_os_free(segment, segment->segment_size, tld);
+ mi_segment_os_free(segment, tld);
}
mi_assert_internal(tld->cache_count == 0);
- mi_assert_internal(tld->cache == NULL);
-#if MI_DEBUG>=2
- if (!_mi_is_main_thread()) {
- mi_assert_internal(tld->pages_reset.first == NULL);
- mi_assert_internal(tld->pages_reset.last == NULL);
- }
-#endif
+ mi_assert_internal(tld->cache == NULL);
}
+
/* -----------------------------------------------------------
- Segment allocation
+ Span management
----------------------------------------------------------- */
-// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
-static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
-{
- // the segment parameter is non-null if it came from our cache
- mi_assert_internal(segment==NULL || (required==0 && page_kind <= MI_PAGE_LARGE));
+static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) {
+ mi_assert_internal(_mi_ptr_segment(p) == segment);
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
+ mi_commit_mask_create_empty(cm);
+ if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return;
+ const size_t segstart = mi_segment_info_size(segment);
+ const size_t segsize = mi_segment_size(segment);
+ if (p >= (uint8_t*)segment + segsize) return;
+
+ size_t pstart = (p - (uint8_t*)segment);
+ mi_assert_internal(pstart + size <= segsize);
+
+ size_t start;
+ size_t end;
+ if (conservative) {
+ // decommit conservative
+ start = _mi_align_up(pstart, MI_COMMIT_SIZE);
+ end = _mi_align_down(pstart + size, MI_COMMIT_SIZE);
+ mi_assert_internal(start >= segstart);
+ mi_assert_internal(end <= segsize);
+ }
+ else {
+ // commit liberal
+ start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE);
+ end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE);
+ }
+ if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area
+ start = segstart;
+ }
+ if (end > segsize) {
+ end = segsize;
+ }
- // calculate needed sizes first
- size_t capacity;
- if (page_kind == MI_PAGE_HUGE) {
- mi_assert_internal(page_shift == MI_SEGMENT_SHIFT && required > 0);
- capacity = 1;
+ mi_assert_internal(start <= pstart && (pstart + size) <= end);
+ mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0);
+ *start_p = (uint8_t*)segment + start;
+ *full_size = (end > start ? end - start : 0);
+ if (*full_size == 0) return;
+
+ size_t bitidx = start / MI_COMMIT_SIZE;
+ mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS);
+
+ size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0
+ if (bitidx + bitcount > MI_COMMIT_MASK_BITS) {
+ _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size);
+ }
+ mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS);
+ mi_commit_mask_create(bitidx, bitcount, cm);
+}
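// A standalone sketch of the "commit liberal, decommit conservative" rule used
// above, with a single 64 KiB granularity as a stand-in (the real code uses
// MI_MINIMAL_COMMIT_SIZE when committing and MI_COMMIT_SIZE when decommitting):
// a commit widens the range outward so it is fully covered, while a decommit
// shrinks it inward so memory that is still partially in use is never released.
#include <stddef.h>
#include <stdio.h>

#define COMMIT_SIZE (64*1024)

static size_t align_down(size_t n, size_t a) { return (n / a) * a; }
static size_t align_up(size_t n, size_t a)   { return align_down(n + a - 1, a); }

int main(void) {
  size_t pstart = 100*1024, size = 200*1024;                // some range inside a segment
  size_t cstart = align_down(pstart, COMMIT_SIZE);          // liberal: round outward
  size_t cend   = align_up(pstart + size, COMMIT_SIZE);
  size_t dstart = align_up(pstart, COMMIT_SIZE);            // conservative: round inward
  size_t dend   = align_down(pstart + size, COMMIT_SIZE);
  printf("commit   [%zu KiB, %zu KiB)\n", cstart/1024, cend/1024);
  printf("decommit [%zu KiB, %zu KiB)\n", dstart/1024, dend/1024);
  return 0;
}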
+
+
+static bool mi_segment_commitx(mi_segment_t* segment, bool commit, uint8_t* p, size_t size, mi_stats_t* stats) {
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+
+ // try to commit in at least MI_MINIMAL_COMMIT_SIZE sizes.
+ /*
+ if (commit && size > 0) {
+ const size_t csize = _mi_align_up(size, MI_MINIMAL_COMMIT_SIZE);
+ if (p + csize <= mi_segment_end(segment)) {
+ size = csize;
+ }
+ }
+ */
+ // commit liberal, but decommit conservative
+ uint8_t* start = NULL;
+ size_t full_size = 0;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, !commit/*conservative*/, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return true;
+
+ if (commit && !mi_commit_mask_all_set(&segment->commit_mask, &mask)) {
+ bool is_zero = false;
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+ _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
+ if (!_mi_os_commit(start,full_size,&is_zero,stats)) return false;
+ mi_commit_mask_set(&segment->commit_mask, &mask);
+ }
+ else if (!commit && mi_commit_mask_any_set(&segment->commit_mask, &mask)) {
+ mi_assert_internal((void*)start != (void*)segment);
+ //mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &mask));
+
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask);
+ _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap
+ if (segment->allow_decommit) {
+ _mi_os_decommit(start, full_size, stats); // ok if this fails
+ }
+ mi_commit_mask_clear(&segment->commit_mask, &mask);
+ }
+  // increase the expiration when reusing part of a delayed decommit range
+ if (commit && mi_commit_mask_any_set(&segment->decommit_mask, &mask)) {
+ segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
+ }
+ // always undo delayed decommits
+ mi_commit_mask_clear(&segment->decommit_mask, &mask);
+ return true;
+}
+
+static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+ // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
+ if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->decommit_mask)) return true; // fully committed
+ return mi_segment_commitx(segment,true,p,size,stats);
+}
+
+static void mi_segment_perhaps_decommit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) {
+ if (!segment->allow_decommit) return;
+ if (mi_option_get(mi_option_decommit_delay) == 0) {
+ mi_segment_commitx(segment, false, p, size, stats);
}
else {
- mi_assert_internal(required == 0);
- size_t page_size = (size_t)1 << page_shift;
- capacity = MI_SEGMENT_SIZE / page_size;
- mi_assert_internal(MI_SEGMENT_SIZE % page_size == 0);
- mi_assert_internal(capacity >= 1 && capacity <= MI_SMALL_PAGES_PER_SEGMENT);
+ // register for future decommit in the decommit mask
+ uint8_t* start = NULL;
+ size_t full_size = 0;
+ mi_commit_mask_t mask;
+ mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask);
+ if (mi_commit_mask_is_empty(&mask) || full_size==0) return;
+
+ // update delayed commit
+ mi_assert_internal(segment->decommit_expire > 0 || mi_commit_mask_is_empty(&segment->decommit_mask));
+ mi_commit_mask_t cmask;
+ mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only decommit what is committed; span_free may try to decommit more
+ mi_commit_mask_set(&segment->decommit_mask, &cmask);
+ mi_msecs_t now = _mi_clock_now();
+ if (segment->decommit_expire == 0) {
+ // no previous decommits, initialize now
+ segment->decommit_expire = now + mi_option_get(mi_option_decommit_delay);
+ }
+ else if (segment->decommit_expire <= now) {
+ // previous decommit mask already expired
+ // mi_segment_delayed_decommit(segment, true, stats);
+      segment->decommit_expire = now + mi_option_get(mi_option_decommit_extend_delay); // (mi_option_get(mi_option_decommit_delay) / 8); // wait a tiny bit longer in case there is a series of frees
+ }
+ else {
+ // previous decommit mask is not yet expired, increase the expiration by a bit.
+ segment->decommit_expire += mi_option_get(mi_option_decommit_extend_delay);
+ }
+ }
+}
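// A minimal sketch of the expiration policy above: the first delayed decommit
// sets a full delay, an already-expired deadline is pushed out by a small
// extend delay, and a pending deadline is nudged forward a little on every
// further free. The delay values and the helper name are illustrative only.
#include <stdint.h>
#include <stdio.h>

typedef int64_t msecs_t;

static msecs_t update_expire(msecs_t expire, msecs_t now, msecs_t delay, msecs_t extend) {
  if (expire == 0)        return now + delay;      // no pending decommits yet
  else if (expire <= now) return now + extend;     // already expired: wait a tiny bit longer
  else                    return expire + extend;  // still pending: extend slightly
}

int main(void) {
  msecs_t expire = 0;
  expire = update_expire(expire, /*now=*/1000, /*delay=*/100, /*extend=*/10);
  printf("after first free:  expire at %lld\n", (long long)expire);   // 1100
  expire = update_expire(expire, 1050, 100, 10);
  printf("after second free: expire at %lld\n", (long long)expire);   // 1110
  return 0;
}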
+
+static void mi_segment_delayed_decommit(mi_segment_t* segment, bool force, mi_stats_t* stats) {
+ if (!segment->allow_decommit || mi_commit_mask_is_empty(&segment->decommit_mask)) return;
+ mi_msecs_t now = _mi_clock_now();
+ if (!force && now < segment->decommit_expire) return;
+
+ mi_commit_mask_t mask = segment->decommit_mask;
+ segment->decommit_expire = 0;
+ mi_commit_mask_create_empty(&segment->decommit_mask);
+
+ size_t idx;
+ size_t count;
+ mi_commit_mask_foreach(&mask, idx, count) {
+ // if found, decommit that sequence
+ if (count > 0) {
+ uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE);
+ size_t size = count * MI_COMMIT_SIZE;
+ mi_segment_commitx(segment, false, p, size, stats);
+ }
}
- size_t info_size;
- size_t pre_size;
- size_t segment_size = mi_segment_size(capacity, required, &pre_size, &info_size);
- mi_assert_internal(segment_size >= required);
-
- // Initialize parameters
- const bool eager_delayed = (page_kind <= MI_PAGE_MEDIUM && // don't delay for large objects
- // !_mi_os_has_overcommit() && // never delay on overcommit systems
- _mi_current_thread_count() > 1 && // do not delay for the first N threads
- tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
- const bool eager = !eager_delayed && mi_option_is_enabled(mi_option_eager_commit);
- bool commit = eager; // || (page_kind >= MI_PAGE_LARGE);
- bool pages_still_good = false;
- bool is_zero = false;
+ mi_commit_mask_foreach_end()
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask));
+}
- // Try to get it from our thread local cache first
- if (segment != NULL) {
- // came from cache
- mi_assert_internal(segment->segment_size == segment_size);
- if (page_kind <= MI_PAGE_MEDIUM && segment->page_kind == page_kind && segment->segment_size == segment_size) {
- pages_still_good = true;
+
+static bool mi_segment_is_abandoned(mi_segment_t* segment) {
+ return (segment->thread_id == 0);
+}
+
+// note: can be called on abandoned segments
+static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice_index < segment->slice_entries);
+ mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment)
+ ? NULL : mi_span_queue_for(slice_count,tld));
+ if (slice_count==0) slice_count = 1;
+ mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries);
+
+ // set first and last slice (the intermediates can be undetermined)
+ mi_slice_t* slice = &segment->slices[slice_index];
+ slice->slice_count = (uint32_t)slice_count;
+ mi_assert_internal(slice->slice_count == slice_count); // no overflow?
+ slice->slice_offset = 0;
+ if (slice_count > 1) {
+ mi_slice_t* last = &segment->slices[slice_index + slice_count - 1];
+ last->slice_count = 0;
+ last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1));
+ last->xblock_size = 0;
+ }
+
+ // perhaps decommit
+ mi_segment_perhaps_decommit(segment,mi_slice_start(slice),slice_count*MI_SEGMENT_SLICE_SIZE,tld->stats);
+
+ // and push it on the free page queue (if it was not a huge page)
+ if (sq != NULL) mi_span_queue_push( sq, slice );
+ else slice->xblock_size = 0; // mark huge page as free anyways
+}
+
+/*
+// called from reclaim to add existing free spans
+static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) {
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0);
+ size_t slice_index = mi_slice_index(slice);
+ mi_segment_span_free(segment,slice_index,slice->slice_count,tld);
+}
+*/
+
+static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->xblock_size==0);
+ mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE);
+ mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld);
+ mi_span_queue_delete(sq, slice);
+}
+
+// note: can be called on abandoned segments
+static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0);
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ bool is_abandoned = mi_segment_is_abandoned(segment);
+
+ // for huge pages, just mark as free but don't add to the queues
+ if (segment->kind == MI_SEGMENT_HUGE) {
+ mi_assert_internal(segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
+ slice->xblock_size = 0; // mark as free anyways
+ // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
+ // avoid a possible cache miss (and the segment is about to be freed)
+ return slice;
+ }
+
+ // otherwise coalesce the span and add to the free span queues
+ size_t slice_count = slice->slice_count;
+ mi_slice_t* next = slice + slice->slice_count;
+ mi_assert_internal(next <= mi_segment_slices_end(segment));
+ if (next < mi_segment_slices_end(segment) && next->xblock_size==0) {
+ // free next block -- remove it from free and merge
+ mi_assert_internal(next->slice_count > 0 && next->slice_offset==0);
+ slice_count += next->slice_count; // extend
+ if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); }
+ }
+ if (slice > segment->slices) {
+ mi_slice_t* prev = mi_slice_first(slice - 1);
+ mi_assert_internal(prev >= segment->slices);
+ if (prev->xblock_size==0) {
+ // free previous slice -- remove it from free and merge
+ mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0);
+ slice_count += prev->slice_count;
+ if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); }
+ slice = prev;
}
- else
- {
- if (MI_SECURE!=0) {
- mi_assert_internal(!segment->mem_is_pinned);
- mi_segment_protect(segment, false, tld->os); // reset protection if the page kind differs
- }
- // different page kinds; unreset any reset pages, and unprotect
- // TODO: optimize cache pop to return fitting pages if possible?
- for (size_t i = 0; i < segment->capacity; i++) {
- mi_page_t* page = &segment->pages[i];
- if (page->is_reset) {
- if (!commit && mi_option_is_enabled(mi_option_reset_decommits)) {
- page->is_reset = false;
- }
- else {
- mi_page_unreset(segment, page, 0, tld); // todo: only unreset the part that was reset? (instead of the full page)
- }
+ }
+
+ // and add the new free page
+ mi_segment_span_free(segment, mi_slice_index(slice), slice_count, tld);
+ return slice;
+}
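// A standalone model of the coalescing step above: a freed run absorbs a free
// neighbor on each side before being re-queued; absorbed neighbors keep a zero
// count, loosely mirroring how merged slices keep slice_count == 0. The run_t
// layout is a simplification, not the mi_slice_t representation.
#include <stdbool.h>
#include <stdio.h>

typedef struct { bool used; int count; } run_t;

static int coalesce(run_t* r, int n, int i) {
  if (i + 1 < n && !r[i+1].used && r[i+1].count > 0) {
    r[i].count += r[i+1].count; r[i+1].count = 0;             // absorb the right neighbor
  }
  if (i > 0 && !r[i-1].used && r[i-1].count > 0) {
    r[i-1].count += r[i].count; r[i].count = 0; i--;          // merge into the left neighbor
  }
  return i;                                                   // index of the merged run
}

int main(void) {
  run_t runs[3] = { {false, 4}, {true, 2}, {false, 8} };      // free, used, free
  runs[1].used = false;                                       // free the middle run
  int i = coalesce(runs, 3, 1);
  printf("merged run at index %d, count %d\n", i, runs[i].count);  // index 0, count 14
  return 0;
}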
+
+
+static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) {
+ mi_assert_internal(_mi_ptr_segment(slice)==segment);
+ mi_assert_internal(slice->slice_count >= slice_count);
+ mi_assert_internal(slice->xblock_size > 0); // no more in free queue
+ if (slice->slice_count <= slice_count) return;
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
+ size_t next_index = mi_slice_index(slice) + slice_count;
+ size_t next_count = slice->slice_count - slice_count;
+ mi_segment_span_free(segment, next_index, next_count, tld);
+ slice->slice_count = (uint32_t)slice_count;
+}
+
+// Note: may still return NULL if committing the memory failed
+static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice_index < segment->slice_entries);
+ mi_slice_t* slice = &segment->slices[slice_index];
+ mi_assert_internal(slice->xblock_size==0 || slice->xblock_size==1);
+
+ // commit before changing the slice data
+ if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) {
+ return NULL; // commit failed!
+ }
+
+ // convert the slices to a page
+ slice->slice_offset = 0;
+ slice->slice_count = (uint32_t)slice_count;
+ mi_assert_internal(slice->slice_count == slice_count);
+ const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE;
+ slice->xblock_size = (uint32_t)(bsize >= MI_HUGE_BLOCK_SIZE ? MI_HUGE_BLOCK_SIZE : bsize);
+ mi_page_t* page = mi_slice_to_page(slice);
+ mi_assert_internal(mi_page_block_size(page) == bsize);
+
+ // set slice back pointers for the first MI_MAX_SLICE_OFFSET entries
+ size_t extra = slice_count-1;
+ if (extra > MI_MAX_SLICE_OFFSET) extra = MI_MAX_SLICE_OFFSET;
+  if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than available entries in the segment->slices
+ slice++;
+ for (size_t i = 1; i <= extra; i++, slice++) {
+ slice->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i);
+ slice->slice_count = 0;
+ slice->xblock_size = 1;
+ }
+
+ // and also for the last one (if not set already) (the last one is needed for coalescing)
+ // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543)
+ mi_slice_t* last = &((mi_slice_t*)segment->slices)[slice_index + slice_count - 1];
+ if (last < mi_segment_slices_end(segment) && last >= slice) {
+ last->slice_offset = (uint32_t)(sizeof(mi_slice_t)*(slice_count-1));
+ last->slice_count = 0;
+ last->xblock_size = 1;
+ }
+
+ // and initialize the page
+ page->is_reset = false;
+ page->is_committed = true;
+ segment->used++;
+ return page;
+}
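// A standalone sketch of the back-offset scheme set up above: interior slices
// store a byte offset back to the head slice of their span, so any interior
// slice maps to its page in O(1) (this is what mi_slice_first relies on).
// The trimmed-down slice_t below is illustrative, not the real mi_slice_t.
#include <stdint.h>
#include <stdio.h>

typedef struct slice_s {
  uint32_t slice_count;    // > 0 only on the head slice of a span
  uint32_t slice_offset;   // byte offset back to the head slice
} slice_t;

static slice_t* slice_first(slice_t* s) {
  return (slice_t*)((uint8_t*)s - s->slice_offset);
}

int main(void) {
  slice_t slices[4] = { {0,0}, {0,0}, {0,0}, {0,0} };
  slices[0].slice_count = 4;                           // head of a 4-slice span
  for (uint32_t i = 1; i < 4; i++) {
    slices[i].slice_offset = (uint32_t)(i * sizeof(slice_t));
  }
  printf("head of slice 3 spans %u slices\n", slice_first(&slices[3])->slice_count);
  return 0;
}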
+
+static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_segments_tld_t* tld) {
+ mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX);
+ // search from best fit up
+ mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld);
+ if (slice_count == 0) slice_count = 1;
+ while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) {
+ for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) {
+ if (slice->slice_count >= slice_count) {
+ // found one
+ mi_span_queue_delete(sq, slice);
+ mi_segment_t* segment = _mi_ptr_segment(slice);
+ if (slice->slice_count > slice_count) {
+ mi_segment_slice_split(segment, slice, slice_count, tld);
}
- }
- // ensure the initial info is committed
- if (segment->capacity < capacity) {
- bool commit_zero = false;
- bool ok = _mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
- if (commit_zero) is_zero = true;
- if (!ok) {
+ mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->xblock_size > 0);
+ mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld);
+ if (page == NULL) {
+ // commit failed; return NULL but first restore the slice
+ mi_segment_span_free_coalesce(slice, tld);
return NULL;
}
+ return page;
}
}
+ sq++;
+ }
+ // could not find a page..
+ return NULL;
+}
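// A toy model of the best-fit search above: start at the bin that fits the
// request and walk upward through coarser bins until a large-enough span is
// found, splitting off and re-queueing any remainder. The bin layout, span
// sizes, and bin_of mapping are illustrative, not mimalloc's.
#include <stddef.h>
#include <stdio.h>

#define NBINS    4
#define MAXSPANS 8

static size_t bins[NBINS][MAXSPANS] = {     // free span sizes (in slices) per bin
  {1, 1}, {2, 3}, {6}, {12}
};

static size_t bin_of(size_t n) {            // toy binning: 1, 2-3, 4-7, 8+
  return (n <= 1 ? 0 : n <= 3 ? 1 : n <= 7 ? 2 : 3);
}

static size_t find_span(size_t want) {
  for (size_t b = bin_of(want); b < NBINS; b++) {
    for (size_t i = 0; i < MAXSPANS; i++) {
      if (bins[b][i] >= want) {
        size_t got = bins[b][i];
        bins[b][i] = 0;                     // remove the span from its queue
        if (got > want) {                   // split: re-queue the remainder
          size_t rest = got - want;
          size_t rb = bin_of(rest);
          for (size_t j = 0; j < MAXSPANS; j++) {
            if (bins[rb][j] == 0) { bins[rb][j] = rest; break; }
          }
        }
        return want;
      }
    }
  }
  return 0;                                 // nothing fits: a fresh segment is needed
}

int main(void) {
  printf("allocated a span of %zu slices\n", find_span(4));   // splits the 6-slice span
  return 0;
}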
+
+
+/* -----------------------------------------------------------
+ Segment allocation
+----------------------------------------------------------- */
+
+// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
+static mi_segment_t* mi_segment_init(mi_segment_t* segment, size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page)
+{
+ mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL));
+ mi_assert_internal((segment==NULL) || (segment!=NULL && required==0));
+ // calculate needed sizes first
+ size_t info_slices;
+ size_t pre_size;
+ const size_t segment_slices = mi_segment_calculate_slices(required, &pre_size, &info_slices);
+ const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
+ const size_t segment_size = segment_slices * MI_SEGMENT_SLICE_SIZE;
+
+ // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little)
+ const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems
+ _mi_current_thread_count() > 1 && // do not delay for the first N threads
+ tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay));
+ const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit);
+ bool commit = eager || (required > 0);
+
+ // Try to get from our cache first
+ bool is_zero = false;
+ const bool commit_info_still_good = (segment != NULL);
+ mi_commit_mask_t commit_mask;
+ mi_commit_mask_t decommit_mask;
+ if (segment != NULL) {
+ commit_mask = segment->commit_mask;
+ decommit_mask = segment->decommit_mask;
}
else {
+ mi_commit_mask_create_empty(&commit_mask);
+ mi_commit_mask_create_empty(&decommit_mask);
+ }
+ if (segment==NULL) {
// Allocate the segment from the OS
- size_t memid;
- bool mem_large = (!eager_delayed && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
- bool is_pinned = false;
- segment = (mi_segment_t*)_mi_mem_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
- if (segment == NULL) return NULL; // failed to allocate
- if (!commit) {
- // ensure the initial info is committed
- mi_assert_internal(!mem_large && !is_pinned);
- bool commit_zero = false;
- bool ok = _mi_mem_commit(segment, pre_size, &commit_zero, tld->os);
- if (commit_zero) is_zero = true;
- if (!ok) {
- // commit failed; we cannot touch the memory: free the segment directly and return `NULL`
- _mi_mem_free(segment, MI_SEGMENT_SIZE, memid, false, false, os_tld);
- return NULL;
+ bool mem_large = (!eager_delay && (MI_SECURE==0)); // only allow large OS pages once we are no longer lazy
+ bool is_pinned = false;
+ size_t memid = 0;
+ segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, &commit_mask, &decommit_mask, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
+ if (segment==NULL) {
+ segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, MI_SEGMENT_SIZE, &commit, &mem_large, &is_pinned, &is_zero, &memid, os_tld);
+ if (segment == NULL) return NULL; // failed to allocate
+ if (commit) {
+ mi_commit_mask_create_full(&commit_mask);
}
+ else {
+ mi_commit_mask_create_empty(&commit_mask);
+ }
+ }
+ mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
+
+ const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
+ mi_assert_internal(commit_needed>0);
+ mi_commit_mask_t commit_needed_mask;
+ mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
+ if (!mi_commit_mask_all_set(&commit_mask, &commit_needed_mask)) {
+ // at least commit the info slices
+ mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= info_slices*MI_SEGMENT_SLICE_SIZE);
+ bool ok = _mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, &is_zero, tld->stats);
+ if (!ok) return NULL; // failed to commit
+ mi_commit_mask_set(&commit_mask, &commit_needed_mask);
}
segment->memid = memid;
- segment->mem_is_pinned = (mem_large || is_pinned);
- segment->mem_is_committed = commit;
- mi_segments_track_size((long)segment_size, tld);
+ segment->mem_is_pinned = is_pinned;
+ segment->mem_is_large = mem_large;
+ segment->mem_is_committed = mi_commit_mask_is_full(&commit_mask);
+ mi_segments_track_size((long)(segment_size), tld);
+ _mi_segment_map_allocated_at(segment);
}
- mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0);
- mi_assert_internal(segment->mem_is_pinned ? segment->mem_is_committed : true);
+
+ // zero the segment info? -- not always needed as it is zero initialized from the OS
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
- if (!pages_still_good) {
- // zero the segment info (but not the `mem` fields)
+ if (!is_zero) {
ptrdiff_t ofs = offsetof(mi_segment_t, next);
- memset((uint8_t*)segment + ofs, 0, info_size - ofs);
-
- // initialize pages info
- for (size_t i = 0; i < capacity; i++) {
- mi_assert_internal(i <= 255);
- segment->pages[i].segment_idx = (uint8_t)i;
- segment->pages[i].is_reset = false;
- segment->pages[i].is_committed = commit;
- segment->pages[i].is_zero_init = is_zero;
+ size_t prefix = offsetof(mi_segment_t, slices) - ofs;
+ memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*segment_slices);
+ }
+
+ if (!commit_info_still_good) {
+ segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
+ segment->allow_decommit = (mi_option_is_enabled(mi_option_allow_decommit) && !segment->mem_is_pinned && !segment->mem_is_large);
+ if (segment->allow_decommit) {
+ segment->decommit_expire = _mi_clock_now() + mi_option_get(mi_option_decommit_delay);
+ segment->decommit_mask = decommit_mask;
+ mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
+ #if MI_DEBUG>2
+ const size_t commit_needed = _mi_divide_up(info_slices*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE);
+ mi_commit_mask_t commit_needed_mask;
+ mi_commit_mask_create(0, commit_needed, &commit_needed_mask);
+ mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask));
+ #endif
+ }
+ else {
+ mi_assert_internal(mi_commit_mask_is_empty(&decommit_mask));
+ segment->decommit_expire = 0;
+ mi_commit_mask_create_empty( &segment->decommit_mask );
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask));
}
}
- else {
- // zero the segment info but not the pages info (and mem fields)
- ptrdiff_t ofs = offsetof(mi_segment_t, next);
- memset((uint8_t*)segment + ofs, 0, offsetof(mi_segment_t,pages) - ofs);
- }
+
- // initialize
- segment->page_kind = page_kind;
- segment->capacity = capacity;
- segment->page_shift = page_shift;
- segment->segment_size = segment_size;
- segment->segment_info_size = pre_size;
- segment->thread_id = _mi_thread_id();
+ // initialize segment info
+ segment->segment_slices = segment_slices;
+ segment->segment_info_slices = info_slices;
+ segment->thread_id = _mi_thread_id();
segment->cookie = _mi_ptr_cookie(segment);
- // _mi_stat_increase(&tld->stats->page_committed, segment->segment_info_size);
+ segment->slice_entries = slice_entries;
+ segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE);
- // set protection
- mi_segment_protect(segment, true, tld->os);
+ // memset(segment->slices, 0, sizeof(mi_slice_t)*(info_slices+1));
+ _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment));
- // insert in free lists for small and medium pages
- if (page_kind <= MI_PAGE_MEDIUM) {
- mi_segment_insert_in_free_queue(segment, tld);
+ // set up guard pages
+ size_t guard_slices = 0;
+ if (MI_SECURE>0) {
+ // in secure mode, we set up a protected page in between the segment info
+ // and the page data
+ size_t os_pagesize = _mi_os_page_size();
+ mi_assert_internal(mi_segment_info_size(segment) - os_pagesize >= pre_size);
+ _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize);
+ uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize;
+ mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats);
+ _mi_os_protect(end, os_pagesize);
+ if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-(
+ guard_slices = 1;
+ }
+
+ // reserve first slices for segment info
+ mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld);
+ mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance
+ mi_assert_internal(segment->used == 1);
+ segment->used = 0; // don't count our internal slices towards usage
+
+ // initialize initial free pages
+ if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page
+ mi_assert_internal(huge_page==NULL);
+ mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, tld);
+ }
+ else {
+ mi_assert_internal(huge_page!=NULL);
+ mi_assert_internal(mi_commit_mask_is_empty(&segment->decommit_mask));
+ mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask));
+ *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld);
+ mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance
}
- //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment);
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
return segment;
}
-static mi_segment_t* mi_segment_alloc(size_t required, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- return mi_segment_init(NULL, required, page_kind, page_shift, tld, os_tld);
+
+// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` .
+static mi_segment_t* mi_segment_alloc(size_t required, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) {
+ return mi_segment_init(NULL, required, tld, os_tld, huge_page);
}
+
static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) {
- MI_UNUSED(force);
- mi_assert(segment != NULL);
- // note: don't reset pages even on abandon as the whole segment is freed? (and ready for reuse)
- bool force_reset = (force && mi_option_is_enabled(mi_option_abandoned_page_reset));
- mi_pages_reset_remove_all_in_segment(segment, force_reset, tld);
- mi_segment_remove_from_free_queue(segment,tld);
-
- mi_assert_expensive(!mi_segment_queue_contains(&tld->small_free, segment));
- mi_assert_expensive(!mi_segment_queue_contains(&tld->medium_free, segment));
- mi_assert(segment->next == NULL);
- mi_assert(segment->prev == NULL);
- _mi_stat_decrease(&tld->stats->page_committed, segment->segment_info_size);
+ mi_assert_internal(segment != NULL);
+ mi_assert_internal(segment->next == NULL);
+ mi_assert_internal(segment->used == 0);
+
+ // Remove the free pages
+ mi_slice_t* slice = &segment->slices[0];
+ const mi_slice_t* end = mi_segment_slices_end(segment);
+ size_t page_count = 0;
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ mi_assert_internal(mi_slice_index(slice)==0 || slice->xblock_size == 0); // no more used pages ..
+ if (slice->xblock_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
+ mi_segment_span_remove_from_queue(slice, tld);
+ }
+ page_count++;
+ slice = slice + slice->slice_count;
+ }
+ mi_assert_internal(page_count == 2); // first page is allocated by the segment itself
+
+ // stats
+ _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment));
if (!force && mi_segment_cache_push(segment, tld)) {
// it is put in our cache
}
else {
// otherwise return it to the OS
- mi_segment_os_free(segment, segment->segment_size, tld);
+ mi_segment_os_free(segment, tld);
}
}
-/* -----------------------------------------------------------
- Free page management inside a segment
------------------------------------------------------------ */
-
-
-static bool mi_segment_has_free(const mi_segment_t* segment) {
- return (segment->used < segment->capacity);
-}
-
-static bool mi_segment_page_claim(mi_segment_t* segment, mi_page_t* page, mi_segments_tld_t* tld) {
- mi_assert_internal(_mi_page_segment(page) == segment);
- mi_assert_internal(!page->segment_in_use);
- mi_pages_reset_remove(page, tld);
- // check commit
- if (!page->is_committed) {
- mi_assert_internal(!segment->mem_is_pinned);
- mi_assert_internal(!page->is_reset);
- size_t psize;
- uint8_t* start = mi_segment_raw_page_start(segment, page, &psize);
- bool is_zero = false;
- const size_t gsize = (MI_SECURE >= 2 ? _mi_os_page_size() : 0);
- bool ok = _mi_mem_commit(start, psize + gsize, &is_zero, tld->os);
- if (!ok) return false; // failed to commit!
- if (gsize > 0) { mi_segment_protect_range(start + psize, gsize, true); }
- if (is_zero) { page->is_zero_init = true; }
- page->is_committed = true;
- }
- // set in-use before doing unreset to prevent delayed reset
- page->segment_in_use = true;
- segment->used++;
- // check reset
- if (page->is_reset) {
- mi_assert_internal(!segment->mem_is_pinned);
- bool ok = mi_page_unreset(segment, page, 0, tld);
- if (!ok) {
- page->segment_in_use = false;
- segment->used--;
- return false;
- }
- }
- mi_assert_internal(page->segment_in_use);
- mi_assert_internal(segment->used <= segment->capacity);
- if (segment->used == segment->capacity && segment->page_kind <= MI_PAGE_MEDIUM) {
- // if no more free pages, remove from the queue
- mi_assert_internal(!mi_segment_has_free(segment));
- mi_segment_remove_from_free_queue(segment, tld);
- }
- return true;
-}
-
/* -----------------------------------------------------------
- Free
+ Page Free
----------------------------------------------------------- */
static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld);
-// clear page data; can be called on abandoned segments
-static void mi_segment_page_clear(mi_segment_t* segment, mi_page_t* page, bool allow_reset, mi_segments_tld_t* tld)
-{
- mi_assert_internal(page->segment_in_use);
+// note: can be called on abandoned pages
+static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) {
+ mi_assert_internal(page->xblock_size > 0);
mi_assert_internal(mi_page_all_free(page));
- mi_assert_internal(page->is_committed);
- mi_assert_internal(mi_page_not_in_queue(page, tld));
-
+ mi_segment_t* segment = _mi_ptr_segment(page);
+ mi_assert_internal(segment->used > 0);
+
size_t inuse = page->capacity * mi_page_block_size(page);
_mi_stat_decrease(&tld->stats->page_committed, inuse);
_mi_stat_decrease(&tld->stats->pages, 1);
- // calculate the used size from the raw (non-aligned) start of the page
- //size_t pre_size;
- //_mi_segment_page_start(segment, page, page->block_size, NULL, &pre_size);
- //size_t used_size = pre_size + (page->capacity * page->block_size);
+ // reset the page memory to reduce memory pressure?
+ if (!segment->mem_is_pinned && !page->is_reset && mi_option_is_enabled(mi_option_page_reset)) {
+ size_t psize;
+ uint8_t* start = _mi_page_start(segment, page, &psize);
+ page->is_reset = true;
+ _mi_os_reset(start, psize, tld->stats);
+ }
+ // zero the page data, but not the segment fields
page->is_zero_init = false;
- page->segment_in_use = false;
-
- // reset the page memory to reduce memory pressure?
- // note: must come after setting `segment_in_use` to false but before block_size becomes 0
- //mi_page_reset(segment, page, 0 /*used_size*/, tld);
-
- // zero the page data, but not the segment fields and capacity, and block_size (for page size calculations)
- uint32_t block_size = page->xblock_size;
- uint16_t capacity = page->capacity;
- uint16_t reserved = page->reserved;
- ptrdiff_t ofs = offsetof(mi_page_t,capacity);
+ ptrdiff_t ofs = offsetof(mi_page_t, capacity);
memset((uint8_t*)page + ofs, 0, sizeof(*page) - ofs);
- page->capacity = capacity;
- page->reserved = reserved;
- page->xblock_size = block_size;
- segment->used--;
-
- // add to the free page list for reuse/reset
- if (allow_reset) {
- mi_pages_reset_add(segment, page, tld);
- }
+ page->xblock_size = 1;
- page->capacity = 0; // after reset these can be zero'd now
- page->reserved = 0;
+ // and free it
+ mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld);
+ segment->used--;
+ // cannot assert segment valid as it is called during reclaim
+ // mi_assert_expensive(mi_segment_is_valid(segment, tld));
+ return slice;
}
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
{
mi_assert(page != NULL);
+
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_expensive(mi_segment_is_valid(segment,tld));
- mi_reset_delayed(tld);
// mark it as free now
- mi_segment_page_clear(segment, page, true, tld);
+ mi_segment_page_clear(page, tld);
+ mi_assert_expensive(mi_segment_is_valid(segment, tld));
if (segment->used == 0) {
// no more used pages; remove from the free list and free the segment
mi_segment_free(segment, force, tld);
}
- else {
- if (segment->used == segment->abandoned) {
- // only abandoned pages; remove from free list and abandon
- mi_segment_abandon(segment,tld);
- }
- else if (segment->used + 1 == segment->capacity) {
- mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM); // for now we only support small and medium pages
- // move back to segments free list
- mi_segment_insert_in_free_queue(segment,tld);
- }
+ else if (segment->used == segment->abandoned) {
+ // only abandoned pages; remove from free list and abandon
+ mi_segment_abandon(segment,tld);
}
}
@@ -854,7 +1067,7 @@ void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld)
Abandonment
When threads terminate, they can leave segments with
-live blocks (reached through other threads). Such segments
+live blocks (reachable through other threads). Such segments
are "abandoned" and will be reclaimed by other threads to
reuse their pages and/or free them eventually
@@ -869,11 +1082,11 @@ or decommitting segments that have a pending read operation.
Note: the current implementation is one possible design;
another way might be to keep track of abandoned segments
-in the regions. This would have the advantage of keeping
+in the arenas/segment_cache's. This would have the advantage of keeping
all concurrent code in one place and not needing to deal
with ABA issues. The drawback is that it is unclear how to
scan abandoned segments efficiently in that case as they
-would be spread among all other segments in the regions.
+would be spread among all other segments in the arenas.
----------------------------------------------------------- */
// Use the bottom 20-bits (on 64-bit) of the aligned segment pointers
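To make the tagging idea concrete, here is a minimal stand-alone sketch of an ABA-resistant push; the names (tagged_ptr_t, tagged_push, list_head, node_t) are illustrative rather than mimalloc's API, and it assumes nodes aligned to 1 MiB so the bottom 20 bits are free for a counter. The push/pop pair in this diff operates on the same kind of tagged value (mi_tagged_segment_t).

// Minimal sketch (illustrative names, not mimalloc's API): ABA-resistant push
// onto a lock-free list by packing a 20-bit counter into the low bits of a
// 1 MiB-aligned pointer.
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_BITS 20
#define TAG_MASK ((((uintptr_t)1) << TAG_BITS) - 1)

typedef uintptr_t tagged_ptr_t;                 // pointer bits | tag bits
typedef struct node_s { struct node_s* next; } node_t;

static _Atomic(tagged_ptr_t) list_head;         // plays the role of `abandoned`

static inline node_t*      tp_ptr(tagged_ptr_t tp) { return (node_t*)(tp & ~TAG_MASK); }
static inline uintptr_t    tp_tag(tagged_ptr_t tp) { return tp & TAG_MASK; }
static inline tagged_ptr_t tp_make(node_t* p, uintptr_t tag) { return (uintptr_t)p | (tag & TAG_MASK); }

static void tagged_push(node_t* n) {
  tagged_ptr_t old = atomic_load_explicit(&list_head, memory_order_relaxed);
  tagged_ptr_t desired;
  do {
    n->next = tp_ptr(old);
    desired = tp_make(n, tp_tag(old) + 1);      // bump the tag on every update
  } while (!atomic_compare_exchange_weak_explicit(&list_head, &old, desired,
                                                  memory_order_release, memory_order_relaxed));
}

int main(void) {
  // nodes must be aligned so their low TAG_BITS are zero
  node_t* a = (node_t*)aligned_alloc((size_t)1 << TAG_BITS, (size_t)1 << TAG_BITS);
  node_t* b = (node_t*)aligned_alloc((size_t)1 << TAG_BITS, (size_t)1 << TAG_BITS);
  tagged_push(a);
  tagged_push(b);
  tagged_ptr_t h = atomic_load(&list_head);
  printf("head=%p tag=%lu\n", (void*)tp_ptr(h), (unsigned long)tp_tag(h));
  free(b); free(a);
  return 0;
}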
@@ -912,7 +1125,7 @@ static mi_decl_cache_align _Atomic(size_t) abandoned_readers; // = 0
static void mi_abandoned_visited_push(mi_segment_t* segment) {
mi_assert_internal(segment->thread_id == 0);
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t,&segment->abandoned_next) == NULL);
- mi_assert_internal(segment->next == NULL && segment->prev == NULL);
+ mi_assert_internal(segment->next == NULL);
mi_assert_internal(segment->used > 0);
mi_segment_t* anext = mi_atomic_load_ptr_relaxed(mi_segment_t, &abandoned_visited);
do {
@@ -969,7 +1182,7 @@ static bool mi_abandoned_visited_revisit(void)
static void mi_abandoned_push(mi_segment_t* segment) {
mi_assert_internal(segment->thread_id == 0);
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
- mi_assert_internal(segment->next == NULL && segment->prev == NULL);
+ mi_assert_internal(segment->next == NULL);
mi_assert_internal(segment->used > 0);
mi_tagged_segment_t next;
mi_tagged_segment_t ts = mi_atomic_load_relaxed(&abandoned);
@@ -981,6 +1194,7 @@ static void mi_abandoned_push(mi_segment_t* segment) {
}
// Wait until there are no more pending reads on segments that used to be in the abandoned list
+// called for example from `arena.c` before decommitting
void _mi_abandoned_await_readers(void) {
size_t n;
do {
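The reader guard used by `_mi_abandoned_await_readers` can be sketched in isolation as follows; the names are illustrative (the real counter in this diff is `abandoned_readers`), and this shows only the shape of the protocol, not mimalloc's implementation: threads that may still dereference list links bracket the access, and a thread about to decommit that memory waits for the count to drain first.

// Sketch of the reader-count guard (illustrative names).
#include <stdatomic.h>
#include <stddef.h>
#include <sched.h>    // sched_yield (POSIX); any back-off works here

static _Atomic(size_t) pending_readers;   // plays the role of `abandoned_readers`

static inline void reader_enter(void) {
  atomic_fetch_add_explicit(&pending_readers, 1, memory_order_acquire);
}
static inline void reader_leave(void) {
  atomic_fetch_sub_explicit(&pending_readers, 1, memory_order_release);
}

// called before decommitting memory that a concurrent pop may still read
static void await_readers(void) {
  while (atomic_load_explicit(&pending_readers, memory_order_acquire) != 0) {
    sched_yield();
  }
}

int main(void) {
  reader_enter();    // a pop in progress holds the count here
  reader_leave();
  await_readers();   // returns once no reads are pending
  return 0;
}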
@@ -1031,20 +1245,31 @@ static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) {
mi_assert_internal(segment->used == segment->abandoned);
mi_assert_internal(segment->used > 0);
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
- mi_assert_expensive(mi_segment_is_valid(segment, tld));
-
- // remove the segment from the free page queue if needed
- mi_reset_delayed(tld);
- mi_pages_reset_remove_all_in_segment(segment, mi_option_is_enabled(mi_option_abandoned_page_reset), tld);
- mi_segment_remove_from_free_queue(segment, tld);
- mi_assert_internal(segment->next == NULL && segment->prev == NULL);
+ mi_assert_internal(segment->abandoned_visits == 0);
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
+
+ // remove the free pages from the free page queues
+ mi_slice_t* slice = &segment->slices[0];
+ const mi_slice_t* end = mi_segment_slices_end(segment);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (slice->xblock_size == 0) { // a free page
+ mi_segment_span_remove_from_queue(slice,tld);
+ slice->xblock_size = 0; // but keep it free
+ }
+ slice = slice + slice->slice_count;
+ }
+ // perform delayed decommits
+ mi_segment_delayed_decommit(segment, mi_option_is_enabled(mi_option_abandoned_page_decommit) /* force? */, tld->stats);
+
// all pages in the segment are abandoned; add it to the abandoned list
_mi_stat_increase(&tld->stats->segments_abandoned, 1);
- mi_segments_track_size(-((long)segment->segment_size), tld);
+ mi_segments_track_size(-((long)mi_segment_size(segment)), tld);
segment->thread_id = 0;
- segment->abandoned_visits = 0;
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL);
+ segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned
mi_abandoned_push(segment);
}
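The span walk in `mi_segment_abandon` above (and again in `mi_segment_check_free` and `mi_segment_reclaim` below) follows one idiom: each span is described by its first slice, and adding `slice_count` jumps to the first slice of the next span. A stand-alone toy version, with a trimmed-down stand-in for `mi_slice_t` (only the two fields that drive the iteration are modeled):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef struct slim_slice_s {
  uint32_t slice_count;   // number of slices in this span (set on the span's first slice)
  uint32_t xblock_size;   // 0 = free span, otherwise the block size of a used page
} slim_slice_t;

static void walk_spans(slim_slice_t* slices, size_t num_slices) {
  slim_slice_t* slice = &slices[0];
  const slim_slice_t* end = &slices[num_slices];
  while (slice < end) {
    if (slice->xblock_size == 0) {
      printf("free span: %u slices\n", slice->slice_count);
    }
    else {
      printf("used page: %u slices, block size %u\n", slice->slice_count, slice->xblock_size);
    }
    slice += slice->slice_count;   // skip over the whole span
  }
}

int main(void) {
  // a toy segment: a 1-slice used page, a 3-slice free span, a 4-slice used page
  slim_slice_t slices[8] = {
    {1, 64}, {3, 0}, {0, 0}, {0, 0}, {4, 1024}, {0, 0}, {0, 0}, {0, 0}
  };
  walk_spans(slices, 8);
  return 0;
}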
@@ -1053,9 +1278,10 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_assert_internal(mi_page_heap(page) == NULL);
mi_segment_t* segment = _mi_page_segment(page);
- mi_assert_expensive(!mi_pages_reset_contains(page, tld));
- mi_assert_expensive(mi_segment_is_valid(segment, tld));
- segment->abandoned++;
+
+ mi_assert_expensive(mi_segment_is_valid(segment,tld));
+ segment->abandoned++;
+
_mi_stat_increase(&tld->stats->pages_abandoned, 1);
mi_assert_internal(segment->abandoned <= segment->used);
if (segment->used == segment->abandoned) {
@@ -1068,75 +1294,96 @@ void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) {
Reclaim abandoned pages
----------------------------------------------------------- */
-// Possibly clear pages and check if free space is available
-static bool mi_segment_check_free(mi_segment_t* segment, size_t block_size, bool* all_pages_free)
+static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) {
+ mi_slice_t* slice = &segment->slices[0];
+ *end = mi_segment_slices_end(segment);
+ mi_assert_internal(slice->slice_count>0 && slice->xblock_size>0); // segment allocated page
+ slice = slice + slice->slice_count; // skip the first segment allocated page
+ return slice;
+}
+
+// Possibly free pages and check if free space is available
+static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld)
{
mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
+ mi_assert_internal(mi_segment_is_abandoned(segment));
bool has_page = false;
- size_t pages_used = 0;
- size_t pages_used_empty = 0;
- for (size_t i = 0; i < segment->capacity; i++) {
- mi_page_t* page = &segment->pages[i];
- if (page->segment_in_use) {
- pages_used++;
+
+ // for all slices
+ const mi_slice_t* end;
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (mi_slice_is_used(slice)) { // used page
// ensure used count is up to date and collect potential concurrent frees
+ mi_page_t* const page = mi_slice_to_page(slice);
_mi_page_free_collect(page, false);
if (mi_page_all_free(page)) {
- // if everything free already, page can be reused for some block size
- // note: don't clear the page yet as we can only OS reset it once it is reclaimed
- pages_used_empty++;
- has_page = true;
- }
- else if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
- // a page has available free blocks of the right size
- has_page = true;
+ // if this page is all free now, free it without adding to any queues (yet)
+ mi_assert_internal(page->next == NULL && page->prev==NULL);
+ _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
+ segment->abandoned--;
+        slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalescing!
+ mi_assert_internal(!mi_slice_is_used(slice));
+ if (slice->slice_count >= slices_needed) {
+ has_page = true;
+ }
}
+ else {
+ if (page->xblock_size == block_size && mi_page_has_any_available(page)) {
+ // a page has available free blocks of the right size
+ has_page = true;
+ }
+ }
}
else {
- // whole empty page
- has_page = true;
+ // empty span
+ if (slice->slice_count >= slices_needed) {
+ has_page = true;
+ }
}
- }
- mi_assert_internal(pages_used == segment->used && pages_used >= pages_used_empty);
- if (all_pages_free != NULL) {
- *all_pages_free = ((pages_used - pages_used_empty) == 0);
+ slice = slice + slice->slice_count;
}
return has_page;
}
-
-// Reclaim a segment; returns NULL if the segment was freed
+// Reclaim an abandoned segment; returns NULL if the segment was freed
// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full.
static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) {
mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_segment_t, &segment->abandoned_next) == NULL);
+ mi_assert_expensive(mi_segment_is_valid(segment, tld));
if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; }
segment->thread_id = _mi_thread_id();
segment->abandoned_visits = 0;
- mi_segments_track_size((long)segment->segment_size, tld);
- mi_assert_internal(segment->next == NULL && segment->prev == NULL);
- mi_assert_expensive(mi_segment_is_valid(segment, tld));
+ mi_segments_track_size((long)mi_segment_size(segment), tld);
+ mi_assert_internal(segment->next == NULL);
_mi_stat_decrease(&tld->stats->segments_abandoned, 1);
-
- for (size_t i = 0; i < segment->capacity; i++) {
- mi_page_t* page = &segment->pages[i];
- if (page->segment_in_use) {
+
+ // for all slices
+ const mi_slice_t* end;
+ mi_slice_t* slice = mi_slices_start_iterate(segment, &end);
+ while (slice < end) {
+ mi_assert_internal(slice->slice_count > 0);
+ mi_assert_internal(slice->slice_offset == 0);
+ if (mi_slice_is_used(slice)) {
+ // in use: reclaim the page in our heap
+ mi_page_t* page = mi_slice_to_page(slice);
mi_assert_internal(!page->is_reset);
mi_assert_internal(page->is_committed);
- mi_assert_internal(mi_page_not_in_queue(page, tld));
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_assert_internal(mi_page_heap(page) == NULL);
- segment->abandoned--;
- mi_assert(page->next == NULL);
+ mi_assert_internal(page->next == NULL && page->prev==NULL);
_mi_stat_decrease(&tld->stats->pages_abandoned, 1);
- // set the heap again and allow heap thread delayed free again.
+ segment->abandoned--;
+ // set the heap again and allow delayed free again
mi_page_set_heap(page, heap);
_mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
- // TODO: should we not collect again given that we just collected in `check_free`?
_mi_page_free_collect(page, false); // ensure used count is up to date
if (mi_page_all_free(page)) {
- // if everything free already, clear the page directly
- mi_segment_page_clear(segment, page, true, tld); // reset is ok now
+ // if everything free by now, free the page
+      slice = mi_segment_page_clear(page, tld); // set slice again due to coalescing
}
else {
// otherwise reclaim it into the heap
@@ -1146,21 +1393,21 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
}
}
}
- else if (page->is_committed && !page->is_reset) { // not in-use, and not reset yet
- // note: do not reset as this includes pages that were not touched before
- // mi_pages_reset_add(segment, page, tld);
+ else {
+ // the span is free, add it to our page queues
+      slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalescing
}
+ mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0);
+ slice = slice + slice->slice_count;
}
- mi_assert_internal(segment->abandoned == 0);
- if (segment->used == 0) {
+
+ mi_assert(segment->abandoned == 0);
+ if (segment->used == 0) { // due to page_clear
mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed));
mi_segment_free(segment, false, tld);
return NULL;
}
else {
- if (segment->page_kind <= MI_PAGE_MEDIUM && mi_segment_has_free(segment)) {
- mi_segment_insert_in_free_queue(segment, tld);
- }
return segment;
}
}
@@ -1173,16 +1420,15 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
}
}
-static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, bool* reclaimed, mi_segments_tld_t* tld)
+static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld)
{
*reclaimed = false;
mi_segment_t* segment;
int max_tries = 8; // limit the work to bound allocation times
while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
segment->abandoned_visits++;
- bool all_pages_free;
- bool has_page = mi_segment_check_free(segment,block_size,&all_pages_free); // try to free up pages (due to concurrent frees)
- if (all_pages_free) {
+ bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
+ if (segment->used == 0) {
// free the segment (by forced reclaim) to make it available to other threads.
// note1: we prefer to free a segment as that might lead to reclaiming another
// segment that is still partially used.
@@ -1190,18 +1436,19 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
       // freeing, but that would violate some invariants temporarily.
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
- else if (has_page && segment->page_kind == page_kind) {
- // found a free page of the right kind, or page of the right block_size with free space
+ else if (has_page) {
+ // found a large enough free span, or a page of the right block_size with free space
// we return the result of reclaim (which is usually `segment`) as it might free
// the segment due to concurrent frees (in which case `NULL` is returned).
return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
}
- else if (segment->abandoned_visits >= 3) {
- // always reclaim on 3rd visit to limit the list length.
+ else if (segment->abandoned_visits > 3) {
+ // always reclaim on 3rd visit to limit the abandoned queue length.
mi_segment_reclaim(segment, heap, 0, NULL, tld);
}
else {
       // otherwise, push on the visited list so it does not get looked at again too soon
+      mi_segment_delayed_decommit(segment, true /* force? */, tld->stats); // forced decommit if needed, as we may not visit it again soon
mi_abandoned_visited_push(segment);
}
}
@@ -1209,121 +1456,112 @@ static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t block_size,
}
+void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
+{
+ mi_segment_t* segment;
+ int max_tries = (force ? 16*1024 : 1024); // limit latency
+ if (force) {
+ mi_abandoned_visited_revisit();
+ }
+ while ((max_tries-- > 0) && ((segment = mi_abandoned_pop()) != NULL)) {
+ mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
+ if (segment->used == 0) {
+ // free the segment (by forced reclaim) to make it available to other threads.
+ // note: we could in principle optimize this by skipping reclaim and directly
+      // freeing, but that would violate some invariants temporarily.
+ mi_segment_reclaim(segment, heap, 0, NULL, tld);
+ }
+ else {
+ // otherwise, decommit if needed and push on the visited list
+ // note: forced decommit can be expensive if many threads are destroyed/created as in mstress.
+ mi_segment_delayed_decommit(segment, force, tld->stats);
+ mi_abandoned_visited_push(segment);
+ }
+ }
+}
+
/* -----------------------------------------------------------
Reclaim or allocate
----------------------------------------------------------- */
-static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t page_kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
- mi_assert_internal(page_kind <= MI_PAGE_LARGE);
mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE);
+ mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
// 1. try to get a segment from our cache
mi_segment_t* segment = mi_segment_cache_pop(MI_SEGMENT_SIZE, tld);
if (segment != NULL) {
- mi_segment_init(segment, 0, page_kind, page_shift, tld, os_tld);
+ mi_segment_init(segment, 0, tld, os_tld, NULL);
return segment;
}
// 2. try to reclaim an abandoned segment
bool reclaimed;
- segment = mi_segment_try_reclaim(heap, block_size, page_kind, &reclaimed, tld);
+ segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
if (reclaimed) {
// reclaimed the right page right into the heap
- mi_assert_internal(segment != NULL && segment->page_kind == page_kind && page_kind <= MI_PAGE_LARGE);
+ mi_assert_internal(segment != NULL);
return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
}
else if (segment != NULL) {
- // reclaimed a segment with empty pages (of `page_kind`) in it
+ // reclaimed a segment with a large enough empty span in it
return segment;
}
// 3. otherwise allocate a fresh segment
- return mi_segment_alloc(0, page_kind, page_shift, tld, os_tld);
+ return mi_segment_alloc(0, tld, os_tld, NULL);
}
/* -----------------------------------------------------------
- Small page allocation
+ Page allocation
----------------------------------------------------------- */
-static mi_page_t* mi_segment_find_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_assert_internal(mi_segment_has_free(segment));
- mi_assert_expensive(mi_segment_is_valid(segment, tld));
- for (size_t i = 0; i < segment->capacity; i++) { // TODO: use a bitmap instead of search?
- mi_page_t* page = &segment->pages[i];
- if (!page->segment_in_use) {
- bool ok = mi_segment_page_claim(segment, page, tld);
- if (ok) return page;
+static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+{
+ mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
+
+ // find a free page
+ size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
+ size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
+ mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size);
+ mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, tld); //(required <= MI_SMALL_SIZE_MAX ? 0 : slices_needed), tld);
+ if (page==NULL) {
+ // no free page, allocate a new segment and try again
+ if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) {
+ // OOM or reclaimed a good page in the heap
+ return NULL;
+ }
+ else {
+ // otherwise try again
+ return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld);
}
}
- mi_assert(false);
- return NULL;
-}
-
-// Allocate a page inside a segment. Requires that the page has free pages
-static mi_page_t* mi_segment_page_alloc_in(mi_segment_t* segment, mi_segments_tld_t* tld) {
- mi_assert_internal(mi_segment_has_free(segment));
- return mi_segment_find_free(segment, tld);
-}
-
-static mi_page_t* mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_page_kind_t kind, size_t page_shift, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- // find an available segment the segment free queue
- mi_segment_queue_t* const free_queue = mi_segment_free_queue_of_kind(kind, tld);
- if (mi_segment_queue_is_empty(free_queue)) {
- // possibly allocate or reclaim a fresh segment
- mi_segment_t* const segment = mi_segment_reclaim_or_alloc(heap, block_size, kind, page_shift, tld, os_tld);
- if (segment == NULL) return NULL; // return NULL if out-of-memory (or reclaimed)
- mi_assert_internal(free_queue->first == segment);
- mi_assert_internal(segment->page_kind==kind);
- mi_assert_internal(segment->used < segment->capacity);
- }
- mi_assert_internal(free_queue->first != NULL);
- mi_page_t* const page = mi_segment_page_alloc_in(free_queue->first, tld);
- mi_assert_internal(page != NULL);
-#if MI_DEBUG>=2
- // verify it is committed
- _mi_segment_page_start(_mi_page_segment(page), page, sizeof(void*), NULL, NULL)[0] = 0;
-#endif
+ mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size);
+ mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id());
+ mi_segment_delayed_decommit(_mi_ptr_segment(page), false, tld->stats);
return page;
}
-static mi_page_t* mi_segment_small_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- return mi_segment_page_alloc(heap, block_size, MI_PAGE_SMALL,MI_SMALL_PAGE_SHIFT,tld,os_tld);
-}
-static mi_page_t* mi_segment_medium_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- return mi_segment_page_alloc(heap, block_size, MI_PAGE_MEDIUM, MI_MEDIUM_PAGE_SHIFT, tld, os_tld);
-}
/* -----------------------------------------------------------
- large page allocation
+ Huge page allocation
----------------------------------------------------------- */
-static mi_page_t* mi_segment_large_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
- mi_segment_t* segment = mi_segment_reclaim_or_alloc(heap,block_size,MI_PAGE_LARGE,MI_LARGE_PAGE_SHIFT,tld,os_tld);
- if (segment == NULL) return NULL;
- mi_page_t* page = mi_segment_find_free(segment, tld);
- mi_assert_internal(page != NULL);
-#if MI_DEBUG>=2
- _mi_segment_page_start(segment, page, sizeof(void*), NULL, NULL)[0] = 0;
-#endif
- return page;
-}
-
static mi_page_t* mi_segment_huge_page_alloc(size_t size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
{
- mi_segment_t* segment = mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT,tld,os_tld);
- if (segment == NULL) return NULL;
- mi_assert_internal(mi_segment_page_size(segment) - segment->segment_info_size - (2*(MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= size);
- segment->thread_id = 0; // huge pages are immediately abandoned
- mi_segments_track_size(-(long)segment->segment_size, tld);
- mi_page_t* page = mi_segment_find_free(segment, tld);
- mi_assert_internal(page != NULL);
+ mi_page_t* page = NULL;
+ mi_segment_t* segment = mi_segment_alloc(size,tld,os_tld,&page);
+ if (segment == NULL || page==NULL) return NULL;
+ mi_assert_internal(segment->used==1);
+ mi_assert_internal(mi_page_block_size(page) >= size);
+ segment->thread_id = 0; // huge segments are immediately abandoned
return page;
}
// free huge block from another thread
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) {
// huge page segments are always abandoned and can be freed immediately by any thread
- mi_assert_internal(segment->page_kind==MI_PAGE_HUGE);
+ mi_assert_internal(segment->kind==MI_SEGMENT_HUGE);
mi_assert_internal(segment == _mi_page_segment(page));
mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0);
@@ -1338,7 +1576,6 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
page->is_zero = false;
mi_assert(page->used == 0);
mi_tld_t* tld = heap->tld;
- mi_segments_track_size((long)segment->segment_size, &tld->segments);
_mi_segment_page_free(page, true, &tld->segments);
}
#if (MI_DEBUG!=0)
@@ -1349,26 +1586,24 @@ void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block
}
/* -----------------------------------------------------------
- Page allocation
+ Page allocation and free
----------------------------------------------------------- */
-
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) {
mi_page_t* page;
if (block_size <= MI_SMALL_OBJ_SIZE_MAX) {
- page = mi_segment_small_page_alloc(heap, block_size, tld, os_tld);
+ page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld);
}
else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) {
- page = mi_segment_medium_page_alloc(heap, block_size, tld, os_tld);
+ page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld);
}
else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) {
- page = mi_segment_large_page_alloc(heap, block_size, tld, os_tld);
+ page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld);
}
else {
page = mi_segment_huge_page_alloc(block_size,tld,os_tld);
}
mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld));
- mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size);
- mi_reset_delayed(tld);
- mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld));
return page;
}
+
+
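As a rough illustration of the slice rounding in `mi_segments_page_alloc` above: the requested size is rounded up to whole slices, or to whole medium pages for larger requests, and the resulting slice count drives the span search. The constants below are assumptions for the sake of the example (a 64 KiB slice, a 512 KiB medium page); the real values come from mimalloc's headers.

#include <stdio.h>
#include <stddef.h>

// assumed illustration values, not necessarily mimalloc's real constants
#define SLICE_SIZE       ((size_t)64 * 1024)
#define MEDIUM_PAGE_SIZE ((size_t)512 * 1024)

static size_t align_up(size_t n, size_t align) {
  return ((n + align - 1) / align) * align;
}

int main(void) {
  const size_t required[] = { 10 * 1024, 100 * 1024, 700 * 1024 };
  for (size_t i = 0; i < sizeof(required)/sizeof(required[0]); i++) {
    size_t req = required[i];
    // same shape as mi_segments_page_alloc: round up to a slice,
    // or to a whole medium page for requests larger than a medium page
    size_t page_size = align_up(req, (req > MEDIUM_PAGE_SIZE ? MEDIUM_PAGE_SIZE : SLICE_SIZE));
    printf("required %7zu -> page_size %8zu (%zu slices)\n",
           req, page_size, page_size / SLICE_SIZE);
  }
  return 0;
}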
diff --git a/src/static.c b/src/static.c
index 4b3abc2..5b34ddb 100644
--- a/src/static.c
+++ b/src/static.c
@@ -25,7 +25,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include "os.c"
#include "bitmap.c"
#include "arena.c"
-#include "region.c"
+#include "segment-cache.c"
#include "segment.c"
#include "page.c"
#include "heap.c"
diff --git a/src/stats.c b/src/stats.c
index 6d486f4..134a7bc 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -105,7 +105,7 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
mi_stat_add(&stats->segments_cache, &src->segments_cache, 1);
mi_stat_add(&stats->normal, &src->normal, 1);
mi_stat_add(&stats->huge, &src->huge, 1);
- mi_stat_add(&stats->giant, &src->giant, 1);
+ mi_stat_add(&stats->large, &src->large, 1);
mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1);
mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1);
@@ -115,7 +115,7 @@ static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) {
mi_stat_counter_add(&stats->searches, &src->searches, 1);
mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1);
mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1);
- mi_stat_counter_add(&stats->giant_count, &src->giant_count, 1);
+ mi_stat_counter_add(&stats->large_count, &src->large_count, 1);
#if MI_STAT>1
for (size_t i = 0; i <= MI_BIN_HUGE; i++) {
if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) {
@@ -300,12 +300,12 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
#endif
#if MI_STAT
mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg);
+ mi_stat_print(&stats->large, "large", (stats->large_count.count == 0 ? 1 : -(stats->large.allocated / stats->large_count.count)), out, arg);
mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg);
- mi_stat_print(&stats->giant, "giant", (stats->giant_count.count == 0 ? 1 : -(stats->giant.allocated / stats->giant_count.count)), out, arg);
mi_stat_count_t total = { 0,0,0,0 };
mi_stat_add(&total, &stats->normal, 1);
+ mi_stat_add(&total, &stats->large, 1);
mi_stat_add(&total, &stats->huge, 1);
- mi_stat_add(&total, &stats->giant, 1);
mi_stat_print(&total, "total", 1, out, arg);
#endif
#if MI_STAT>1
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 054d940..bb8dc97 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -16,7 +16,7 @@ if (NOT CMAKE_BUILD_TYPE)
endif()
# Import mimalloc (if installed)
-find_package(mimalloc 1.7 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH)
+find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH)
message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})")
# overriding with a dynamic library
diff --git a/test/main-override-static.c b/test/main-override-static.c
index 071e424..0711650 100644
--- a/test/main-override-static.c
+++ b/test/main-override-static.c
@@ -7,6 +7,7 @@
#include <mimalloc.h>
#include <mimalloc-override.h> // redefines malloc etc.
+
static void double_free1();
static void double_free2();
static void corrupt_free();
@@ -16,6 +17,7 @@ static void test_aslr(void);
static void test_process_info(void);
static void test_reserved(void);
static void negative_stat(void);
+static void alloc_huge(void);
int main() {
mi_version();
@@ -29,6 +31,7 @@ int main() {
// invalid_free();
// test_reserved();
// negative_stat();
+ alloc_huge();
void* p1 = malloc(78);
void* p2 = malloc(24);
@@ -42,7 +45,7 @@ int main() {
free(p1);
free(p2);
free(s);
-
+
/* now test if override worked by allocating/freeing across the api's*/
//p1 = mi_malloc(32);
//free(p1);
@@ -179,4 +182,182 @@ static void negative_stat(void) {
*p = 100;
mi_free(p);
mi_stats_print_out(NULL, NULL);
-}
\ No newline at end of file
+}
+
+static void alloc_huge(void) {
+ void* p = mi_malloc(67108872);
+ mi_free(p);
+}
+
+
+// ----------------------------
+// bin size experiments
+// ------------------------------
+
+#if 0
+#include <stdint.h>
+#include <stdbool.h>
+
+#define MI_INTPTR_SIZE 8
+#define MI_LARGE_WSIZE_MAX (4*1024*1024 / MI_INTPTR_SIZE)
+
+#define MI_BIN_HUGE 100
+//#define MI_ALIGN2W
+
+// Bit scan reverse: return the index of the highest bit.
+static inline uint8_t mi_bsr32(uint32_t x);
+
+#if defined(_MSC_VER)
+#include <windows.h>
+#include <intrin.h>
+static inline uint8_t mi_bsr32(uint32_t x) {
+ uint32_t idx;
+ _BitScanReverse((DWORD*)&idx, x);
+ return idx;
+}
+#elif defined(__GNUC__) || defined(__clang__)
+static inline uint8_t mi_bsr32(uint32_t x) {
+ return (31 - __builtin_clz(x));
+}
+#else
+static inline uint8_t mi_bsr32(uint32_t x) {
+ // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
+ static const uint8_t debruijn[32] = {
+ 31, 0, 22, 1, 28, 23, 18, 2, 29, 26, 24, 10, 19, 7, 3, 12,
+ 30, 21, 27, 17, 25, 9, 6, 11, 20, 16, 8, 5, 15, 4, 14, 13,
+ };
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x++;
+ return debruijn[(x*0x076be629) >> 27];
+}
+#endif
+
+/*
+// Bit scan reverse: return the index of the highest bit.
+uint8_t _mi_bsr(uintptr_t x) {
+ if (x == 0) return 0;
+ #if MI_INTPTR_SIZE==8
+ uint32_t hi = (x >> 32);
+ return (hi == 0 ? mi_bsr32((uint32_t)x) : 32 + mi_bsr32(hi));
+ #elif MI_INTPTR_SIZE==4
+ return mi_bsr32(x);
+ #else
+ # error "define bsr for non-32 or 64-bit platforms"
+ #endif
+}
+*/
+
+
+static inline size_t _mi_wsize_from_size(size_t size) {
+ return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
+}
+
+// Return the bin for a given field size.
+// Returns MI_BIN_HUGE if the size is too large.
+// We use `wsize` for the size in "machine word sizes",
+// i.e. byte size == `wsize*sizeof(void*)`.
+extern inline uint8_t _mi_bin8(size_t size) {
+ size_t wsize = _mi_wsize_from_size(size);
+ uint8_t bin;
+ if (wsize <= 1) {
+ bin = 1;
+ }
+#if defined(MI_ALIGN4W)
+ else if (wsize <= 4) {
+ bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
+ }
+#elif defined(MI_ALIGN2W)
+ else if (wsize <= 8) {
+ bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
+ }
+#else
+ else if (wsize <= 8) {
+ bin = (uint8_t)wsize;
+ }
+#endif
+ else if (wsize > MI_LARGE_WSIZE_MAX) {
+ bin = MI_BIN_HUGE;
+ }
+ else {
+#if defined(MI_ALIGN4W)
+ if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
+#endif
+ wsize--;
+ // find the highest bit
+ uint8_t b = mi_bsr32((uint32_t)wsize);
+ // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
+    // - adjust with 3 because we do not round the first 8 sizes
+ // which each get an exact bin
+ bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3;
+ }
+ return bin;
+}
+
+static inline uint8_t _mi_bin4(size_t size) {
+ size_t wsize = _mi_wsize_from_size(size);
+ uint8_t bin;
+ if (wsize <= 1) {
+ bin = 1;
+ }
+#if defined(MI_ALIGN4W)
+ else if (wsize <= 4) {
+ bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
+ }
+#elif defined(MI_ALIGN2W)
+ else if (wsize <= 8) {
+ bin = (uint8_t)((wsize+1)&~1); // round to double word sizes
+ }
+#else
+ else if (wsize <= 8) {
+ bin = (uint8_t)wsize;
+ }
+#endif
+ else if (wsize > MI_LARGE_WSIZE_MAX) {
+ bin = MI_BIN_HUGE;
+ }
+ else {
+ uint8_t b = mi_bsr32((uint32_t)wsize);
+ bin = ((b << 1) + (uint8_t)((wsize >> (b - 1)) & 0x01)) + 3;
+ }
+ return bin;
+}
+
+static size_t _mi_binx4(size_t bsize) {
+ if (bsize==0) return 0;
+ uint8_t b = mi_bsr32((uint32_t)bsize);
+ if (b <= 1) return bsize;
+ size_t bin = ((b << 1) | (bsize >> (b - 1))&0x01);
+ return bin;
+}
+
+static size_t _mi_binx8(size_t bsize) {
+ if (bsize<=1) return bsize;
+ uint8_t b = mi_bsr32((uint32_t)bsize);
+ if (b <= 2) return bsize;
+ size_t bin = ((b << 2) | (bsize >> (b - 2))&0x03) - 5;
+ return bin;
+}
+
+static void mi_bins(void) {
+ //printf(" QNULL(1), /* 0 */ \\\n ");
+ size_t last_bin = 0;
+ size_t min_bsize = 0;
+ size_t last_bsize = 0;
+ for (size_t bsize = 1; bsize < 2*1024; bsize++) {
+ size_t size = bsize * 64 * 1024;
+ size_t bin = _mi_binx8(bsize);
+ if (bin != last_bin) {
+ printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_bsize, last_bsize, last_bin);
+ //printf("QNULL(%6zd), ", wsize);
+ //if (last_bin%8 == 0) printf("/* %i */ \\\n ", last_bin);
+ last_bin = bin;
+ min_bsize = bsize;
+ }
+ last_bsize = bsize;
+ }
+}
+#endif
\ No newline at end of file
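As a quick sanity check of the `_mi_bin8`-style binning in the experiment above, the same formula can be run stand-alone (plain-C branch only, no MI_ALIGN2W/MI_ALIGN4W; `bin_of_wsize` is a local helper for this example, not a mimalloc function):

#include <stdio.h>
#include <stdint.h>

// GCC/Clang builtin; the de Bruijn fallback above covers other compilers
static uint8_t bsr32(uint32_t x) { return (uint8_t)(31 - __builtin_clz(x)); }

// same shape as the plain-C branch of _mi_bin8 above
static uint8_t bin_of_wsize(uint32_t wsize) {
  if (wsize <= 1) return 1;
  if (wsize <= 8) return (uint8_t)wsize;          // first 8 word sizes get exact bins
  wsize--;
  uint8_t b = bsr32(wsize);                       // highest bit
  return (uint8_t)(((b << 2) + ((wsize >> (b - 2)) & 0x03)) - 3);
}

int main(void) {
  // word sizes 97..112 all land in bin 23, so a 97-word request may be served
  // from a 112-word bin: about 13% waste, in line with the ~12.5% noted above.
  for (uint32_t w = 96; w <= 113; w++) {
    printf("wsize %3u -> bin %u\n", w, bin_of_wsize(w));
  }
  return 0;
}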
diff --git a/test/main-override.cpp b/test/main-override.cpp
index f748c75..e0dba5a 100644
--- a/test/main-override.cpp
+++ b/test/main-override.cpp
@@ -32,22 +32,27 @@ static void heap_late_free(); // issue #204
static void padding_shrink(); // issue #209
static void various_tests();
static void test_mt_shutdown();
+static void large_alloc(void); // issue #363
static void fail_aslr(); // issue #372
static void tsan_numa_test(); // issue #414
-static void strdup_test(); // issue #445
+static void strdup_test(); // issue #445
+static void bench_alloc_large(void); // issue #xxx
int main() {
mi_stats_reset(); // ignore earlier allocations
- heap_thread_free_large();
- heap_no_delete();
- heap_late_free();
- padding_shrink();
- various_tests();
- tsan_numa_test();
- strdup_test();
+
+ heap_thread_free_large();
+ heap_no_delete();
+ heap_late_free();
+ padding_shrink();
+ various_tests();
+ large_alloc();
+ tsan_numa_test();
+ strdup_test();
test_mt_shutdown();
//fail_aslr();
+ bench_alloc_large();
mi_stats_print(NULL);
return 0;
}
@@ -188,7 +193,7 @@ static void heap_thread_free_large_worker() {
static void heap_thread_free_large() {
for (int i = 0; i < 100; i++) {
- shared_p = mi_malloc_aligned(2*1024*1024 + 1, 8);
+ shared_p = mi_malloc_aligned(2 * 1024 * 1024 + 1, 8);
auto t1 = std::thread(heap_thread_free_large_worker);
t1.join();
}
@@ -220,6 +225,18 @@ static void test_mt_shutdown()
std::cout << "done" << std::endl;
}
+// issue #363
+using namespace std;
+
+void large_alloc(void)
+{
+ char* a = new char[1ull << 25];
+ thread th([&] {
+ delete[] a;
+ });
+ th.join();
+}
+
// issue #372
static void fail_aslr() {
size_t sz = (4ULL << 40); // 4TiB
@@ -231,11 +248,42 @@ static void fail_aslr() {
// issues #414
static void dummy_worker() {
void* p = mi_malloc(0);
- mi_free(p);
+ mi_free(p);
}
static void tsan_numa_test() {
auto t1 = std::thread(dummy_worker);
dummy_worker();
t1.join();
-}
\ No newline at end of file
+}
+
+// issue #?
+#include <chrono>
+#include <random>
+#include <iostream>
+
+static void bench_alloc_large(void) {
+ static constexpr int kNumBuffers = 20;
+ static constexpr size_t kMinBufferSize = 5 * 1024 * 1024;
+ static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024;
+ std::unique_ptr<char[]> buffers[kNumBuffers];
+
+ std::random_device rd;
+ std::mt19937 gen(42); //rd());
+ std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize);
+ std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1);
+
+ static constexpr int kNumIterations = 2000;
+ const auto start = std::chrono::steady_clock::now();
+ for (int i = 0; i < kNumIterations; ++i) {
+ int buffer_idx = buf_number_distribution(gen);
+ size_t new_size = size_distribution(gen);
+ buffers[buffer_idx] = std::make_unique<char[]>(new_size);
+ }
+ const auto end = std::chrono::steady_clock::now();
+ const auto num_ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+ const auto us_per_allocation = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / kNumIterations;
+  std::cout << kNumIterations << " allocations done in " << num_ms << "ms." << std::endl;
+ std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl;
+}
+
diff --git a/test/test-stress.c b/test/test-stress.c
index 498b7ec..ff5fffe 100644
--- a/test/test-stress.c
+++ b/test/test-stress.c
@@ -39,12 +39,12 @@ static size_t use_one_size = 0; // use single object size of `N * s
// #define USE_STD_MALLOC
#ifdef USE_STD_MALLOC
-#define custom_calloc(n,s) calloc(n,s)
+#define custom_calloc(n,s) malloc(n*s)
#define custom_realloc(p,s) realloc(p,s)
#define custom_free(p) free(p)
#else
#include <mimalloc.h>
-#define custom_calloc(n,s) mi_calloc(n,s)
+#define custom_calloc(n,s) mi_malloc(n*s)
#define custom_realloc(p,s) mi_realloc(p,s)
#define custom_free(p) mi_free(p)
#endif
@@ -182,17 +182,20 @@ static void run_os_threads(size_t nthreads, void (*entry)(intptr_t tid));
static void test_stress(void) {
uintptr_t r = rand();
for (int n = 0; n < ITER; n++) {
- run_os_threads(THREADS, &stress);
+ run_os_threads(THREADS, &stress);
for (int i = 0; i < TRANSFERS; i++) {
if (chance(50, &r) || n + 1 == ITER) { // free all on last run, otherwise free half of the transfers
void* p = atomic_exchange_ptr(&transfer[i], NULL);
free_items(p);
}
}
- // mi_collect(false);
-#if !defined(NDEBUG) || defined(MI_TSAN)
+ #ifndef NDEBUG
+ //mi_collect(false);
+ //mi_debug_show_arenas();
+ #endif
+ #if !defined(NDEBUG) || defined(MI_TSAN)
if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); }
-#endif
+ #endif
}
}
@@ -244,16 +247,23 @@ int main(int argc, char** argv) {
// Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
srand(0x7feb352d);
- // mi_stats_reset();
+
+ //mi_reserve_os_memory(512ULL << 20, true, true);
+
+#if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
+ mi_stats_reset();
+#endif
+
#ifdef STRESS
- test_stress();
+ test_stress();
#else
- test_leak();
+ test_leak();
#endif
#ifndef USE_STD_MALLOC
#ifndef NDEBUG
mi_collect(true);
+ //mi_debug_show_arenas();
#endif
mi_stats_print(NULL);
#endif