Diffstat (limited to 'src/base.c')
-rw-r--r--  src/base.c  106
1 file changed, 6 insertions(+), 100 deletions(-)
diff --git a/src/base.c b/src/base.c
index b0324b5d..421f295a 100644
--- a/src/base.c
+++ b/src/base.c
@@ -12,22 +12,8 @@
static base_t *b0;
-metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
-
-const char *metadata_thp_mode_names[] = {
- "disabled",
- "auto",
- "always"
-};
-
/******************************************************************************/
-static inline bool
-metadata_thp_madvise(void) {
- return (metadata_thp_enabled() &&
- (init_system_thp_mode == thp_mode_default));
-}
-
static void *
base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
void *addr;
@@ -107,12 +93,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
post_reentrancy(tsd);
}
label_done:
- if (metadata_thp_madvise()) {
- /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
- assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
- (size & HUGEPAGE_MASK) == 0);
- pages_nohuge(addr, size);
- }
+ return;
}
static void
@@ -140,42 +121,6 @@ base_get_num_blocks(base_t *base, bool with_new_block) {
return n_blocks;
}
-static void
-base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
- assert(opt_metadata_thp == metadata_thp_auto);
- malloc_mutex_assert_owner(tsdn, &base->mtx);
- if (base->auto_thp_switched) {
- return;
- }
- /* Called when adding a new block. */
- bool should_switch;
- if (base_ind_get(base) != 0) {
- should_switch = (base_get_num_blocks(base, true) ==
- BASE_AUTO_THP_THRESHOLD);
- } else {
- should_switch = (base_get_num_blocks(base, true) ==
- BASE_AUTO_THP_THRESHOLD_A0);
- }
- if (!should_switch) {
- return;
- }
-
- base->auto_thp_switched = true;
- assert(!config_stats || base->n_thp == 0);
- /* Make the initial blocks THP lazily. */
- base_block_t *block = base->blocks;
- while (block != NULL) {
- assert((block->size & HUGEPAGE_MASK) == 0);
- pages_huge(block, block->size);
- if (config_stats) {
- base->n_thp += HUGEPAGE_CEILING(block->size -
- extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
- }
- block = block->next;
- assert(block == NULL || (base_ind_get(base) == 0));
- }
-}
-
static void *
base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
size_t alignment) {
@@ -218,13 +163,6 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
PAGE_CEILING((uintptr_t)addr - gap_size);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
- if (metadata_thp_madvise() && (opt_metadata_thp ==
- metadata_thp_always || base->auto_thp_switched)) {
- base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
- - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
- LG_HUGEPAGE;
- assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
- }
}
}
@@ -262,8 +200,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
*/
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
- pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
- *pind_last;
+ pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
+ *pind_last + 1 : *pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
@@ -273,24 +211,6 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
return NULL;
}
- if (metadata_thp_madvise()) {
- void *addr = (void *)block;
- assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
- (block_size & HUGEPAGE_MASK) == 0);
- if (opt_metadata_thp == metadata_thp_always) {
- pages_huge(addr, block_size);
- } else if (opt_metadata_thp == metadata_thp_auto &&
- base != NULL) {
- /* base != NULL indicates this is not a new base. */
- malloc_mutex_lock(tsdn, &base->mtx);
- base_auto_thp_switch(tsdn, base);
- if (base->auto_thp_switched) {
- pages_huge(addr, block_size);
- }
- malloc_mutex_unlock(tsdn, &base->mtx);
- }
- }
-
*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
@@ -327,16 +247,8 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
base->allocated += sizeof(base_block_t);
base->resident += PAGE_CEILING(sizeof(base_block_t));
base->mapped += block->size;
- if (metadata_thp_madvise() &&
- !(opt_metadata_thp == metadata_thp_auto
- && !base->auto_thp_switched)) {
- assert(base->n_thp > 0);
- base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
- LG_HUGEPAGE;
- }
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
- assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
return &block->extent;
}
@@ -371,20 +283,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->pind_last = pind_last;
base->extent_sn_next = extent_sn_next;
base->blocks = block;
- base->auto_thp_switched = false;
- for (szind_t i = 0; i < NSIZES; i++) {
+ for (szind_t i = 0; i < SC_NSIZES; i++) {
extent_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
- base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
- metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
- >> LG_HUGEPAGE : 0;
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
- assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
base_size);
@@ -426,7 +333,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
- for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
+ for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */
@@ -479,7 +386,7 @@ base_alloc_extent(tsdn_t *tsdn, base_t *base) {
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
- size_t *mapped, size_t *n_thp) {
+ size_t *mapped) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -488,7 +395,6 @@ base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
*allocated = base->allocated;
*resident = base->resident;
*mapped = base->mapped;
- *n_thp = base->n_thp;
malloc_mutex_unlock(tsdn, &base->mtx);
}
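
The last two hunks change the base_stats_get() signature by dropping the n_thp out-parameter, so existing callers must drop that argument. A minimal sketch of an updated call site, assuming jemalloc-internal types and in-scope tsdn/base variables (not part of this diff):

	/*
	 * Hypothetical caller updated for the new signature; the n_thp
	 * out-parameter is gone along with the metadata THP accounting.
	 */
	size_t allocated, resident, mapped;
	base_stats_get(tsdn, base, &allocated, &resident, &mapped);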