author     Danny Lin <danny@kdrag0n.dev>           2021-04-05 23:16:01 -0700
committer  alk3pInjection <5e147612@kscope.ink>    2022-05-08 12:40:50 +0800
commit     6ccb70d6f50aa544662a7aa1f13a942093f3049d (patch)
tree       11ed5b4aad15bf15537cf213918f3b319f0b1a91
parent     d0e2a41d561c8f2200e6d1ee8b115a1430ee7b03 (diff)
Update Android extension functions for jemalloc 5.2.1
malloc_iterate and mallinfo need to be updated to accommodate upstream changes.

Change-Id: Iae187f1ca442e0f8c3041ed67474d7d979085b0b
-rw-r--r--  src/android_je_iterate.c  |  2
-rw-r--r--  src/android_je_mallinfo.c | 36
2 files changed, 21 insertions(+), 17 deletions(-)
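Background for the hunks below: jemalloc 5.2.x renames the size-class constants with an SC_ prefix (NSIZES -> SC_NSIZES, NBINS -> SC_NBINS) and shards each arena bin into bin_infos[i].n_shards sub-bins, so any per-bin walk must now loop over shards as well. A minimal sketch of the new traversal pattern, using only names visible in this diff (arena_t, bin_t, bin_infos, bins[i].bin_shards); an illustration, not the committed code:

/* Sketch: apply a function to every small-allocation bin shard of one
 * arena under jemalloc 5.2.1's sharded-bin layout. */
static void for_each_bin_shard(arena_t* arena,
                               void (*fn)(unsigned binind, bin_t* bin, void* arg),
                               void* arg) {
  for (unsigned i = 0; i < SC_NBINS; i++) {                /* size class */
    for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { /* shard */
      fn(i, &arena->bins[i].bin_shards[j], arg);
    }
  }
}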
diff --git a/src/android_je_iterate.c b/src/android_je_iterate.c
index b21bfe97..3f145363 100644
--- a/src/android_je_iterate.c
+++ b/src/android_je_iterate.c
@@ -34,7 +34,7 @@ int je_malloc_iterate(uintptr_t base, size_t size,
       continue;
     }
 
-    if (extent_szind_get_maybe_invalid(extent) >= NSIZES) {
+    if (extent_szind_get_maybe_invalid(extent) >= SC_NSIZES) {
       // Ignore this unused extent.
       ptr = (uintptr_t)extent_past_get(extent);
       continue;
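The check above skips extents whose size index reads as out of range (at or above SC_NSIZES, formerly NSIZES), i.e. address space that is retained but holds no live allocation. For orientation, a hedged usage sketch of the iterator this hunk touches; the callback type mirrors Android's malloc_iterate() contract and is an assumption, since the full prototype is truncated in the hunk header:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical caller: sum the sizes of all live allocations in
 * [base, base + size). Callback signature assumed to match Android's
 * malloc_iterate(). */
static void accumulate_size(uintptr_t ptr, size_t size, void* arg) {
  (void)ptr;
  *(size_t*)arg += size;
}

static size_t live_bytes(uintptr_t base, size_t size) {
  size_t total = 0;
  (void)je_malloc_iterate(base, size, accumulate_size, &total);
  return total;
}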
diff --git a/src/android_je_mallinfo.c b/src/android_je_mallinfo.c
index 53bf6644..418157c2 100644
--- a/src/android_je_mallinfo.c
+++ b/src/android_je_mallinfo.c
@@ -21,25 +21,27 @@ static size_t accumulate_large_allocs(arena_t* arena) {
    * Do not include stats.allocated_large, it is only updated by
    * arena_stats_merge, and would include the data counted below.
    */
-  for (unsigned j = 0; j < NSIZES - NBINS; j++) {
+  for (unsigned j = 0; j < SC_NSIZES - SC_NBINS; j++) {
     /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
     uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
     uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
     size_t allocs = (size_t)(nmalloc - ndalloc);
-    total_bytes += sz_index2size(NBINS + j) * allocs;
+    total_bytes += sz_index2size(SC_NBINS + j) * allocs;
   }
   return total_bytes;
 }
 
 static size_t accumulate_small_allocs(arena_t* arena) {
   size_t total_bytes = 0;
-  for (unsigned j = 0; j < NBINS; j++) {
-    bin_t* bin = &arena->bins[j];
+  for (unsigned i = 0; i < SC_NBINS; i++) {
+    for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+      bin_t* bin = &arena->bins[i].bin_shards[j];
 
-    /* NOTE: This includes allocations cached on every thread. */
-    malloc_mutex_lock(TSDN_NULL, &bin->lock);
-    total_bytes += bin_infos[j].reg_size * bin->stats.curregs;
-    malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+      /* NOTE: This includes allocations cached on every thread. */
+      malloc_mutex_lock(TSDN_NULL, &bin->lock);
+      total_bytes += bin_infos[i].reg_size * bin->stats.curregs;
+      malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+    }
   }
   return total_bytes;
 }
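Two details in these accumulators are worth spelling out. First, ndalloc is deliberately read before nmalloc: both counters only ever grow, and every deallocation is preceded by an allocation, so reading the free counter first guarantees nmalloc >= ndalloc and the unsigned subtraction cannot wrap. Second, each shard carries its own lock, so the small-allocation total is taken shard by shard rather than under one bin-wide lock. A small illustration of the live-bytes arithmetic for the large path, with hypothetical counter values:

/* Illustration only: live bytes for one large size class, index
 * SC_NBINS + j. sz_index2size() maps a size-class index to its
 * allocation size in jemalloc. Counter values are hypothetical. */
static size_t live_bytes_for_class(unsigned j) {
  uint64_t ndalloc = 40;   /* frees, read first */
  uint64_t nmalloc = 100;  /* mallocs, read second: nmalloc >= ndalloc */
  size_t live = (size_t)(nmalloc - ndalloc);       /* 60 live objects */
  return sz_index2size(SC_NBINS + j) * live;
}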
@@ -73,7 +75,7 @@ size_t je_mallinfo_narenas() {
 }
 
 size_t je_mallinfo_nbins() {
-  return NBINS;
+  return SC_NBINS;
 }
 
 struct mallinfo je_mallinfo_arena_info(size_t aidx) {
@@ -98,16 +100,18 @@ struct mallinfo je_mallinfo_bin_info(size_t aidx, size_t bidx) {
   memset(&mi, 0, sizeof(mi));
 
   malloc_mutex_lock(TSDN_NULL, &arenas_lock);
-  if (aidx < narenas_auto && bidx < NBINS) {
+  if (aidx < narenas_auto && bidx < SC_NBINS) {
     arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
     if (arena != NULL) {
-      bin_t* bin = &arena->bins[bidx];
+      for (unsigned j = 0; j < bin_infos[bidx].n_shards; j++) {
+        bin_t* bin = &arena->bins[bidx].bin_shards[j];
 
-      malloc_mutex_lock(TSDN_NULL, &bin->lock);
-      mi.ordblks = bin_infos[bidx].reg_size * bin->stats.curregs;
-      mi.uordblks = (size_t) bin->stats.nmalloc;
-      mi.fordblks = (size_t) bin->stats.ndalloc;
-      malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+        malloc_mutex_lock(TSDN_NULL, &bin->lock);
+        mi.ordblks += bin_infos[bidx].reg_size * bin->stats.curregs;
+        mi.uordblks += (size_t) bin->stats.nmalloc;
+        mi.fordblks += (size_t) bin->stats.ndalloc;
+        malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+      }
     }
   }
   malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
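Since each field is now accumulated with += across a bin's shards, je_mallinfo_bin_info() reports whole-bin totals just as it did before sharding. Taken together, the accessors this file exports can be driven as in the following hedged sketch; it assumes only the declarations visible above (je_mallinfo_narenas(), je_mallinfo_nbins(), je_mallinfo_bin_info()) plus Bionic's size_t-typed struct mallinfo fields:

#include <malloc.h>  /* struct mallinfo; Bionic uses size_t fields */
#include <stdio.h>

/* Sketch: print per-bin stats for every arena. Field meanings follow
 * the hunk above: ordblks = live bytes in the bin (summed over all of
 * its shards), uordblks = nmalloc, fordblks = ndalloc. */
static void dump_small_bins(void) {
  for (size_t a = 0; a < je_mallinfo_narenas(); a++) {
    for (size_t b = 0; b < je_mallinfo_nbins(); b++) {
      struct mallinfo mi = je_mallinfo_bin_info(a, b);
      printf("arena %zu bin %zu: live=%zu nmalloc=%zu ndalloc=%zu\n",
             a, b, mi.ordblks, mi.uordblks, mi.fordblks);
    }
  }
}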