author    | Android Build Coastguard Worker <android-build-coastguard-worker@google.com> | 2021-07-17 10:00:50 +0000
committer | Android Build Coastguard Worker <android-build-coastguard-worker@google.com> | 2021-07-17 10:00:50 +0000
commit    | 7290ff7cfa79abad5f0a3b2d77383eea2ec90077 (patch)
tree      | 8491a9b79bfd0f8a95b317af0ac66246337ca71f
parent    | 099b82285c9a2eb131185cada331e4b2f02f8a6d (diff)
parent    | cb1e42a2d4e4a592cab41b8f8abbf33342cfcf57 (diff)
Snap for 7558324 from cb1e42a2d4e4a592cab41b8f8abbf33342cfcf57 to s-keystone-qcom-release
Change-Id: I0ae93840e53a10c8f057306964d0732b49fe30ff
-rw-r--r-- | libc/bionic/heap_tagging.cpp     |  7
-rw-r--r-- | libc/bionic/libc_init_static.cpp |  6
-rw-r--r-- | libc/include/malloc.h            | 43
3 files changed, 52 insertions, 4 deletions
diff --git a/libc/bionic/heap_tagging.cpp b/libc/bionic/heap_tagging.cpp
index ffbabb9a0..41aa20507 100644
--- a/libc/bionic/heap_tagging.cpp
+++ b/libc/bionic/heap_tagging.cpp
@@ -139,7 +139,12 @@ bool SetHeapTaggingLevel(HeapTaggingLevel tag_level) {
       }
 
       if (tag_level == M_HEAP_TAGGING_LEVEL_ASYNC) {
-        set_tcf_on_all_threads(PR_MTE_TCF_ASYNC);
+        // When entering ASYNC mode, specify that we want to allow upgrading to SYNC by OR'ing in
+        // the SYNC flag. But if the kernel doesn't support specifying multiple TCF modes, fall back
+        // to specifying a single mode.
+        if (!set_tcf_on_all_threads(PR_MTE_TCF_ASYNC | PR_MTE_TCF_SYNC)) {
+          set_tcf_on_all_threads(PR_MTE_TCF_ASYNC);
+        }
 #if defined(USE_SCUDO)
         scudo_malloc_set_track_allocation_stacks(0);
 #endif
diff --git a/libc/bionic/libc_init_static.cpp b/libc/bionic/libc_init_static.cpp
index 069ebb0ab..3a8513f98 100644
--- a/libc/bionic/libc_init_static.cpp
+++ b/libc/bionic/libc_init_static.cpp
@@ -311,7 +311,11 @@ __attribute__((no_sanitize("hwaddress", "memtag"))) void __libc_init_mte(const v
     unsigned long prctl_arg = PR_TAGGED_ADDR_ENABLE | PR_MTE_TAG_SET_NONZERO;
     prctl_arg |= (level == M_HEAP_TAGGING_LEVEL_SYNC) ? PR_MTE_TCF_SYNC : PR_MTE_TCF_ASYNC;
 
-    if (prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg, 0, 0, 0) == 0) {
+    // When entering ASYNC mode, specify that we want to allow upgrading to SYNC by OR'ing in the
+    // SYNC flag. But if the kernel doesn't support specifying multiple TCF modes, fall back to
+    // specifying a single mode.
+    if (prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg | PR_MTE_TCF_SYNC, 0, 0, 0) == 0 ||
+        prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg, 0, 0, 0) == 0) {
       __libc_shared_globals()->initial_heap_tagging_level = level;
       return;
     }
diff --git a/libc/include/malloc.h b/libc/include/malloc.h
index bae1f6823..f7beb2c10 100644
--- a/libc/include/malloc.h
+++ b/libc/include/malloc.h
@@ -170,7 +170,45 @@ int malloc_info(int __must_be_zero, FILE* __fp) __INTRODUCED_IN(23);
  * Available since API level 28.
  */
 #define M_PURGE (-101)
-/*
+
+
+/**
+ * mallopt() option to tune the allocator's choice of memory tags to
+ * make it more likely that a certain class of memory errors will be
+ * detected. This is only relevant if MTE is enabled in this process
+ * and ignored otherwise. The value argument should be one of the
+ * M_MEMTAG_TUNING_* flags.
+ * NOTE: This is only available in scudo.
+ *
+ * Available since API level 31.
+ */
+#define M_MEMTAG_TUNING (-102)
+
+/**
+ * When passed as a value of M_MEMTAG_TUNING mallopt() call, enables
+ * deterministic detection of linear buffer overflow and underflow
+ * bugs by assigning distinct tag values to adjacent allocations. This
+ * mode has a slightly reduced chance to detect use-after-free bugs
+ * because only half of the possible tag values are available for each
+ * memory location.
+ *
+ * Please keep in mind that MTE can not detect overflow within the
+ * same tag granule (16-byte aligned chunk), and can miss small
+ * overflows even in this mode. Such overflow can not be the cause of
+ * a memory corruption, because the memory within one granule is never
+ * used for multiple allocations.
+ */
+#define M_MEMTAG_TUNING_BUFFER_OVERFLOW 0
+
+/**
+ * When passed as a value of M_MEMTAG_TUNING mallopt() call, enables
+ * independently randomized tags for uniform ~93% probability of
+ * detecting both spatial (buffer overflow) and temporal (use after
+ * free) bugs.
+ */
+#define M_MEMTAG_TUNING_UAF 1
+
+/**
  * mallopt() option for per-thread memory initialization tuning.
  * The value argument should be one of:
  * 1: Disable automatic heap initialization and, where possible, memory tagging,
@@ -210,7 +248,7 @@ int malloc_info(int __must_be_zero, FILE* __fp) __INTRODUCED_IN(23);
  * should not be zero-initialized, any other value indicates to initialize heap
  * memory to zero.
  *
- * Note that this memory mitigations is only implemented in scudo and therefore
+ * Note that this memory mitigation is only implemented in scudo and therefore
  * this will have no effect when using another allocator (such as jemalloc on
  * Android Go devices).
  *
@@ -222,6 +260,7 @@ int malloc_info(int __must_be_zero, FILE* __fp) __INTRODUCED_IN(23);
  * mallopt() option to change the heap tagging state. May be called at any
  * time, including when multiple threads are running.
  * The value must be one of the M_HEAP_TAGGING_LEVEL_ constants.
+ * NOTE: This is only available in scudo.
  *
  * Available since API level 31.
  */
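Both .cpp hunks apply the same pattern: request ASYNC tag checking with the SYNC bit OR'ed in, so the kernel is allowed to upgrade the thread to synchronous checking, and retry with a single TCF mode if the kernel rejects the combination. Below is a minimal standalone sketch of that fallback, not bionic's code: `enable_async_mte()` is a hypothetical helper, the `PR_SET_TAGGED_ADDR_CTRL` / `PR_MTE_TCF_*` constants are the Linux uapi ones from `<linux/prctl.h>` (arm64, 5.10+), and the allowed-tag mask that `__libc_init_mte()` also sets is omitted.

```c
#include <sys/prctl.h>

// Hypothetical helper mirroring the fallback above: first ask for ASYNC with
// SYNC allowed as an upgrade, then retry with ASYNC alone if the kernel does
// not accept multiple TCF bits. Returns 0 on success, -1 on failure.
static int enable_async_mte(void) {
  unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC;
  if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl | PR_MTE_TCF_SYNC, 0, 0, 0) == 0) {
    return 0;
  }
  // Older kernels only accept a single TCF mode; fall back to plain ASYNC.
  return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
}
```

Attempting both bits first costs nothing on older kernels (the call simply fails and the single-mode request is issued), which is why the change can be made unconditionally in both SetHeapTaggingLevel() and __libc_init_mte().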
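The malloc.h hunk adds a scudo-only mallopt() knob for choosing which bug class memory tagging should favor. A short usage sketch, under the assumption that the program is built against this updated header (API level 31) and runs with scudo; the option is ignored when MTE is not enabled, and mallopt() returns a nonzero value on success.

```c
#include <malloc.h>
#include <stdio.h>

int main(void) {
  // Bias tag assignment toward catching use-after-free rather than
  // deterministic adjacent-overflow detection.
  if (mallopt(M_MEMTAG_TUNING, M_MEMTAG_TUNING_UAF) == 0) {
    // Allocator does not support the option (e.g. not scudo, or older libc).
    fprintf(stderr, "M_MEMTAG_TUNING not supported by this allocator\n");
  }
  return 0;
}
```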