author    daan <daanl@outlook.com>    2019-12-27 23:33:50 -0800
committer daan <daanl@outlook.com>    2019-12-28 00:57:42 -0800
commit    e3391d9a53c66f922c6e0ac12df4723701a05110
tree      7d3a25f865b9a30e978b48fb1c1872e34da9bff0 /include/mimalloc-internal.h
parent    ce02986d56cb69dd2f2d2b1a5c25260338665957
stronger encoding of free lists using two keys per page
Diffstat (limited to 'include/mimalloc-internal.h')
-rw-r--r--  include/mimalloc-internal.h | 58
1 file changed, 40 insertions(+), 18 deletions(-)
diff --git a/include/mimalloc-internal.h b/include/mimalloc-internal.h
index e648c1f..cdaac96 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc-internal.h
@@ -392,12 +392,28 @@ static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
}
-// -------------------------------------------------------------------
-// Encoding/Decoding the free list next pointers
-// Note: we pass a `null` value to be used as the `NULL` value for the
-// end of a free list. This is to prevent the cookie itself to ever
-// be present among user blocks (as `cookie^0==cookie`).
-// -------------------------------------------------------------------
+/* -------------------------------------------------------------------
+Encoding/Decoding the free list next pointers
+
+This is to protect against buffer overflow exploits where the
+free list is mutated. Many hardened allocators xor the next pointer `p`
+with a secret key `k1`, as `p^k1`, but if the attacker can guess
+the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
+Moreover, if multiple blocks can be read, the attacker can
+xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
+about the pointers (and subsequently `k1`).
+
+Instead, mimalloc uses an extra key `k2` and encodes the pointer as
+`rotl(p+k2,13)^k1`. Since addition and xor do not distribute over each
+other, the above approaches no longer work so well, even if `p` can be
+guessed. (The rotation is included because xor and addition would
+otherwise agree in the lowest bit.)
+Both keys are unique per page.
+
+We also pass a separate `null` value to be used as `NULL`, since otherwise
+the encoding of `NULL`, namely `rotl(k2,13)^k1`, would appear (too) often
+as a sentinel value.
+------------------------------------------------------------------- */
+
+#define MI_ENCODE_ROTATE_BITS (13)
static inline bool mi_is_in_same_segment(const void* p, const void* q) {
return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
@@ -412,49 +428,55 @@ static inline bool mi_is_in_same_page(const void* p, const void* q) {
return (idxp == idxq);
}
-static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t cookie ) {
+static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
+ return ((x << shift) | (x >> (MI_INTPTR_BITS - shift)));
+}
+static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
+ return ((x >> shift) | (x << (MI_INTPTR_BITS - shift)));
+}
+static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, uintptr_t key1, uintptr_t key2 ) {
#ifdef MI_ENCODE_FREELIST
- mi_block_t* b = (mi_block_t*)(block->next ^ cookie);
+ mi_block_t* b = (mi_block_t*)(mi_rotr(block->next ^ key1, MI_ENCODE_ROTATE_BITS) - key2);
if (mi_unlikely((void*)b==null)) { b = NULL; }
return b;
#else
- UNUSED(cookie); UNUSED(null);
+ UNUSED(key1); UNUSED(key2); UNUSED(null);
return (mi_block_t*)block->next;
#endif
}
-static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t cookie) {
+static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, uintptr_t key1, uintptr_t key2) {
#ifdef MI_ENCODE_FREELIST
if (mi_unlikely(next==NULL)) { next = (mi_block_t*)null; }
- block->next = (mi_encoded_t)next ^ cookie;
+ block->next = mi_rotl((mi_encoded_t)next + key2, MI_ENCODE_ROTATE_BITS) ^ key1;
#else
- UNUSED(cookie); UNUSED(null);
+ UNUSED(key1); UNUSED(key2); UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#ifdef MI_ENCODE_FREELIST
- mi_block_t* next = mi_block_nextx(page,block,page->cookie);
- // check for free list corruption: is `next` at least in our segment range?
+ mi_block_t* next = mi_block_nextx(page,block,page->key[0],page->key[1]);
+ // check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
- if (next!=NULL && !mi_is_in_same_page(block, next)) {
+ if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
_mi_fatal_error("corrupted free list entry of size %zub at %p: value 0x%zx\n", page->block_size, block, (uintptr_t)next);
next = NULL;
}
return next;
#else
UNUSED(page);
- return mi_block_nextx(page,block,0);
+ return mi_block_nextx(page,block,0,0);
#endif
}
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#ifdef MI_ENCODE_FREELIST
- mi_block_set_nextx(page,block,next, page->cookie);
+ mi_block_set_nextx(page,block,next, page->key[0], page->key[1]);
#else
UNUSED(page);
- mi_block_set_nextx(page,block, next,0);
+ mi_block_set_nextx(page,block, next,0,0);
#endif
}
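
As an aside to the comment introduced above: the weakness of a single xor key is easy to see in a few lines of standalone C. The program below is only an illustration and not part of mimalloc; it assumes a 64-bit platform, and the key and addresses are made up. Xoring two encoded next pointers cancels `k1` and reveals `p1^p2`.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uintptr_t k1 = 0x9e3779b97f4a7c15u;   // hypothetical secret key
  uintptr_t p1 = 0x00007f1200401000u;   // two made-up block addresses
  uintptr_t p2 = 0x00007f1200401040u;

  uintptr_t e1 = p1 ^ k1;               // what a single-key scheme would store
  uintptr_t e2 = p2 ^ k1;

  // The key cancels out: reading e1 and e2 reveals p1^p2 (here just the
  // distance between two neighboring blocks), and a guessed p1 reveals k1.
  printf("e1^e2 = 0x%llx  p1^p2 = 0x%llx\n",
         (unsigned long long)(e1 ^ e2), (unsigned long long)(p1 ^ p2));
  return 0;
}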
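
The round-trip of the two-key encoding can likewise be checked in isolation. This is a minimal sketch, assuming a 64-bit `uintptr_t` and mirroring the `rotl(p+k2,13)^k1` / `rotr(e^k1,13)-k2` pair from the diff; the names `rotl64`, `rotr64`, `encode_next`, and `decode_next` are placeholders, not mimalloc APIs (the corresponding functions above are `mi_rotl`, `mi_rotr`, `mi_block_set_nextx`, and `mi_block_nextx`).

#include <assert.h>
#include <stdint.h>

#define ROTATE_BITS 13   // same constant as MI_ENCODE_ROTATE_BITS in the diff

static inline uint64_t rotl64(uint64_t x, unsigned s) {
  return (x << s) | (x >> (64 - s));
}
static inline uint64_t rotr64(uint64_t x, unsigned s) {
  return (x >> s) | (x << (64 - s));
}

// Encode as in the diff: rotl(p + k2, 13) ^ k1
static uint64_t encode_next(uint64_t p, uint64_t k1, uint64_t k2) {
  return rotl64(p + k2, ROTATE_BITS) ^ k1;
}

// Decode by undoing each step in reverse order: rotr(e ^ k1, 13) - k2
static uint64_t decode_next(uint64_t e, uint64_t k1, uint64_t k2) {
  return rotr64(e ^ k1, ROTATE_BITS) - k2;
}

int main(void) {
  uint64_t k1 = 0x243f6a8885a308d3u;   // made-up per-page keys
  uint64_t k2 = 0x13198a2e03707344u;
  uint64_t p  = 0x00007f1200401000u;   // made-up block address

  // Round-trip: each step of the encoding is inverted, so we get p back.
  assert(decode_next(encode_next(p, k1, k2), k1, k2) == p);

  // Unlike plain xor, encode(p1) ^ encode(p2) does not cancel the keys,
  // because the addition and rotation sit between p and the final xor.
  return 0;
}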