author     David Goldblatt <davidgoldblatt@fb.com>      2019-03-22 12:53:11 -0700
committer  David Goldblatt <davidtgoldblatt@gmail.com>  2019-04-15 16:48:12 -0700
commit     33e1dad6803ea3e20971b46baa299045f736d22a (patch)
tree       f5d9814442d9e855b7aae5a2e981dd164332a0f7 /src
parent     b92c9a1a81f3f68da87afe5887d8450fef0700d3 (diff)
Safety checks: Add a redzoning feature.
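
Note: with heap profiling, sampled small allocations are backed by SC_LARGE_MINCLASS
bytes, so the bytes between the requested usable size and that backing size class sit
unused. The redzone fills this slack with a sentinel when the allocation is promoted and
verifies it when the allocation is demoted on free, catching writes past the usable size.
The stand-alone C sketch below only illustrates the technique; the 0xBC sentinel and the
redzone_set()/redzone_verify() helpers are illustrative assumptions, not the internal
safety_check_set_redzone()/safety_check_verify_redzone() implementation.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sentinel; the value jemalloc actually writes may differ. */
#define REDZONE_BYTE 0xBC

/* Fill the slack bytes [usize, bumped_usize) with the sentinel. */
static void
redzone_set(unsigned char *ptr, size_t usize, size_t bumped_usize) {
	for (size_t i = usize; i < bumped_usize; i++) {
		ptr[i] = REDZONE_BYTE;
	}
}

/* Return 0 if the redzone is intact, -1 if any slack byte was modified. */
static int
redzone_verify(const unsigned char *ptr, size_t usize, size_t bumped_usize) {
	for (size_t i = usize; i < bumped_usize; i++) {
		if (ptr[i] != REDZONE_BYTE) {
			return -1;
		}
	}
	return 0;
}

int
main(void) {
	size_t usize = 40, bumped_usize = 4096; /* 4096 stands in for SC_LARGE_MINCLASS */
	unsigned char *obj = malloc(bumped_usize);
	redzone_set(obj, usize, bumped_usize);
	obj[usize] = 0;                         /* simulate a one-byte overflow */
	if (redzone_verify(obj, usize, bumped_usize) != 0) {
		fprintf(stderr, "redzone corruption detected\n");
	}
	free(obj);
	return 0;
}
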
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c        | 22
-rw-r--r--  src/jemalloc.c     |  1
-rw-r--r--  src/prof.c         | 16
-rw-r--r--  src/safety_check.c | 19
4 files changed, 43 insertions(+), 15 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index 60eac232..084df855 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -8,6 +8,7 @@
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/util.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
@@ -1531,12 +1532,16 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
}
void
-arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
+arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
cassert(config_prof);
assert(ptr != NULL);
assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
assert(usize <= SC_SMALL_MAXCLASS);
+ if (config_opt_safety_checks) {
+ safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
+ }
+
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1577,10 +1582,19 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
assert(opt_prof);
extent_t *extent = iealloc(tsdn, ptr);
- size_t usize = arena_prof_demote(tsdn, extent, ptr);
- if (usize <= tcache_maxclass) {
+ size_t usize = extent_usize_get(extent);
+ size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+ if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
+ /*
+ * Currently, we only do redzoning for small sampled
+ * allocations.
+ */
+ assert(bumped_usize == SC_LARGE_MINCLASS);
+ safety_check_verify_redzone(ptr, usize, bumped_usize);
+ }
+ if (bumped_usize <= tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- sz_size2index(usize), slow_path);
+ sz_size2index(bumped_usize), slow_path);
} else {
large_dalloc(tsdn, extent);
}
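
On the free path, two sizes now matter: the user-visible usize (captured via
extent_usize_get() before demotion and used to verify the redzone) and the bumped size
returned by arena_prof_demote() (used to route the deallocation). The stand-alone sketch
below models that ordering only; every fake_* name and constant is an illustrative
stand-in rather than jemalloc's internal API.

#include <stdio.h>
#include <stdlib.h>

#define FAKE_LARGE_MINCLASS 4096        /* stand-in for SC_LARGE_MINCLASS */
#define FAKE_REDZONE_BYTE   0xBC        /* illustrative sentinel */

typedef struct {
	unsigned char *base;
	size_t usize;                   /* size reported to the application */
	size_t backing;                 /* size class actually backing it */
} fake_extent_t;

/* Stand-in for arena_prof_demote(): returns the bumped size class. */
static size_t
fake_prof_demote(fake_extent_t *ext) {
	return ext->backing;
}

static void
fake_dalloc_promoted(fake_extent_t *ext) {
	size_t usize = ext->usize;              /* read before demotion */
	size_t bumped_usize = fake_prof_demote(ext);
	if (usize < FAKE_LARGE_MINCLASS) {
		/* Only small sampled allocations carry a redzone; verify it. */
		for (size_t i = usize; i < bumped_usize; i++) {
			if (ext->base[i] != FAKE_REDZONE_BYTE) {
				fprintf(stderr, "redzone clobbered at byte %zu\n", i);
				break;
			}
		}
	}
	/* The cache/dalloc path is keyed by the bumped size, not usize. */
	printf("dalloc via size class %zu\n", bumped_usize);
	free(ext->base);
}

int
main(void) {
	fake_extent_t ext = {malloc(FAKE_LARGE_MINCLASS), 40, FAKE_LARGE_MINCLASS};
	for (size_t i = ext.usize; i < ext.backing; i++) {
		ext.base[i] = FAKE_REDZONE_BYTE;    /* set at promote time */
	}
	ext.base[ext.usize] = 0;                    /* simulate an overflow */
	fake_dalloc_promoted(&ext);
	return 0;
}
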
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 7bc7b957..818ce3aa 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -13,6 +13,7 @@
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
diff --git a/src/prof.c b/src/prof.c
index 4d7d65db..a4e30f42 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -125,7 +125,7 @@ struct prof_thr_node_s {
uint64_t thr_uid;
/* Variable size based on thr_name_sz. */
char name[1];
-};
+};
typedef struct prof_alloc_node_s prof_alloc_node_t;
@@ -388,7 +388,7 @@ prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
new_node->next = NULL;
new_node->index = log_bt_index;
- /*
+ /*
* Copy the backtrace: bt is inside a tdata or gctx, which
* might die before prof_log_stop is called.
*/
@@ -402,7 +402,7 @@ prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
} else {
return node->index;
}
-}
+}
static size_t
prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
assert(prof_logging_state == prof_logging_state_started);
@@ -452,7 +452,7 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
* it's being destroyed).
*/
return;
- }
+ }
malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
@@ -514,11 +514,11 @@ prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
}
label_done:
- malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
}
void
-prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
+prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
@@ -2604,8 +2604,8 @@ static void
prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
emitter_json_array_kv_begin(emitter, "stack_traces");
prof_bt_node_t *bt_node = log_bt_first;
- prof_bt_node_t *bt_old_node;
- /*
+ prof_bt_node_t *bt_old_node;
+ /*
* Calculate how many hex digits we need: twice number of bytes, two for
* "0x", and then one more for terminating '\0'.
*/
diff --git a/src/safety_check.c b/src/safety_check.c
index cbec1907..804155dc 100644
--- a/src/safety_check.c
+++ b/src/safety_check.c
@@ -1,11 +1,24 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
+static void (*safety_check_abort)(const char *message);
+
+void safety_check_set_abort(void (*abort_fn)(const char *)) {
+ safety_check_abort = abort_fn;
+}
+
void safety_check_fail(const char *format, ...) {
- va_list ap;
+ char buf[MALLOC_PRINTF_BUFSIZE];
+ va_list ap;
va_start(ap, format);
- malloc_vcprintf(NULL, NULL, format, ap);
+ malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
va_end(ap);
- abort();
+
+ if (safety_check_abort == NULL) {
+ malloc_write(buf);
+ abort();
+ } else {
+ safety_check_abort(buf);
+ }
}
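
A caller (for example a test harness) can install an abort handler to capture the
failure message instead of terminating the process. The prototypes in the sketch below
are copied from this diff; the fake_abort() handler, the message text, and the assumption
that the internal symbols are linkable are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Prototypes as declared by this change (assumed linkable for the sketch). */
void safety_check_set_abort(void (*abort_fn)(const char *));
void safety_check_fail(const char *format, ...);

static bool fail_seen;

static void
fake_abort(const char *message) {
	/* Record that a safety-check failure fired instead of aborting. */
	if (strstr(message, "redzone") != NULL) {
		fail_seen = true;
	}
	fprintf(stderr, "captured: %s", message);
}

int
main(void) {
	safety_check_set_abort(&fake_abort);
	safety_check_fail("redzone corruption at %p\n", (void *)0);
	safety_check_set_abort(NULL);       /* restore the default abort() path */
	return fail_seen ? 0 : 1;
}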