Diffstat (limited to 'runtime/base/arena_allocator.cc'):
 runtime/base/arena_allocator.cc | 58 ++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 42 insertions(+), 16 deletions(-)
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index d951089bb2..b84e29f7ce 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -162,6 +162,7 @@ Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
MallocArena::MallocArena(size_t size) {
memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ CHECK(memory_ != nullptr); // Abort on OOM.
size_ = size;
}
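
Note on the MallocArena hunk: calloc() returns nullptr on failure, so the added CHECK turns an out-of-memory condition into an immediate, attributable abort instead of a null-pointer crash at first use. A minimal standalone sketch of the same pattern; the CHECK macro below is only a stand-in for ART's so the snippet compiles on its own:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-in for ART's CHECK: abort with a message when the condition fails.
#define CHECK(cond)                                        \
  do {                                                     \
    if (!(cond)) {                                         \
      std::fprintf(stderr, "Check failed: %s\n", #cond);   \
      std::abort();                                        \
    }                                                      \
  } while (0)

uint8_t* AllocateZeroedBlock(size_t size) {
  uint8_t* memory = reinterpret_cast<uint8_t*>(std::calloc(1, size));
  CHECK(memory != nullptr);  // Abort on OOM, as in the hunk above.
  return memory;
}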
@@ -319,15 +320,27 @@ void* ArenaAllocator::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
// mark only the actually allocated memory as defined. That leaves red zones
// and padding between allocations marked as inaccessible.
size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
- if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
- // Obtain a new block.
- ObtainNewArenaForAllocation(rounded_bytes);
- CHECK(ptr_ != nullptr);
- MEMORY_TOOL_MAKE_NOACCESS(ptr_, end_ - ptr_);
- }
ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
- uint8_t* ret = ptr_;
- ptr_ += rounded_bytes;
+ uint8_t* ret;
+ if (UNLIKELY(rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
+ ret = AllocFromNewArena(rounded_bytes);
+ uint8_t* noaccess_begin = ret + bytes;
+ uint8_t* noaccess_end;
+ if (ret == arena_head_->Begin()) {
+ DCHECK(ptr_ - rounded_bytes == ret);
+ noaccess_end = end_;
+ } else {
+ // We're still using the old arena but `ret` comes from a new one just after it.
+ DCHECK(arena_head_->next_ != nullptr);
+ DCHECK(ret == arena_head_->next_->Begin());
+ DCHECK_EQ(rounded_bytes, arena_head_->next_->GetBytesAllocated());
+ noaccess_end = arena_head_->next_->End();
+ }
+ MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
+ } else {
+ ret = ptr_;
+ ptr_ += rounded_bytes;
+ }
MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
// Check that the memory is already zeroed out.
DCHECK(std::all_of(ret, ret + bytes, [](uint8_t val) { return val == 0u; }));
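
Note on the AllocWithMemoryTool hunk: the allocator rounds the request up to bytes + kMemoryToolRedZoneBytes (8-byte aligned), hands the caller only [ret, ret + bytes), and leaves or marks everything behind that as inaccessible, so the memory tool flags overruns into the red zone or padding. A minimal standalone sketch that just computes the two regions; the constant's value and the RoundUp helper are assumptions for illustration, the real definitions live in ART headers:

#include <cstddef>
#include <cstdio>

constexpr size_t kMemoryToolRedZoneBytes = 8;  // Assumed value, for illustration only.

constexpr size_t RoundUp(size_t x, size_t n) {
  return (x + n - 1) / n * n;
}

int main() {
  const size_t bytes = 13;  // Example request.
  const size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
  // Offsets relative to the returned pointer:
  //   [0, bytes)             -> MEMORY_TOOL_MAKE_DEFINED (usable by the caller)
  //   [bytes, rounded_bytes) -> red zone + padding, left inaccessible
  std::printf("usable: [0, %zu)  poisoned: [%zu, %zu)\n", bytes, bytes, rounded_bytes);
  return 0;
}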
@@ -340,14 +353,27 @@ ArenaAllocator::~ArenaAllocator() {
pool_->FreeArenaChain(arena_head_);
}
-void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
- UpdateBytesAllocated();
- Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, allocation_size));
- new_arena->next_ = arena_head_;
- arena_head_ = new_arena;
- // Update our internal data structures.
- ptr_ = begin_ = new_arena->Begin();
- end_ = new_arena->End();
+uint8_t* ArenaAllocator::AllocFromNewArena(size_t bytes) {
+ Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, bytes));
+ DCHECK(new_arena != nullptr);
+ DCHECK_LE(bytes, new_arena->Size());
+ if (static_cast<size_t>(end_ - ptr_) > new_arena->Size() - bytes) {
+ // The old arena has more space remaining than the new one, so keep using it.
+ // This can happen when the requested size is over half of the default size.
+ DCHECK(arena_head_ != nullptr);
+ new_arena->bytes_allocated_ = bytes; // UpdateBytesAllocated() on the new_arena.
+ new_arena->next_ = arena_head_->next_;
+ arena_head_->next_ = new_arena;
+ } else {
+ UpdateBytesAllocated();
+ new_arena->next_ = arena_head_;
+ arena_head_ = new_arena;
+ // Update our internal data structures.
+ begin_ = new_arena->Begin();
+ ptr_ = begin_ + bytes;
+ end_ = new_arena->End();
+ }
+ return new_arena->Begin();
}
bool ArenaAllocator::Contains(const void* ptr) const {
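
Note on the AllocFromNewArena hunk: when a request does not fit, a new arena of at least max(Arena::kDefaultSize, bytes) is obtained, but bump allocation only switches to it if the old arena has less usable space left than the new one would have after serving the request; otherwise the new arena is recorded as fully used by this allocation and spliced in right behind the head. A minimal standalone sketch of that decision, with simplified names and fields rather than the ART types:

#include <cstddef>
#include <cstdint>

struct SketchArena {
  uint8_t* begin = nullptr;
  size_t size = 0;
  size_t bytes_allocated = 0;
  SketchArena* next = nullptr;
  uint8_t* Begin() { return begin; }
  uint8_t* End() { return begin + size; }
};

struct SketchAllocator {
  SketchArena* head = nullptr;
  uint8_t* ptr = nullptr;
  uint8_t* end = nullptr;

  // Mirrors the hunk above: returns the start of the new arena, which is where
  // the oversized allocation lives in either branch. Assumes the caller passes
  // a new arena with size >= bytes, as the real code DCHECKs.
  uint8_t* AllocFromNewArena(SketchArena* new_arena, size_t bytes) {
    if (head != nullptr &&
        static_cast<size_t>(end - ptr) > new_arena->size - bytes) {
      // The old arena still has more room than the new one would have left:
      // keep bump-allocating from the old arena, account for the new arena's
      // usage, and splice it into the chain right after the head.
      new_arena->bytes_allocated = bytes;
      new_arena->next = head->next;
      head->next = new_arena;
    } else {
      // Switch bump allocation over to the new arena. (The real code also
      // records the old head's usage via UpdateBytesAllocated() here.)
      new_arena->next = head;
      head = new_arena;
      ptr = new_arena->Begin() + bytes;
      end = new_arena->End();
    }
    return new_arena->Begin();
  }
};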