diff options
author | Vladimir Marko <vmarko@google.com> | 2017-10-12 13:34:49 +0100 |
---|---|---|
committer | Vladimir Marko <vmarko@google.com> | 2017-10-17 11:12:08 +0100 |
commit | 174b2e27ebf933b80f4e8b64b4b024ab4306aaac (patch) | |
tree | 968cdd8d7fd68571115db77cc288807c3b257911 /compiler/optimizing/optimizing_compiler.cc | |
parent | 6783118d2ad9d759f0617b1219a9e29a10a569f7 (diff) |
Use ScopedArenaAllocator for code generation.
Reuse the memory previously allocated on the ArenaStack by
optimization passes.
This CL handles only the architecture-independent codegen
and slow paths, architecture-dependent codegen allocations
shall be moved to the ScopedArenaAllocator in a follow-up.
Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
BatteryStats.dumpCheckinLocked(): 19.6MiB -> 18.5MiB (-1189KiB)
BatteryStats.dumpLocked(): 39.3MiB -> 37.0MiB (-2379KiB)
Also move definitions of functions that use bit_vector-inl.h
from bit_vector.h to bit_vector-inl.h.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 64312607
Change-Id: I84688c3a5a95bf90f56bd3a150bc31fedc95f29c
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 26 |
1 file changed, 14 insertions, 12 deletions
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 42f32b7866..29319f8c38 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -1142,6 +1142,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item, method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item); if (kArenaAllocatorCountAllocations) { + codegen.reset(); // Release codegen's ScopedArenaAllocator for memory accounting. size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated(); if (total_allocated > kArenaAllocatorMemoryReportThreshold) { MemStats mem_stats(allocator.GetMemStats()); @@ -1251,18 +1252,6 @@ bool OptimizingCompiler::JitCompile(Thread* self, if (codegen.get() == nullptr) { return false; } - - if (kArenaAllocatorCountAllocations) { - size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated(); - if (total_allocated > kArenaAllocatorMemoryReportThreshold) { - MemStats mem_stats(allocator.GetMemStats()); - MemStats peak_stats(arena_stack.GetPeakStats()); - LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling " - << dex_file->PrettyMethod(method_idx) - << "\n" << Dumpable<MemStats>(mem_stats) - << "\n" << Dumpable<MemStats>(peak_stats); - } - } } size_t stack_map_size = 0; @@ -1357,6 +1346,19 @@ bool OptimizingCompiler::JitCompile(Thread* self, jit_logger->WriteLog(code, code_allocator.GetSize(), method); } + if (kArenaAllocatorCountAllocations) { + codegen.reset(); // Release codegen's ScopedArenaAllocator for memory accounting. 
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated(); + if (total_allocated > kArenaAllocatorMemoryReportThreshold) { + MemStats mem_stats(allocator.GetMemStats()); + MemStats peak_stats(arena_stack.GetPeakStats()); + LOG(INFO) << "Used " << total_allocated << " bytes of arena memory for compiling " + << dex_file->PrettyMethod(method_idx) + << "\n" << Dumpable<MemStats>(mem_stats) + << "\n" << Dumpable<MemStats>(peak_stats); + } + } + return true; } |