author    David Srbecky <dsrbecky@google.com>    2019-10-04 17:44:33 +0100
committer David Srbecky <dsrbecky@google.com>    2019-10-09 15:51:56 +0000
commit    e1b36f09cb478a39ba443f6acb11cd1901c01c1d (patch)
tree      75642c703fee9fce7cd09af344dd26d485379b13 /compiler/optimizing/optimizing_compiler.cc
parent    449011217844401ed491ad0f89dcba21cc26bfc4 (diff)
JIT: Separate code allocation and initialization.
Allocate (reserve) both code and data first, and then initialize (commit) both of them. This is useful since we know the address sooner for the purpose of debug info generation.

Test: ./art/test.py -b -r --jit --host --64
Change-Id: I4971a8801004efbc6c2b27884834dda775b72664
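The core idea is a two-phase protocol: Reserve allocates space for both the code and the data up front, so the final code address is known before anything is written; Commit then copies the compiled artifacts in. Below is a minimal, self-contained sketch of that pattern. All types and signatures here are simplified stand-ins, not the real API: JitCodeCache::Reserve/Commit/Free additionally take Thread*, JitMemoryRegion*, ArtMethod*, GC roots, stack maps, and OSR/CHA flags, as the diff below shows.

    // Hypothetical, simplified sketch of the reserve-then-commit pattern.
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Span { uint8_t* ptr = nullptr; size_t size = 0; };

    class CacheSketch {
     public:
      // Step 1: reserve space for both code and data. Nothing is written
      // yet, but the final addresses are now known to the caller.
      bool Reserve(size_t code_size, size_t data_size, Span* code, Span* data) {
        code_storage_.resize(code_size);
        data_storage_.resize(data_size);
        *code = {code_storage_.data(), code_size};
        *data = {data_storage_.data(), data_size};
        return true;
      }

      // Step 2: commit (initialize) both reservations. In ART this is also
      // where caches are flushed and the method entry point is updated.
      bool Commit(Span code, const std::vector<uint8_t>& compiled,
                  Span data, const std::vector<uint8_t>& roots) {
        if (compiled.size() > code.size || roots.size() > data.size) return false;
        std::memcpy(code.ptr, compiled.data(), compiled.size());
        std::memcpy(data.ptr, roots.data(), roots.size());
        return true;
      }

      // On commit failure the caller releases both reservations at once.
      void Free(Span code, Span data) { (void)code; (void)data; }

     private:
      std::vector<uint8_t> code_storage_, data_storage_;
    };

The payoff sits between the two calls: debug info can be generated as soon as Reserve has fixed the code address, instead of inside a generate_debug_info callback invoked from within CommitCode, and a failed Commit is rolled back with a single Free of both reservations.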
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--    compiler/optimizing/optimizing_compiler.cc    119
1 file changed, 62 insertions(+), 57 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d30d68118d..a45f5025d8 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1288,17 +1288,24 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
jni_compiled_method);
- const uint8_t* roots_data = code_cache->ReserveData(
- self, region, stack_map.size(), /* number_of_roots= */ 0, method);
- if (roots_data == nullptr) {
+
+ ArrayRef<const uint8_t> reserved_code;
+ ArrayRef<const uint8_t> reserved_data;
+ if (!code_cache->Reserve(self,
+ region,
+ jni_compiled_method.GetCode().size(),
+ stack_map.size(),
+ /* number_of_roots= */ 0,
+ method,
+ /*out*/ &reserved_code,
+ /*out*/ &reserved_data)) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
+ const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
// Add debug info after we know the code location but before we update entry-point.
- const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
- const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ if (compiler_options.GenerateAnyDebugInfo()) {
debug::MethodDebugInfo info = {};
info.custom_name = "art_jni_trampoline";
info.dex_file = dex_file;
@@ -1311,30 +1318,26 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.is_native_debuggable = compiler_options.GetNativeDebuggable();
info.is_optimized = true;
info.is_code_address_text_relative = false;
- info.code_address = code_address;
+ info.code_address = reinterpret_cast<uintptr_t>(code);
info.code_size = jni_compiled_method.GetCode().size();
- info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+ info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
info.code_info = nullptr;
info.cfi = jni_compiled_method.GetCfi();
GenerateJitDebugInfo(info);
- };
-
- const void* code = code_cache->CommitCode(
- self,
- region,
- method,
- jni_compiled_method.GetCode().data(),
- jni_compiled_method.GetCode().size(),
- stack_map.data(),
- stack_map.size(),
- roots_data,
- roots,
- osr,
- /* has_should_deoptimize_flag= */ false,
- cha_single_implementation_list,
- generate_debug_info);
- if (code == nullptr) {
- code_cache->ClearData(self, region, roots_data);
+ }
+
+ if (!code_cache->Commit(self,
+ region,
+ method,
+ reserved_code,
+ jni_compiled_method.GetCode(),
+ reserved_data,
+ roots,
+ ArrayRef<const uint8_t>(stack_map),
+ osr,
+ /* has_should_deoptimize_flag= */ false,
+ cha_single_implementation_list)) {
+ code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
return false;
}
@@ -1382,13 +1385,23 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
- size_t number_of_roots = codegen->GetNumberOfJitRoots();
- const uint8_t* roots_data = code_cache->ReserveData(
- self, region, stack_map.size(), number_of_roots, method);
- if (roots_data == nullptr) {
+
+ ArrayRef<const uint8_t> reserved_code;
+ ArrayRef<const uint8_t> reserved_data;
+ if (!code_cache->Reserve(self,
+ region,
+ code_allocator.GetMemory().size(),
+ stack_map.size(),
+ /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
+ method,
+ /*out*/ &reserved_code,
+ /*out*/ &reserved_data)) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
+ const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
+ const uint8_t* roots_data = reserved_data.data();
+
std::vector<Handle<mirror::Object>> roots;
codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
// The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1399,10 +1412,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}));
// Add debug info after we know the code location but before we update entry-point.
- const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
- const CompilerOptions& compiler_options = GetCompilerOptions();
- const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
- const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ const CompilerOptions& compiler_options = GetCompilerOptions();
+ if (compiler_options.GenerateAnyDebugInfo()) {
debug::MethodDebugInfo info = {};
DCHECK(info.custom_name.empty());
info.dex_file = dex_file;
@@ -1415,32 +1426,26 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.is_native_debuggable = compiler_options.GetNativeDebuggable();
info.is_optimized = true;
info.is_code_address_text_relative = false;
- info.code_address = code_address;
+ info.code_address = reinterpret_cast<uintptr_t>(code);
info.code_size = code_allocator.GetMemory().size();
- info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
- info.code_info = stack_map.size() == 0 ? nullptr : method_header->GetOptimizedCodeInfoPtr();
+ info.frame_size_in_bytes = codegen->GetFrameSize();
+ info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
GenerateJitDebugInfo(info);
- };
+ }
- const void* code = code_cache->CommitCode(
- self,
- region,
- method,
- code_allocator.GetMemory().data(),
- code_allocator.GetMemory().size(),
- stack_map.data(),
- stack_map.size(),
- roots_data,
- roots,
- osr,
- codegen->GetGraph()->HasShouldDeoptimizeFlag(),
- codegen->GetGraph()->GetCHASingleImplementationList(),
- generate_debug_info);
-
- if (code == nullptr) {
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
- code_cache->ClearData(self, region, roots_data);
+ if (!code_cache->Commit(self,
+ region,
+ method,
+ reserved_code,
+ code_allocator.GetMemory(),
+ reserved_data,
+ roots,
+ ArrayRef<const uint8_t>(stack_map),
+ osr,
+ codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+ codegen->GetGraph()->GetCHASingleImplementationList())) {
+ code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
return false;
}
@@ -1477,7 +1482,7 @@ void OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info
std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
// NB: Don't allow packing of full info since it would remove non-backtrace data.
- Locks::jit_lock_->AssertHeld(Thread::Current());
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
}
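One side effect of moving debug info generation out of the commit callback is visible in the last hunk: GenerateJitDebugInfo previously ran with jit_lock_ already held by its caller and merely asserted that, whereas now it is reached outside the commit path and must acquire the lock itself. A hedged sketch of that change, using std::mutex as a stand-in for ART's Mutex/MutexLock:

    #include <mutex>

    std::mutex jit_lock;  // stand-in for *Locks::jit_lock_

    void GenerateJitDebugInfoSketch() {
      // Before: the caller (CommitCode) already held the lock, so this
      // function only asserted the fact. Now it takes the lock itself,
      // RAII-style, like the MutexLock in the diff above.
      std::lock_guard<std::mutex> mu(jit_lock);
      // ... register ELF debug info for the JIT while holding the lock ...
    }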