author    | Nicolas Geoffray <ngeoffray@google.com> | 2019-10-09 20:12:15 +0000
committer | Nicolas Geoffray <ngeoffray@google.com> | 2019-10-09 20:12:15 +0000
commit    | 63b0c26aae3e7237166dd781eb7a15fbc7c091c2 (patch)
tree      | 5713f52331c0cd3b881df80f4314f9ea2a0f09db /compiler/optimizing/optimizing_compiler.cc
parent    | e1b36f09cb478a39ba443f6acb11cd1901c01c1d (diff)
Revert "JIT: Separate code allocation and initialization."
This reverts commit e1b36f09cb478a39ba443f6acb11cd1901c01c1d.
Reason for revert: Crashes in art::jit::JitMemoryRegion::FreeCode
https://logs.chromium.org/logs/art/buildbucket/cr-buildbucket.appspot.com/8900060997388741808/+/steps/test_libcore/0/stdout
Change-Id: I40259baec9acbb7889732548f8b31e0a48651cd9
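For orientation before the diff below, here is a minimal, self-contained sketch of the two commit-failure cleanup flows this revert swaps. This is not ART code; `FakeCodeCache` and every signature in it are simplified stand-ins for the `Reserve`/`Commit`/`Free` and `ReserveData`/`CommitCode`/`ClearData` calls visible in the diff. The change being reverted reserved code and data up front and called `Free()` when the commit failed, which appears to be the path the FreeCode crash points at; the restored flow reserves only roots data and calls `ClearData()` on failure.

```cpp
// Minimal model (not ART code) of the two cleanup flows swapped by this revert.
// FakeCodeCache and all signatures are simplified stand-ins.
#include <cstdint>
#include <iostream>
#include <vector>

struct FakeCodeCache {
  // Flow being reverted: reserve code + data up front, free both on a failed commit.
  bool Reserve(std::vector<uint8_t>* code, std::vector<uint8_t>* data) {
    code->resize(64);
    data->resize(16);
    return true;
  }
  bool Commit() { return false; }  // simulate a commit failure
  void Free(std::vector<uint8_t>* code, std::vector<uint8_t>* data) {
    code->clear();  // stand-in for the Free path the revert reason associates with JitMemoryRegion::FreeCode
    data->clear();
  }

  // Flow being restored: reserve only roots data, clear it on a failed commit.
  uint8_t* ReserveData(std::vector<uint8_t>* data) {
    data->resize(16);
    return data->data();
  }
  const void* CommitCode() { return nullptr; }  // simulate a commit failure
  void ClearData(std::vector<uint8_t>* data) { data->clear(); }
};

int main() {
  FakeCodeCache cache;

  // Reverted (two-phase) shape: the failure path must release both reservations.
  std::vector<uint8_t> reserved_code, reserved_data;
  if (cache.Reserve(&reserved_code, &reserved_data) && !cache.Commit()) {
    cache.Free(&reserved_code, &reserved_data);
    std::cout << "two-phase flow: freed reservations after failed commit\n";
  }

  // Restored shape: only the roots data needs cleanup.
  std::vector<uint8_t> roots_data;
  if (cache.ReserveData(&roots_data) != nullptr && cache.CommitCode() == nullptr) {
    cache.ClearData(&roots_data);
    std::cout << "restored flow: cleared roots data after failed commit\n";
  }
  return 0;
}
```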
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 119
1 file changed, 57 insertions, 62 deletions
```diff
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a45f5025d8..d30d68118d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1288,24 +1288,17 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
     ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                              jni_compiled_method);
-
-    ArrayRef<const uint8_t> reserved_code;
-    ArrayRef<const uint8_t> reserved_data;
-    if (!code_cache->Reserve(self,
-                             region,
-                             jni_compiled_method.GetCode().size(),
-                             stack_map.size(),
-                             /* number_of_roots= */ 0,
-                             method,
-                             /*out*/ &reserved_code,
-                             /*out*/ &reserved_data)) {
+    const uint8_t* roots_data = code_cache->ReserveData(
+        self, region, stack_map.size(), /* number_of_roots= */ 0, method);
+    if (roots_data == nullptr) {
       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
       return false;
     }
-    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
 
     // Add debug info after we know the code location but before we update entry-point.
-    if (compiler_options.GenerateAnyDebugInfo()) {
+    const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
+      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
+      const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
       debug::MethodDebugInfo info = {};
       info.custom_name = "art_jni_trampoline";
       info.dex_file = dex_file;
@@ -1318,26 +1311,30 @@ bool OptimizingCompiler::JitCompile(Thread* self,
       info.is_native_debuggable = compiler_options.GetNativeDebuggable();
       info.is_optimized = true;
       info.is_code_address_text_relative = false;
-      info.code_address = reinterpret_cast<uintptr_t>(code);
+      info.code_address = code_address;
       info.code_size = jni_compiled_method.GetCode().size();
-      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
+      info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
       info.code_info = nullptr;
       info.cfi = jni_compiled_method.GetCfi();
       GenerateJitDebugInfo(info);
-    }
-
-    if (!code_cache->Commit(self,
-                            region,
-                            method,
-                            reserved_code,
-                            jni_compiled_method.GetCode(),
-                            reserved_data,
-                            roots,
-                            ArrayRef<const uint8_t>(stack_map),
-                            osr,
-                            /* has_should_deoptimize_flag= */ false,
-                            cha_single_implementation_list)) {
-      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+    };
+
+    const void* code = code_cache->CommitCode(
+        self,
+        region,
+        method,
+        jni_compiled_method.GetCode().data(),
+        jni_compiled_method.GetCode().size(),
+        stack_map.data(),
+        stack_map.size(),
+        roots_data,
+        roots,
+        osr,
+        /* has_should_deoptimize_flag= */ false,
+        cha_single_implementation_list,
+        generate_debug_info);
+    if (code == nullptr) {
+      code_cache->ClearData(self, region, roots_data);
       return false;
     }
 
@@ -1385,23 +1382,13 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   }
 
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
-
-  ArrayRef<const uint8_t> reserved_code;
-  ArrayRef<const uint8_t> reserved_data;
-  if (!code_cache->Reserve(self,
-                           region,
-                           code_allocator.GetMemory().size(),
-                           stack_map.size(),
-                           /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
-                           method,
-                           /*out*/ &reserved_code,
-                           /*out*/ &reserved_data)) {
+  size_t number_of_roots = codegen->GetNumberOfJitRoots();
+  const uint8_t* roots_data = code_cache->ReserveData(
+      self, region, stack_map.size(), number_of_roots, method);
+  if (roots_data == nullptr) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
-  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
-  const uint8_t* roots_data = reserved_data.data();
-
   std::vector<Handle<mirror::Object>> roots;
   codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
   // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1412,8 +1399,10 @@ bool OptimizingCompiler::JitCompile(Thread* self,
       }));
 
   // Add debug info after we know the code location but before we update entry-point.
-  const CompilerOptions& compiler_options = GetCompilerOptions();
-  if (compiler_options.GenerateAnyDebugInfo()) {
+  const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
+    const CompilerOptions& compiler_options = GetCompilerOptions();
+    const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code);
+    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
     debug::MethodDebugInfo info = {};
     DCHECK(info.custom_name.empty());
     info.dex_file = dex_file;
@@ -1426,26 +1415,32 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     info.is_native_debuggable = compiler_options.GetNativeDebuggable();
     info.is_optimized = true;
     info.is_code_address_text_relative = false;
-    info.code_address = reinterpret_cast<uintptr_t>(code);
+    info.code_address = code_address;
     info.code_size = code_allocator.GetMemory().size();
-    info.frame_size_in_bytes = codegen->GetFrameSize();
-    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
+    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+    info.code_info = stack_map.size() == 0 ? nullptr : method_header->GetOptimizedCodeInfoPtr();
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
     GenerateJitDebugInfo(info);
-  }
+  };
 
-  if (!code_cache->Commit(self,
-                          region,
-                          method,
-                          reserved_code,
-                          code_allocator.GetMemory(),
-                          reserved_data,
-                          roots,
-                          ArrayRef<const uint8_t>(stack_map),
-                          osr,
-                          codegen->GetGraph()->HasShouldDeoptimizeFlag(),
-                          codegen->GetGraph()->GetCHASingleImplementationList())) {
-    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+  const void* code = code_cache->CommitCode(
+      self,
+      region,
+      method,
+      code_allocator.GetMemory().data(),
+      code_allocator.GetMemory().size(),
+      stack_map.data(),
+      stack_map.size(),
+      roots_data,
+      roots,
+      osr,
+      codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+      codegen->GetGraph()->GetCHASingleImplementationList(),
+      generate_debug_info);
+
+  if (code == nullptr) {
+    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
+    code_cache->ClearData(self, region, roots_data);
     return false;
   }
 
@@ -1482,7 +1477,7 @@ void OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info
   std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
 
   // NB: Don't allow packing of full info since it would remove non-backtrace data.
-  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  Locks::jit_lock_->AssertHeld(Thread::Current());
   const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
   AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
 }
```
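The main structural change restored above is the `generate_debug_info` callback: instead of computing the code address up front from the reserved region and emitting debug info inline, debug-info generation is wrapped in a `std::function` that `CommitCode` invokes once the final code location (and its `OatQuickMethodHeader`) is known. Below is a minimal, compilable sketch of that callback shape; `FakeRegion` and its `CommitCode` signature are illustrative assumptions, not the real `JitCodeCache` API.

```cpp
// Self-contained sketch (not ART code) of the deferred debug-info callback
// restored by this revert. FakeRegion and its CommitCode signature are
// invented for illustration.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

struct FakeRegion {
  std::vector<uint8_t> storage = std::vector<uint8_t>(128);

  // Commit copies the code into its final location, then calls back so the
  // caller can emit debug info keyed to the real code address.
  const void* CommitCode(const std::vector<uint8_t>& code,
                         const std::function<void(const void*)>& generate_debug_info) {
    std::copy(code.begin(), code.end(), storage.begin());
    const void* final_code = storage.data();
    generate_debug_info(final_code);
    return final_code;
  }
};

int main() {
  FakeRegion region;
  const std::vector<uint8_t> compiled_code = {0x90, 0x90, 0xc3};

  // Mirrors the restored shape: the lambda captures what it needs and only
  // reads the code pointer when invoked, after the commit has placed the code.
  const std::function<void(const void*)> generate_debug_info = [&](const void* code) {
    std::cout << "debug info for code at " << code
              << ", size " << compiled_code.size() << " bytes\n";
  };

  region.CommitCode(compiled_code, generate_debug_info);
  return 0;
}
```

The revert also returns GenerateJitDebugInfo to asserting that Locks::jit_lock_ is already held (Locks::jit_lock_->AssertHeld) instead of acquiring it with a MutexLock, so in the restored flow the caller is expected to hold the JIT lock around debug-info registration.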