Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 242
1 file changed, 99 insertions(+), 143 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f4bf11d3d3..9978a6fa18 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -137,13 +137,15 @@ class PassObserver : public ValueObject {
       LOG(INFO) << "TIMINGS " << GetMethodName();
       LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
     }
+    if (visualizer_enabled_) {
+      FlushVisualizer();
+    }
     DCHECK(visualizer_oss_.str().empty());
   }
 
-  void DumpDisassembly() REQUIRES(!visualizer_dump_mutex_) {
+  void DumpDisassembly() {
     if (visualizer_enabled_) {
       visualizer_.DumpGraphWithDisassembly();
-      FlushVisualizer();
     }
   }
 
@@ -158,12 +160,11 @@ class PassObserver : public ValueObject {
   }
 
 private:
-  void StartPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
+  void StartPass(const char* pass_name) {
    VLOG(compiler) << "Starting pass: " << pass_name;
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
-      FlushVisualizer();
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
@@ -178,14 +179,13 @@
    visualizer_oss_.clear();
  }
 
-  void EndPass(const char* pass_name, bool pass_change) REQUIRES(!visualizer_dump_mutex_) {
+  void EndPass(const char* pass_name, bool pass_change) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
-      FlushVisualizer();
    }
 
    // Validate the HGraph if running in debug mode.
@@ -295,6 +295,7 @@ class OptimizingCompiler final : public Compiler {
 
   bool JitCompile(Thread* self,
                   jit::JitCodeCache* code_cache,
+                  jit::JitMemoryRegion* region,
                   ArtMethod* method,
                   bool baseline,
                   bool osr,
@@ -383,6 +384,7 @@
                             ArtMethod* method,
                             bool baseline,
                             bool osr,
+                            bool is_shared_jit_code,
                             VariableSizedHandleScope* handles) const;
 
   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -404,9 +406,7 @@
                         PassObserver* pass_observer,
                         VariableSizedHandleScope* handles) const;
 
-  void GenerateJitDebugInfo(ArtMethod* method,
-                            const debug::MethodDebugInfo& method_debug_info)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  void GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
 
   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
 
@@ -450,8 +450,6 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
   return instruction_set == InstructionSet::kArm
       || instruction_set == InstructionSet::kArm64
       || instruction_set == InstructionSet::kThumb2
-      || instruction_set == InstructionSet::kMips
-      || instruction_set == InstructionSet::kMips64
       || instruction_set == InstructionSet::kX86
       || instruction_set == InstructionSet::kX86_64;
 }
@@ -462,19 +460,6 @@ bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
                                                   PassObserver* pass_observer,
                                                   VariableSizedHandleScope* handles) const {
   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips: {
-      OptimizationDef mips_optimizations[] = {
-        OptDef(OptimizationPass::kPcRelativeFixupsMips)
-      };
-      return RunOptimizations(graph,
-                              codegen,
-                              dex_compilation_unit,
-                              pass_observer,
-                              handles,
-                              mips_optimizations);
-    }
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86: {
       OptimizationDef x86_optimizations[] = {
@@ -537,36 +522,6 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
                               arm64_optimizations);
     }
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips: {
-      OptimizationDef mips_optimizations[] = {
-        OptDef(OptimizationPass::kInstructionSimplifierMips),
-        OptDef(OptimizationPass::kSideEffectsAnalysis),
-        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
-        OptDef(OptimizationPass::kPcRelativeFixupsMips)
-      };
-      return RunOptimizations(graph,
-                              codegen,
-                              dex_compilation_unit,
-                              pass_observer,
-                              handles,
-                              mips_optimizations);
-    }
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64: {
-      OptimizationDef mips64_optimizations[] = {
-        OptDef(OptimizationPass::kSideEffectsAnalysis),
-        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch")
-      };
-      return RunOptimizations(graph,
-                              codegen,
-                              dex_compilation_unit,
-                              pass_observer,
-                              handles,
-                              mips64_optimizations);
-    }
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86: {
       OptimizationDef x86_optimizations[] = {
@@ -782,6 +737,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
                                               ArtMethod* method,
                                               bool baseline,
                                               bool osr,
+                                              bool is_shared_jit_code,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
   const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -849,7 +805,9 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
       kInvalidInvokeType,
       dead_reference_safe,
       compiler_options.GetDebuggable(),
-      /* osr= */ osr);
+      /* osr= */ osr,
+      /* is_shared_jit_code= */ is_shared_jit_code,
+      /* baseline= */ baseline);
 
   if (method != nullptr) {
     graph->SetArtMethod(method);
@@ -910,6 +868,11 @@
                         MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
         break;
       }
+      case kAnalysisFailPhiEquivalentInOsr: {
+        MaybeRecordStat(compilation_stats_.get(),
+                        MethodCompilationStat::kNotCompiledPhiEquivalentInOsr);
+        break;
+      }
       case kAnalysisSuccess:
         UNREACHABLE();
     }
@@ -1106,6 +1069,7 @@ CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
                          method,
                          compiler_options.IsBaseline(),
                          /* osr= */ false,
+                         /* is_shared_jit_code= */ false,
                          &handles));
     }
   }
@@ -1166,7 +1130,8 @@ static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
                                 jni_compiled_method.GetFrameSize(),
                                 jni_compiled_method.GetCoreSpillMask(),
                                 jni_compiled_method.GetFpSpillMask(),
-                                /* num_dex_registers= */ 0);
+                                /* num_dex_registers= */ 0,
+                                /* baseline= */ false);
   stack_map_stream->EndMethod();
   return stack_map_stream->Encode();
 }
@@ -1248,6 +1213,7 @@ bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
 
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
+                                    jit::JitMemoryRegion* region,
                                     ArtMethod* method,
                                     bool baseline,
                                     bool osr,
@@ -1279,39 +1245,24 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
     ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                              jni_compiled_method);
-    uint8_t* stack_map_data = nullptr;
-    uint8_t* roots_data = nullptr;
-    uint32_t data_size = code_cache->ReserveData(self,
-                                                 stack_map.size(),
-                                                 /* number_of_roots= */ 0,
-                                                 method,
-                                                 &stack_map_data,
-                                                 &roots_data);
-    if (stack_map_data == nullptr || roots_data == nullptr) {
+
+    ArrayRef<const uint8_t> reserved_code;
+    ArrayRef<const uint8_t> reserved_data;
+    if (!code_cache->Reserve(self,
+                             region,
+                             jni_compiled_method.GetCode().size(),
+                             stack_map.size(),
+                             /* number_of_roots= */ 0,
+                             method,
+                             /*out*/ &reserved_code,
+                             /*out*/ &reserved_data)) {
      MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
      return false;
    }
-    memcpy(stack_map_data, stack_map.data(), stack_map.size());
-
-    const void* code = code_cache->CommitCode(
-        self,
-        method,
-        stack_map_data,
-        roots_data,
-        jni_compiled_method.GetCode().data(),
-        jni_compiled_method.GetCode().size(),
-        data_size,
-        osr,
-        roots,
-        /* has_should_deoptimize_flag= */ false,
-        cha_single_implementation_list);
-    if (code == nullptr) {
-      return false;
-    }
+    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
 
+    // Add debug info after we know the code location but before we update entry-point.
    if (compiler_options.GenerateAnyDebugInfo()) {
-      const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
-      const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
      debug::MethodDebugInfo info = {};
      info.custom_name = "art_jni_trampoline";
      info.dex_file = dex_file;
@@ -1324,12 +1275,27 @@ bool OptimizingCompiler::JitCompile(Thread* self,
      info.is_native_debuggable = compiler_options.GetNativeDebuggable();
      info.is_optimized = true;
      info.is_code_address_text_relative = false;
-      info.code_address = code_address;
+      info.code_address = reinterpret_cast<uintptr_t>(code);
      info.code_size = jni_compiled_method.GetCode().size();
-      info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
      info.code_info = nullptr;
      info.cfi = jni_compiled_method.GetCfi();
-      GenerateJitDebugInfo(method, info);
+      GenerateJitDebugInfo(info);
+    }
+
+    if (!code_cache->Commit(self,
+                            region,
+                            method,
+                            reserved_code,
+                            jni_compiled_method.GetCode(),
+                            reserved_data,
+                            roots,
+                            ArrayRef<const uint8_t>(stack_map),
+                            osr,
+                            /* has_should_deoptimize_flag= */ false,
+                            cha_single_implementation_list)) {
+      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+      return false;
    }
 
    Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1366,8 +1332,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                    &code_allocator,
                    dex_compilation_unit,
                    method,
-                   baseline,
+                   baseline || GetCompilerOptions().IsBaseline(),
                    osr,
+                   /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
                    &handles));
    if (codegen.get() == nullptr) {
      return false;
    }
  }
@@ -1375,20 +1342,23 @@
 
  ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
-  size_t number_of_roots = codegen->GetNumberOfJitRoots();
-  uint8_t* stack_map_data = nullptr;
-  uint8_t* roots_data = nullptr;
-  uint32_t data_size = code_cache->ReserveData(self,
-                                               stack_map.size(),
-                                               number_of_roots,
-                                               method,
-                                               &stack_map_data,
-                                               &roots_data);
-  if (stack_map_data == nullptr || roots_data == nullptr) {
+
+  ArrayRef<const uint8_t> reserved_code;
+  ArrayRef<const uint8_t> reserved_data;
+  if (!code_cache->Reserve(self,
+                           region,
+                           code_allocator.GetMemory().size(),
+                           stack_map.size(),
+                           /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
+                           method,
+                           /*out*/ &reserved_code,
+                           /*out*/ &reserved_data)) {
    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
    return false;
  }
-  memcpy(stack_map_data, stack_map.data(), stack_map.size());
+  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
+  const uint8_t* roots_data = reserved_data.data();
+
  std::vector<Handle<mirror::Object>> roots;
  codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
  // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1398,29 +1368,9 @@
    return handles.Contains(root.GetReference());
  }));
 
-  const void* code = code_cache->CommitCode(
-      self,
-      method,
-      stack_map_data,
-      roots_data,
-      code_allocator.GetMemory().data(),
-      code_allocator.GetMemory().size(),
-      data_size,
-      osr,
-      roots,
-      codegen->GetGraph()->HasShouldDeoptimizeFlag(),
-      codegen->GetGraph()->GetCHASingleImplementationList());
-
-  if (code == nullptr) {
-    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
-    code_cache->ClearData(self, stack_map_data, roots_data);
-    return false;
-  }
-
+  // Add debug info after we know the code location but before we update entry-point.
  const CompilerOptions& compiler_options = GetCompilerOptions();
  if (compiler_options.GenerateAnyDebugInfo()) {
-    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
-    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
    debug::MethodDebugInfo info = {};
    DCHECK(info.custom_name.empty());
    info.dex_file = dex_file;
@@ -1433,12 +1383,27 @@
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
-    info.code_address = code_address;
+    info.code_address = reinterpret_cast<uintptr_t>(code);
    info.code_size = code_allocator.GetMemory().size();
-    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
-    info.code_info = stack_map.size() == 0 ? nullptr : stack_map_data;
+    info.frame_size_in_bytes = codegen->GetFrameSize();
+    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
-    GenerateJitDebugInfo(method, info);
+    GenerateJitDebugInfo(info);
+  }
+
+  if (!code_cache->Commit(self,
+                          region,
+                          method,
+                          reserved_code,
+                          code_allocator.GetMemory(),
+                          reserved_data,
+                          roots,
+                          ArrayRef<const uint8_t>(stack_map),
+                          osr,
+                          codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+                          codegen->GetGraph()->GetCHASingleImplementationList())) {
+    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+    return false;
  }
 
  Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1462,31 +1427,22 @@
  return true;
 }
 
-void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
-                                              const debug::MethodDebugInfo& info) {
+void OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
  const CompilerOptions& compiler_options = GetCompilerOptions();
-  DCHECK(compiler_options.GenerateAnyDebugInfo());
-  TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit));
-  {
-    TimingLogger::ScopedTiming st("Generate JIT debug info", &logger);
-
+  if (compiler_options.GenerateAnyDebugInfo()) {
    // If both flags are passed, generate full debug info.
    const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
 
    // Create entry for the single method that we just compiled.
-    std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
-        compiler_options.GetInstructionSet(),
-        compiler_options.GetInstructionSetFeatures(),
-        mini_debug_info,
-        info);
-    AddNativeDebugInfoForJit(Thread::Current(),
-                             reinterpret_cast<const void*>(info.code_address),
-                             elf_file,
-                             debug::PackElfFileForJIT,
-                             compiler_options.GetInstructionSet(),
-                             compiler_options.GetInstructionSetFeatures());
+    InstructionSet isa = compiler_options.GetInstructionSet();
+    const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
+    std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
+
+    // NB: Don't allow packing of full info since it would remove non-backtrace data.
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
+    AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
  }
-  Runtime::Current()->GetJit()->AddTimingLogger(logger);
 }
 
 }  // namespace art
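Both JitCompile() paths in this change move from the old ReserveData()/CommitCode() pair to a three-step Reserve()/Commit()/Free() protocol on JitCodeCache, parameterized by the new jit::JitMemoryRegion. The sketch below condenses that protocol as the diff uses it. It is a minimal illustration, not code from the commit: the helper name, parameter types, and include paths are assumptions; only the JitCodeCache and OatQuickMethodHeader calls mirror the diff above.

// Illustrative sketch of the Reserve -> Commit -> Free protocol.
// Helper name, parameter types, and includes are assumptions.
#include <vector>

#include "art_method.h"
#include "base/array_ref.h"
#include "handle.h"
#include "jit/jit_code_cache.h"
#include "mirror/object.h"
#include "oat_quick_method_header.h"
#include "thread.h"

namespace art {

bool CommitToJitCache(Thread* self,
                      jit::JitCodeCache* code_cache,
                      jit::JitMemoryRegion* region,
                      ArtMethod* method,
                      ArrayRef<const uint8_t> code,
                      ArrayRef<const uint8_t> stack_map,
                      const std::vector<Handle<mirror::Object>>& roots,
                      const std::vector<ArtMethod*>& cha_list) {  // type assumed
  ArrayRef<const uint8_t> reserved_code;
  ArrayRef<const uint8_t> reserved_data;
  // Step 1: reserve space for code and data in the target region up front.
  if (!code_cache->Reserve(self,
                           region,
                           code.size(),
                           stack_map.size(),
                           /*number_of_roots=*/ roots.size(),
                           method,
                           /*out*/ &reserved_code,
                           /*out*/ &reserved_data)) {
    return false;  // Out of memory in the code cache.
  }
  // Step 2: the final code address is known already: it sits one
  // instruction-aligned OatQuickMethodHeader past the reservation start.
  // This is the window in which JIT debug info can be generated, before
  // Commit() publishes the entry point.
  const uintptr_t code_address = reinterpret_cast<uintptr_t>(
      reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize());
  (void) code_address;  // e.g. feeds debug::MethodDebugInfo::code_address
  // Step 3: Commit() copies code and data into the reservation and
  // publishes the method. Unlike the old CommitCode(), a failure does not
  // release the reservation implicitly ...
  if (!code_cache->Commit(self,
                          region,
                          method,
                          reserved_code,
                          code,
                          reserved_data,
                          roots,
                          stack_map,
                          /*osr=*/ false,
                          /*has_should_deoptimize_flag=*/ false,
                          cha_list)) {
    // ... so step 4: hand the reservation back explicitly on failure.
    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
    return false;
  }
  return true;
}

}  // namespace art

Separating reservation from commit is what lets both compilation paths above emit debug info "after we know the code location but before we update entry-point", and it replaces the old nullptr-return-plus-ClearData() error handling with a symmetric Reserve()/Free() pair.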