author    | David Srbecky <dsrbecky@google.com> | 2018-06-13 18:55:35 +0100
committer | David Srbecky <dsrbecky@google.com> | 2018-06-13 19:03:09 +0100
commit    | 50fac06c51864f293c61ff9d0983b82698cf6dac (patch)
tree      | 7f7d09fa8aeacf4b5a255085af02970347a54cd0 /compiler/optimizing/code_generator.cc
parent    | 86decb6a3e3ebba8c3c67bfd25c12d9a85794f65 (diff)
Add Kind column to stack maps.
Add a 'Kind' column to stack maps which marks special stack map types,
and use it at run-time to add extra sanity checks.
It will also allow us to binary search the stack maps.
The column increases .oat file size by 0.2%.
Test: test-art-host-gtest-stack_map_test
Change-Id: I2a9143afa0e32bb06174604ca81a64c41fed232f
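
For context, here is a minimal sketch of why a per-entry kind tag makes binary search practical. This is not ART's actual StackMap API; the enum, struct, and helper below are hypothetical stand-ins:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical mirror of the new column: each stack map entry carries a tag.
enum class StackMapKind : uint8_t { Default, Catch, OSR, Debug };

struct StackMapEntry {
  uint32_t native_pc_offset;  // Entries are kept sorted by native PC offset.
  StackMapKind kind;
};

// With kinds stored per entry, lookup can binary search by native PC and then
// skip the special-purpose maps, instead of scanning linearly and guessing
// which duplicate entries to ignore.
const StackMapEntry* FindStackMapForPc(const std::vector<StackMapEntry>& maps,
                                       uint32_t native_pc_offset) {
  auto it = std::lower_bound(
      maps.begin(), maps.end(), native_pc_offset,
      [](const StackMapEntry& e, uint32_t pc) { return e.native_pc_offset < pc; });
  while (it != maps.end() && it->native_pc_offset == native_pc_offset) {
    if (it->kind != StackMapKind::Debug) {
      return &*it;  // First non-debug map at this PC.
    }
    ++it;  // Debug maps share a PC with the real map; skip them.
  }
  return nullptr;
}
```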
Diffstat (limited to 'compiler/optimizing/code_generator.cc')
-rw-r--r-- | compiler/optimizing/code_generator.cc | 65
1 file changed, 27 insertions, 38 deletions
```diff
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b3feb787a9..45a81cf26d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1055,7 +1055,8 @@ void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                  uint32_t dex_pc,
-                                 SlowPathCode* slow_path) {
+                                 SlowPathCode* slow_path,
+                                 bool native_debug_info) {
   if (instruction != nullptr) {
     // The code generated for some type conversions
     // may call the runtime, thus normally requiring a subsequent
@@ -1120,12 +1121,23 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
     outer_dex_pc = outer_environment->GetDexPc();
     outer_environment_size = outer_environment->Size();
   }
+
+  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
+  bool osr =
+      instruction->IsSuspendCheck() &&
+      (info != nullptr) &&
+      graph_->IsCompilingOsr() &&
+      (inlining_depth == 0);
+  StackMap::Kind kind = native_debug_info
+      ? StackMap::Kind::Debug
+      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
   stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                        native_pc,
                                        register_mask,
                                        locations->GetStackMask(),
                                        outer_environment_size,
-                                       inlining_depth);
+                                       inlining_depth,
+                                       kind);
   EmitEnvironment(environment, slow_path);
   // Record invoke info, the common case for the trampoline is super and static invokes. Only
   // record these to reduce oat file size.
@@ -1138,19 +1150,9 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
   }
   stack_map_stream->EndStackMapEntry();
 
-  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
-  if (instruction->IsSuspendCheck() &&
-      (info != nullptr) &&
-      graph_->IsCompilingOsr() &&
-      (inlining_depth == 0)) {
+  if (osr) {
     DCHECK_EQ(info->GetSuspendCheck(), instruction);
-    // We duplicate the stack map as a marker that this stack map can be an OSR entry.
-    // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
     DCHECK(info->IsIrreducible());
-    stack_map_stream->BeginStackMapEntry(
-        dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
-    EmitEnvironment(instruction->GetEnvironment(), slow_path);
-    stack_map_stream->EndStackMapEntry();
     if (kIsDebugBuild) {
       for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
         HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -1167,14 +1169,6 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
         }
       }
     }
-  } else if (kIsDebugBuild) {
-    // Ensure stack maps are unique, by checking that the native pc in the stack map
-    // last emitted is different than the native pc of the stack map just emitted.
-    size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
-    if (number_of_stack_maps > 1) {
-      DCHECK_NE(stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 1),
-                stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 2));
-    }
   }
 }
 
@@ -1196,12 +1190,11 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
-    RecordPcInfo(instruction, dex_pc, slow_path);
+    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
  }
 }
 
 void CodeGenerator::RecordCatchBlockInfo() {
-  ArenaAllocator* allocator = graph_->GetAllocator();
   StackMapStream* stack_map_stream = GetStackMapStream();
 
   for (HBasicBlock* block : *block_order_) {
@@ -1213,28 +1206,24 @@ void CodeGenerator::RecordCatchBlockInfo() {
     uint32_t num_vregs = graph_->GetNumberOfVRegs();
     uint32_t inlining_depth = 0;  // Inlining of catch blocks is not supported at the moment.
     uint32_t native_pc = GetAddressOf(block);
-    uint32_t register_mask = 0;  // Not used.
-
-    // The stack mask is not used, so we leave it empty.
-    ArenaBitVector* stack_mask =
-        ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
 
     stack_map_stream->BeginStackMapEntry(dex_pc,
                                          native_pc,
-                                         register_mask,
-                                         stack_mask,
+                                         /* register_mask */ 0,
+                                         /* stack_mask */ nullptr,
                                          num_vregs,
-                                         inlining_depth);
+                                         inlining_depth,
+                                         StackMap::Kind::Catch);
 
     HInstruction* current_phi = block->GetFirstPhi();
     for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
-      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
-        HInstruction* next_phi = current_phi->GetNext();
-        DCHECK(next_phi == nullptr ||
-               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
-            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
-        current_phi = next_phi;
-      }
+      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
+        HInstruction* next_phi = current_phi->GetNext();
+        DCHECK(next_phi == nullptr ||
+               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
+            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
+        current_phi = next_phi;
+      }
       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
         stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
```
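
The net effect in RecordPcInfo is that an OSR entry is no longer marked by emitting a duplicate stack map (the removed comment describes that trick); the single map is now tagged StackMap::Kind::OSR, and native-debug and catch-block maps get their own tags. Below is a hedged sketch of the kind of run-time check this enables, again with hypothetical names rather than ART's real runtime API:

```cpp
#include <cstdint>
#include <vector>

// Same hypothetical stand-ins as in the sketch above.
enum class StackMapKind : uint8_t { Default, Catch, OSR, Debug };

struct StackMapEntry {
  uint32_t native_pc_offset;
  uint32_t dex_pc;
  StackMapKind kind;
};

// Previously the runtime had to recognize a duplicated map as the OSR marker;
// with an explicit tag it can simply filter on the kind.
const StackMapEntry* FindOsrEntry(const std::vector<StackMapEntry>& maps,
                                  uint32_t dex_pc) {
  for (const StackMapEntry& entry : maps) {
    if (entry.dex_pc == dex_pc && entry.kind == StackMapKind::OSR) {
      return &entry;  // Exactly the map BeginStackMapEntry tagged as OSR.
    }
  }
  return nullptr;  // No OSR entry point at this dex pc.
}
```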