Diffstat (limited to 'compiler/optimizing/code_generator.cc')
-rw-r--r--  compiler/optimizing/code_generator.cc  106
1 file changed, 68 insertions, 38 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1444931b9c..cf633df496 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1090,13 +1090,6 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
}
-bool CodeGenerator::IsImplicitNullCheckAllowed(HNullCheck* null_check) const {
- return compiler_options_.GetImplicitNullChecks() &&
- // Null checks which might throw into a catch block need to save live
- // registers and therefore cannot be done implicitly.
- !null_check->CanThrowIntoCatchBlock();
-}
-
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
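Note on this hunk: the deleted predicate bundled two independent conditions. After this
change, the compiler-options check becomes an early return in MaybeRecordImplicitNullCheck
(and a direct check in GenerateNullCheck), while CanThrowIntoCatchBlock() no longer forbids
implicit null checks outright: it selects kCallOnSlowPath locations so the slow path saves
live registers (see CreateThrowingSlowPathLocations below). As a reading aid only, not code
in the tree:

    // Old gate, all-or-nothing:
    //   implicit = GetImplicitNullChecks() && !CanThrowIntoCatchBlock();
    // New behavior, split across this patch:
    //   implicit = GetImplicitNullChecks();
    //   CanThrowIntoCatchBlock() only upgrades the locations to
    //   kCallOnSlowPath, so live registers are saved on the throw path.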
@@ -1105,6 +1098,10 @@ bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
}
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
+ if (!compiler_options_.GetImplicitNullChecks()) {
+ return;
+ }
+
// If we are from a static path don't record the pc as we can't throw NPE.
// NB: having the checks here makes the code much less verbose in the arch
// specific code generators.
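With the option check hoisted to the top, the architecture-specific backends can call
MaybeRecordImplicitNullCheck unconditionally after any memory access that may fault on
null. A minimal sketch of that calling pattern, assuming an x86-style backend (the
visitor, operands, and assembler call are illustrative, not part of this diff):

    void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
      // This load faults if `obj` is null; the fault handler turns the
      // signal into a NullPointerException at the recorded pc.
      __ movl(out, Address(obj, offset));
      // Safe to call unconditionally: the helper returns early when implicit
      // null checks are disabled in the compiler options.
      codegen_->MaybeRecordImplicitNullCheck(instruction);
    }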
@@ -1123,16 +1120,35 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
// and needs to record the pc.
if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
HNullCheck* null_check = first_prev_not_move->AsNullCheck();
- if (IsImplicitNullCheckAllowed(null_check)) {
- // TODO: The parallel moves modify the environment. Their changes need to be
- // reverted otherwise the stack maps at the throw point will not be correct.
- RecordPcInfo(null_check, null_check->GetDexPc());
- }
+ // TODO: The parallel moves modify the environment. Their changes need to be
+ // reverted, otherwise the stack maps at the throw point will not be correct.
+ RecordPcInfo(null_check, null_check->GetDexPc());
+ }
+}
+
+LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
+ RegisterSet caller_saves) {
+ // Note: Using kNoCall allows the method to be treated as a leaf (and eliminates the
+ // HSuspendCheck from the entry block). However, it will still get a valid stack frame
+ // because the HNullCheck needs an environment.
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ // When throwing from a try block, we may need to retrieve Dalvik registers from
+ // physical registers, and we also need to set up the stack mask for GC. This is
+ // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
+ bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
+ if (can_throw_into_catch_block) {
+ call_kind = LocationSummary::kCallOnSlowPath;
}
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (!can_throw_into_catch_block) {
+ locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
+ }
+ DCHECK(!instruction->HasUses());
+ return locations;
}
void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
- if (IsImplicitNullCheckAllowed(instruction)) {
+ if (compiler_options_.GetImplicitNullChecks()) {
MaybeRecordStat(kImplicitNullCheckGenerated);
GenerateImplicitNullCheck(instruction);
} else {
@@ -1172,37 +1188,51 @@ void CodeGenerator::EmitParallelMoves(Location from1,
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
-void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) {
+void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
// Ensure that the call kind indication given to the register allocator is
- // coherent with the runtime call generated, and that the GC side effect is
- // set when required.
+ // coherent with the runtime call generated.
if (slow_path == nullptr) {
DCHECK(instruction->GetLocations()->WillCall())
<< "instruction->DebugName()=" << instruction->DebugName();
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
- << "instruction->DebugName()=" << instruction->DebugName()
- << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString();
} else {
DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
- // When (non-Baker) read barriers are enabled, some instructions
- // use a slow path to emit a read barrier, which does not trigger
- // GC.
- (kEmitCompilerReadBarrier &&
- !kUseBakerReadBarrier &&
- (instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsArrayGet() ||
- instruction->IsLoadClass() ||
- instruction->IsLoadString() ||
- instruction->IsInstanceOf() ||
- instruction->IsCheckCast() ||
- (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
- << "instruction->DebugName()=" << instruction->DebugName()
- << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString()
- << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ }
+
+ // Check that the GC side effect is set when required.
+ // TODO: Reverse EntrypointCanTriggerGC
+ if (EntrypointCanTriggerGC(entrypoint)) {
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()="
+ << instruction->GetSideEffects().ToString();
+ } else {
+ DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+ // When (non-Baker) read barriers are enabled, some instructions
+ // use a slow path to emit a read barrier, which does not trigger
+ // GC.
+ (kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
+ (instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet() ||
+ instruction->IsLoadClass() ||
+ instruction->IsLoadString() ||
+ instruction->IsInstanceOf() ||
+ instruction->IsCheckCast() ||
+ (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()="
+ << instruction->GetSideEffects().ToString()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ }
+ } else {
+ // The GC side effect is not required for this instruction. The instruction
+ // might still have it, for example if it also calls other entrypoints that
+ // require it.
}
// Check the coherency of leaf information.
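The entrypoint parameter is forwarded by the backends' InvokeRuntime implementations,
which are the natural callers of this validation. A hedged sketch of the expected shape;
the exact x86 signature and the EntrypointRequiresStackMap helper are assumptions here:

    void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                         HInstruction* instruction,
                                         uint32_t dex_pc,
                                         SlowPathCode* slow_path) {
      ValidateInvokeRuntime(entrypoint, instruction, slow_path);
      // Entrypoints that cannot trigger GC no longer force the instruction
      // to carry SideEffects::CanTriggerGC().
      __ fs()->call(Address::Absolute(GetThreadOffset<kX86PointerSize>(entrypoint)));
      if (EntrypointRequiresStackMap(entrypoint)) {
        RecordPcInfo(instruction, dex_pc, slow_path);
      }
    }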
@@ -1252,7 +1282,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
saved_fpu_stack_offsets_[i] = stack_offset;
@@ -1271,7 +1301,7 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
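LowToHighBits(mask) visits the indices of the set bits in mask from least to most
significant; using uint32_t for the loop variable matches the iterator's value type,
since these spill masks are 32-bit. A self-contained equivalent of the iteration idiom
(ForEachSetBit is a stand-in name, not ART's API):

    #include <cstdint>

    // Visit the index of each set bit in a 32-bit mask, lowest bit first.
    template <typename Fn>
    void ForEachSetBit(uint32_t mask, Fn&& fn) {
      while (mask != 0u) {
        uint32_t i = static_cast<uint32_t>(__builtin_ctz(mask));  // Lowest set bit.
        fn(i);
        mask &= mask - 1u;  // Clear the lowest set bit.
      }
    }

    // Usage mirroring the loops above:
    //   ForEachSetBit(fp_spills, [&](uint32_t i) { /* spill FP register i */ });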