author     android-build-team Robot <android-build-team-robot@google.com>  2017-12-03 08:32:55 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2017-12-03 08:32:55 +0000
commit     c90dfb6d83f6b51da9d5f2e7816d5d2de1ce3ecc (patch)
tree       ae55d22c73af57ff516bec440ce9a28a3320b7eb /compiler
parent     2ff6bd7ad049e2003342aaf60898fdfc68130908 (diff)
parent     f630f1f7781529cc2dd1151faf7523a53d45d6e2 (diff)
Snap for 4481641 from f630f1f7781529cc2dd1151faf7523a53d45d6e2 to pi-release
Change-Id: Ib4edfa41e843d78fecf3371fb671479342411aa5
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_mips.cc          93
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc        93
-rw-r--r--  compiler/optimizing/constant_folding.cc              2
-rw-r--r--  compiler/optimizing/inliner.cc                      57
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc       14
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.h    2
-rw-r--r--  compiler/optimizing/intrinsics.cc                   12
-rw-r--r--  compiler/optimizing/intrinsics.h                     2
-rw-r--r--  compiler/optimizing/nodes.cc                        16
9 files changed, 240 insertions(+), 51 deletions(-)
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d6922d2f3fd..6376f03b26a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3103,23 +3103,92 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+
+ bool const_index = false;
+ bool const_length = false;
+
+ if (index->IsConstant()) {
+ if (length->IsConstant()) {
+ const_index = true;
+ const_length = true;
+ } else {
+ int32_t index_value = index->AsIntConstant()->GetValue();
+ if (index_value < 0 || IsInt<16>(index_value + 1)) {
+ const_index = true;
+ }
+ }
+ } else if (length->IsConstant()) {
+ int32_t length_value = length->AsIntConstant()->GetValue();
+ if (IsUint<15>(length_value)) {
+ const_length = true;
+ }
+ }
+
+ locations->SetInAt(0, const_index
+ ? Location::ConstantLocation(index->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetInAt(1, const_length
+ ? Location::ConstantLocation(length->AsConstant())
+ : Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
- codegen_->AddSlowPath(slow_path);
-
- Register index = locations->InAt(0).AsRegister<Register>();
- Register length = locations->InAt(1).AsRegister<Register>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0 || index >= length) {
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // Nothing to be done.
+ }
+ return;
+ }
- // length is limited by the maximum positive signed 32-bit integer.
- // Unsigned comparison of length and index checks for index < 0
- // and for length <= index simultaneously.
- __ Bgeu(index, length, slow_path->GetEntryLabel());
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Register index = index_loc.AsRegister<Register>();
+ if (length == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else if (length == 1) {
+ __ Bnez(index, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsUint<15>(length)) << length;
+ __ Sltiu(TMP, index, length);
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ Register length = length_loc.AsRegister<Register>();
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else if (index == 0) {
+ __ Blez(length, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsInt<16>(index + 1)) << index;
+ __ Sltiu(TMP, length, index + 1);
+ __ Bnez(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ Register index = index_loc.AsRegister<Register>();
+ __ Bgeu(index, length, slow_path->GetEntryLabel());
+ }
+ }
}
// Temp is used for read barrier.
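Note: the rewritten VisitBoundsCheck keeps an operand as a constant only when the emitted sltiu can encode it: IsInt<16>(index_value + 1) because sltiu takes a 16-bit sign-extended immediate, and IsUint<15>(length_value) because a length with bit 15 set would sign-extend into a huge unsigned immediate and corrupt the comparison (a negative constant index is also kept, since it always throws). A minimal standalone sketch of those range checks, using simplified stand-ins for art's IsInt/IsUint templates from bit_utils.h:

    #include <cstdint>
    #include <iostream>

    // Simplified stand-ins for art's IsInt<n>/IsUint<n> templates (bit_utils.h).
    template <int kBits>
    bool FitsInt(int64_t value) {   // signed n-bit range [-2^(n-1), 2^(n-1) - 1]
      int64_t limit = int64_t{1} << (kBits - 1);
      return -limit <= value && value < limit;
    }

    template <int kBits>
    bool FitsUint(int64_t value) {  // unsigned n-bit range [0, 2^n - 1]
      return 0 <= value && value < (int64_t{1} << kBits);
    }

    int main() {
      // Constant index: emitted as `sltiu TMP, length, index + 1`, so
      // index + 1 must fit sltiu's 16-bit signed immediate.
      std::cout << FitsInt<16>(32766 + 1) << '\n';  // 1 -> stays a constant
      std::cout << FitsInt<16>(32767 + 1) << '\n';  // 0 -> kept in a register
      // Constant length: emitted as `sltiu TMP, index, length`; sltiu
      // sign-extends its immediate, so bit 15 must be clear (IsUint<15>).
      std::cout << FitsUint<15>(0x7fff) << '\n';    // 1
      std::cout << FitsUint<15>(0x8000) << '\n';    // 0
      return 0;
    }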
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ee33b3f3351..03a719f4455 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2614,23 +2614,92 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+
+ bool const_index = false;
+ bool const_length = false;
+
+ if (index->IsConstant()) {
+ if (length->IsConstant()) {
+ const_index = true;
+ const_length = true;
+ } else {
+ int32_t index_value = index->AsIntConstant()->GetValue();
+ if (index_value < 0 || IsInt<16>(index_value + 1)) {
+ const_index = true;
+ }
+ }
+ } else if (length->IsConstant()) {
+ int32_t length_value = length->AsIntConstant()->GetValue();
+ if (IsUint<15>(length_value)) {
+ const_length = true;
+ }
+ }
+
+ locations->SetInAt(0, const_index
+ ? Location::ConstantLocation(index->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetInAt(1, const_length
+ ? Location::ConstantLocation(length->AsConstant())
+ : Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
- codegen_->AddSlowPath(slow_path);
-
- GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
- GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0 || index >= length) {
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ Bc(slow_path->GetEntryLabel());
+ } else {
+ // Nothing to be done.
+ }
+ return;
+ }
- // length is limited by the maximum positive signed 32-bit integer.
- // Unsigned comparison of length and index checks for index < 0
- // and for length <= index simultaneously.
- __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ GpuRegister index = index_loc.AsRegister<GpuRegister>();
+ if (length == 0) {
+ __ Bc(slow_path->GetEntryLabel());
+ } else if (length == 1) {
+ __ Bnezc(index, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsUint<15>(length)) << length;
+ __ Sltiu(TMP, index, length);
+ __ Beqzc(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ GpuRegister length = length_loc.AsRegister<GpuRegister>();
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0) {
+ __ Bc(slow_path->GetEntryLabel());
+ } else if (index == 0) {
+ __ Blezc(length, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsInt<16>(index + 1)) << index;
+ __ Sltiu(TMP, length, index + 1);
+ __ Bnezc(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ GpuRegister index = index_loc.AsRegister<GpuRegister>();
+ __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ }
+ }
}
// Temp is used for read barrier.
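Note: the MIPS64 hunk mirrors the MIPS32 one line for line; the only differences are the GpuRegister operand type, the BoundsCheckSlowPathMIPS64 class, and the compact branch mnemonics (Bc, Bnezc, Beqzc, Blezc, Bgeuc) in place of B, Bnez, Beqz, Blez, and Bgeu.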
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb586bf096e..6f11e628eef 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -113,7 +113,7 @@ void HConstantFoldingVisitor::VisitBinaryOperation(HBinaryOperation* inst) {
void HConstantFoldingVisitor::VisitTypeConversion(HTypeConversion* inst) {
// Constant folding: replace `TypeConversion(a)' with a constant at
// compile time if `a' is a constant.
- HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
+ HConstant* constant = inst->TryStaticEvaluation();
if (constant != nullptr) {
inst->ReplaceWith(constant);
inst->GetBlock()->RemoveInstruction(inst);
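Note: VisitTypeConversion already receives its argument typed as HTypeConversion*, so the dropped AsTypeConversion() hop was a no-op. The cleanup pairs with the nodes.cc hunk below: TryStaticEvaluation() gains the 8- and 16-bit cases there, which appears to be why the int-constant conversion case can be deleted from instruction_simplifier.cc.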
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 2444e43d64f..560372e22e8 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1211,11 +1211,49 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
ReferenceTypeInfo receiver_type,
bool do_rtp,
bool cha_devirtualize) {
+ DCHECK(!invoke_instruction->IsIntrinsic());
HInstruction* return_replacement = nullptr;
uint32_t dex_pc = invoke_instruction->GetDexPc();
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
+ bool should_remove_invoke_instruction = false;
+
+ // If invoke_instruction is devirtualized to a different method, give intrinsics
+ // another chance before we try to inline it.
+ bool wrong_invoke_type = false;
+ if (invoke_instruction->GetResolvedMethod() != method &&
+ IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
+ if (invoke_instruction->IsInvokeInterface()) {
+ // We don't intrinsify an invoke-interface directly.
+ // Replace the invoke-interface with an invoke-virtual.
+ HInvokeVirtual* new_invoke = new (graph_->GetAllocator()) HInvokeVirtual(
+ graph_->GetAllocator(),
+ invoke_instruction->GetNumberOfArguments(),
+ invoke_instruction->GetType(),
+ invoke_instruction->GetDexPc(),
+ invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index.
+ method,
+ method->GetMethodIndex());
+ HInputsRef inputs = invoke_instruction->GetInputs();
+ for (size_t index = 0; index != inputs.size(); ++index) {
+ new_invoke->SetArgumentAt(index, inputs[index]);
+ }
+ invoke_instruction->GetBlock()->InsertInstructionBefore(new_invoke, invoke_instruction);
+ new_invoke->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ if (invoke_instruction->GetType() == DataType::Type::kReference) {
+ new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
+ }
+ // Run intrinsic recognizer again to set new_invoke's intrinsic.
+ IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type);
+ DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
+ return_replacement = new_invoke;
+ // invoke_instruction is replaced with new_invoke.
+ should_remove_invoke_instruction = true;
+ } else {
+ // invoke_instruction is intrinsified and stays.
+ }
+ } else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
DCHECK(!method->IsProxyMethod());
// Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
@@ -1258,26 +1296,27 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
return_replacement = new_invoke;
- // Directly check if the new virtual can be recognized as an intrinsic.
- // This way, we avoid running a full recognition pass just to detect
- // these relative rare cases.
- bool wrong_invoke_type = false;
- if (IntrinsicsRecognizer::Recognize(new_invoke, &wrong_invoke_type)) {
- MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
- }
+ // invoke_instruction is replaced with new_invoke.
+ should_remove_invoke_instruction = true;
} else {
// TODO: Consider sharpening an invoke virtual once it is not dependent on the
// compiler driver.
return false;
}
+ } else {
+ // invoke_instruction is inlined.
+ should_remove_invoke_instruction = true;
}
+
if (cha_devirtualize) {
AddCHAGuard(invoke_instruction, dex_pc, cursor, bb_cursor);
}
if (return_replacement != nullptr) {
invoke_instruction->ReplaceWith(return_replacement);
}
- invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ if (should_remove_invoke_instruction) {
+ invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ }
FixUpReturnReferenceType(method, return_replacement);
if (do_rtp && ReturnTypeMoreSpecific(invoke_instruction, return_replacement)) {
// Actual return value has a more specific type than the method's declared
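Note: the behavioral change in this hunk is that a devirtualized call now gets an intrinsics check before any inlining attempt, and an invoke-interface that turns out to be an intrinsic is first rewritten as an invoke-virtual (interface calls are not intrinsified directly). A compressed, compilable sketch of the new control flow, with the HInvoke/ArtMethod machinery replaced by hypothetical stub types:

    enum class Kind { kVirtual, kInterface, kDirect };
    struct Invoke { Kind kind; const void* resolved_method; };

    // Hypothetical stubs for the recognizer and the graph surgery.
    static bool Recognize(Invoke*, const void*) { return true; }           // stub
    static void RewriteAsVirtual(Invoke* i) { i->kind = Kind::kVirtual; }  // stub
    static bool TryBuildAndInline(Invoke*, const void*) { return false; }  // stub

    // Returns whether the original invoke should be removed afterwards,
    // mirroring should_remove_invoke_instruction in the real hunk.
    static bool TryInlineAndReplaceSketch(Invoke* invoke, const void* target) {
      if (invoke->resolved_method != target && Recognize(invoke, target)) {
        if (invoke->kind == Kind::kInterface) {
          RewriteAsVirtual(invoke);  // a new invoke-virtual replaces the original
          return true;
        }
        return false;                // invoke stays, now flagged as an intrinsic
      }
      if (TryBuildAndInline(invoke, target)) {
        return true;                 // body inlined; the call is dead
      }
      // Otherwise the pre-existing invoke-interface -> invoke-virtual
      // sharpening path still applies.
      return true;
    }

    int main() {
      Invoke call{Kind::kInterface, nullptr};
      return TryInlineAndReplaceSketch(&call, &call) ? 0 : 1;
    }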
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index bd20d28992e..089e41b4f4e 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1168,16 +1168,6 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
RecordSimplification();
return;
}
- } else if (input->IsIntConstant()) {
- // Try to eliminate type conversion on int constant whose value falls into
- // the range of the result type.
- int32_t value = input->AsIntConstant()->GetValue();
- if (DataType::IsTypeConversionImplicit(value, result_type)) {
- instruction->ReplaceWith(input);
- instruction->GetBlock()->RemoveInstruction(instruction);
- RecordSimplification();
- return;
- }
}
}
@@ -2045,7 +2035,9 @@ void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) {
optimizations.SetArgumentIsString();
} else if (kUseReadBarrier) {
DCHECK(instruction->GetResolvedMethod() != nullptr);
- DCHECK(instruction->GetResolvedMethod()->GetDeclaringClass()->IsStringClass());
+ DCHECK(instruction->GetResolvedMethod()->GetDeclaringClass()->IsStringClass() ||
+ // Object.equals() can be devirtualized to String.equals().
+ instruction->GetResolvedMethod()->GetDeclaringClass()->IsObjectClass());
Runtime* runtime = Runtime::Current();
// For AOT, we always assume that the boot image shall contain the String.class and
// we do not need a read barrier for boot image classes as they are non-moveable.
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 22cc2efc1ae..6cb8affe85c 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -30,7 +30,7 @@ namespace mips {
class InstructionSimplifierMips : public HOptimization {
public:
InstructionSimplifierMips(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_mips", stats),
+ : HOptimization(graph, kInstructionSimplifierMipsPassName, stats),
codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 77199242f59..6928b70df7b 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -137,7 +137,7 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
case kVirtual:
// Call might be devirtualized.
- return (invoke_type == kVirtual || invoke_type == kDirect);
+ return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface);
case kSuper:
case kInterface:
@@ -148,8 +148,12 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
UNREACHABLE();
}
-bool IntrinsicsRecognizer::Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type) {
- ArtMethod* art_method = invoke->GetResolvedMethod();
+bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
+ ArtMethod* art_method,
+ /*out*/ bool* wrong_invoke_type) {
+ if (art_method == nullptr) {
+ art_method = invoke->GetResolvedMethod();
+ }
*wrong_invoke_type = false;
if (art_method == nullptr || !art_method->IsIntrinsic()) {
return false;
@@ -182,7 +186,7 @@ void IntrinsicsRecognizer::Run() {
HInstruction* inst = inst_it.Current();
if (inst->IsInvoke()) {
bool wrong_invoke_type = false;
- if (Recognize(inst->AsInvoke(), &wrong_invoke_type)) {
+ if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
} else if (wrong_invoke_type) {
LOG(WARNING)
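Note: the explicit ArtMethod* parameter lets the inliner probe the devirtualized target instead of the method resolved on the invoke; passing nullptr preserves the old behavior. The two call-site shapes seen in this change (devirtualized_target is a hypothetical name for the inliner's candidate):

    bool wrong_invoke_type = false;
    // Full recognition pass: no separate target, fall back to the invoke's own method.
    IntrinsicsRecognizer::Recognize(invoke, /* art_method */ nullptr, &wrong_invoke_type);
    // Inliner: check the devirtualized target before trying to inline it.
    IntrinsicsRecognizer::Recognize(invoke, devirtualized_target, &wrong_invoke_type);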
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index c07a99032ac..62991435c7a 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -47,7 +47,7 @@ class IntrinsicsRecognizer : public HOptimization {
// Static helper that recognizes intrinsic call. Returns true on success.
// If it fails due to invoke type mismatch, wrong_invoke_type is set.
// Useful to recognize intrinsics on individual calls outside this full pass.
- static bool Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type)
+ static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type)
REQUIRES_SHARED(Locks::mutator_lock_);
static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fa580d9bed6..4a9da7ece1a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1403,6 +1403,14 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
if (GetInput()->IsIntConstant()) {
int32_t value = GetInput()->AsIntConstant()->GetValue();
switch (GetResultType()) {
+ case DataType::Type::kInt8:
+ return graph->GetIntConstant(static_cast<int8_t>(value), GetDexPc());
+ case DataType::Type::kUint8:
+ return graph->GetIntConstant(static_cast<uint8_t>(value), GetDexPc());
+ case DataType::Type::kInt16:
+ return graph->GetIntConstant(static_cast<int16_t>(value), GetDexPc());
+ case DataType::Type::kUint16:
+ return graph->GetIntConstant(static_cast<uint16_t>(value), GetDexPc());
case DataType::Type::kInt64:
return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc());
case DataType::Type::kFloat32:
@@ -1415,6 +1423,14 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
} else if (GetInput()->IsLongConstant()) {
int64_t value = GetInput()->AsLongConstant()->GetValue();
switch (GetResultType()) {
+ case DataType::Type::kInt8:
+ return graph->GetIntConstant(static_cast<int8_t>(value), GetDexPc());
+ case DataType::Type::kUint8:
+ return graph->GetIntConstant(static_cast<uint8_t>(value), GetDexPc());
+ case DataType::Type::kInt16:
+ return graph->GetIntConstant(static_cast<int16_t>(value), GetDexPc());
+ case DataType::Type::kUint16:
+ return graph->GetIntConstant(static_cast<uint16_t>(value), GetDexPc());
case DataType::Type::kInt32:
return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc());
case DataType::Type::kFloat32:
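Note: the new cases fold conversions to 8- and 16-bit types with a plain static_cast and store the result back as a 32-bit int constant (HIR has no narrower constant nodes). A standalone check of what those casts produce — two's-complement truncation, guaranteed since C++20 and universal on ART targets:

    #include <cstdint>
    #include <iostream>

    int main() {
      int32_t value = 0x12345;  // 74565
      // The same casts the new switch cases perform, widened back to int32_t
      // exactly as graph->GetIntConstant() would store them.
      std::cout << static_cast<int32_t>(static_cast<int8_t>(value))   << '\n';  // 69   (0x45)
      std::cout << static_cast<int32_t>(static_cast<uint8_t>(value))  << '\n';  // 69   (0x45)
      std::cout << static_cast<int32_t>(static_cast<int16_t>(value))  << '\n';  // 9029 (0x2345)
      std::cout << static_cast<int32_t>(static_cast<uint16_t>(value)) << '\n';  // 9029 (0x2345)
      int32_t neg = -1;
      std::cout << static_cast<int32_t>(static_cast<uint8_t>(neg))    << '\n';  // 255
      std::cout << static_cast<int32_t>(static_cast<int8_t>(neg))     << '\n';  // -1
      return 0;
    }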