summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/optimizing/code_generator_mips.cc93
-rw-r--r--compiler/optimizing/code_generator_mips64.cc93
-rw-r--r--compiler/optimizing/constant_folding.cc2
-rw-r--r--compiler/optimizing/inliner.cc57
-rw-r--r--compiler/optimizing/instruction_simplifier.cc14
-rw-r--r--compiler/optimizing/instruction_simplifier_mips.h2
-rw-r--r--compiler/optimizing/intrinsics.cc12
-rw-r--r--compiler/optimizing/intrinsics.h2
-rw-r--r--compiler/optimizing/nodes.cc16
-rw-r--r--openjdkjvmti/events.cc36
-rw-r--r--openjdkjvmti/events.h12
-rw-r--r--runtime/art_method.cc10
-rw-r--r--runtime/class_linker.cc2
-rw-r--r--runtime/debugger.cc6
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc6
-rw-r--r--runtime/gc/heap.cc25
-rw-r--r--runtime/gc/heap.h3
-rw-r--r--runtime/interpreter/mterp/arm/entry.S3
-rw-r--r--runtime/interpreter/mterp/arm/header.S1
-rw-r--r--runtime/interpreter/mterp/arm64/entry.S3
-rw-r--r--runtime/interpreter/mterp/arm64/header.S1
-rw-r--r--runtime/interpreter/mterp/cfi_asm_support.h31
-rw-r--r--runtime/interpreter/mterp/mips/entry.S2
-rw-r--r--runtime/interpreter/mterp/mips/footer.S1
-rw-r--r--runtime/interpreter/mterp/mips/header.S1
-rw-r--r--runtime/interpreter/mterp/mips64/entry.S1
-rw-r--r--runtime/interpreter/mterp/mips64/header.S1
-rw-r--r--runtime/interpreter/mterp/mterp.cc10
-rw-r--r--runtime/interpreter/mterp/out/mterp_arm.S4
-rw-r--r--runtime/interpreter/mterp/out/mterp_arm64.S4
-rw-r--r--runtime/interpreter/mterp/out/mterp_mips.S4
-rw-r--r--runtime/interpreter/mterp/out/mterp_mips64.S2
-rw-r--r--runtime/interpreter/mterp/out/mterp_x86.S4
-rw-r--r--runtime/interpreter/mterp/out/mterp_x86_64.S4
-rw-r--r--runtime/interpreter/mterp/x86/entry.S3
-rw-r--r--runtime/interpreter/mterp/x86/header.S1
-rw-r--r--runtime/interpreter/mterp/x86_64/entry.S3
-rw-r--r--runtime/interpreter/mterp/x86_64/header.S1
-rw-r--r--runtime/jit/profile_compilation_info.cc6
-rw-r--r--runtime/native/dalvik_system_DexFile.cc20
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc48
-rw-r--r--runtime/runtime.cc1
-rw-r--r--runtime/runtime.h12
-rw-r--r--runtime/thread.cc1
-rw-r--r--runtime/verifier/method_verifier.cc27
-rw-r--r--runtime/verifier/reg_type-inl.h14
-rw-r--r--runtime/verifier/reg_type.cc24
-rw-r--r--runtime/verifier/reg_type.h46
-rw-r--r--runtime/verifier/reg_type_cache-inl.h3
-rw-r--r--runtime/verifier/reg_type_cache.cc80
-rw-r--r--runtime/verifier/reg_type_cache.h7
-rw-r--r--runtime/verifier/reg_type_test.cc362
-rwxr-xr-xtest/071-dexfile-get-static-size/build30
-rw-r--r--test/071-dexfile-get-static-size/expected.txt4
-rw-r--r--test/071-dexfile-get-static-size/info.txt3
-rw-r--r--test/071-dexfile-get-static-size/src/Main.java62
-rw-r--r--test/071-dexfile-get-static-size/test1.dexbin0 -> 1864 bytes
-rw-r--r--test/071-dexfile-get-static-size/test2.dexbin0 -> 1264 bytes
-rw-r--r--test/099-vmdebug/expected.txt6
-rw-r--r--test/099-vmdebug/info.txt2
-rw-r--r--test/099-vmdebug/src/Main.java61
-rw-r--r--test/638-checker-inline-cache-intrinsic/expected.txt1
-rw-r--r--test/638-checker-inline-cache-intrinsic/info.txt1
-rw-r--r--test/638-checker-inline-cache-intrinsic/run17
-rw-r--r--test/638-checker-inline-cache-intrinsic/src/Main.java95
-rwxr-xr-xtest/667-jit-jni-stub/run3
-rw-r--r--test/711-checker-type-conversion/src/Main.java34
-rwxr-xr-xtest/testrunner/testrunner.py18
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Parser.java2
-rw-r--r--tools/libjdwp_art_failures.txt5
70 files changed, 1307 insertions, 164 deletions
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d6922d2f3f..6376f03b26 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3103,23 +3103,92 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+
+ bool const_index = false;
+ bool const_length = false;
+
+ if (index->IsConstant()) {
+ if (length->IsConstant()) {
+ const_index = true;
+ const_length = true;
+ } else {
+ int32_t index_value = index->AsIntConstant()->GetValue();
+ if (index_value < 0 || IsInt<16>(index_value + 1)) {
+ const_index = true;
+ }
+ }
+ } else if (length->IsConstant()) {
+ int32_t length_value = length->AsIntConstant()->GetValue();
+ if (IsUint<15>(length_value)) {
+ const_length = true;
+ }
+ }
+
+ locations->SetInAt(0, const_index
+ ? Location::ConstantLocation(index->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetInAt(1, const_length
+ ? Location::ConstantLocation(length->AsConstant())
+ : Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
- codegen_->AddSlowPath(slow_path);
-
- Register index = locations->InAt(0).AsRegister<Register>();
- Register length = locations->InAt(1).AsRegister<Register>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0 || index >= length) {
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // Nothing to be done.
+ }
+ return;
+ }
- // length is limited by the maximum positive signed 32-bit integer.
- // Unsigned comparison of length and index checks for index < 0
- // and for length <= index simultaneously.
- __ Bgeu(index, length, slow_path->GetEntryLabel());
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Register index = index_loc.AsRegister<Register>();
+ if (length == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else if (length == 1) {
+ __ Bnez(index, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsUint<15>(length)) << length;
+ __ Sltiu(TMP, index, length);
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ Register length = length_loc.AsRegister<Register>();
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else if (index == 0) {
+ __ Blez(length, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsInt<16>(index + 1)) << index;
+ __ Sltiu(TMP, length, index + 1);
+ __ Bnez(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ Register index = index_loc.AsRegister<Register>();
+ __ Bgeu(index, length, slow_path->GetEntryLabel());
+ }
+ }
}
// Temp is used for read barrier.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ee33b3f335..03a719f445 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2614,23 +2614,92 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+
+ bool const_index = false;
+ bool const_length = false;
+
+ if (index->IsConstant()) {
+ if (length->IsConstant()) {
+ const_index = true;
+ const_length = true;
+ } else {
+ int32_t index_value = index->AsIntConstant()->GetValue();
+ if (index_value < 0 || IsInt<16>(index_value + 1)) {
+ const_index = true;
+ }
+ }
+ } else if (length->IsConstant()) {
+ int32_t length_value = length->AsIntConstant()->GetValue();
+ if (IsUint<15>(length_value)) {
+ const_length = true;
+ }
+ }
+
+ locations->SetInAt(0, const_index
+ ? Location::ConstantLocation(index->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetInAt(1, const_length
+ ? Location::ConstantLocation(length->AsConstant())
+ : Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
- codegen_->AddSlowPath(slow_path);
-
- GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
- GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0 || index >= length) {
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ Bc(slow_path->GetEntryLabel());
+ } else {
+ // Nothing to be done.
+ }
+ return;
+ }
- // length is limited by the maximum positive signed 32-bit integer.
- // Unsigned comparison of length and index checks for index < 0
- // and for length <= index simultaneously.
- __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ GpuRegister index = index_loc.AsRegister<GpuRegister>();
+ if (length == 0) {
+ __ Bc(slow_path->GetEntryLabel());
+ } else if (length == 1) {
+ __ Bnezc(index, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsUint<15>(length)) << length;
+ __ Sltiu(TMP, index, length);
+ __ Beqzc(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ GpuRegister length = length_loc.AsRegister<GpuRegister>();
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0) {
+ __ Bc(slow_path->GetEntryLabel());
+ } else if (index == 0) {
+ __ Blezc(length, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsInt<16>(index + 1)) << index;
+ __ Sltiu(TMP, length, index + 1);
+ __ Bnezc(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ GpuRegister index = index_loc.AsRegister<GpuRegister>();
+ __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ }
+ }
}
// Temp is used for read barrier.
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb586bf096..6f11e628ee 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -113,7 +113,7 @@ void HConstantFoldingVisitor::VisitBinaryOperation(HBinaryOperation* inst) {
void HConstantFoldingVisitor::VisitTypeConversion(HTypeConversion* inst) {
// Constant folding: replace `TypeConversion(a)' with a constant at
// compile time if `a' is a constant.
- HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
+ HConstant* constant = inst->TryStaticEvaluation();
if (constant != nullptr) {
inst->ReplaceWith(constant);
inst->GetBlock()->RemoveInstruction(inst);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 2444e43d64..560372e22e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1211,11 +1211,49 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
ReferenceTypeInfo receiver_type,
bool do_rtp,
bool cha_devirtualize) {
+ DCHECK(!invoke_instruction->IsIntrinsic());
HInstruction* return_replacement = nullptr;
uint32_t dex_pc = invoke_instruction->GetDexPc();
HInstruction* cursor = invoke_instruction->GetPrevious();
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
- if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
+ bool should_remove_invoke_instruction = false;
+
+ // If invoke_instruction is devirtualized to a different method, give intrinsics
+ // another chance before we try to inline it.
+ bool wrong_invoke_type = false;
+ if (invoke_instruction->GetResolvedMethod() != method &&
+ IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
+ if (invoke_instruction->IsInvokeInterface()) {
+ // We don't intrinsify an invoke-interface directly.
+ // Replace the invoke-interface with an invoke-virtual.
+ HInvokeVirtual* new_invoke = new (graph_->GetAllocator()) HInvokeVirtual(
+ graph_->GetAllocator(),
+ invoke_instruction->GetNumberOfArguments(),
+ invoke_instruction->GetType(),
+ invoke_instruction->GetDexPc(),
+ invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index.
+ method,
+ method->GetMethodIndex());
+ HInputsRef inputs = invoke_instruction->GetInputs();
+ for (size_t index = 0; index != inputs.size(); ++index) {
+ new_invoke->SetArgumentAt(index, inputs[index]);
+ }
+ invoke_instruction->GetBlock()->InsertInstructionBefore(new_invoke, invoke_instruction);
+ new_invoke->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
+ if (invoke_instruction->GetType() == DataType::Type::kReference) {
+ new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
+ }
+ // Run intrinsic recognizer again to set new_invoke's intrinsic.
+ IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type);
+ DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
+ return_replacement = new_invoke;
+ // invoke_instruction is replaced with new_invoke.
+ should_remove_invoke_instruction = true;
+ } else {
+ // invoke_instruction is intrinsified and stays.
+ }
+ } else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
DCHECK(!method->IsProxyMethod());
// Turn an invoke-interface into an invoke-virtual. An invoke-virtual is always
@@ -1258,26 +1296,27 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
return_replacement = new_invoke;
- // Directly check if the new virtual can be recognized as an intrinsic.
- // This way, we avoid running a full recognition pass just to detect
- // these relative rare cases.
- bool wrong_invoke_type = false;
- if (IntrinsicsRecognizer::Recognize(new_invoke, &wrong_invoke_type)) {
- MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
- }
+ // invoke_instruction is replaced with new_invoke.
+ should_remove_invoke_instruction = true;
} else {
// TODO: Consider sharpening an invoke virtual once it is not dependent on the
// compiler driver.
return false;
}
+ } else {
+ // invoke_instruction is inlined.
+ should_remove_invoke_instruction = true;
}
+
if (cha_devirtualize) {
AddCHAGuard(invoke_instruction, dex_pc, cursor, bb_cursor);
}
if (return_replacement != nullptr) {
invoke_instruction->ReplaceWith(return_replacement);
}
- invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ if (should_remove_invoke_instruction) {
+ invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction);
+ }
FixUpReturnReferenceType(method, return_replacement);
if (do_rtp && ReturnTypeMoreSpecific(invoke_instruction, return_replacement)) {
// Actual return value has a more specific type than the method's declared
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index bd20d28992..089e41b4f4 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1168,16 +1168,6 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
RecordSimplification();
return;
}
- } else if (input->IsIntConstant()) {
- // Try to eliminate type conversion on int constant whose value falls into
- // the range of the result type.
- int32_t value = input->AsIntConstant()->GetValue();
- if (DataType::IsTypeConversionImplicit(value, result_type)) {
- instruction->ReplaceWith(input);
- instruction->GetBlock()->RemoveInstruction(instruction);
- RecordSimplification();
- return;
- }
}
}
@@ -2045,7 +2035,9 @@ void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) {
optimizations.SetArgumentIsString();
} else if (kUseReadBarrier) {
DCHECK(instruction->GetResolvedMethod() != nullptr);
- DCHECK(instruction->GetResolvedMethod()->GetDeclaringClass()->IsStringClass());
+ DCHECK(instruction->GetResolvedMethod()->GetDeclaringClass()->IsStringClass() ||
+ // Object.equals() can be devirtualized to String.equals().
+ instruction->GetResolvedMethod()->GetDeclaringClass()->IsObjectClass());
Runtime* runtime = Runtime::Current();
// For AOT, we always assume that the boot image shall contain the String.class and
// we do not need a read barrier for boot image classes as they are non-moveable.
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 22cc2efc1a..6cb8affe85 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -30,7 +30,7 @@ namespace mips {
class InstructionSimplifierMips : public HOptimization {
public:
InstructionSimplifierMips(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_mips", stats),
+ : HOptimization(graph, kInstructionSimplifierMipsPassName, stats),
codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 77199242f5..6928b70df7 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -137,7 +137,7 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
case kVirtual:
// Call might be devirtualized.
- return (invoke_type == kVirtual || invoke_type == kDirect);
+ return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface);
case kSuper:
case kInterface:
@@ -148,8 +148,12 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
UNREACHABLE();
}
-bool IntrinsicsRecognizer::Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type) {
- ArtMethod* art_method = invoke->GetResolvedMethod();
+bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
+ ArtMethod* art_method,
+ /*out*/ bool* wrong_invoke_type) {
+ if (art_method == nullptr) {
+ art_method = invoke->GetResolvedMethod();
+ }
*wrong_invoke_type = false;
if (art_method == nullptr || !art_method->IsIntrinsic()) {
return false;
@@ -182,7 +186,7 @@ void IntrinsicsRecognizer::Run() {
HInstruction* inst = inst_it.Current();
if (inst->IsInvoke()) {
bool wrong_invoke_type = false;
- if (Recognize(inst->AsInvoke(), &wrong_invoke_type)) {
+ if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
} else if (wrong_invoke_type) {
LOG(WARNING)
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index c07a99032a..62991435c7 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -47,7 +47,7 @@ class IntrinsicsRecognizer : public HOptimization {
// Static helper that recognizes intrinsic call. Returns true on success.
// If it fails due to invoke type mismatch, wrong_invoke_type is set.
// Useful to recognize intrinsics on individual calls outside this full pass.
- static bool Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type)
+ static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type)
REQUIRES_SHARED(Locks::mutator_lock_);
static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fa580d9bed..4a9da7ece1 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1403,6 +1403,14 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
if (GetInput()->IsIntConstant()) {
int32_t value = GetInput()->AsIntConstant()->GetValue();
switch (GetResultType()) {
+ case DataType::Type::kInt8:
+ return graph->GetIntConstant(static_cast<int8_t>(value), GetDexPc());
+ case DataType::Type::kUint8:
+ return graph->GetIntConstant(static_cast<uint8_t>(value), GetDexPc());
+ case DataType::Type::kInt16:
+ return graph->GetIntConstant(static_cast<int16_t>(value), GetDexPc());
+ case DataType::Type::kUint16:
+ return graph->GetIntConstant(static_cast<uint16_t>(value), GetDexPc());
case DataType::Type::kInt64:
return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc());
case DataType::Type::kFloat32:
@@ -1415,6 +1423,14 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
} else if (GetInput()->IsLongConstant()) {
int64_t value = GetInput()->AsLongConstant()->GetValue();
switch (GetResultType()) {
+ case DataType::Type::kInt8:
+ return graph->GetIntConstant(static_cast<int8_t>(value), GetDexPc());
+ case DataType::Type::kUint8:
+ return graph->GetIntConstant(static_cast<uint8_t>(value), GetDexPc());
+ case DataType::Type::kInt16:
+ return graph->GetIntConstant(static_cast<int16_t>(value), GetDexPc());
+ case DataType::Type::kUint16:
+ return graph->GetIntConstant(static_cast<uint16_t>(value), GetDexPc());
case DataType::Type::kInt32:
return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc());
case DataType::Type::kFloat32:
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 05f9125df9..912e7548db 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -139,7 +139,9 @@ EventMask* EventMasks::GetEventMaskOrNull(art::Thread* thread) {
}
-void EventMasks::EnableEvent(art::Thread* thread, ArtJvmtiEvent event) {
+void EventMasks::EnableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event) {
+ DCHECK_EQ(&env->event_masks, this);
+ env->event_info_mutex_.AssertExclusiveHeld(art::Thread::Current());
DCHECK(EventMask::EventIsInRange(event));
GetEventMask(thread).Set(event);
if (thread != nullptr) {
@@ -147,7 +149,9 @@ void EventMasks::EnableEvent(art::Thread* thread, ArtJvmtiEvent event) {
}
}
-void EventMasks::DisableEvent(art::Thread* thread, ArtJvmtiEvent event) {
+void EventMasks::DisableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event) {
+ DCHECK_EQ(&env->event_masks, this);
+ env->event_info_mutex_.AssertExclusiveHeld(art::Thread::Current());
DCHECK(EventMask::EventIsInRange(event));
GetEventMask(thread).Set(event, false);
if (thread != nullptr) {
@@ -1134,20 +1138,28 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
return ERR(MUST_POSSESS_CAPABILITY);
}
- bool old_state = global_mask.Test(event);
+ bool old_state;
+ bool new_state;
- if (mode == JVMTI_ENABLE) {
- env->event_masks.EnableEvent(thread, event);
- global_mask.Set(event);
- } else {
- DCHECK_EQ(mode, JVMTI_DISABLE);
+ {
+ // Change the event masks atomically.
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, envs_lock_);
+ art::WriterMutexLock mu_env_info(self, env->event_info_mutex_);
+ old_state = global_mask.Test(event);
+ if (mode == JVMTI_ENABLE) {
+ env->event_masks.EnableEvent(env, thread, event);
+ global_mask.Set(event);
+ new_state = true;
+ } else {
+ DCHECK_EQ(mode, JVMTI_DISABLE);
- env->event_masks.DisableEvent(thread, event);
- RecalculateGlobalEventMask(event);
+ env->event_masks.DisableEvent(env, thread, event);
+ RecalculateGlobalEventMaskLocked(event);
+ new_state = global_mask.Test(event);
+ }
}
- bool new_state = global_mask.Test(event);
-
// Handle any special work required for the event type.
if (new_state != old_state) {
HandleEventType(event, mode == JVMTI_ENABLE);
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index b05136661b..7bdd9a58ec 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -149,8 +149,16 @@ struct EventMasks {
EventMask& GetEventMask(art::Thread* thread);
EventMask* GetEventMaskOrNull(art::Thread* thread);
- void EnableEvent(art::Thread* thread, ArtJvmtiEvent event);
- void DisableEvent(art::Thread* thread, ArtJvmtiEvent event);
+ // Circular dependencies mean we cannot see the definition of ArtJvmTiEnv so the mutex is simply
+ // asserted in the function.
+ // Note that the 'env' passed in must be the same env this EventMasks is associated with.
+ void EnableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event);
+ // REQUIRES(env->event_info_mutex_);
+ // Circular dependencies mean we cannot see the definition of ArtJvmTiEnv so the mutex is simply
+ // asserted in the function.
+ // Note that the 'env' passed in must be the same env this EventMasks is associated with.
+ void DisableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event);
+ // REQUIRES(env->event_info_mutex_);
bool IsEnabledAnywhere(ArtJvmtiEvent event);
// Make any changes to event masks needed for the given capability changes. If caps_added is true
// then caps is all the newly set capabilities of the jvmtiEnv. If it is false then caps is the
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index bdbc4509f3..43a51391b9 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -692,13 +692,15 @@ void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass());
// If the entry point of the method we are copying from is from JIT code, we just
- // put the entry point of the new method to interpreter. We could set the entry point
- // to the JIT code, but this would require taking the JIT code cache lock to notify
- // it, which we do not want at this level.
+ // put the entry point of the new method to interpreter or GenericJNI. We could set
+ // the entry point to the JIT code, but this would require taking the JIT code cache
+ // lock to notify it, which we do not want at this level.
Runtime* runtime = Runtime::Current();
if (runtime->UseJitCompilation()) {
if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) {
- SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
+ SetEntryPointFromQuickCompiledCodePtrSize(
+ src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
+ image_pointer_size);
}
}
// Clear the profiling info for the same reasons as the JIT code.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ccf431969a..e5bb7862cb 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4515,7 +4515,7 @@ std::string ClassLinker::GetDescriptorForProxy(ObjPtr<mirror::Class> proxy_class
void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
// Create constructor for Proxy that must initialize the method.
- CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 23u);
+ CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 21u);
// Find the <init>(InvocationHandler)V method. The exact method offset varies depending
// on which front-end compiler was used to build the libcore DEX files.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1dcd935eea..13029fb958 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -963,7 +963,11 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
}
VariableSizedHandleScope hs(Thread::Current());
std::vector<Handle<mirror::Object>> raw_instances;
- Runtime::Current()->GetHeap()->GetInstances(hs, hs.NewHandle(c), max_count, raw_instances);
+ Runtime::Current()->GetHeap()->GetInstances(hs,
+ hs.NewHandle(c),
+ /* use_is_assignable_from */ false,
+ max_count,
+ raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
instances->push_back(gRegistry->Add(raw_instances[i].Get()));
}
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index f756312983..238ada94ff 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -41,6 +41,12 @@ static void StoreObjectInBss(ArtMethod* outer_method,
static_assert(sizeof(GcRoot<mirror::String>) == sizeof(GcRoot<mirror::Object>), "Size check.");
DCHECK_NE(bss_offset, IndexBssMappingLookup::npos);
DCHECK_ALIGNED(bss_offset, sizeof(GcRoot<mirror::Object>));
+ if (UNLIKELY(!oat_file->IsExecutable())) {
+ // There are situations where we execute bytecode tied to an oat file opened
+ // as non-executable (i.e. the AOT-compiled code cannot be executed) and we
+ // can JIT that bytecode and get here without the .bss being mmapped.
+ return;
+ }
GcRoot<mirror::Object>* slot = reinterpret_cast<GcRoot<mirror::Object>*>(
const_cast<uint8_t*>(oat_file->BssBegin() + bss_offset));
DCHECK_GE(slot, oat_file->GetBssGcRoots().data());
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9f6266612a..f29ae92e2d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1796,19 +1796,25 @@ uint64_t Heap::GetBytesAllocatedEver() const {
return GetBytesFreedEver() + GetBytesAllocated();
}
+// Check whether the given object is an instance of the given class.
+static bool MatchesClass(mirror::Object* obj,
+ Handle<mirror::Class> h_class,
+ bool use_is_assignable_from) REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Class* instance_class = obj->GetClass();
+ CHECK(instance_class != nullptr);
+ ObjPtr<mirror::Class> klass = h_class.Get();
+ if (use_is_assignable_from) {
+ return klass != nullptr && klass->IsAssignableFrom(instance_class);
+ }
+ return instance_class == klass;
+}
+
void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
bool use_is_assignable_from,
uint64_t* counts) {
auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Class* instance_class = obj->GetClass();
- CHECK(instance_class != nullptr);
for (size_t i = 0; i < classes.size(); ++i) {
- ObjPtr<mirror::Class> klass = classes[i].Get();
- if (use_is_assignable_from) {
- if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
- ++counts[i];
- }
- } else if (instance_class == klass) {
+ if (MatchesClass(obj, classes[i], use_is_assignable_from)) {
++counts[i];
}
}
@@ -1818,11 +1824,12 @@ void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
void Heap::GetInstances(VariableSizedHandleScope& scope,
Handle<mirror::Class> h_class,
+ bool use_is_assignable_from,
int32_t max_count,
std::vector<Handle<mirror::Object>>& instances) {
DCHECK_GE(max_count, 0);
auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (obj->GetClass() == h_class.Get()) {
+ if (MatchesClass(obj, h_class, use_is_assignable_from)) {
if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
instances.push_back(scope.NewHandle(obj));
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 4d7424c7ef..ac0d82e12a 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -346,9 +346,10 @@ class Heap {
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Implements JDWP RT_Instances.
+ // Implements VMDebug.getInstancesOfClasses and JDWP RT_Instances.
void GetInstances(VariableSizedHandleScope& scope,
Handle<mirror::Class> c,
+ bool use_is_assignable_from,
int32_t max_count,
std::vector<Handle<mirror::Object>>& instances)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
index de617a90d7..df4bcc66f3 100644
--- a/runtime/interpreter/mterp/arm/entry.S
+++ b/runtime/interpreter/mterp/arm/entry.S
@@ -23,7 +23,7 @@
/*
* On entry:
* r0 Thread* self/
- * r1 code_item
+ * r1 insns_
* r2 ShadowFrame
* r3 JValue* result_register
*
@@ -56,6 +56,7 @@ ENTRY ExecuteMterpImpl
VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
index 51c2ba4c03..64ab9efa19 100644
--- a/runtime/interpreter/mterp/arm/header.S
+++ b/runtime/interpreter/mterp/arm/header.S
@@ -85,6 +85,7 @@ unspecified registers or condition codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
index f3d40ff6f7..8d61210be8 100644
--- a/runtime/interpreter/mterp/arm64/entry.S
+++ b/runtime/interpreter/mterp/arm64/entry.S
@@ -20,7 +20,7 @@
* Interpreter entry point.
* On entry:
* x0 Thread* self/
- * x1 code_item
+ * x1 insns_
* x2 ShadowFrame
* x3 JValue* result_register
*
@@ -46,6 +46,7 @@ ENTRY ExecuteMterpImpl
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ .cfi_register DPC_PSEUDO_REG, xPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
index 47f12d2f5d..9261b770d6 100644
--- a/runtime/interpreter/mterp/arm64/header.S
+++ b/runtime/interpreter/mterp/arm64/header.S
@@ -87,6 +87,7 @@ codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
diff --git a/runtime/interpreter/mterp/cfi_asm_support.h b/runtime/interpreter/mterp/cfi_asm_support.h
new file mode 100644
index 0000000000..a97e153993
--- /dev/null
+++ b/runtime/interpreter/mterp/cfi_asm_support.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_MTERP_CFI_ASM_SUPPORT_H_
+#define ART_RUNTIME_INTERPRETER_MTERP_CFI_ASM_SUPPORT_H_
+
+/*
+ * To keep track of the Dalvik PC, assign it a magic register number that
+ * won't be confused with a physical register. Then, standard .cfi directives
+ * will track the location of it so that it may be extracted during a stack
+ * unwind.
+ *
+ * The Dalvik PC will be in either a physical register, or the frame.
+ * Encoded from the ASCII string " DEX" -> 0x20 0x44 0x45 0x58
+ */
+#define DPC_PSEUDO_REG 0x20444558
+
+#endif // ART_RUNTIME_INTERPRETER_MTERP_CFI_ASM_SUPPORT_H_
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
index 03de985cd0..41b5d5650d 100644
--- a/runtime/interpreter/mterp/mips/entry.S
+++ b/runtime/interpreter/mterp/mips/entry.S
@@ -32,6 +32,7 @@
*/
ExecuteMterpImpl:
+ .cfi_startproc
.set noreorder
.cpload t9
.set reorder
@@ -53,6 +54,7 @@ ExecuteMterpImpl:
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC()
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
index 6e1ba1c882..1c784ef188 100644
--- a/runtime/interpreter/mterp/mips/footer.S
+++ b/runtime/interpreter/mterp/mips/footer.S
@@ -284,4 +284,5 @@ MterpProfileActive:
STACK_LOAD_FULL()
jalr zero, ra
+ .cfi_endproc
.end ExecuteMterpImpl
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
index e4552ddf3d..0f7a6f1116 100644
--- a/runtime/interpreter/mterp/mips/header.S
+++ b/runtime/interpreter/mterp/mips/header.S
@@ -32,6 +32,7 @@
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
#if (__mips==32) && (__mips_isa_rev>=2)
#define MIPS32REVGE2 /* mips32r2 and greater */
diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S
index 436b88dbd0..841a817569 100644
--- a/runtime/interpreter/mterp/mips64/entry.S
+++ b/runtime/interpreter/mterp/mips64/entry.S
@@ -73,6 +73,7 @@ ExecuteMterpImpl:
dlsa rREFS, v0, rFP, 2
lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
dlsa rPC, v0, a1, 1
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
index d1acefd338..2b550cb533 100644
--- a/runtime/interpreter/mterp/mips64/header.S
+++ b/runtime/interpreter/mterp/mips64/header.S
@@ -102,6 +102,7 @@ The following registers have fixed assignments:
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 92dd19ed2f..987298bd8a 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -151,8 +151,14 @@ extern "C" size_t MterpShouldSwitchInterpreters()
Dbg::IsDebuggerActive() ||
// An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
// know how to deal with these so we could end up never dealing with it if we are in an
- // infinite loop.
- UNLIKELY(Thread::Current()->IsAsyncExceptionPending());
+ // infinite loop. Since this can be called in a tight loop and getting the current thread
+ // requires a TLS read we instead first check a short-circuit runtime flag that will only be
+ // set if something tries to set an async exception. This will make this function faster in
+ // the common case where no async exception has ever been sent. We don't need to worry about
+ // synchronization on the runtime flag since it is only set in a checkpoint which will either
+ // take place on the current thread or act as a synchronization point.
+ (UNLIKELY(runtime->AreAsyncExceptionsThrown()) &&
+ Thread::Current()->IsAsyncExceptionPending());
}
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 69d7edbe8a..f3c1124ec4 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -92,6 +92,7 @@ unspecified registers or condition codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
@@ -341,7 +342,7 @@ unspecified registers or condition codes.
/*
* On entry:
* r0 Thread* self/
- * r1 code_item
+ * r1 insns_
* r2 ShadowFrame
* r3 JValue* result_register
*
@@ -374,6 +375,7 @@ ENTRY ExecuteMterpImpl
VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 82edab465e..347d54f705 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -94,6 +94,7 @@ codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0
@@ -378,7 +379,7 @@ codes.
* Interpreter entry point.
* On entry:
* x0 Thread* self/
- * x1 code_item
+ * x1 insns_
* x2 ShadowFrame
* x3 JValue* result_register
*
@@ -404,6 +405,7 @@ ENTRY ExecuteMterpImpl
add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ .cfi_register DPC_PSEUDO_REG, xPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
index 8cc1b19128..1687afa58a 100644
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ b/runtime/interpreter/mterp/out/mterp_mips.S
@@ -39,6 +39,7 @@
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
#if (__mips==32) && (__mips_isa_rev>=2)
#define MIPS32REVGE2 /* mips32r2 and greater */
@@ -765,6 +766,7 @@
*/
ExecuteMterpImpl:
+ .cfi_startproc
.set noreorder
.cpload t9
.set reorder
@@ -786,6 +788,7 @@ ExecuteMterpImpl:
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC()
@@ -12842,5 +12845,6 @@ MterpProfileActive:
STACK_LOAD_FULL()
jalr zero, ra
+ .cfi_endproc
.end ExecuteMterpImpl
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index 139ee25904..559c72bb0c 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -109,6 +109,7 @@ The following registers have fixed assignments:
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
/*
* Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
@@ -407,6 +408,7 @@ ExecuteMterpImpl:
dlsa rREFS, v0, rFP, 2
lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
dlsa rPC, v0, a1, 1
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index cbab61ebf6..0613c9d12e 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -95,6 +95,7 @@ unspecified registers or condition codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
/*
* Handle mac compiler specific
@@ -342,7 +343,7 @@ unspecified registers or condition codes.
/*
* On entry:
* 0 Thread* self
- * 1 code_item
+ * 1 insns_
* 2 ShadowFrame
* 3 JValue* result_register
*
@@ -379,6 +380,7 @@ SYMBOL(ExecuteMterpImpl):
leal (rFP, %eax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
lea (%ecx, %eax, 2), rPC
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Set up for backwards branches & osr profiling */
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
index 83c3e4fb91..aa91db3b61 100644
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ b/runtime/interpreter/mterp/out/mterp_x86_64.S
@@ -91,6 +91,7 @@ unspecified registers or condition codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
/*
* Handle mac compiler specific
@@ -328,7 +329,7 @@ unspecified registers or condition codes.
/*
* On entry:
* 0 Thread* self
- * 1 code_item
+ * 1 insns_
* 2 ShadowFrame
* 3 JValue* result_register
*
@@ -362,6 +363,7 @@ SYMBOL(ExecuteMterpImpl):
leaq (rFP, %rax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
leaq (IN_ARG1, %rax, 2), rPC
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
index 055e834fed..10ca8366de 100644
--- a/runtime/interpreter/mterp/x86/entry.S
+++ b/runtime/interpreter/mterp/x86/entry.S
@@ -24,7 +24,7 @@
/*
* On entry:
* 0 Thread* self
- * 1 code_item
+ * 1 insns_
* 2 ShadowFrame
* 3 JValue* result_register
*
@@ -61,6 +61,7 @@ SYMBOL(ExecuteMterpImpl):
leal (rFP, %eax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
lea (%ecx, %eax, 2), rPC
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Set up for backwards branches & osr profiling */
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 370012f324..0e585e86f0 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -88,6 +88,7 @@ unspecified registers or condition codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
/*
* Handle mac compiler specific
diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S
index 83b845b702..d85ef7fe24 100644
--- a/runtime/interpreter/mterp/x86_64/entry.S
+++ b/runtime/interpreter/mterp/x86_64/entry.S
@@ -24,7 +24,7 @@
/*
* On entry:
* 0 Thread* self
- * 1 code_item
+ * 1 insns_
* 2 ShadowFrame
* 3 JValue* result_register
*
@@ -58,6 +58,7 @@ SYMBOL(ExecuteMterpImpl):
leaq (rFP, %rax, 4), rREFS
movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
leaq (IN_ARG1, %rax, 2), rPC
+ .cfi_register DPC_PSEUDO_REG, rPC
EXPORT_PC
/* Starting ibase */
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
index 9d21f3f1a1..a3ef8953ca 100644
--- a/runtime/interpreter/mterp/x86_64/header.S
+++ b/runtime/interpreter/mterp/x86_64/header.S
@@ -84,6 +84,7 @@ unspecified registers or condition codes.
* to expand the macros into assembler assignment statements.
*/
#include "asm_support.h"
+#include "interpreter/mterp/cfi_asm_support.h"
/*
* Handle mac compiler specific
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index bb8e5e5c15..bb0048de0b 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -1582,7 +1582,11 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
MethodHotness hotness_info(dex_data->GetHotnessInfo(method_idx));
if (startup ? hotness_info.IsStartup() : hotness_info.IsPostStartup()) {
- os << method_idx << ", ";
+ if (dex_file != nullptr) {
+ os << "\n\t\t" << dex_file->PrettyMethod(method_idx, true);
+ } else {
+ os << method_idx << ", ";
+ }
}
}
if (startup == false) {
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 22355638cd..c0de374904 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -744,6 +744,23 @@ static jobjectArray DexFile_getDexFileOutputPaths(JNIEnv* env,
return result;
}
+static jlong DexFile_getStaticSizeOfDexFile(JNIEnv* env, jclass, jobject cookie) {
+ const OatFile* oat_file = nullptr;
+ std::vector<const DexFile*> dex_files;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /* out */ dex_files, /* out */ oat_file)) {
+ DCHECK(env->ExceptionCheck());
+ return 0;
+ }
+
+ uint64_t file_size = 0;
+ for (auto& dex_file : dex_files) {
+ if (dex_file) {
+ file_size += dex_file->GetHeader().file_size_;
+ }
+ }
+ return static_cast<jlong>(file_size);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)Z"),
NATIVE_METHOD(DexFile,
@@ -779,7 +796,8 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, getDexFileStatus,
"(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
NATIVE_METHOD(DexFile, getDexFileOutputPaths,
- "(Ljava/lang/String;Ljava/lang/String;)[Ljava/lang/String;")
+ "(Ljava/lang/String;Ljava/lang/String;)[Ljava/lang/String;"),
+ NATIVE_METHOD(DexFile, getStaticSizeOfDexFile, "(Ljava/lang/Object;)J")
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 2663bea344..88a78ab4be 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -319,6 +319,53 @@ static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env,
return soa.AddLocalReference<jlongArray>(long_counts);
}
+static jobjectArray VMDebug_getInstancesOfClasses(JNIEnv* env,
+ jclass,
+ jobjectArray javaClasses,
+ jboolean includeAssignable) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Class>> classes = hs.NewHandle(
+ soa.Decode<mirror::ObjectArray<mirror::Class>>(javaClasses));
+ if (classes == nullptr) {
+ return nullptr;
+ }
+
+ jclass object_array_class = env->FindClass("[Ljava/lang/Object;");
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return nullptr;
+ }
+ CHECK(object_array_class != nullptr);
+
+ size_t num_classes = classes->GetLength();
+ jobjectArray result = env->NewObjectArray(num_classes, object_array_class, nullptr);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return nullptr;
+ }
+
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ MutableHandle<mirror::Class> h_class(hs.NewHandle<mirror::Class>(nullptr));
+ for (size_t i = 0; i < num_classes; ++i) {
+ h_class.Assign(classes->Get(i));
+
+ VariableSizedHandleScope hs2(soa.Self());
+ std::vector<Handle<mirror::Object>> raw_instances;
+ heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances);
+ jobjectArray array = env->NewObjectArray(raw_instances.size(),
+ WellKnownClasses::java_lang_Object,
+ nullptr);
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ return nullptr;
+ }
+
+ for (size_t j = 0; j < raw_instances.size(); ++j) {
+ env->SetObjectArrayElement(array, j, raw_instances[j].ToJObject());
+ }
+ env->SetObjectArrayElement(result, i, array);
+ }
+ return result;
+}
+
// We export the VM internal per-heap-space size/alloc/free metrics
// for the zygote space, alloc space (application heap), and the large
// object space for dumpsys meminfo. The other memory region data such
@@ -534,6 +581,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"),
NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"),
NATIVE_METHOD(VMDebug, getHeapSpaceStats, "([J)V"),
+ NATIVE_METHOD(VMDebug, getInstancesOfClasses, "([Ljava/lang/Class;Z)[[Ljava/lang/Object;"),
NATIVE_METHOD(VMDebug, getInstructionCount, "([I)V"),
FAST_NATIVE_METHOD(VMDebug, getLoadedClassCount, "()I"),
NATIVE_METHOD(VMDebug, getVmFeatureList, "()[Ljava/lang/String;"),
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f09b6c9825..d15de38b0a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -256,6 +256,7 @@ Runtime::Runtime()
force_native_bridge_(false),
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
+ async_exceptions_thrown_(false),
is_java_debuggable_(false),
zygote_max_failed_boots_(0),
experimental_flags_(ExperimentalFlags::kNone),
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6b01cc220f..476b71f169 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -586,6 +586,14 @@ class Runtime {
is_native_debuggable_ = value;
}
+ bool AreAsyncExceptionsThrown() const {
+ return async_exceptions_thrown_;
+ }
+
+ void SetAsyncExceptionsThrown() {
+ async_exceptions_thrown_ = true;
+ }
+
// Returns the build fingerprint, if set. Otherwise an empty string is returned.
std::string GetFingerprint() {
return fingerprint_;
@@ -899,6 +907,10 @@ class Runtime {
// Whether we are running under native debugger.
bool is_native_debuggable_;
+ // Whether or not any async exceptions have ever been thrown. This is used to speed up the
+ // MterpShouldSwitchInterpreters function.
+ bool async_exceptions_thrown_;
+
// Whether Java code needs to be debuggable.
bool is_java_debuggable_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index bec1c908ad..cb350edb5f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3700,6 +3700,7 @@ void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
CHECK(new_exception != nullptr);
+ Runtime::Current()->SetAsyncExceptionsThrown();
if (kIsDebugBuild) {
// Make sure we are in a checkpoint.
MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index c6ba2f7e43..978e51d7b0 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2237,7 +2237,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
} else {
/* return_type is the *expected* return type, not register value */
- DCHECK(!return_type.IsZero());
+ DCHECK(!return_type.IsZeroOrNull());
DCHECK(!return_type.IsUninitializedReference());
const uint32_t vregA = inst->VRegA_11x();
const RegType& reg_type = work_line_->GetRegisterType(this, vregA);
@@ -2485,7 +2485,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::ARRAY_LENGTH: {
const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
- if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
+ if (!res_type.IsArrayTypes() && !res_type.IsZeroOrNull()) {
+ // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
} else {
work_line_->SetRegisterType<LockOp::kClear>(this,
@@ -2592,7 +2593,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* Similar to the verification done for APUT */
const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
- if (!array_type.IsZero()) {
+ if (!array_type.IsZeroOrNull()) {
if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
@@ -2632,7 +2633,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const RegType& reg_type1 = work_line_->GetRegisterType(this, inst->VRegA_22t());
const RegType& reg_type2 = work_line_->GetRegisterType(this, inst->VRegB_22t());
bool mismatch = false;
- if (reg_type1.IsZero()) { // zero then integral or reference expected
+ if (reg_type1.IsZeroOrNull()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
} else if (reg_type1.IsReferenceTypes()) { // both references?
mismatch = !reg_type2.IsReferenceTypes();
@@ -2717,7 +2718,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
cast_type.HasClass() && // Could be conflict type, make sure it has a class.
!cast_type.GetClass()->IsInterface() &&
- (orig_type.IsZero() ||
+ (orig_type.IsZeroOrNull() ||
orig_type.IsStrictlyAssignableFrom(
cast_type.Merge(orig_type, &reg_types_, this), this))) {
RegisterLine* update_line = RegisterLine::Create(code_item_accessor_.RegistersSize(),
@@ -3005,7 +3006,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
/* no null refs allowed (?) */
- if (this_type.IsZero()) {
+ if (this_type.IsZeroOrNull()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unable to initialize null ref";
break;
}
@@ -3082,7 +3083,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* interface or Object (see comments in RegType::JoinClass).
*/
const RegType& this_type = work_line_->GetInvocationThis(this, inst);
- if (this_type.IsZero()) {
+ if (this_type.IsZeroOrNull()) {
/* null pointer always passes (and always fails at runtime) */
} else {
if (this_type.IsUninitializedTypes()) {
@@ -4081,7 +4082,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(
const RegType& adjusted_type = is_init
? GetRegTypeCache()->FromUninitialized(actual_arg_type)
: actual_arg_type;
- if (method_type != METHOD_INTERFACE && !adjusted_type.IsZero()) {
+ if (method_type != METHOD_INTERFACE && !adjusted_type.IsZeroOrNull()) {
const RegType* res_method_class;
// Miranda methods have the declaring interface as their declaring class, not the abstract
// class. It would be wrong to use this for the type check (interface type checks are
@@ -4454,7 +4455,7 @@ bool MethodVerifier::CheckSignaturePolymorphicMethod(ArtMethod* method) {
bool MethodVerifier::CheckSignaturePolymorphicReceiver(const Instruction* inst) {
const RegType& this_type = work_line_->GetInvocationThis(this, inst);
- if (this_type.IsZero()) {
+ if (this_type.IsZeroOrNull()) {
/* null pointer always passes (and always fails at run time) */
return true;
} else if (!this_type.IsNonZeroReferenceTypes()) {
@@ -4573,7 +4574,7 @@ ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
return nullptr;
}
- if (!actual_arg_type.IsZero()) {
+ if (!actual_arg_type.IsZeroOrNull()) {
mirror::Class* klass = res_method->GetDeclaringClass();
std::string temp;
const RegType& res_method_class =
@@ -4689,7 +4690,7 @@ void MethodVerifier::VerifyAGet(const Instruction* inst,
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
- if (array_type.IsZero()) {
+ if (array_type.IsZeroOrNull()) {
have_pending_runtime_throw_failure_ = true;
// Null array class; this code path will fail at runtime. Infer a merge-able type from the
// instruction type. TODO: have a proper notion of bottom here.
@@ -4804,7 +4805,7 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
- if (array_type.IsZero()) {
+ if (array_type.IsZeroOrNull()) {
// Null array type; this code path will fail at runtime.
// Still check that the given value matches the instruction's type.
// Note: this is, as usual, complicated by the fact the the instruction isn't fully typed
@@ -4926,7 +4927,7 @@ ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_id
DCHECK(self_->IsExceptionPending());
self_->ClearException();
return nullptr;
- } else if (obj_type.IsZero()) {
+ } else if (obj_type.IsZeroOrNull()) {
// Cannot infer and check type, however, access will cause null pointer exception.
// Fall through into a few last soft failure checks below.
} else if (!obj_type.IsReferenceTypes()) {
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index 631c6bd7ef..f719782727 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -29,6 +29,8 @@ namespace art {
namespace verifier {
inline bool RegType::CanAccess(const RegType& other) const {
+ DCHECK(IsReferenceTypes());
+ DCHECK(!IsNull());
if (Equals(other)) {
return true; // Trivial accessibility.
} else {
@@ -45,9 +47,13 @@ inline bool RegType::CanAccess(const RegType& other) const {
}
inline bool RegType::CanAccessMember(ObjPtr<mirror::Class> klass, uint32_t access_flags) const {
+ DCHECK(IsReferenceTypes());
if ((access_flags & kAccPublic) != 0) {
return true;
}
+ if (IsNull()) {
+ return true;
+ }
if (!IsUnresolvedTypes()) {
return GetClass()->CanAccessMember(klass, access_flags);
} else {
@@ -92,7 +98,7 @@ inline bool RegType::AssignableFrom(const RegType& lhs,
LOG(WARNING) << "RegType::AssignableFrom lhs is Conflict!";
return false;
case AssignmentType::kReference:
- if (rhs.IsZero()) {
+ if (rhs.IsZeroOrNull()) {
return true; // All reference types can be assigned null.
} else if (!rhs.IsReferenceTypes()) {
return false; // Expect rhs to be a reference type.
@@ -119,6 +125,7 @@ inline bool RegType::AssignableFrom(const RegType& lhs,
return result;
} else {
// Unresolved types are only assignable for null and equality.
+ // Null cannot be the left-hand side.
return false;
}
case AssignmentType::kNotAssignable:
@@ -199,6 +206,11 @@ inline const UndefinedType* UndefinedType::GetInstance() {
return instance_;
}
+inline const NullType* NullType::GetInstance() {
+ DCHECK(instance_ != nullptr);
+ return instance_;
+}
+
inline void* RegType::operator new(size_t size, ScopedArenaAllocator* allocator) {
return allocator->Alloc(size, kArenaAllocMisc);
}
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 8df2e0f50b..309c374fa8 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -51,6 +51,7 @@ const LongHiType* LongHiType::instance_ = nullptr;
const DoubleLoType* DoubleLoType::instance_ = nullptr;
const DoubleHiType* DoubleHiType::instance_ = nullptr;
const IntegerType* IntegerType::instance_ = nullptr;
+const NullType* NullType::instance_ = nullptr;
PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
@@ -581,6 +582,10 @@ static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
return a.IsConstantTypes() ? b : a;
}
+static const RegType& SelectNonConstant2(const RegType& a, const RegType& b) {
+ return a.IsConstantTypes() ? (b.IsZero() ? a : b) : a;
+}
+
const RegType& RegType::Merge(const RegType& incoming_type,
RegTypeCache* reg_types,
MethodVerifier* verifier) const {
@@ -695,8 +700,8 @@ const RegType& RegType::Merge(const RegType& incoming_type,
// special. They may only ever be merged with themselves (must be taken care of by the
// caller of Merge(), see the DCHECK on entry). So mark any other merge as conflicting here.
return conflict;
- } else if (IsZero() || incoming_type.IsZero()) {
- return SelectNonConstant(*this, incoming_type); // 0 MERGE ref => ref
+ } else if (IsZeroOrNull() || incoming_type.IsZeroOrNull()) {
+ return SelectNonConstant2(*this, incoming_type); // 0 MERGE ref => ref
} else if (IsJavaLangObject() || incoming_type.IsJavaLangObject()) {
return reg_types->JavaLangObject(false); // Object MERGE ref => Object
} else if (IsUnresolvedTypes() || incoming_type.IsUnresolvedTypes()) {
@@ -965,6 +970,21 @@ bool RegType::CanAssignArray(const RegType& src,
return cmp1.CanAssignArray(cmp2, reg_types, class_loader, verifier, soft_error);
}
+const NullType* NullType::CreateInstance(mirror::Class* klass,
+ const StringPiece& descriptor,
+ uint16_t cache_id) {
+ CHECK(instance_ == nullptr);
+ instance_ = new NullType(klass, descriptor, cache_id);
+ return instance_;
+}
+
+void NullType::Destroy() {
+ if (NullType::instance_ != nullptr) {
+ delete instance_;
+ instance_ = nullptr;
+ }
+}
+
} // namespace verifier
} // namespace art
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index a2085a3f09..9055849ca0 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -129,8 +129,12 @@ class RegType {
virtual bool IsConstantShort() const { return false; }
virtual bool IsOne() const { return false; }
virtual bool IsZero() const { return false; }
+ virtual bool IsNull() const { return false; }
bool IsReferenceTypes() const {
- return IsNonZeroReferenceTypes() || IsZero();
+ return IsNonZeroReferenceTypes() || IsZero() || IsNull();
+ }
+ bool IsZeroOrNull() const {
+ return IsZero() || IsNull();
}
virtual bool IsNonZeroReferenceTypes() const { return false; }
bool IsCategory1Types() const {
@@ -857,6 +861,46 @@ class ImpreciseConstHiType FINAL : public ConstantType {
}
};
+// Special "null" type that captures the semantics of null / bottom.
+class NullType FINAL : public RegType {
+ public:
+ bool IsNull() const OVERRIDE {
+ return true;
+ }
+
+ // Get the singleton Null instance.
+ static const NullType* GetInstance() PURE;
+
+ // Create the singleton instance.
+ static const NullType* CreateInstance(mirror::Class* klass,
+ const StringPiece& descriptor,
+ uint16_t cache_id)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static void Destroy();
+
+ std::string Dump() const OVERRIDE {
+ return "null";
+ }
+
+ AssignmentType GetAssignmentTypeImpl() const OVERRIDE {
+ return AssignmentType::kReference;
+ }
+
+ bool IsConstantTypes() const OVERRIDE {
+ return true;
+ }
+
+ private:
+ NullType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ : RegType(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
+
+ static const NullType* instance_;
+};
+
// Common parent of all uninitialized types. Uninitialized types are created by
// "new" dex
// instructions and must be passed to a constructor.
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 197c97671e..61f34afdac 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -81,6 +81,9 @@ inline const UndefinedType& RegTypeCache::Undefined() {
inline const ConflictType& RegTypeCache::Conflict() {
return *ConflictType::GetInstance();
}
+inline const NullType& RegTypeCache::Null() {
+ return *NullType::GetInstance();
+}
inline const ImpreciseConstType& RegTypeCache::ByteConstant() {
const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 0029eb90a3..a3f08c8580 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,6 +16,8 @@
#include "reg_type_cache-inl.h"
+#include <type_traits>
+
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/casts.h"
@@ -51,8 +53,10 @@ ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry,
}
void RegTypeCache::FillPrimitiveAndSmallConstantTypes() {
+ // Note: this must have the same order as CreatePrimitiveAndSmallConstantTypes.
entries_.push_back(UndefinedType::GetInstance());
entries_.push_back(ConflictType::GetInstance());
+ entries_.push_back(NullType::GetInstance());
entries_.push_back(BooleanType::GetInstance());
entries_.push_back(ByteType::GetInstance());
entries_.push_back(ShortType::GetInstance());
@@ -304,6 +308,7 @@ void RegTypeCache::ShutDown() {
FloatType::Destroy();
DoubleLoType::Destroy();
DoubleHiType::Destroy();
+ NullType::Destroy();
for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
const PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
delete type;
@@ -314,33 +319,55 @@ void RegTypeCache::ShutDown() {
}
}
-template <class Type>
-const Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
- mirror::Class* klass = nullptr;
- // Try loading the class from linker.
- if (!descriptor.empty()) {
- klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
- descriptor.c_str());
- DCHECK(klass != nullptr);
- }
- const Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
- RegTypeCache::primitive_count_++;
- return entry;
-}
+// Helper for create_primitive_type_instance lambda.
+namespace {
+template <typename T>
+struct TypeHelper {
+ using type = T;
+ static_assert(std::is_convertible<T*, RegType*>::value, "T must be a RegType");
+
+ const char* descriptor;
+
+ explicit TypeHelper(const char* d) : descriptor(d) {}
+};
+} // namespace
void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() {
- CreatePrimitiveTypeInstance<UndefinedType>("");
- CreatePrimitiveTypeInstance<ConflictType>("");
- CreatePrimitiveTypeInstance<BooleanType>("Z");
- CreatePrimitiveTypeInstance<ByteType>("B");
- CreatePrimitiveTypeInstance<ShortType>("S");
- CreatePrimitiveTypeInstance<CharType>("C");
- CreatePrimitiveTypeInstance<IntegerType>("I");
- CreatePrimitiveTypeInstance<LongLoType>("J");
- CreatePrimitiveTypeInstance<LongHiType>("J");
- CreatePrimitiveTypeInstance<FloatType>("F");
- CreatePrimitiveTypeInstance<DoubleLoType>("D");
- CreatePrimitiveTypeInstance<DoubleHiType>("D");
+ // Note: this must have the same order as FillPrimitiveAndSmallConstantTypes.
+
+ // It is acceptable to pass on the const char* in type to CreateInstance, as all calls below are
+ // with compile-time constants that will have global lifetime. Use of the lambda ensures this
+ // code cannot leak to other users.
+ auto create_primitive_type_instance = [&](auto type) REQUIRES_SHARED(Locks::mutator_lock_) {
+ using Type = typename decltype(type)::type;
+ mirror::Class* klass = nullptr;
+ // Try loading the class from linker.
+ DCHECK(type.descriptor != nullptr);
+ if (strlen(type.descriptor) > 0) {
+ klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
+ type.descriptor);
+ DCHECK(klass != nullptr);
+ }
+ const Type* entry = Type::CreateInstance(klass,
+ type.descriptor,
+ RegTypeCache::primitive_count_);
+ RegTypeCache::primitive_count_++;
+ return entry;
+ };
+ create_primitive_type_instance(TypeHelper<UndefinedType>(""));
+ create_primitive_type_instance(TypeHelper<ConflictType>(""));
+ create_primitive_type_instance(TypeHelper<NullType>(""));
+ create_primitive_type_instance(TypeHelper<BooleanType>("Z"));
+ create_primitive_type_instance(TypeHelper<ByteType>("B"));
+ create_primitive_type_instance(TypeHelper<ShortType>("S"));
+ create_primitive_type_instance(TypeHelper<CharType>("C"));
+ create_primitive_type_instance(TypeHelper<IntegerType>("I"));
+ create_primitive_type_instance(TypeHelper<LongLoType>("J"));
+ create_primitive_type_instance(TypeHelper<LongHiType>("J"));
+ create_primitive_type_instance(TypeHelper<FloatType>("F"));
+ create_primitive_type_instance(TypeHelper<DoubleLoType>("D"));
+ create_primitive_type_instance(TypeHelper<DoubleHiType>("D"));
+
for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
PreciseConstType* type = new PreciseConstType(value, primitive_count_);
small_precise_constants_[value - kMinSmallConstant] = type;
@@ -396,6 +423,9 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left,
if (resolved_parts_merged.IsConflict()) {
return Conflict();
}
+ if (resolved_parts_merged.IsJavaLangObject()) {
+ return resolved_parts_merged;
+ }
bool resolved_merged_is_array = resolved_parts_merged.IsArrayTypes();
if (left_unresolved_is_array || right_unresolved_is_array || resolved_merged_is_array) {
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index d0907564e2..52776766bc 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -49,6 +49,7 @@ class IntegerType;
class LongHiType;
class LongLoType;
class MethodVerifier;
+class NullType;
class PreciseConstType;
class PreciseReferenceType;
class RegType;
@@ -123,6 +124,7 @@ class RegTypeCache {
const DoubleHiType& DoubleHi() REQUIRES_SHARED(Locks::mutator_lock_);
const UndefinedType& Undefined() REQUIRES_SHARED(Locks::mutator_lock_);
const ConflictType& Conflict();
+ const NullType& Null();
const PreciseReferenceType& JavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_);
const PreciseReferenceType& JavaLangString() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -171,9 +173,6 @@ class RegTypeCache {
// verifier.
StringPiece AddString(const StringPiece& string_piece);
- template <class Type>
- static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
- REQUIRES_SHARED(Locks::mutator_lock_);
static void CreatePrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
// A quick look up for popular small constants.
@@ -183,7 +182,7 @@ class RegTypeCache {
kMinSmallConstant + 1];
static constexpr size_t kNumPrimitivesAndSmallConstants =
- 12 + (kMaxSmallConstant - kMinSmallConstant + 1);
+ 13 + (kMaxSmallConstant - kMinSmallConstant + 1);
// Have the well known global primitives been created?
static bool primitive_initialized_;
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 1bc48ed71b..15a38f3fd7 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -664,6 +664,368 @@ TEST_F(RegTypeTest, MergingDouble) {
}
}
+TEST_F(RegTypeTest, MergeSemiLatticeRef) {
+ // (Incomplete) semilattice:
+ //
+ // Excluded for now: * category-2 types
+ // * interfaces
+ // * all of category-1 primitive types, including constants.
+ // This is to demonstrate/codify the reference side, mostly.
+ //
+ // Note: It is not a real semilattice because int = float makes this wonky. :-(
+ //
+ // Conflict
+ // |
+ // #---------#--------------------------#-----------------------------#
+ // | | |
+ // | | Object
+ // | | |
+ // int uninit types #---------------#--------#------------------#---------#
+ // | | | | | |
+ // | unresolved-merge-types | Object[] char[] byte[]
+ // | | | | | | | |
+ // | unresolved-types | #------Number #---------# | |
+ // | | | | | | | |
+ // | | #--------Integer Number[] Number[][] | |
+ // | | | | | | |
+ // | #---------------#--------#---------#--------#---------#
+ // | |
+ // | null
+ // | |
+ // #--------------------------#----------------------------#
+ // |
+ // 0
+
+ ArenaStack stack(Runtime::Current()->GetArenaPool());
+ ScopedArenaAllocator allocator(&stack);
+ ScopedObjectAccess soa(Thread::Current());
+
+ // We cannot allow moving GC. Otherwise we'd have to ensure the reg types are updated (reference
+ // reg types store a class pointer in a GCRoot, which is normally updated through active verifiers
+ // being registered with their thread), which is unnecessarily complex.
+ Runtime::Current()->GetHeap()->IncrementDisableMovingGC(soa.Self());
+
+ RegTypeCache cache(true, allocator);
+
+ const RegType& conflict = cache.Conflict();
+ const RegType& zero = cache.Zero();
+ const RegType& null = cache.Null();
+ const RegType& int_type = cache.Integer();
+
+ const RegType& obj = cache.JavaLangObject(false);
+ const RegType& obj_arr = cache.From(nullptr, "[Ljava/lang/Object;", false);
+ ASSERT_FALSE(obj_arr.IsUnresolvedReference());
+
+ const RegType& unresolved_a = cache.From(nullptr, "Ldoes/not/resolve/A;", false);
+ ASSERT_TRUE(unresolved_a.IsUnresolvedReference());
+ const RegType& unresolved_b = cache.From(nullptr, "Ldoes/not/resolve/B;", false);
+ ASSERT_TRUE(unresolved_b.IsUnresolvedReference());
+ const RegType& unresolved_ab = cache.FromUnresolvedMerge(unresolved_a, unresolved_b, nullptr);
+ ASSERT_TRUE(unresolved_ab.IsUnresolvedMergedReference());
+
+ const RegType& uninit_this = cache.UninitializedThisArgument(obj);
+ const RegType& uninit_obj_0 = cache.Uninitialized(obj, 0u);
+ const RegType& uninit_obj_1 = cache.Uninitialized(obj, 1u);
+
+ const RegType& uninit_unres_this = cache.UninitializedThisArgument(unresolved_a);
+ const RegType& uninit_unres_a_0 = cache.Uninitialized(unresolved_a, 0);
+ const RegType& uninit_unres_b_0 = cache.Uninitialized(unresolved_b, 0);
+
+ const RegType& number = cache.From(nullptr, "Ljava/lang/Number;", false);
+ ASSERT_FALSE(number.IsUnresolvedReference());
+ const RegType& integer = cache.From(nullptr, "Ljava/lang/Integer;", false);
+ ASSERT_FALSE(integer.IsUnresolvedReference());
+
+ const RegType& uninit_number_0 = cache.Uninitialized(number, 0u);
+ const RegType& uninit_integer_0 = cache.Uninitialized(integer, 0u);
+
+ const RegType& number_arr = cache.From(nullptr, "[Ljava/lang/Number;", false);
+ ASSERT_FALSE(number_arr.IsUnresolvedReference());
+ const RegType& integer_arr = cache.From(nullptr, "[Ljava/lang/Integer;", false);
+ ASSERT_FALSE(integer_arr.IsUnresolvedReference());
+
+ const RegType& number_arr_arr = cache.From(nullptr, "[[Ljava/lang/Number;", false);
+ ASSERT_FALSE(number_arr_arr.IsUnresolvedReference());
+
+ const RegType& char_arr = cache.From(nullptr, "[C", false);
+ ASSERT_FALSE(char_arr.IsUnresolvedReference());
+ const RegType& byte_arr = cache.From(nullptr, "[B", false);
+ ASSERT_FALSE(byte_arr.IsUnresolvedReference());
+
+ const RegType& unresolved_a_num = cache.FromUnresolvedMerge(unresolved_a, number, nullptr);
+ ASSERT_TRUE(unresolved_a_num.IsUnresolvedMergedReference());
+ const RegType& unresolved_b_num = cache.FromUnresolvedMerge(unresolved_b, number, nullptr);
+ ASSERT_TRUE(unresolved_b_num.IsUnresolvedMergedReference());
+ const RegType& unresolved_ab_num = cache.FromUnresolvedMerge(unresolved_ab, number, nullptr);
+ ASSERT_TRUE(unresolved_ab_num.IsUnresolvedMergedReference());
+
+ const RegType& unresolved_a_int = cache.FromUnresolvedMerge(unresolved_a, integer, nullptr);
+ ASSERT_TRUE(unresolved_a_int.IsUnresolvedMergedReference());
+ const RegType& unresolved_b_int = cache.FromUnresolvedMerge(unresolved_b, integer, nullptr);
+ ASSERT_TRUE(unresolved_b_int.IsUnresolvedMergedReference());
+ const RegType& unresolved_ab_int = cache.FromUnresolvedMerge(unresolved_ab, integer, nullptr);
+ ASSERT_TRUE(unresolved_ab_int.IsUnresolvedMergedReference());
+ std::vector<const RegType*> uninitialized_types = {
+ &uninit_this, &uninit_obj_0, &uninit_obj_1, &uninit_number_0, &uninit_integer_0
+ };
+ std::vector<const RegType*> unresolved_types = {
+ &unresolved_a,
+ &unresolved_b,
+ &unresolved_ab,
+ &unresolved_a_num,
+ &unresolved_b_num,
+ &unresolved_ab_num,
+ &unresolved_a_int,
+ &unresolved_b_int,
+ &unresolved_ab_int
+ };
+ std::vector<const RegType*> uninit_unresolved_types = {
+ &uninit_unres_this, &uninit_unres_a_0, &uninit_unres_b_0
+ };
+ std::vector<const RegType*> plain_nonobj_classes = { &number, &integer };
+ std::vector<const RegType*> plain_nonobj_arr_classes = {
+ &number_arr,
+ &number_arr_arr,
+ &integer_arr,
+ &char_arr,
+ };
+ // std::vector<const RegType*> others = { &conflict, &zero, &null, &obj, &int_type };
+
+ std::vector<const RegType*> all_minus_uninit_conflict;
+ all_minus_uninit_conflict.insert(all_minus_uninit_conflict.end(),
+ unresolved_types.begin(),
+ unresolved_types.end());
+ all_minus_uninit_conflict.insert(all_minus_uninit_conflict.end(),
+ plain_nonobj_classes.begin(),
+ plain_nonobj_classes.end());
+ all_minus_uninit_conflict.insert(all_minus_uninit_conflict.end(),
+ plain_nonobj_arr_classes.begin(),
+ plain_nonobj_arr_classes.end());
+ all_minus_uninit_conflict.push_back(&zero);
+ all_minus_uninit_conflict.push_back(&null);
+ all_minus_uninit_conflict.push_back(&obj);
+
+ std::vector<const RegType*> all_minus_uninit;
+ all_minus_uninit.insert(all_minus_uninit.end(),
+ all_minus_uninit_conflict.begin(),
+ all_minus_uninit_conflict.end());
+ all_minus_uninit.push_back(&conflict);
+
+
+ std::vector<const RegType*> all;
+ all.insert(all.end(), uninitialized_types.begin(), uninitialized_types.end());
+ all.insert(all.end(), uninit_unresolved_types.begin(), uninit_unresolved_types.end());
+ all.insert(all.end(), all_minus_uninit.begin(), all_minus_uninit.end());
+ all.push_back(&int_type);
+
+ auto check = [&](const RegType& in1, const RegType& in2, const RegType& expected_out)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const RegType& merge_result = in1.SafeMerge(in2, &cache, nullptr);
+ EXPECT_EQ(&expected_out, &merge_result)
+ << in1.Dump() << " x " << in2.Dump() << " = " << merge_result.Dump()
+ << " != " << expected_out.Dump();
+ };
+
+ // Identity.
+ {
+ for (auto r : all) {
+ check(*r, *r, *r);
+ }
+ }
+
+ // Define a covering relation through a list of Edges. We'll then derive LUBs from this and
+ // create checks for every pair of types.
+
+ struct Edge {
+ const RegType& from;
+ const RegType& to;
+
+ Edge(const RegType& from_, const RegType& to_) : from(from_), to(to_) {}
+ };
+ std::vector<Edge> edges;
+#define ADD_EDGE(from, to) edges.emplace_back((from), (to))
+
+ // To Conflict.
+ {
+ for (auto r : uninitialized_types) {
+ ADD_EDGE(*r, conflict);
+ }
+ for (auto r : uninit_unresolved_types) {
+ ADD_EDGE(*r, conflict);
+ }
+ ADD_EDGE(obj, conflict);
+ ADD_EDGE(int_type, conflict);
+ }
+
+ ADD_EDGE(zero, null);
+
+ // Unresolved.
+ {
+ ADD_EDGE(null, unresolved_a);
+ ADD_EDGE(null, unresolved_b);
+ ADD_EDGE(unresolved_a, unresolved_ab);
+ ADD_EDGE(unresolved_b, unresolved_ab);
+
+ ADD_EDGE(number, unresolved_a_num);
+ ADD_EDGE(unresolved_a, unresolved_a_num);
+ ADD_EDGE(number, unresolved_b_num);
+ ADD_EDGE(unresolved_b, unresolved_b_num);
+ ADD_EDGE(number, unresolved_ab_num);
+ ADD_EDGE(unresolved_a_num, unresolved_ab_num);
+ ADD_EDGE(unresolved_b_num, unresolved_ab_num);
+ ADD_EDGE(unresolved_ab, unresolved_ab_num);
+
+ ADD_EDGE(integer, unresolved_a_int);
+ ADD_EDGE(unresolved_a, unresolved_a_int);
+ ADD_EDGE(integer, unresolved_b_int);
+ ADD_EDGE(unresolved_b, unresolved_b_int);
+ ADD_EDGE(integer, unresolved_ab_int);
+ ADD_EDGE(unresolved_a_int, unresolved_ab_int);
+ ADD_EDGE(unresolved_b_int, unresolved_ab_int);
+ ADD_EDGE(unresolved_ab, unresolved_ab_int);
+
+ ADD_EDGE(unresolved_a_int, unresolved_a_num);
+ ADD_EDGE(unresolved_b_int, unresolved_b_num);
+ ADD_EDGE(unresolved_ab_int, unresolved_ab_num);
+
+ ADD_EDGE(unresolved_ab_num, obj);
+ }
+
+ // Classes.
+ {
+ ADD_EDGE(null, integer);
+ ADD_EDGE(integer, number);
+ ADD_EDGE(number, obj);
+ }
+
+ // Arrays.
+ {
+ ADD_EDGE(integer_arr, number_arr);
+ ADD_EDGE(number_arr, obj_arr);
+ ADD_EDGE(obj_arr, obj);
+ ADD_EDGE(number_arr_arr, obj_arr);
+
+ ADD_EDGE(char_arr, obj);
+ ADD_EDGE(byte_arr, obj);
+
+ ADD_EDGE(null, integer_arr);
+ ADD_EDGE(null, number_arr_arr);
+ ADD_EDGE(null, char_arr);
+ ADD_EDGE(null, byte_arr);
+ }
+
+ // Primitive.
+ {
+ ADD_EDGE(zero, int_type);
+ }
+#undef ADD_EDGE
+
+ // Create merge triples by using the covering relation established by edges to derive the
+ // expected merge for any pair of types.
+
+ // Expect merge(in1, in2) == out.
+ struct MergeExpectation {
+ const RegType& in1;
+ const RegType& in2;
+ const RegType& out;
+
+ MergeExpectation(const RegType& in1_, const RegType& in2_, const RegType& out_)
+ : in1(in1_), in2(in2_), out(out_) {}
+ };
+ std::vector<MergeExpectation> expectations;
+
+ for (auto r1 : all) {
+ for (auto r2 : all) {
+ if (r1 == r2) {
+ continue;
+ }
+
+ // Very simple algorithm here that is usually used with adjacency lists. Our graph is
+ // small, it didn't make sense to have lists per node. Thus, the regular guarantees
+ // of O(n + |e|) don't apply, but that is acceptable.
+ //
+ // To compute r1 lub r2 = merge(r1, r2):
+ // 1) Generate the reachable set of r1, name it grey.
+ // 2) Mark all grey reachable nodes of r2 as black.
+ // 3) Find black nodes with no in-edges from other black nodes.
+ // 4) If |3)| == 1, that's the lub.
+
+ // Generic BFS of the graph induced by edges, starting at start. new_node will be called
+ // with any discovered node, in order.
+ auto bfs = [&](auto new_node, const RegType* start) {
+ std::unordered_set<const RegType*> seen;
+ std::queue<const RegType*> work_list;
+ work_list.push(start);
+ while (!work_list.empty()) {
+ const RegType* cur = work_list.front();
+ work_list.pop();
+ auto it = seen.find(cur);
+ if (it != seen.end()) {
+ continue;
+ }
+ seen.insert(cur);
+ new_node(cur);
+
+ for (const Edge& edge : edges) {
+ if (&edge.from == cur) {
+ work_list.push(&edge.to);
+ }
+ }
+ }
+ };
+
+ std::unordered_set<const RegType*> grey;
+ auto compute_grey = [&](const RegType* cur) {
+ grey.insert(cur); // Mark discovered node as grey.
+ };
+ bfs(compute_grey, r1);
+
+ std::set<const RegType*> black;
+ auto compute_black = [&](const RegType* cur) {
+ // Mark discovered grey node as black.
+ if (grey.find(cur) != grey.end()) {
+ black.insert(cur);
+ }
+ };
+ bfs(compute_black, r2);
+
+ std::set<const RegType*> no_in_edge(black); // Copy of black, remove nodes with in-edges.
+ for (auto r : black) {
+ for (Edge& e : edges) {
+ if (&e.from == r) {
+ no_in_edge.erase(&e.to); // It doesn't matter whether "to" is black or not, just
+ // attempt to remove it.
+ }
+ }
+ }
+
+ // Helper to print sets when something went wrong.
+ auto print_set = [](auto& container) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::string result;
+ for (auto r : container) {
+ result.append(" + ");
+ result.append(r->Dump());
+ }
+ return result;
+ };
+ ASSERT_EQ(no_in_edge.size(), 1u) << r1->Dump() << " u " << r2->Dump()
+ << " grey=" << print_set(grey)
+ << " black=" << print_set(black)
+ << " no-in-edge=" << print_set(no_in_edge);
+ expectations.emplace_back(*r1, *r2, **no_in_edge.begin());
+ }
+ }
+
+ // Evaluate merge expectations. The merge is expected to be commutative.
+
+ for (auto& triple : expectations) {
+ check(triple.in1, triple.in2, triple.out);
+ check(triple.in2, triple.in1, triple.out);
+ }
+
+ Runtime::Current()->GetHeap()->DecrementDisableMovingGC(soa.Self());
+}
+
TEST_F(RegTypeTest, ConstPrecision) {
// Tests creating primitive types types.
ArenaStack stack(Runtime::Current()->GetArenaPool());
diff --git a/test/071-dexfile-get-static-size/build b/test/071-dexfile-get-static-size/build
new file mode 100755
index 0000000000..0bba66d065
--- /dev/null
+++ b/test/071-dexfile-get-static-size/build
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@"
+
+# Create and add as resources to the test jar file:
+# 1. test1.dex
+# 2. test2.dex
+# 3. test-jar.jar, containing test1.dex as classes.dex
+# 4. multi-jar.jar, containing test1.dex as classes.dex and test2.dex as classes2.dex
+mkdir test-jar
+cp test1.dex test-jar/classes.dex
+cp test2.dex test-jar/classes2.dex
+zip -j test-jar.jar test-jar/classes.dex
+zip -j multi-jar.jar test-jar/classes.dex test-jar/classes2.dex
+jar uf ${TEST_NAME}.jar test1.dex test2.dex test-jar.jar multi-jar.jar
+
diff --git a/test/071-dexfile-get-static-size/expected.txt b/test/071-dexfile-get-static-size/expected.txt
new file mode 100644
index 0000000000..dfb77c3a2f
--- /dev/null
+++ b/test/071-dexfile-get-static-size/expected.txt
@@ -0,0 +1,4 @@
+Size for test1.dex: 1864
+Size for test2.dex: 1264
+Size for test-jar.jar: 1864
+Size for multi-jar.jar: 3128
diff --git a/test/071-dexfile-get-static-size/info.txt b/test/071-dexfile-get-static-size/info.txt
new file mode 100644
index 0000000000..5b528e81b4
--- /dev/null
+++ b/test/071-dexfile-get-static-size/info.txt
@@ -0,0 +1,3 @@
+Test DexFile.getStaticSizeOfDexFile API.
+
+test1.dex and test2.dex are arbitrary valid dex files.
diff --git a/test/071-dexfile-get-static-size/src/Main.java b/test/071-dexfile-get-static-size/src/Main.java
new file mode 100644
index 0000000000..4bf453801e
--- /dev/null
+++ b/test/071-dexfile-get-static-size/src/Main.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.FileOutputStream;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+ private static void extractResource(String resource, String filename) throws Exception {
+ ClassLoader loader = Main.class.getClassLoader();
+ InputStream is = loader.getResourceAsStream(resource);
+ OutputStream os = new FileOutputStream(filename);
+ int read;
+ byte[] buf = new byte[4096];
+ while ((read = is.read(buf)) >= 0) {
+ os.write(buf, 0, read);
+ }
+ is.close();
+ os.close();
+ }
+
+ private static long getDexFileSize(String filename) throws Exception {
+ ClassLoader loader = Main.class.getClassLoader();
+ Class<?> DexFile = loader.loadClass("dalvik.system.DexFile");
+ Method DexFile_loadDex = DexFile.getMethod("loadDex",
+ String.class,
+ String.class,
+ Integer.TYPE);
+ Method DexFile_getStaticSizeOfDexFile = DexFile.getMethod("getStaticSizeOfDexFile");
+ Object dexFile = DexFile_loadDex.invoke(null, filename, null, 0);
+ return (Long) DexFile_getStaticSizeOfDexFile.invoke(dexFile);
+ }
+
+ private static void test(String resource) throws Exception {
+ String filename = System.getenv("DEX_LOCATION") + "/" + resource;
+ extractResource(resource, filename);
+ long size = getDexFileSize(filename);
+ System.out.println("Size for " + resource + ": " + size);
+ }
+
+ public static void main(String[] args) throws Exception {
+ test("test1.dex");
+ test("test2.dex");
+ test("test-jar.jar");
+ test("multi-jar.jar");
+ }
+}
diff --git a/test/071-dexfile-get-static-size/test1.dex b/test/071-dexfile-get-static-size/test1.dex
new file mode 100644
index 0000000000..84602d03c2
--- /dev/null
+++ b/test/071-dexfile-get-static-size/test1.dex
Binary files differ
diff --git a/test/071-dexfile-get-static-size/test2.dex b/test/071-dexfile-get-static-size/test2.dex
new file mode 100644
index 0000000000..a07c46ef59
--- /dev/null
+++ b/test/071-dexfile-get-static-size/test2.dex
Binary files differ
diff --git a/test/099-vmdebug/expected.txt b/test/099-vmdebug/expected.txt
index b8d72f66f8..f7801de62f 100644
--- a/test/099-vmdebug/expected.txt
+++ b/test/099-vmdebug/expected.txt
@@ -23,3 +23,9 @@ Instances of null 0
Instances of ClassA assignable 3
Array counts [2, 1, 0]
Array counts assignable [3, 1, 0]
+ClassD got 3, combined mask: 13
+ClassE got 2, combined mask: 18
+null got 0
+ClassD assignable got 5, combined mask: 31
+ClassE assignable got 2, combined mask: 18
+null assignable got 0
diff --git a/test/099-vmdebug/info.txt b/test/099-vmdebug/info.txt
index 7f88086986..873429e076 100644
--- a/test/099-vmdebug/info.txt
+++ b/test/099-vmdebug/info.txt
@@ -1 +1 @@
-Tests of private dalvik.system.VMDebug support for method tracing.
+Tests of dalvik.system.VMDebug APIs.
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 90ad3155ca..e0d829a0d6 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -33,6 +33,7 @@ public class Main {
}
testMethodTracing();
testCountInstances();
+ testGetInstances();
testRuntimeStat();
testRuntimeStats();
}
@@ -249,6 +250,59 @@ public class Main {
System.out.println("Array counts assignable " + Arrays.toString(counts));
}
+ static class ClassD {
+ public int mask;
+
+ public ClassD(int mask) {
+ this.mask = mask;
+ }
+ }
+
+ static class ClassE extends ClassD {
+ public ClassE(int mask) {
+ super(mask);
+ }
+ }
+
+ private static void testGetInstances() throws Exception {
+ ArrayList<Object> l = new ArrayList<Object>();
+ l.add(new ClassD(0x01));
+ l.add(new ClassE(0x02));
+ l.add(new ClassD(0x04));
+ l.add(new ClassD(0x08));
+ l.add(new ClassE(0x10));
+ Runtime.getRuntime().gc();
+ Class<?>[] classes = new Class<?>[] {ClassD.class, ClassE.class, null};
+ Object[][] instances = VMDebug.getInstancesOfClasses(classes, false);
+
+ int mask = 0;
+ for (Object instance : instances[0]) {
+ mask |= ((ClassD)instance).mask;
+ }
+ System.out.println("ClassD got " + instances[0].length + ", combined mask: " + mask);
+
+ mask = 0;
+ for (Object instance : instances[1]) {
+ mask |= ((ClassD)instance).mask;
+ }
+ System.out.println("ClassE got " + instances[1].length + ", combined mask: " + mask);
+ System.out.println("null got " + instances[2].length);
+
+ instances = VMDebug.getInstancesOfClasses(classes, true);
+ mask = 0;
+ for (Object instance : instances[0]) {
+ mask |= ((ClassD)instance).mask;
+ }
+ System.out.println("ClassD assignable got " + instances[0].length + ", combined mask: " + mask);
+
+ mask = 0;
+ for (Object instance : instances[1]) {
+ mask |= ((ClassD)instance).mask;
+ }
+ System.out.println("ClassE assignable got " + instances[1].length + ", combined mask: " + mask);
+ System.out.println("null assignable got " + instances[2].length);
+ }
+
private static class VMDebug {
private static final Method startMethodTracingMethod;
private static final Method stopMethodTracingMethod;
@@ -257,6 +311,7 @@ public class Main {
private static final Method getRuntimeStatsMethod;
private static final Method countInstancesOfClassMethod;
private static final Method countInstancesOfClassesMethod;
+ private static final Method getInstancesOfClassesMethod;
static {
try {
Class<?> c = Class.forName("dalvik.system.VMDebug");
@@ -270,6 +325,8 @@ public class Main {
Class.class, Boolean.TYPE);
countInstancesOfClassesMethod = c.getDeclaredMethod("countInstancesOfClasses",
Class[].class, Boolean.TYPE);
+ getInstancesOfClassesMethod = c.getDeclaredMethod("getInstancesOfClasses",
+ Class[].class, Boolean.TYPE);
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -300,5 +357,9 @@ public class Main {
return (long[]) countInstancesOfClassesMethod.invoke(
null, new Object[]{classes, assignable});
}
+ public static Object[][] getInstancesOfClasses(Class<?>[] classes, boolean assignable) throws Exception {
+ return (Object[][]) getInstancesOfClassesMethod.invoke(
+ null, new Object[]{classes, assignable});
+ }
}
}
diff --git a/test/638-checker-inline-cache-intrinsic/expected.txt b/test/638-checker-inline-cache-intrinsic/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/638-checker-inline-cache-intrinsic/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/638-checker-inline-cache-intrinsic/info.txt b/test/638-checker-inline-cache-intrinsic/info.txt
new file mode 100644
index 0000000000..764577be54
--- /dev/null
+++ b/test/638-checker-inline-cache-intrinsic/info.txt
@@ -0,0 +1 @@
+Verify the devirtualization of a method that should be intrinsified.
diff --git a/test/638-checker-inline-cache-intrinsic/run b/test/638-checker-inline-cache-intrinsic/run
new file mode 100644
index 0000000000..f43681dd56
--- /dev/null
+++ b/test/638-checker-inline-cache-intrinsic/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} --jit --runtime-option -Xjitthreshold:100 -Xcompiler-option --verbose-methods=inlineMonomorphic,knownReceiverType,stringEquals "$@"
diff --git a/test/638-checker-inline-cache-intrinsic/src/Main.java b/test/638-checker-inline-cache-intrinsic/src/Main.java
new file mode 100644
index 0000000000..472cbf68bc
--- /dev/null
+++ b/test/638-checker-inline-cache-intrinsic/src/Main.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /// CHECK-START: char Main.$noinline$inlineMonomorphic(java.lang.CharSequence) inliner (before)
+ /// CHECK: InvokeInterface method_name:java.lang.CharSequence.charAt
+
+ /// CHECK-START: char Main.$noinline$inlineMonomorphic(java.lang.CharSequence) inliner (after)
+ /// CHECK: Deoptimize
+ /// CHECK: InvokeVirtual method_name:java.lang.String.charAt intrinsic:StringCharAt
+
+ /// CHECK-START: char Main.$noinline$inlineMonomorphic(java.lang.CharSequence) instruction_simplifier$after_inlining (after)
+ /// CHECK: Deoptimize
+ /// CHECK-NOT: InvokeInterface
+ /// CHECK-NOT: InvokeVirtual
+
+ public static char $noinline$inlineMonomorphic(CharSequence cs) {
+ return cs.charAt(0);
+ }
+
+ /// CHECK-START: char Main.$noinline$knownReceiverType() inliner (before)
+ /// CHECK: InvokeInterface method_name:java.lang.CharSequence.charAt
+
+ /// CHECK-START: char Main.$noinline$knownReceiverType() inliner (after)
+ /// CHECK: InvokeVirtual method_name:java.lang.String.charAt intrinsic:StringCharAt
+
+ /// CHECK-START: char Main.$noinline$knownReceiverType() instruction_simplifier$after_inlining (after)
+ /// CHECK-NOT: InvokeInterface
+ /// CHECK-NOT: InvokeVirtual
+
+ public static char $noinline$knownReceiverType() {
+ CharSequence cs = "abc";
+ return cs.charAt(1);
+ }
+
+ /// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) inliner (before)
+ /// CHECK: InvokeVirtual method_name:java.lang.Object.equals intrinsic:None
+
+ /// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) inliner (after)
+ /// CHECK: Deoptimize
+ /// CHECK: InvokeVirtual method_name:java.lang.Object.equals intrinsic:StringEquals
+
+ /// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) instruction_simplifier$after_inlining (after)
+ /// CHECK: Deoptimize
+ /// CHECK: InvokeVirtual method_name:java.lang.Object.equals intrinsic:StringEquals
+
+ public static boolean $noinline$stringEquals(Object obj) {
+ return obj.equals("def");
+ }
+
+ public static void test() {
+ // Warm up inline cache.
+ for (int i = 0; i < 45; i++) {
+ $noinline$inlineMonomorphic(str);
+ }
+ for (int i = 0; i < 60; i++) {
+ $noinline$stringEquals(str);
+ }
+ ensureJitCompiled(Main.class, "$noinline$stringEquals");
+ ensureJitCompiled(Main.class, "$noinline$inlineMonomorphic");
+ ensureJitCompiled(Main.class, "$noinline$knownReceiverType");
+ if ($noinline$inlineMonomorphic(str) != 'x') {
+ throw new Error("Expected x");
+ }
+ if ($noinline$knownReceiverType() != 'b') {
+ throw new Error("Expected b");
+ }
+ if ($noinline$stringEquals("abc")) {
+ throw new Error("Expected false");
+ }
+ }
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ test();
+ }
+
+ static String str = "xyz";
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+}
diff --git a/test/667-jit-jni-stub/run b/test/667-jit-jni-stub/run
index 1877be482e..f235c6bc90 100755
--- a/test/667-jit-jni-stub/run
+++ b/test/667-jit-jni-stub/run
@@ -15,4 +15,5 @@
# limitations under the License.
# Disable AOT compilation of JNI stubs.
-${RUN} "${@}" --no-prebuild --no-dex2oat
+# Ensure this test is not subject to unexpected code collection.
+${RUN} "${@}" --no-prebuild --no-dex2oat --runtime-option -Xjitinitialsize:32M
diff --git a/test/711-checker-type-conversion/src/Main.java b/test/711-checker-type-conversion/src/Main.java
index 64ffcd2f1f..2c9c3a157e 100644
--- a/test/711-checker-type-conversion/src/Main.java
+++ b/test/711-checker-type-conversion/src/Main.java
@@ -22,20 +22,15 @@ public class Main {
}
}
- /// CHECK-START: byte Main.getByte1() instruction_simplifier (before)
+ /// CHECK-START: byte Main.getByte1() constant_folding (before)
/// CHECK: TypeConversion
/// CHECK: TypeConversion
/// CHECK: Add
/// CHECK: TypeConversion
- /// CHECK-START: byte Main.getByte1() instruction_simplifier (after)
+ /// CHECK-START: byte Main.getByte1() constant_folding (after)
/// CHECK-NOT: TypeConversion
- /// CHECK: Add
- /// CHECK: TypeConversion
-
- /// CHECK-START: byte Main.getByte1() instruction_simplifier$before_codegen (after)
/// CHECK-NOT: Add
- /// CHECK-NOT: TypeConversion
static byte getByte1() {
int i = -2;
@@ -43,20 +38,15 @@ public class Main {
return (byte)((byte)i + (byte)j);
}
- /// CHECK-START: byte Main.getByte2() instruction_simplifier (before)
+ /// CHECK-START: byte Main.getByte2() constant_folding (before)
/// CHECK: TypeConversion
/// CHECK: TypeConversion
/// CHECK: Add
/// CHECK: TypeConversion
- /// CHECK-START: byte Main.getByte2() instruction_simplifier (after)
+ /// CHECK-START: byte Main.getByte2() constant_folding (after)
/// CHECK-NOT: TypeConversion
- /// CHECK: Add
- /// CHECK: TypeConversion
-
- /// CHECK-START: byte Main.getByte2() instruction_simplifier$before_codegen (after)
/// CHECK-NOT: Add
- /// CHECK: TypeConversion
static byte getByte2() {
int i = -100;
@@ -64,8 +54,24 @@ public class Main {
return (byte)((byte)i + (byte)j);
}
+ /// CHECK-START: byte Main.getByte3() constant_folding (before)
+ /// CHECK: TypeConversion
+ /// CHECK: TypeConversion
+ /// CHECK: Add
+ /// CHECK: TypeConversion
+
+ /// CHECK-START: byte Main.getByte3() constant_folding (after)
+ /// CHECK-NOT: TypeConversion
+ /// CHECK-NOT: Add
+
+ static byte getByte3() {
+ long i = 0xabcdabcdabcdL;
+ return (byte)((byte)i + (byte)i);
+ }
+
public static void main(String[] args) {
assertByteEquals(getByte1(), (byte)-5);
assertByteEquals(getByte2(), (byte)(-201));
+ assertByteEquals(getByte3(), (byte)(0xcd + 0xcd));
}
}
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index e7503827f2..554b8a5429 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -874,7 +874,7 @@ def parse_option():
global run_all_configs
parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
- parser.add_argument('-t', '--test', dest='test', help='name of the test')
+ parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
parser.add_argument('-j', type=int, dest='n_thread')
parser.add_argument('--timeout', default=timeout, type=int, dest='timeout')
for variant in TOTAL_VARIANTS_SET:
@@ -906,10 +906,12 @@ def parse_option():
options = setup_env_for_build_target(target_config[options['build_target']],
parser, options)
- test = ''
+ tests = None
env.EXTRA_DISABLED_TESTS.update(set(options['skips']))
- if options['test']:
- test = parse_test_name(options['test'])
+ if options['tests']:
+ tests = set()
+ for test_name in options['tests']:
+ tests |= parse_test_name(test_name)
for variant_type in VARIANT_TYPE_DICT:
for variant in VARIANT_TYPE_DICT[variant_type]:
@@ -935,11 +937,11 @@ def parse_option():
if options['run_all']:
run_all_configs = True
- return test
+ return tests
def main():
gather_test_info()
- user_requested_test = parse_option()
+ user_requested_tests = parse_option()
setup_test_env()
if build:
build_targets = ''
@@ -956,8 +958,8 @@ def main():
build_command += ' dist'
if subprocess.call(build_command.split()):
sys.exit(1)
- if user_requested_test:
- test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_test,))
+ if user_requested_tests:
+ test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_tests,))
else:
test_runner_thread = threading.Thread(target=run_tests, args=(RUN_TEST_SET,))
test_runner_thread.daemon = True
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
index d7b1dd78d6..3bed29bafc 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
@@ -853,7 +853,7 @@ public class Parser {
}
public long getId() {
- return mBuffer.getInt();
+ return mBuffer.getInt() & 0xFFFFFFFFL;
}
public boolean getBool() {
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index abcc728890..bf1c9370b0 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -64,6 +64,11 @@
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
},
+{
+ description: "Tests for VMDebug functionality not implemented in the upstream libjdwp",
+ result: EXEC_FAILED,
+ name: "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest#testVMDebug"
+},
/* TODO Categorize these failures more. */
{
description: "Tests that fail on both ART and RI. These tests are likely incorrect",