summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/Android.bp1
-rw-r--r--compiler/optimizing/code_generator_arm64.cc20
-rw-r--r--compiler/optimizing/code_generator_arm64.h2
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc17
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h2
-rw-r--r--compiler/optimizing/code_generator_mips.cc17
-rw-r--r--compiler/optimizing/code_generator_mips.h2
-rw-r--r--compiler/optimizing/code_generator_mips64.cc19
-rw-r--r--compiler/optimizing/code_generator_mips64.h2
-rw-r--r--compiler/optimizing/code_generator_x86.cc27
-rw-r--r--compiler/optimizing/code_generator_x86.h5
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc15
-rw-r--r--compiler/optimizing/code_generator_x86_64.h4
-rw-r--r--compiler/optimizing/intrinsic_objects.cc120
-rw-r--r--compiler/optimizing/intrinsic_objects.h54
-rw-r--r--compiler/optimizing/intrinsics.cc276
-rw-r--r--compiler/optimizing/intrinsics.h45
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc25
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc25
-rw-r--r--compiler/optimizing/intrinsics_mips.cc37
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc33
-rw-r--r--compiler/optimizing/intrinsics_x86.cc49
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc39
-rw-r--r--compiler/optimizing/pc_relative_fixups_x86.cc24
-rw-r--r--dex2oat/linker/image_writer.cc46
-rw-r--r--test/717-integer-value-of/expected.txt1
-rw-r--r--test/717-integer-value-of/info.txt2
-rw-r--r--test/717-integer-value-of/src/Main.java134
-rw-r--r--test/knownfailures.json7
29 files changed, 774 insertions, 276 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp
index be963fbbdb..11521e68d0 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -63,6 +63,7 @@ art_cc_defaults {
"optimizing/inliner.cc",
"optimizing/instruction_builder.cc",
"optimizing/instruction_simplifier.cc",
+ "optimizing/intrinsic_objects.cc",
"optimizing/intrinsics.cc",
"optimizing/licm.cc",
"optimizing/linear_order.cc",
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7775eb942e..ad4b5cf339 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -27,6 +27,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
@@ -4787,6 +4788,25 @@ void CodeGeneratorARM64::EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_la
__ ldr(out, MemOperand(base, /* offset placeholder */ 0));
}
+void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
+ uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
+ EmitAdrpPlaceholder(adrp_label, reg.X());
+ // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
+ EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ Ldr(reg.W(), DeduplicateBootImageAddressLiteral(reinterpret_cast<uintptr_t>(address)));
+ }
+}
+
template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 8833075b0b..dc4964d9e4 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -636,6 +636,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Register out,
vixl::aarch64::Register base);
+ void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_offset);
+
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
void EmitThunkCode(const linker::LinkerPatch& patch,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 91c13154bb..6804340cd4 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -27,6 +27,7 @@
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics_arm_vixl.h"
#include "linker/linker_patch.h"
@@ -9536,6 +9537,22 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFil
});
}
+void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
+ EmitMovwMovtPlaceholder(labels, reg);
+ __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset);
+ __ Ldr(reg, DeduplicateBootImageAddressLiteral(dchecked_integral_cast<uint32_t>(address)));
+ }
+}
+
template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index d5b739bd7c..4893d3c25e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -600,6 +600,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
+ void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_offset);
+
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
void EmitThunkCode(const linker::LinkerPatch& patch,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 507db364b5..112eb517b5 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -26,6 +26,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips.h"
@@ -1739,6 +1740,22 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
// offset to `out` (e.g. lw, jialc, addiu).
}
+void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
+ __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ LoadConst32(reg, dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address)));
+ }
+}
+
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
const DexFile& dex_file,
dex::StringIndex string_index,
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 2e7c736dbd..9fdb385ce6 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -645,6 +645,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
Register out,
Register base);
+ void LoadBootImageAddress(Register reg, uint32_t boot_image_offset);
+
// The JitPatchInfo is used for JIT string and class loads.
struct JitPatchInfo {
JitPatchInfo(const DexFile& dex_file, uint64_t idx)
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 08a6512feb..9f863640d5 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -24,6 +24,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
@@ -1638,6 +1639,24 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
}
}
+void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+ // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+ __ Lwu(reg, AT, /* placeholder */ 0x5678);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset);
+ __ LoadLiteral(reg, kLoadDoubleword, DeduplicateBootImageAddressLiteral(address));
+ }
+}
+
Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 6e69e4611a..25c886f55d 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -615,6 +615,8 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
GpuRegister out,
PcRelativePatchInfo* info_low = nullptr);
+ void LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_offset);
+
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const Literal* literal,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9f42ac76f5..12872edd0d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -23,6 +23,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_x86.h"
@@ -2188,7 +2189,9 @@ void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeMethodLoadKind()) {
+ if (invoke->GetLocations()->CanCall() &&
+ invoke->HasPcRelativeMethodLoadKind() &&
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).IsInvalid()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
}
return;
@@ -4969,6 +4972,28 @@ Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
return &string_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86::LoadBootImageAddress(Register reg,
+ uint32_t boot_image_offset,
+ HInvokeStaticOrDirect* invoke) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ HX86ComputeBaseMethodAddress* method_address =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ DCHECK(method_address != nullptr);
+ Register method_address_reg =
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
+ __ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
+ RecordBootImageRelRoPatch(method_address, boot_image_offset);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+ }
+}
+
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 6c76e27d35..7d18e2b4f3 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -426,6 +426,11 @@ class CodeGeneratorX86 : public CodeGenerator {
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
+
+ void LoadBootImageAddress(Register reg,
+ uint32_t boot_image_offset,
+ HInvokeStaticOrDirect* invoke);
+
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 05194b15d5..9631c15668 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -22,6 +22,7 @@
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_x86_64.h"
@@ -1107,6 +1108,20 @@ Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
return &string_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_offset) {
+ DCHECK(!GetCompilerOptions().IsBootImage());
+ if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ RecordBootImageRelRoPatch(boot_image_offset);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_offset;
+ __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+ }
+}
+
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9a4c53b524..cf862d3f34 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -429,7 +429,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_offset);
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
@@ -566,6 +566,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
+ void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+
// Assign a 64 bit constant to an address.
void MoveInt64ToAddress(const Address& addr_low,
const Address& addr_high,
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
new file mode 100644
index 0000000000..3c20ad698b
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsic_objects.h"
+
+#include "art_field-inl.h"
+#include "base/logging.h"
+#include "class_root.h"
+#include "handle.h"
+#include "obj_ptr-inl.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
+ ClassLinker* class_linker)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+ if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
+ return nullptr;
+ }
+ ArtField* cache_field =
+ integer_cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ CHECK(cache_field != nullptr);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> integer_cache =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+ cache_field->GetObject(integer_cache_class));
+ CHECK(integer_cache != nullptr);
+ return integer_cache;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::AllocateBootImageLiveObjects(
+ Thread* self,
+ ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // The objects used for the Integer.valueOf() intrinsic must remain live even if references
+ // to them are removed using reflection. Image roots are not accessible through reflection,
+ // so the array we construct here shall keep them alive.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::Object>> integer_cache =
+ hs.NewHandle(LookupIntegerCache(self, class_linker));
+ size_t live_objects_size =
+ (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
+ ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects =
+ mirror::ObjectArray<mirror::Object>::Alloc(
+ self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker), live_objects_size);
+ int32_t index = 0;
+ if (integer_cache != nullptr) {
+ live_objects->Set(index++, integer_cache.Get());
+ for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
+ live_objects->Set(index++, integer_cache->Get(i));
+ }
+ }
+ CHECK_EQ(index, live_objects->GetLength());
+
+ if (kIsDebugBuild && integer_cache != nullptr) {
+ CHECK_EQ(integer_cache.Get(), GetIntegerValueOfCache(live_objects));
+ for (int32_t i = 0, len = integer_cache->GetLength(); i != len; ++i) {
+ CHECK_EQ(integer_cache->GetWithoutChecks(i), GetIntegerValueOfObject(live_objects, i));
+ }
+ }
+ return live_objects;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::GetIntegerValueOfCache(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+ DCHECK(boot_image_live_objects != nullptr);
+ if (boot_image_live_objects->GetLength() == 0u) {
+ return nullptr; // No intrinsic objects.
+ }
+ // No need for read barrier for boot image object or for verifying the value that was just stored.
+ ObjPtr<mirror::Object> result =
+ boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(0);
+ DCHECK(result != nullptr);
+ DCHECK(result->IsObjectArray());
+ DCHECK(result->GetClass()->DescriptorEquals("[Ljava/lang/Integer;"));
+ return ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(result);
+}
+
+ObjPtr<mirror::Object> IntrinsicObjects::GetIntegerValueOfObject(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ uint32_t index) {
+ DCHECK(boot_image_live_objects != nullptr);
+ DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+ DCHECK_LT(index,
+ static_cast<uint32_t>(GetIntegerValueOfCache(boot_image_live_objects)->GetLength()));
+
+ // No need for read barrier for boot image object or for verifying the value that was just stored.
+ ObjPtr<mirror::Object> result =
+ boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(
+ /* skip the IntegerCache.cache */ 1u + index);
+ DCHECK(result != nullptr);
+ DCHECK(result->GetClass()->DescriptorEquals("Ljava/lang/Integer;"));
+ return result;
+}
+
+MemberOffset IntrinsicObjects::GetIntegerValueOfArrayDataOffset(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+ DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+ MemberOffset result = mirror::ObjectArray<mirror::Object>::OffsetOfElement(1u);
+ DCHECK_EQ(GetIntegerValueOfObject(boot_image_live_objects, 0u),
+ (boot_image_live_objects
+ ->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(result)));
+ return result;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/intrinsic_objects.h b/compiler/optimizing/intrinsic_objects.h
new file mode 100644
index 0000000000..ffadd03428
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+
+#include "base/mutex.h"
+
+namespace art {
+
+class ClassLinker;
+template <class MirrorType> class ObjPtr;
+class MemberOffset;
+class Thread;
+
+namespace mirror {
+class Object;
+template <class T> class ObjectArray;
+} // namespace mirror
+
+class IntrinsicObjects {
+ public:
+ static ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
+ Thread* self,
+ ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Functions for retrieving data for Integer.valueOf().
+ static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerValueOfCache(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::Object> GetIntegerValueOfObject(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
+ static MemberOffset GetIntegerValueOfArrayDataOffset(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index f0c91f3ba0..81b2b7b581 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -21,10 +21,12 @@
#include "base/utils.h"
#include "class_linker.h"
#include "dex/invoke_type.h"
-#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
-#include "mirror/dex_cache-inl.h"
+#include "gc/space/image_space.h"
+#include "image-inl.h"
+#include "intrinsic_objects.h"
#include "nodes.h"
+#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -221,105 +223,223 @@ std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
return os;
}
+static ObjPtr<mirror::ObjectArray<mirror::Object>> GetBootImageLiveObjects()
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ const std::vector<gc::space::ImageSpace*>& boot_image_spaces = heap->GetBootImageSpaces();
+ DCHECK(!boot_image_spaces.empty());
+ const ImageHeader& main_header = boot_image_spaces[0]->GetImageHeader();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+ main_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kBootImageLiveObjects));
+ DCHECK(boot_image_live_objects != nullptr);
+ DCHECK(heap->ObjectIsInBootImageSpace(boot_image_live_objects));
+ return boot_image_live_objects;
+}
+
+static bool CheckIntegerCache(Thread* self,
+ ClassLinker* class_linker,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(boot_image_cache != nullptr);
+
+ // Since we have a cache in the boot image, both java.lang.Integer and
+ // java.lang.Integer$IntegerCache must be initialized in the boot image.
+ ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+ DCHECK(cache_class != nullptr);
+ DCHECK(cache_class->IsInitialized());
+ ObjPtr<mirror::Class> integer_class =
+ class_linker->LookupClass(self, "Ljava/lang/Integer;", /* class_loader */ nullptr);
+ DCHECK(integer_class != nullptr);
+ DCHECK(integer_class->IsInitialized());
+
+ // Check that the current cache is the same as the `boot_image_cache`.
+ ArtField* cache_field = cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ DCHECK(cache_field != nullptr);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(cache_class));
+ if (current_cache != boot_image_cache) {
+ return false; // Messed up IntegerCache.cache.
+ }
+
+ // Check that the range matches the boot image cache length.
+ ArtField* low_field = cache_class->FindDeclaredStaticField("low", "I");
+ DCHECK(low_field != nullptr);
+ int32_t low = low_field->GetInt(cache_class);
+ ArtField* high_field = cache_class->FindDeclaredStaticField("high", "I");
+ DCHECK(high_field != nullptr);
+ int32_t high = high_field->GetInt(cache_class);
+ if (boot_image_cache->GetLength() != high - low + 1) {
+ return false; // Messed up IntegerCache.low or IntegerCache.high.
+ }
+
+ // Check that the elements match the boot image intrinsic objects and check their values as well.
+ ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+ DCHECK(value_field != nullptr);
+ for (int32_t i = 0, len = boot_image_cache->GetLength(); i != len; ++i) {
+ ObjPtr<mirror::Object> boot_image_object =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, i);
+ DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boot_image_object));
+ // No need for read barrier for comparison with a boot image object.
+ ObjPtr<mirror::Object> current_object =
+ boot_image_cache->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(i);
+ if (boot_image_object != current_object) {
+ return false; // Messed up IntegerCache.cache[i].
+ }
+ if (value_field->GetInt(boot_image_object) != low + i) {
+ return false; // Messed up IntegerCache.cache[i].value.
+ }
+ }
+
+ return true;
+}
+
void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
CodeGenerator* codegen,
Location return_location,
Location first_argument_location) {
- if (Runtime::Current()->IsAotCompiler()) {
- if (codegen->GetCompilerOptions().IsBootImage() ||
- codegen->GetCompilerOptions().GetCompilePic()) {
- // TODO(ngeoffray): Support boot image compilation.
- return;
- }
- }
-
- IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
-
- // Most common case is that we have found all we needed (classes are initialized
- // and in the boot image). Bail if not.
- if (info.integer_cache == nullptr ||
- info.integer == nullptr ||
- info.cache == nullptr ||
- info.value_offset == 0 ||
- // low and high cannot be 0, per the spec.
- info.low == 0 ||
- info.high == 0) {
- LOG(INFO) << "Integer.valueOf will not be optimized";
+ if (codegen->GetCompilerOptions().IsBootImage()) {
+ // TODO: Implement for boot image. We need access to CompilerDriver::IsImageClass()
+ // to verify that the IntegerCache shall be in the image.
return;
}
+ Runtime* runtime = Runtime::Current();
+ gc::Heap* heap = runtime->GetHeap();
+ if (heap->GetBootImageSpaces().empty()) {
+ return; // Running without boot image, cannot use required boot image objects.
+ }
// The intrinsic will call if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
- invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
- if (!invoke->InputAt(0)->IsConstant()) {
- locations->SetInAt(0, Location::RequiresRegister());
+ LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly;
+ {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> cache =
+ IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects);
+ if (cache == nullptr) {
+ return; // No cache in the boot image.
+ }
+ if (runtime->UseJitCompilation()) {
+ if (!CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache)) {
+ return; // The cache was somehow messed up, probably by using reflection.
+ }
+ } else {
+ DCHECK(runtime->IsAotCompiler());
+ DCHECK(CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache));
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ // Retrieve the `value` from the lowest cached Integer.
+ ObjPtr<mirror::Object> low_integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+ ObjPtr<mirror::Class> integer_class =
+ low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+ DCHECK(value_field != nullptr);
+ int32_t low = value_field->GetInt(low_integer);
+ if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) <
+ static_cast<uint32_t>(cache->GetLength())) {
+ // No call, we shall use direct pointer to the Integer object. Note that we cannot
+ // do this for JIT as the "low" can change through reflection before emitting the code.
+ call_kind = LocationSummary::kNoCall;
+ }
+ }
+ }
+ }
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke, call_kind, kIntrinsified);
+ if (call_kind == LocationSummary::kCallOnMainOnly) {
+ locations->SetInAt(0, Location::RegisterOrConstant(invoke->InputAt(0)));
+ locations->AddTemp(first_argument_location);
+ locations->SetOut(return_location);
+ } else {
+ locations->SetInAt(0, Location::ConstantLocation(invoke->InputAt(0)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister());
}
- locations->AddTemp(first_argument_location);
- locations->SetOut(return_location);
}
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
+static int32_t GetIntegerCacheLowFromIntegerCache(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> cache_class = Runtime::Current()->GetClassLinker()->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader */ nullptr);
+ DCHECK(cache_class != nullptr);
+ DCHECK(cache_class->IsInitialized());
+ ArtField* low_field = cache_class->FindDeclaredStaticField("low", "I");
+ DCHECK(low_field != nullptr);
+ return low_field->GetInt(cache_class);
+}
+
+static uint32_t CalculateBootImageOffset(ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(heap->ObjectIsInBootImageSpace(object));
+ return reinterpret_cast<const uint8_t*>(object.Ptr()) - heap->GetBootImageSpaces()[0]->Begin();
+}
+
+inline IntrinsicVisitor::IntegerValueOfInfo::IntegerValueOfInfo()
+ : integer_boot_image_offset(0u),
+ value_offset(0),
+ low(0),
+ length(0u),
+ value_boot_image_offset(0u) {}
+
+IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo(HInvoke* invoke) {
// Note that we could cache all of the data looked up here. but there's no good
// location for it. We don't want to add it to WellKnownClasses, to avoid creating global
// jni values. Adding it as state to the compiler singleton seems like wrong
// separation of concerns.
// The need for this data should be pretty rare though.
- // The most common case is that the classes are in the boot image and initialized,
- // which is easy to generate code for. We bail if not.
+ // Note that at this point we can no longer abort the code generation. Therefore,
+ // we need to provide data that shall not lead to a crash even if the fields were
+ // modified through reflection since ComputeIntegerValueOfLocations() when JITting.
+
+ Runtime* runtime = Runtime::Current();
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- gc::Heap* heap = runtime->GetHeap();
- IntegerValueOfInfo info;
- info.integer_cache = class_linker->LookupClass(self,
- "Ljava/lang/Integer$IntegerCache;",
- /* class_loader */ nullptr).Ptr();
- if (info.integer_cache == nullptr || !info.integer_cache->IsInitialized()) {
- // Optimization only works if the class is initialized.
- return info;
- }
- if (!heap->ObjectIsInBootImageSpace(info.integer_cache)) {
- // Optimization only works if the class is in the boot image.
- // TODO: Implement the intrinsic for boot image compilation.
- return info;
- }
- info.integer =
- class_linker->LookupClass(self, "Ljava/lang/Integer;", /* class_loader */ nullptr).Ptr();
- DCHECK(info.integer != nullptr);
- DCHECK(info.integer->IsInitialized()); // Must be initialized since IntegerCache is initialized.
- if (!heap->ObjectIsInBootImageSpace(info.integer)) {
- // Optimization only works if the class is in the boot image.
- return info;
- }
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+ ObjPtr<mirror::Object> low_integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+ ObjPtr<mirror::Class> integer_class = low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ ArtField* value_field = integer_class->FindDeclaredInstanceField("value", "I");
+ DCHECK(value_field != nullptr);
- ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
- CHECK(field != nullptr);
- info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
- field->GetObject(info.integer_cache).Ptr());
- if (info.cache == nullptr) {
- return info; // Did someone mess up the IntegerCache using reflection?
+ IntegerValueOfInfo info;
+ info.integer_boot_image_offset = CalculateBootImageOffset(integer_class);
+ info.value_offset = value_field->GetOffset().Uint32Value();
+ if (runtime->UseJitCompilation()) {
+ // Use the current `IntegerCache.low` for JIT to avoid truly surprising behavior if the
+ // code messes up the `value` field in the lowest cached Integer using reflection.
+ info.low = GetIntegerCacheLowFromIntegerCache(self);
+ } else {
+ // For AOT, the `low_integer->value` should be the same as `IntegerCache.low`.
+ info.low = value_field->GetInt(low_integer);
+ DCHECK_EQ(info.low, GetIntegerCacheLowFromIntegerCache(self));
}
-
- if (!heap->ObjectIsInBootImageSpace(info.cache)) {
- // Optimization only works if the object is in the boot image.
- return info;
+ // Do not look at `IntegerCache.high`, use the immutable length of the cache array instead.
+ info.length = dchecked_integral_cast<uint32_t>(
+ IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects)->GetLength());
+
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
+ if (index < static_cast<uint32_t>(info.length)) {
+ ObjPtr<mirror::Object> integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, index);
+ DCHECK(runtime->GetHeap()->ObjectIsInBootImageSpace(integer));
+ info.value_boot_image_offset = CalculateBootImageOffset(integer);
+ } else {
+ info.value_boot_image_offset = 0u; // Not in the cache.
+ }
+ } else {
+ info.array_data_boot_image_offset =
+ CalculateBootImageOffset(boot_image_live_objects) +
+ IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects).Uint32Value();
}
- field = info.integer->FindDeclaredInstanceField("value", "I");
- CHECK(field != nullptr);
- info.value_offset = field->GetOffset().Int32Value();
-
- field = info.integer_cache->FindDeclaredStaticField("low", "I");
- CHECK(field != nullptr);
- info.low = field->GetInt(info.integer_cache);
-
- field = info.integer_cache->FindDeclaredStaticField("high", "I");
- CHECK(field != nullptr);
- info.high = field->GetInt(info.integer_cache);
-
- DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
return info;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 30cffac015..f2b78239d6 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -126,33 +126,32 @@ class IntrinsicVisitor : public ValueObject {
Location return_location,
Location first_argument_location);
- // Temporary data structure for holding Integer.valueOf useful data. We only
- // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
- // mirror::Class pointers in this structure.
+ // Temporary data structure for holding Integer.valueOf data for generating code.
+ // We only use it if the boot image contains the IntegerCache objects.
struct IntegerValueOfInfo {
- IntegerValueOfInfo()
- : integer_cache(nullptr),
- integer(nullptr),
- cache(nullptr),
- low(0),
- high(0),
- value_offset(0) {}
-
- // The java.lang.IntegerCache class.
- mirror::Class* integer_cache;
- // The java.lang.Integer class.
- mirror::Class* integer;
- // Value of java.lang.IntegerCache#cache.
- mirror::ObjectArray<mirror::Object>* cache;
- // Value of java.lang.IntegerCache#low.
+ IntegerValueOfInfo();
+
+ // Boot image offset of java.lang.Integer for allocating an instance.
+ uint32_t integer_boot_image_offset;
+ // Offset of the Integer.value field for initializing a newly allocated instance.
+ uint32_t value_offset;
+ // The low value in the cache.
int32_t low;
- // Value of java.lang.IntegerCache#high.
- int32_t high;
- // The offset of java.lang.Integer.value.
- int32_t value_offset;
+ // The length of the cache array.
+ uint32_t length;
+
+ union {
+ // Boot image offset of the target Integer object for constant input in the cache range.
+ // If the input is out of range, this is set to 0u and the code must allocate a new Integer.
+ uint32_t value_boot_image_offset;
+
+ // Boot image offset of the cache array data used for non-constant input in the cache range.
+ // If the input is out of range, the code must allocate a new Integer.
+ uint32_t array_data_boot_image_offset;
+ };
};
- static IntegerValueOfInfo ComputeIntegerValueOfInfo();
+ static IntegerValueOfInfo ComputeIntegerValueOfInfo(HInvoke* invoke);
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c3d643a7d1..b4890e4850 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2791,7 +2791,7 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = GetVIXLAssembler();
@@ -2802,20 +2802,15 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ Mov(temp.W(), value);
@@ -2825,16 +2820,15 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
Register in = RegisterFrom(locations->InAt(0), DataType::Type::kInt32);
// Check bounds of our cache.
__ Add(out.W(), in.W(), -info.low);
- __ Cmp(out.W(), info.high - info.low + 1);
+ __ Cmp(out.W(), info.length);
vixl::aarch64::Label allocate, done;
__ B(&allocate, hs);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
MemOperand source = HeapOperand(
temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference));
codegen_->Load(DataType::Type::kReference, out, source);
@@ -2842,8 +2836,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ Str(in.W(), HeapOperand(out.W(), info.value_offset));
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index fecf1ccbfa..0835060f5c 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2940,7 +2940,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
ArmVIXLAssembler* const assembler = GetAssembler();
@@ -2951,20 +2951,15 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
vixl32::Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ Mov(temp, value);
@@ -2974,23 +2969,21 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
vixl32::Register in = RegisterFrom(locations->InAt(0));
// Check bounds of our cache.
__ Add(out, in, -info.low);
- __ Cmp(out, info.high - info.low + 1);
+ __ Cmp(out, info.length);
vixl32::Label allocate, done;
__ B(hs, &allocate, /* is_far_target */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_offset);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
assembler->MaybeUnpoisonHeapReference(out);
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index ae248a3e5c..a3eb42b4b7 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2601,7 +2601,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
MipsAssembler* assembler = GetAssembler();
InstructionCodeGeneratorMIPS* icodegen =
@@ -2609,22 +2609,18 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
Register out = locations->Out().AsRegister<Register>();
InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadConst32(out, address);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
@@ -2633,27 +2629,23 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
Register in = locations->InAt(0).AsRegister<Register>();
MipsLabel allocate, done;
- int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
- // Is (info.low <= in) && (in <= info.high)?
__ Addiu32(out, in, -info.low);
- // As unsigned quantities is out < (info.high - info.low + 1)?
- if (IsInt<16>(count)) {
- __ Sltiu(AT, out, count);
+      // As unsigned quantities, is out < info.length?
+ if (IsUint<15>(info.length)) {
+ __ Sltiu(AT, out, info.length);
} else {
- __ LoadConst32(AT, count);
+ __ LoadConst32(AT, info.length);
__ Sltu(AT, out, AT);
}
- // Branch if out >= (info.high - info.low + 1).
- // This means that "in" is outside of the range [info.low, info.high].
+ // Branch if out >= info.length. This means that "in" is outside of the valid range.
__ Beqz(AT, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadConst32(TMP, data_offset + address);
+ codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_offset);
__ ShiftAndAdd(out, out, TMP, TIMES_4);
__ Lw(out, out, 0);
__ MaybeUnpoisonHeapReference(out);
@@ -2661,8 +2653,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst32(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 9a9ae714bc..510040bf38 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2267,7 +2267,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
Mips64Assembler* assembler = GetAssembler();
InstructionCodeGeneratorMIPS64* icodegen =
@@ -2275,22 +2275,18 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
InvokeRuntimeCallingConvention calling_convention;
+ GpuRegister argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadConst64(out, address);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
@@ -2299,22 +2295,18 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
Mips64Label allocate, done;
- int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
- // Is (info.low <= in) && (in <= info.high)?
__ Addiu32(out, in, -info.low);
- // As unsigned quantities is out < (info.high - info.low + 1)?
- __ LoadConst32(AT, count);
- // Branch if out >= (info.high - info.low + 1).
- // This means that "in" is outside of the range [info.low, info.high].
+    // As unsigned quantities, is out < info.length?
+ __ LoadConst32(AT, info.length);
+    // Branch if out >= info.length. This means that "in" is outside of the valid range.
__ Bgeuc(out, AT, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadConst64(TMP, data_offset + address);
+ codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_offset);
__ Dlsa(out, out, TMP, TIMES_4);
__ Lwu(out, out, 0);
__ MaybeUnpoisonHeapReference(out);
@@ -2322,8 +2314,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst64(calling_convention.GetRegisterAt(0), address);
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ StoreToOffset(kStoreWord, in, out, info.value_offset);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f84a33bb8e..645ca49645 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2851,57 +2851,76 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+ DCHECK(invoke->IsInvokeStaticOrDirect());
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
invoke,
codegen_,
Location::RegisterLocation(EAX),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+
+ LocationSummary* locations = invoke->GetLocations();
+ if (locations != nullptr) {
+ HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+ if (invoke_static_or_direct->HasSpecialInput() &&
+ invoke->InputAt(invoke_static_or_direct->GetSpecialInputIndex())
+ ->IsX86ComputeBaseMethodAddress()) {
+ locations->SetInAt(invoke_static_or_direct->GetSpecialInputIndex(),
+ Location::RequiresRegister());
+ }
+ }
}
void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ DCHECK(invoke->IsInvokeStaticOrDirect());
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
X86Assembler* assembler = GetAssembler();
Register out = locations->Out().AsRegister<Register>();
InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(address));
+ codegen_->LoadBootImageAddress(
+ out, info.value_boot_image_offset, invoke->AsInvokeStaticOrDirect());
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->LoadBootImageAddress(
+ argument, info.integer_boot_image_offset, invoke->AsInvokeStaticOrDirect());
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), Immediate(value));
}
} else {
+ DCHECK(locations->CanCall());
Register in = locations->InAt(0).AsRegister<Register>();
// Check bounds of our cache.
__ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
+ __ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ movl(out, Address(out, TIMES_4, data_offset + address));
+ constexpr size_t kElementSize = sizeof(mirror::HeapReference<mirror::Object>);
+ uint32_t mid_array_boot_image_offset =
+ info.array_data_boot_image_offset - info.low * kElementSize;
+ codegen_->LoadBootImageAddress(
+ out, mid_array_boot_image_offset, invoke->AsInvokeStaticOrDirect());
+ DCHECK_NE(out, in);
+ static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Check heap reference size.");
+ __ movl(out, Address(out, in, TIMES_4, 0));
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
+ codegen_->LoadBootImageAddress(
+ argument, info.integer_boot_image_offset, invoke->AsInvokeStaticOrDirect());
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), in);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7627dc9490..6d85f3a1ac 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2660,56 +2660,47 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke);
LocationSummary* locations = invoke->GetLocations();
X86_64Assembler* assembler = GetAssembler();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
InvokeRuntimeCallingConvention calling_convention;
- if (invoke->InputAt(0)->IsConstant()) {
+ CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
+ if (invoke->InputAt(0)->IsIntConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (info.value_boot_image_offset != 0u) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(static_cast<int32_t>(address)));
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_offset);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(argument, Immediate(static_cast<int32_t>(address)));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), Immediate(value));
}
} else {
+ DCHECK(locations->CanCall());
CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
// Check bounds of our cache.
__ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
+ __ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- if (data_offset + address <= std::numeric_limits<int32_t>::max()) {
- __ movl(out, Address(out, TIMES_4, data_offset + address));
- } else {
- CpuRegister temp = CpuRegister(calling_convention.GetRegisterAt(0));
- __ movl(temp, Immediate(static_cast<int32_t>(data_offset + address)));
- __ movl(out, Address(temp, out, TIMES_4, 0));
- }
+ DCHECK_NE(out.AsRegister(), argument.AsRegister());
+ codegen_->LoadBootImageAddress(argument, info.array_data_boot_image_offset);
+ static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Check heap reference size.");
+ __ movl(out, Address(argument, out, TIMES_4, 0));
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(argument, Immediate(static_cast<int32_t>(address)));
+ codegen_->LoadBootImageAddress(argument, info.integer_boot_image_offset);
codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
__ movl(Address(out, info.value_offset), in);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 9049457da5..05ec765b19 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -193,18 +193,19 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
void HandleInvoke(HInvoke* invoke) {
- // If this is an invoke-static/-direct with PC-relative dex cache array
- // addressing, we need the PC-relative address base.
HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
- // We can't add a pointer to the constant area if we already have a current
- // method pointer. This may arise when sharpening doesn't remove the current
- // method pointer from the invoke.
- if (invoke_static_or_direct != nullptr &&
- invoke_static_or_direct->HasCurrentMethodInput()) {
+
+ // We can't add the method address if we already have a current method pointer.
+ // This may arise when sharpening doesn't remove the current method pointer from the invoke.
+ if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasCurrentMethodInput()) {
+ // Note: This happens only for recursive calls (including compiling an intrinsic
+ // by faking a call to itself; we use kRuntimeCall for this case).
DCHECK(!invoke_static_or_direct->HasPcRelativeMethodLoadKind());
return;
}
+ // If this is an invoke-static/-direct with PC-relative addressing (within boot image
+ // or using .bss or .data.bimg.rel.ro), we need the PC-relative address base.
bool base_added = false;
if (invoke_static_or_direct != nullptr &&
invoke_static_or_direct->HasPcRelativeMethodLoadKind() &&
@@ -224,7 +225,6 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- // These intrinsics need the constant area.
switch (invoke->GetIntrinsic()) {
case Intrinsics::kMathAbsDouble:
case Intrinsics::kMathAbsFloat:
@@ -235,7 +235,15 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
LOG(FATAL) << "Unreachable min/max/abs: intrinsics should have been lowered "
"to IR nodes by instruction simplifier";
UNREACHABLE();
+ case Intrinsics::kIntegerValueOf:
+        // This intrinsic can be call-free if it loads the address of the boot image object.
+ // If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
+ if (!codegen_->GetCompilerOptions().GetCompilePic()) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Intrinsics::kMathRoundFloat:
+ // This intrinsic needs the constant area.
if (!base_added) {
DCHECK(invoke_static_or_direct != nullptr);
DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 8840dfab84..a61ad8f7c2 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -52,7 +52,6 @@
#include "handle_scope-inl.h"
#include "image.h"
#include "imt_conflict_table.h"
-#include "subtype_check.h"
#include "jni/jni_internal.h"
#include "linear_alloc.h"
#include "lock_word.h"
@@ -71,8 +70,10 @@
#include "oat.h"
#include "oat_file.h"
#include "oat_file_manager.h"
+#include "optimizing/intrinsic_objects.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
+#include "subtype_check.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "well_known_classes.h"
@@ -1332,47 +1333,6 @@ ObjPtr<mirror::ObjectArray<mirror::Object>> ImageWriter::CollectDexCaches(Thread
return dex_caches;
}
-static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
- ClassLinker* class_linker)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
- self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
- if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
- return nullptr;
- }
- ArtField* cache_field =
- integer_cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
- CHECK(cache_field != nullptr);
- ObjPtr<ObjectArray<mirror::Object>> integer_cache =
- ObjPtr<ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(integer_cache_class));
- CHECK(integer_cache != nullptr);
- return integer_cache;
-}
-
-static ObjPtr<mirror::ObjectArray<mirror::Object>> CollectBootImageLiveObjects(
- Thread* self,
- ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
- // The objects used for the Integer.valueOf() intrinsic must remain live even if references
- // to them are removed using reflection. Image roots are not accessible through reflection,
- // so the array we construct here shall keep them alive.
- StackHandleScope<1> hs(self);
- Handle<ObjectArray<mirror::Object>> integer_cache =
- hs.NewHandle(LookupIntegerCache(self, class_linker));
- size_t live_objects_size =
- (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
- ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects = ObjectArray<Object>::Alloc(
- self, GetClassRoot<ObjectArray<Object>>(class_linker), live_objects_size);
- int32_t index = 0;
- if (integer_cache != nullptr) {
- live_objects->Set(index++, integer_cache.Get());
- for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
- live_objects->Set(index++, integer_cache->Get(i));
- }
- }
- CHECK_EQ(index, live_objects->GetLength());
- return live_objects;
-}
-
ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -1397,7 +1357,7 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
runtime->GetPreAllocatedNoClassDefFoundError());
if (!compile_app_image_) {
ObjPtr<ObjectArray<Object>> boot_image_live_objects =
- CollectBootImageLiveObjects(self, class_linker);
+ IntrinsicObjects::AllocateBootImageLiveObjects(self, class_linker);
image_roots->Set<false>(ImageHeader::kBootImageLiveObjects, boot_image_live_objects);
}
for (int32_t i = 0, num = ImageHeader::NumberOfImageRoots(compile_app_image_); i != num; ++i) {
diff --git a/test/717-integer-value-of/expected.txt b/test/717-integer-value-of/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/717-integer-value-of/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/717-integer-value-of/info.txt b/test/717-integer-value-of/info.txt
new file mode 100644
index 0000000000..b65d679ab1
--- /dev/null
+++ b/test/717-integer-value-of/info.txt
@@ -0,0 +1,2 @@
+Regression test for a JIT crash when compiling the Integer.valueOf() intrinsic after
+having messed up the IntegerCache through reflection.
diff --git a/test/717-integer-value-of/src/Main.java b/test/717-integer-value-of/src/Main.java
new file mode 100644
index 0000000000..557b65c1c7
--- /dev/null
+++ b/test/717-integer-value-of/src/Main.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ if (!isDalvik) {
+ // This test is ART-specific. Just fake the expected output.
+ System.out.println("JNI_OnLoad called");
+ return;
+ }
+ System.loadLibrary(args[0]);
+ if (!hasJit()) {
+ return;
+ }
+ testValueOfArg();
+ testValueOfConst();
+ }
+
+ public static void testValueOfArg() throws Exception {
+ final VolatileFlag start_end = new VolatileFlag();
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Class<?> integerCacheClass = Class.forName("java.lang.Integer$IntegerCache");
+ Field cacheField = integerCacheClass.getDeclaredField("cache");
+ cacheField.setAccessible(true);
+
+ Integer[] cache = (Integer[]) cacheField.get(integerCacheClass);
+ Integer[] alt_cache = new Integer[cache.length];
+ System.arraycopy(cache, 0, alt_cache, 0, cache.length);
+
+ // Let the main thread know that everything is set up.
+ synchronized (start_end) {
+ start_end.notify();
+ }
+ while (!start_end.flag) {
+ cacheField.set(integerCacheClass, alt_cache);
+ cacheField.set(integerCacheClass, cache);
+ }
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+ };
+ synchronized (start_end) {
+ t.start();
+ start_end.wait(); // Wait for the thread to start.
+ }
+ // Previously, this may have used an invalid IntegerValueOfInfo (because of seeing
+ // the `alt_cache` which is not in the boot image) when asked to emit code after
+ // using a valid info (using `cache`) when requesting locations.
+ ensureJitCompiled(Main.class, "getAsInteger");
+
+ start_end.flag = true;
+ t.join();
+
+ Runtime.getRuntime().gc(); // Collect the `alt_cache`.
+
+ // If `getAsInteger()` was miscompiled, it shall try to retrieve an Integer reference
+ // from a collected array (low = 0, high = 0 means that this happens only for value 0),
+ // reading from a bogus location. Depending on the GC type, this bogus memory access may
+ // yield SIGSEGV or `null` or even a valid reference.
+ Integer new0 = getAsInteger(0);
+ int value = (int) new0;
+
+ if (value != 0) {
+ throw new Error("value is " + value);
+ }
+ }
+
+ public static void testValueOfConst() throws Exception {
+ Class<?> integerCacheClass = Class.forName("java.lang.Integer$IntegerCache");
+ Field cacheField = integerCacheClass.getDeclaredField("cache");
+ cacheField.setAccessible(true);
+ Field lowField = integerCacheClass.getDeclaredField("low");
+ lowField.setAccessible(true);
+
+ Integer[] cache = (Integer[]) cacheField.get(integerCacheClass);
+ int low = (int) lowField.get(integerCacheClass);
+ Integer old42 = cache[42 - low];
+ cache[42 - low] = new Integer(42);
+
+ // This used to hit
+ // DCHECK(boxed != nullptr &&
+ // Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ // when compiling the intrinsic.
+ ensureJitCompiled(Main.class, "get42AsInteger");
+
+ cache[42 - low] = old42;
+ Runtime.getRuntime().gc();
+ Integer new42 = get42AsInteger();
+
+ // If the DCHECK() was removed, MterpInvokeVirtualQuick() used to crash here.
+ // (Note: Our fault handler on x86-64 then also crashed.)
+ int value = (int) new42;
+
+ if (value != (int) old42) {
+ throw new Error("value is " + value);
+ }
+ }
+
+ private static class VolatileFlag {
+ public volatile boolean flag = false;
+ }
+
+ public static Integer get42AsInteger() {
+ return Integer.valueOf(42);
+ }
+
+ public static Integer getAsInteger(int value) {
+ return Integer.valueOf(value);
+ }
+
+ private native static boolean hasJit();
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+
+ private final static boolean isDalvik = System.getProperty("java.vm.name").equals("Dalvik");
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index c680f53315..f6ae0be5c0 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -532,13 +532,6 @@
"bug": "b/33650497"
},
{
- "tests": "640-checker-integer-valueof",
- "description": [
- "The java.lang.Integer.valueOf intrinsic is not supported in PIC mode."
- ],
- "variant": "optimizing & pictest | speed-profile & pictest"
- },
- {
"tests": "202-thread-oome",
"description": "ASAN aborts when large thread stacks are requested.",
"variant": "host",