Diffstat (limited to 'compiler/optimizing/code_generator_arm64.cc')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  658
1 file changed, 576 insertions, 82 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 60f8f98757d..d4cfab82de3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -30,7 +30,6 @@
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_arm64.h"
-#include "linker/arm64/relative_patcher_arm64.h"
 #include "linker/linker_patch.h"
 #include "lock_word.h"
 #include "mirror/array-inl.h"
@@ -78,6 +77,7 @@ using helpers::OutputFPRegister;
 using helpers::OutputRegister;
 using helpers::QRegisterFrom;
 using helpers::RegisterFrom;
+using helpers::SRegisterFrom;
 using helpers::StackOperandFrom;
 using helpers::VIXLRegCodeFromART;
 using helpers::WRegisterFrom;
@@ -1424,6 +1424,62 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
   __ FinalizeCode();
   CodeGenerator::Finalize(allocator);
+
+  // Verify Baker read barrier linker patches.
+  if (kIsDebugBuild) {
+    ArrayRef<const uint8_t> code = allocator->GetMemory();
+    for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
+      DCHECK(info.label.IsBound());
+      uint32_t literal_offset = info.label.GetLocation();
+      DCHECK_ALIGNED(literal_offset, 4u);
+
+      auto GetInsn = [&code](uint32_t offset) {
+        DCHECK_ALIGNED(offset, 4u);
+        return
+            (static_cast<uint32_t>(code[offset + 0]) << 0) +
+            (static_cast<uint32_t>(code[offset + 1]) << 8) +
+            (static_cast<uint32_t>(code[offset + 2]) << 16) +
+            (static_cast<uint32_t>(code[offset + 3]) << 24);
+      };
+
+      const uint32_t encoded_data = info.custom_data;
+      BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
+      // Check that the next instruction matches the expected LDR.
+      switch (kind) {
+        case BakerReadBarrierKind::kField: {
+          DCHECK_GE(code.size() - literal_offset, 8u);
+          uint32_t next_insn = GetInsn(literal_offset + 4u);
+          // LDR (immediate) with correct base_reg.
+          CheckValidReg(next_insn & 0x1fu);  // Check destination register.
+          const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+          CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
+          break;
+        }
+        case BakerReadBarrierKind::kArray: {
+          DCHECK_GE(code.size() - literal_offset, 8u);
+          uint32_t next_insn = GetInsn(literal_offset + 4u);
+          // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
+          // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
+          CheckValidReg(next_insn & 0x1fu);  // Check destination register.
+          const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+          CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5));
+          CheckValidReg((next_insn >> 16) & 0x1f);  // Check index register.
+          break;
+        }
+        case BakerReadBarrierKind::kGcRoot: {
+          DCHECK_GE(literal_offset, 4u);
+          uint32_t prev_insn = GetInsn(literal_offset - 4u);
+          // LDR (immediate) with correct root_reg.
+          const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
+          CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
+          break;
+        }
+        default:
+          LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
+          UNREACHABLE();
+      }
+    }
+  }
 }
 
 void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
@@ -2128,6 +2184,26 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
   __ Bind(slow_path->GetExitLabel());
 }
 
+void InstructionCodeGeneratorARM64::GenerateBitstringTypeCheckCompare(
+    HTypeCheckInstruction* check, vixl::aarch64::Register temp) {
+  uint32_t path_to_root = check->GetBitstringPathToRoot();
+  uint32_t mask = check->GetBitstringMask();
+  DCHECK(IsPowerOfTwo(mask + 1));
+  size_t mask_bits = WhichPowerOf2(mask + 1);
+
+  if (mask_bits == 16u) {
+    // Load only the bitstring part of the status word.
+    __ Ldrh(temp, HeapOperand(temp, mirror::Class::StatusOffset()));
+  } else {
+    // /* uint32_t */ temp = temp->status_
+    __ Ldr(temp, HeapOperand(temp, mirror::Class::StatusOffset()));
+    // Extract the bitstring bits.
+    __ Ubfx(temp, temp, 0, mask_bits);
+  }
+  // Compare the bitstring bits to `path_to_root`.
+  __ Cmp(temp, path_to_root);
+}
+
 void CodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
   BarrierType type = BarrierAll;
 
@@ -3865,6 +3941,8 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
     case TypeCheckKind::kInterfaceCheck:
       call_kind = LocationSummary::kCallOnSlowPath;
       break;
+    case TypeCheckKind::kBitstringCheck:
+      break;
   }
 
   LocationSummary* locations =
@@ -3873,7 +3951,13 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   }
   locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
+  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
+    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
+    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
+    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
+  } else {
+    locations->SetInAt(1, Location::RequiresRegister());
+  }
   // The "out" register is used as a temporary, so it overlaps with the inputs.
   // Note that TypeCheckSlowPathARM64 uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
@@ -3886,7 +3970,9 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   Location obj_loc = locations->InAt(0);
   Register obj = InputRegisterAt(instruction, 0);
-  Register cls = InputRegisterAt(instruction, 1);
+  Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck)
+      ? Register()
+      : InputRegisterAt(instruction, 1);
   Location out_loc = locations->Out();
   Register out = OutputRegister(instruction);
   const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
@@ -4072,6 +4158,23 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
       }
       break;
     }
+
+    case TypeCheckKind::kBitstringCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        out_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp_loc,
+                                        kWithoutReadBarrier);
+
+      GenerateBitstringTypeCheckCompare(instruction, out);
+      __ Cset(out, eq);
+      if (zero.IsLinked()) {
+        __ B(&done);
+      }
+      break;
+    }
   }
 
   if (zero.IsLinked()) {
@@ -4094,7 +4197,13 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
+  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
+    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
+    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
+    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
+  } else {
+    locations->SetInAt(1, Location::RequiresRegister());
+  }
   // Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64.
   locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
 }
@@ -4104,7 +4213,9 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
   LocationSummary* locations = instruction->GetLocations();
   Location obj_loc = locations->InAt(0);
   Register obj = InputRegisterAt(instruction, 0);
-  Register cls = InputRegisterAt(instruction, 1);
+  Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck)
+      ? Register()
+      : InputRegisterAt(instruction, 1);
   const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
   DCHECK_GE(num_temps, 1u);
   DCHECK_LE(num_temps, 3u);
@@ -4285,6 +4396,20 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
       __ B(ne, &start_loop);
       break;
     }
+
+    case TypeCheckKind::kBitstringCheck: {
+      // /* HeapReference<Class> */ temp = obj->klass_
+      GenerateReferenceLoadTwoRegisters(instruction,
+                                        temp_loc,
+                                        obj_loc,
+                                        class_offset,
+                                        maybe_temp2_loc,
+                                        kWithoutReadBarrier);
+
+      GenerateBitstringTypeCheckCompare(instruction, temp);
+      __ B(ne, type_check_slow_path->GetEntryLabel());
+      break;
+    }
   }
 
   __ Bind(&done);
@@ -4459,12 +4584,23 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(
       // Load method address from literal pool.
      __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+      uint32_t boot_image_offset = GetBootImageOffset(invoke);
+      vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
+      EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
+      // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+      vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
+      // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+      EmitLdrOffsetPlaceholder(ldr_label, WRegisterFrom(temp), XRegisterFrom(temp));
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
-      // Add ADRP with its PC-relative DexCache access patch.
+      // Add ADRP with its PC-relative .bss entry patch.
       MethodReference target_method(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex());
       vixl::aarch64::Label* adrp_label = NewMethodBssEntryPatch(target_method);
       EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
-      // Add LDR with its PC-relative DexCache access patch.
+      // Add LDR with its PC-relative .bss entry patch.
       vixl::aarch64::Label* ldr_label = NewMethodBssEntryPatch(target_method, adrp_label);
       EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
@@ -4559,6 +4695,13 @@ void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* i
   codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
 }
 
+vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
+    uint32_t boot_image_offset,
+    vixl::aarch64::Label* adrp_label) {
+  return NewPcRelativePatch(
+      /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+}
+
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
     MethodReference target_method,
     vixl::aarch64::Label* adrp_label) {
@@ -4681,6 +4824,14 @@ inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -4700,11 +4851,10 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* lin
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -4719,6 +4869,44 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* lin
   DCHECK_EQ(size, linker_patches->size());
 }
 
+bool CodeGeneratorARM64::NeedsThunkCode(const linker::LinkerPatch& patch) const {
+  return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
+         patch.GetType() == linker::LinkerPatch::Type::kCallRelative;
+}
+
+void CodeGeneratorARM64::EmitThunkCode(const linker::LinkerPatch& patch,
+                                       /*out*/ ArenaVector<uint8_t>* code,
+                                       /*out*/ std::string* debug_name) {
+  Arm64Assembler assembler(GetGraph()->GetAllocator());
+  switch (patch.GetType()) {
+    case linker::LinkerPatch::Type::kCallRelative: {
+      // The thunk just uses the entry point in the ArtMethod. This works even for calls
+      // to the generic JNI and interpreter trampolines.
+      Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+          kArm64PointerSize).Int32Value());
+      assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+      if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+        *debug_name = "MethodCallThunk";
+      }
+      break;
+    }
+    case linker::LinkerPatch::Type::kBakerReadBarrierBranch: {
+      DCHECK_EQ(patch.GetBakerCustomValue2(), 0u);
+      CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected patch type " << patch.GetType();
+      UNREACHABLE();
+  }
+
+  // Ensure we emit the literal pool if any.
+  assembler.FinalizeCode();
+  code->resize(assembler.CodeSize());
+  MemoryRegion code_region(code->data(), code->size());
+  assembler.FinalizeInstructions(code_region);
+}
+
 vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value) {
   return uint32_literals_.GetOrCreate(
       value,
@@ -4779,7 +4967,7 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -4859,12 +5047,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
       DCHECK(!cls->MustGenerateClinitCheck());
       // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
       Register current_method = InputRegisterAt(cls, 0);
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              current_method,
-                              ArtMethod::DeclaringClassOffset().Int32Value(),
-                              /* fixup_label */ nullptr,
-                              read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls,
+                                        out_loc,
+                                        current_method,
+                                        ArtMethod::DeclaringClassOffset().Int32Value(),
+                                        /* fixup_label */ nullptr,
+                                        read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
@@ -4888,23 +5076,16 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
       __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      // Add ADRP with its PC-relative type patch.
-      const DexFile& dex_file = cls->GetDexFile();
-      dex::TypeIndex type_index = cls->GetTypeIndex();
-      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageTypePatch(dex_file, type_index);
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
+      // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset);
       codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
-      // Add LDR with its PC-relative type patch.
+      // Add LDR with its PC-relative .data.bimg.rel.ro patch.
       vixl::aarch64::Label* ldr_label =
-          codegen_->NewBootImageTypePatch(dex_file, type_index, adrp_label);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
       codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X());
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(dex_file.StringByTypeIdx(type_index)));
-      if (masked_hash != 0) {
-        __ Sub(out.W(), out.W(), Operand(masked_hash));
-      }
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -4914,16 +5095,16 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
       vixl::aarch64::Register temp = XRegisterFrom(out_loc);
       vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
       codegen_->EmitAdrpPlaceholder(adrp_label, temp);
-      // Add LDR with its PC-relative Class patch.
+      // Add LDR with its PC-relative Class .bss entry patch.
       vixl::aarch64::Label* ldr_label =
           codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
       // /* GcRoot<mirror::Class> */ out = *(base_address + offset)  /* PC-relative */
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              temp,
-                              /* offset placeholder */ 0u,
-                              ldr_label,
-                              read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls,
+                                        out_loc,
+                                        temp,
+                                        /* offset placeholder */ 0u,
+                                        ldr_label,
+                                        read_barrier_option);
       generate_null_check = true;
       break;
     }
@@ -4931,12 +5112,12 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
       __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
                                                        cls->GetTypeIndex(),
                                                        cls->GetClass()));
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              out.X(),
-                              /* offset */ 0,
-                              /* fixup_label */ nullptr,
-                              read_barrier_option);
+      codegen_->GenerateGcRootFieldLoad(cls,
+                                        out_loc,
+                                        out.X(),
+                                        /* offset */ 0,
+                                        /* fixup_label */ nullptr,
+                                        read_barrier_option);
       break;
     }
     case HLoadClass::LoadKind::kRuntimeCall:
@@ -4989,7 +5170,7 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
     HLoadString::LoadKind desired_string_load_kind) {
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5055,16 +5236,15 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
       __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      // Add ADRP with its PC-relative String patch.
-      const DexFile& dex_file = load->GetDexFile();
-      const dex::StringIndex string_index = load->GetStringIndex();
-      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageStringPatch(dex_file, string_index);
+      // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
+      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset);
       codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
-      // Add LDR with its PC-relative String patch.
+      // Add LDR with its PC-relative .data.bimg.rel.ro patch.
       vixl::aarch64::Label* ldr_label =
-          codegen_->NewBootImageStringPatch(dex_file, string_index, adrp_label);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
       codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X());
       return;
     }
@@ -5076,16 +5256,16 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
       Register temp = XRegisterFrom(out_loc);
       vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index);
       codegen_->EmitAdrpPlaceholder(adrp_label, temp);
-      // Add LDR with its .bss entry String patch.
+      // Add LDR with its PC-relative String .bss entry patch.
       vixl::aarch64::Label* ldr_label =
           codegen_->NewStringBssEntryPatch(dex_file, string_index, adrp_label);
       // /* GcRoot<mirror::String> */ out = *(base_address + offset)  /* PC-relative */
-      GenerateGcRootFieldLoad(load,
-                              out_loc,
-                              temp,
-                              /* offset placeholder */ 0u,
-                              ldr_label,
-                              kCompilerReadBarrierOption);
+      codegen_->GenerateGcRootFieldLoad(load,
+                                        out_loc,
+                                        temp,
+                                        /* offset placeholder */ 0u,
+                                        ldr_label,
+                                        kCompilerReadBarrierOption);
       SlowPathCodeARM64* slow_path =
           new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
       codegen_->AddSlowPath(slow_path);
@@ -5098,12 +5278,12 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
       __ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
                                                         load->GetStringIndex(),
                                                         load->GetString()));
-      GenerateGcRootFieldLoad(load,
-                              out_loc,
-                              out.X(),
-                              /* offset */ 0,
-                              /* fixup_label */ nullptr,
-                              kCompilerReadBarrierOption);
+      codegen_->GenerateGcRootFieldLoad(load,
+                                        out_loc,
+                                        out.X(),
+                                        /* offset */ 0,
+                                        /* fixup_label */ nullptr,
+                                        kCompilerReadBarrierOption);
       return;
     }
     default:
@@ -5462,6 +5642,153 @@ void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
   }
 }
 
+// TODO: integrate with HandleBinaryOp?
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARM64::GenerateMinMaxInt(LocationSummary* locations,
+                                                      bool is_min,
+                                                      DataType::Type type) {
+  Location op1 = locations->InAt(0);
+  Location op2 = locations->InAt(1);
+  Location out = locations->Out();
+
+  Register op1_reg;
+  Register op2_reg;
+  Register out_reg;
+  if (type == DataType::Type::kInt64) {
+    op1_reg = XRegisterFrom(op1);
+    op2_reg = XRegisterFrom(op2);
+    out_reg = XRegisterFrom(out);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kInt32);
+    op1_reg = WRegisterFrom(op1);
+    op2_reg = WRegisterFrom(op2);
+    out_reg = WRegisterFrom(out);
+  }
+
+  __ Cmp(op1_reg, op2_reg);
+  __ Csel(out_reg, op1_reg, op2_reg, is_min ? lt : gt);
+}
+
+void InstructionCodeGeneratorARM64::GenerateMinMaxFP(LocationSummary* locations,
+                                                     bool is_min,
+                                                     DataType::Type type) {
+  Location op1 = locations->InAt(0);
+  Location op2 = locations->InAt(1);
+  Location out = locations->Out();
+
+  FPRegister op1_reg;
+  FPRegister op2_reg;
+  FPRegister out_reg;
+  if (type == DataType::Type::kFloat64) {
+    op1_reg = DRegisterFrom(op1);
+    op2_reg = DRegisterFrom(op2);
+    out_reg = DRegisterFrom(out);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kFloat32);
+    op1_reg = SRegisterFrom(op1);
+    op2_reg = SRegisterFrom(op2);
+    out_reg = SRegisterFrom(out);
+  }
+
+  if (is_min) {
+    __ Fmin(out_reg, op1_reg, op2_reg);
+  } else {
+    __ Fmax(out_reg, op1_reg, op2_reg);
+  }
+}
+
+// TODO: integrate with HandleBinaryOp?
+void InstructionCodeGeneratorARM64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min, type);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderARM64::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorARM64::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderARM64::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorARM64::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
+void LocationsBuilderARM64::VisitAbs(HAbs* abs) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
+  switch (abs->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for abs operation " << abs->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARM64::VisitAbs(HAbs* abs) {
+  switch (abs->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64: {
+      Register in_reg = InputRegisterAt(abs, 0);
+      Register out_reg = OutputRegister(abs);
+      __ Cmp(in_reg, Operand(0));
+      __ Cneg(out_reg, in_reg, lt);
+      break;
+    }
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64: {
+      FPRegister in_reg = InputFPRegisterAt(abs, 0);
+      FPRegister out_reg = OutputFPRegister(abs);
+      __ Fabs(out_reg, in_reg);
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected type for abs operation " << abs->GetResultType();
  }
+}
+
 void LocationsBuilderARM64::VisitConstructorFence(HConstructorFence* constructor_fence) {
   constructor_fence->SetLocations(nullptr);
 }
@@ -5905,7 +6232,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
   }
 }
 
-void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
+void CodeGeneratorARM64::GenerateGcRootFieldLoad(
     HInstruction* instruction,
     Location root,
     Register obj,
@@ -5939,9 +6266,8 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
       DCHECK(temps.IsAvailable(ip0));
       DCHECK(temps.IsAvailable(ip1));
       temps.Exclude(ip0, ip1);
-      uint32_t custom_data =
-          linker::Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
-      vixl::aarch64::Label* cbnz_label = codegen_->NewBakerReadBarrierPatch(custom_data);
+      uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
+      vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
 
       EmissionCheckScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
       vixl::aarch64::Label return_address;
@@ -5970,14 +6296,14 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
       // Slow path marking the GC root `root`. The entrypoint will
       // be loaded by the slow path code.
       SlowPathCodeARM64* slow_path =
-          new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
-      codegen_->AddSlowPath(slow_path);
+          new (GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
+      AddSlowPath(slow_path);
 
       // /* GcRoot<mirror::Object> */ root = *(obj + offset)
       if (fixup_label == nullptr) {
         __ Ldr(root_reg, MemOperand(obj, offset));
       } else {
-        codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
+        EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
       }
       static_assert(
           sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
@@ -5997,10 +6323,10 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
       if (fixup_label == nullptr) {
        __ Add(root_reg.X(), obj.X(), offset);
       } else {
-        codegen_->EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X());
+        EmitAddPlaceholder(fixup_label, root_reg.X(), obj.X());
       }
       // /* mirror::Object* */ root = root->Read()
-      codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
+      GenerateReadBarrierForRootSlow(instruction, root, root);
     }
   } else {
     // Plain GC root load with no read barrier.
@@ -6008,12 +6334,12 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
     if (fixup_label == nullptr) {
       __ Ldr(root_reg, MemOperand(obj, offset));
    } else {
-      codegen_->EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X());
+      EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj.X());
    }
     // Note that GC roots are not affected by heap poisoning, thus we
     // do not have to unpoison `root_reg` here.
   }
-  codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+  MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
 }
 
 void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -6062,9 +6388,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
       DCHECK(temps.IsAvailable(ip0));
       DCHECK(temps.IsAvailable(ip1));
       temps.Exclude(ip0, ip1);
-      uint32_t custom_data = linker::Arm64RelativePatcher::EncodeBakerReadBarrierFieldData(
-          base.GetCode(),
-          obj.GetCode());
+      uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode());
       vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
 
       {
@@ -6149,8 +6473,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins
     DCHECK(temps.IsAvailable(ip0));
     DCHECK(temps.IsAvailable(ip1));
     temps.Exclude(ip0, ip1);
-    uint32_t custom_data =
-        linker::Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(temp.GetCode());
+    uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
     vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
 
     __ Add(temp.X(), obj.X(), Operand(data_offset));
@@ -6510,5 +6833,176 @@ void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_
 #undef __
 #undef QUICK_ENTRY_POINT
 
+#define __ assembler.GetVIXLAssembler()->
+
+static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
+                                     vixl::aarch64::Register base_reg,
+                                     vixl::aarch64::MemOperand& lock_word,
+                                     vixl::aarch64::Label* slow_path,
+                                     vixl::aarch64::Label* throw_npe = nullptr) {
+  // Load the lock word containing the rb_state.
+  __ Ldr(ip0.W(), lock_word);
+  // Given the numeric representation, it's enough to check the low bit of the rb_state.
+  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
+  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
+  __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
+  static_assert(
+      BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET,
+      "Field and array LDR offsets must be the same to reuse the same code.");
+  // To throw NPE, we return to the fast path; the artificial dependence below does not matter.
+  if (throw_npe != nullptr) {
+    __ Bind(throw_npe);
+  }
+  // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning).
+  static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+                "Field LDR must be 1 instruction (4B) before the return address label; "
+                " 2 instructions (8B) for heap poisoning.");
+  __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
+  // Introduce a dependency on the lock_word including rb_state,
+  // to prevent load-load reordering, and without using
+  // a memory barrier (which would be more expensive).
+  __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
+  __ Br(lr);  // And return back to the function.
+  // Note: The fake dependency is unnecessary for the slow path.
+}
+
+// Load the read barrier introspection entrypoint in register `entrypoint`.
+static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,
+                                                       vixl::aarch64::Register entrypoint) {
+  // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
+  DCHECK_EQ(ip0.GetCode(), 16u);
+  const int32_t entry_point_offset =
+      Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
+  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
+}
+
+void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
+                                                      uint32_t encoded_data,
+                                                      /*out*/ std::string* debug_name) {
+  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
+  switch (kind) {
+    case BakerReadBarrierKind::kField: {
+      auto base_reg =
+          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
+      CheckValidReg(base_reg.GetCode());
+      auto holder_reg =
+          Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data));
+      CheckValidReg(holder_reg.GetCode());
+      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+      temps.Exclude(ip0, ip1);
+      // If base_reg differs from holder_reg, the offset was too large and we must have emitted
+      // an explicit null check before the load. Otherwise, for implicit null checks, we need to
+      // null-check the holder as we do not necessarily do that check before going to the thunk.
+      vixl::aarch64::Label throw_npe_label;
+      vixl::aarch64::Label* throw_npe = nullptr;
+      if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) {
+        throw_npe = &throw_npe_label;
+        __ Cbz(holder_reg.W(), throw_npe);
+      }
+      // Check if the holder is gray and, if not, add fake dependency to the base register
+      // and return to the LDR instruction to load the reference. Otherwise, use introspection
+      // to load the reference and call the entrypoint that performs further checks on the
+      // reference and marks it if needed.
+      vixl::aarch64::Label slow_path;
+      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
+      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, throw_npe);
+      __ Bind(&slow_path);
+      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
+      __ Ldr(ip0.W(), ldr_address);       // Load the LDR (immediate) unsigned offset.
+      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+      __ Ubfx(ip0.W(), ip0.W(), 10, 12);  // Extract the offset.
+      __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2));  // Load the reference.
+      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
+      __ Br(ip1);                         // Jump to the entrypoint.
+      break;
+    }
+    case BakerReadBarrierKind::kArray: {
+      auto base_reg =
+          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
+      CheckValidReg(base_reg.GetCode());
+      DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+                BakerReadBarrierSecondRegField::Decode(encoded_data));
+      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+      temps.Exclude(ip0, ip1);
+      vixl::aarch64::Label slow_path;
+      int32_t data_offset =
+          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
+      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
+      DCHECK_LT(lock_word.GetOffset(), 0);
+      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
+      __ Bind(&slow_path);
+      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
+      __ Ldr(ip0.W(), ldr_address);       // Load the LDR (register) unsigned offset.
+      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+      __ Ubfx(ip0, ip0, 16, 6);           // Extract the index register, plus 32 (bit 21 is set).
+      __ Bfi(ip1, ip0, 3, 6);             // Insert ip0 to the entrypoint address to create
+                                          // a switch case target based on the index register.
+      __ Mov(ip0, base_reg);              // Move the base register to ip0.
+      __ Br(ip1);                         // Jump to the entrypoint's array switch case.
+      break;
+    }
+    case BakerReadBarrierKind::kGcRoot: {
+      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
+      // and it does not have a forwarding address), call the correct introspection entrypoint;
+      // otherwise return the reference (or the extracted forwarding address).
+      // There is no gray bit check for GC roots.
+      auto root_reg =
+          Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
+      CheckValidReg(root_reg.GetCode());
+      DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+                BakerReadBarrierSecondRegField::Decode(encoded_data));
+      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+      temps.Exclude(ip0, ip1);
+      vixl::aarch64::Label return_label, not_marked, forwarding_address;
+      __ Cbz(root_reg, &return_label);
+      MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value());
+      __ Ldr(ip0.W(), lock_word);
+      __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked);
+      __ Bind(&return_label);
+      __ Br(lr);
+      __ Bind(&not_marked);
+      __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1));
+      __ B(&forwarding_address, mi);
+      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+      // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to
+      // art_quick_read_barrier_mark_introspection_gc_roots.
+      __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET));
+      __ Mov(ip0.W(), root_reg);
+      __ Br(ip1);
+      __ Bind(&forwarding_address);
+      __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift);
+      __ Br(lr);
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
+      UNREACHABLE();
+  }
+
+  if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+    std::ostringstream oss;
+    oss << "BakerReadBarrierThunk";
+    switch (kind) {
+      case BakerReadBarrierKind::kField:
+        oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
+            << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
+        break;
+      case BakerReadBarrierKind::kArray:
+        oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+        DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+                  BakerReadBarrierSecondRegField::Decode(encoded_data));
+        break;
+      case BakerReadBarrierKind::kGcRoot:
+        oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
+        DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
+                  BakerReadBarrierSecondRegField::Decode(encoded_data));
+        break;
+    }
+    *debug_name = oss.str();
+  }
+}
+
+#undef __
+
 }  // namespace arm64
 }  // namespace art
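
A note on the debug-build verification added to CodeGeneratorARM64::Finalize() above: it re-reads the emitted code and compares raw A64 encodings against mask/value pairs. The stand-alone sketch below (our own illustration in plain C++, not ART code; the helper name is hypothetical) shows why masking with 0xffc003e0 matches "LDR Wt, [Xn, #imm]" with a fixed base register while ignoring the destination register and the immediate:

#include <cstdint>
#include <cstdio>

// A64 LDR (immediate, unsigned offset, 32-bit) encodes as
//   0xb9400000 | (imm12 << 10) | (Rn << 5) | Rt,
// so masking with 0xffc003e0 keeps the opcode bits and Rn while
// discarding imm12 and Rt, which is what the CHECK_EQ verifies.
static bool IsLdrImmWithBase(uint32_t insn, uint32_t base_reg) {
  return (insn & 0xffc003e0u) == (0xb9400000u | (base_reg << 5));
}

int main() {
  // LDR w1, [x2, #8]: imm12 = 8 / 4 = 2, Rn = 2, Rt = 1.
  const uint32_t insn = 0xb9400000u | (2u << 10) | (2u << 5) | 1u;
  std::printf("%s\n", IsLdrImmWithBase(insn, 2u) ? "match" : "no match");
  return 0;
}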
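GenerateBitstringTypeCheckCompare() emits Ldrh, or Ldr plus Ubfx, followed by Cmp; semantically this is the classic bitstring subtype test. A minimal sketch of that test (names and layout are illustrative, not ART's):

#include <cassert>
#include <cstdint>

// A class's type-check bitstring encodes its path from the root of the
// class hierarchy. S is a subtype of T iff S's bitstring, truncated via
// T's mask, equals T's path-to-root value; eq after the Cmp above means
// the check succeeded.
static bool IsSubtypeOf(uint32_t subtype_bitstring,
                        uint32_t target_path_to_root,
                        uint32_t target_mask) {
  // Mirrors the DCHECK in the patch: mask + 1 must be a power of two.
  assert(((target_mask + 1) & target_mask) == 0);
  return (subtype_bitstring & target_mask) == target_path_to_root;
}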
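The new kBootImageRelRo load kinds replace the old class-table/intern-table kinds: instead of re-resolving through a runtime table, compiled code loads a pre-relocated 32-bit entry from the .data.bimg.rel.ro section via an ADRP+LDR pair. Conceptually (an illustrative model only; the real addresses come from the linker patches above):

#include <cstdint>

// Model of what the patched ADRP+LDR pair computes: ADRP forms the
// 4 KiB page address of the .data.bimg.rel.ro entry and LDR adds the
// low 12 bits of the offset and loads the entry. Entries are 32-bit
// because the boot image is mapped in the low 4 GiB, so a W-register
// load suffices, as the comment in the patch notes.
static uint32_t LoadBootImageRelRoEntry(const uint8_t* rel_ro_base,
                                        uint32_t entry_offset) {
  return *reinterpret_cast<const uint32_t*>(rel_ro_base + entry_offset);
}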
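The integer Min/Max lowering above is branchless: one Cmp sets the flags and one Csel selects. In C++ terms, the Cmp/Csel pair computes the following (our paraphrase); the FP path uses Fmin/Fmax instead, which also give the NaN and signed-zero behavior Java requires:

#include <cstdint>

// Equivalent of "Cmp op1, op2; Csel out, op1, op2, lt/gt" for signed
// integers: select op1 when the condition holds, otherwise op2.
static int64_t MinMaxInt(int64_t op1, int64_t op2, bool is_min) {
  return is_min ? (op1 < op2 ? op1 : op2)
                : (op1 > op2 ? op1 : op2);
}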
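Finally, the kGcRoot thunk's "Tst ip0, ip0, LSL #1; B.MI" sequence is a two-instruction test that the lock word's top two bits are both set, which is the forwarding-address state. A sketch of the predicate it evaluates (illustrative):

#include <cstdint>

// "TST w, w, LSL #1" sets the N flag to bit 31 of (w & (w << 1)), which
// is 1 exactly when bits 31 and 30 of w are both set, i.e. the lock word
// state that encodes a forwarding address. B.MI branches on that flag.
static bool HasForwardingAddress(uint32_t lock_word) {
  return (lock_word & (lock_word << 1) & 0x80000000u) != 0;
}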
