Diffstat (limited to 'compiler/optimizing/nodes.h')
-rw-r--r--  compiler/optimizing/nodes.h  892
1 file changed, 676 insertions, 216 deletions
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7ed5bca947..939c49f9a6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -21,6 +21,7 @@
#include <array>
#include <type_traits>
+#include "base/arena_allocator.h"
#include "base/arena_bit_vector.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
@@ -32,6 +33,9 @@
#include "base/stl_util.h"
#include "base/transform_array_ref.h"
#include "art_method.h"
+#include "block_namer.h"
+#include "class_root.h"
+#include "compilation_kind.h"
#include "data_type.h"
#include "deoptimization_kind.h"
#include "dex/dex_file.h"
@@ -68,6 +72,7 @@ class HParameterValue;
class HPhi;
class HSuspendCheck;
class HTryBoundary;
+class FieldInfo;
class LiveInterval;
class LocationSummary;
class SlowPathCode;
@@ -272,13 +277,6 @@ class ReferenceTypeInfo : ValueObject {
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(IsValid());
- DCHECK(rti.IsValid());
- return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
- GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
- }
-
// Returns true if the type information provides the same amount of detail.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
@@ -309,26 +307,90 @@ class ReferenceTypeInfo : ValueObject {
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
+class HandleCache {
+ public:
+ explicit HandleCache(VariableSizedHandleScope* handles) : handles_(handles) { }
+
+ VariableSizedHandleScope* GetHandles() { return handles_; }
+
+ template <typename T>
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return handles_->NewHandle(object);
+ }
+
+ template <typename T>
+ MutableHandle<T> NewHandle(ObjPtr<T> object) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return handles_->NewHandle(object);
+ }
+
+ ReferenceTypeInfo::TypeHandle GetObjectClassHandle() {
+ return GetRootHandle(ClassRoot::kJavaLangObject, &object_class_handle_);
+ }
+
+ ReferenceTypeInfo::TypeHandle GetClassClassHandle() {
+ return GetRootHandle(ClassRoot::kJavaLangClass, &class_class_handle_);
+ }
+
+ ReferenceTypeInfo::TypeHandle GetMethodHandleClassHandle() {
+ return GetRootHandle(ClassRoot::kJavaLangInvokeMethodHandleImpl, &method_handle_class_handle_);
+ }
+
+ ReferenceTypeInfo::TypeHandle GetMethodTypeClassHandle() {
+ return GetRootHandle(ClassRoot::kJavaLangInvokeMethodType, &method_type_class_handle_);
+ }
+
+ ReferenceTypeInfo::TypeHandle GetStringClassHandle() {
+ return GetRootHandle(ClassRoot::kJavaLangString, &string_class_handle_);
+ }
+
+ ReferenceTypeInfo::TypeHandle GetThrowableClassHandle() {
+ return GetRootHandle(ClassRoot::kJavaLangThrowable, &throwable_class_handle_);
+ }
+
+
+ private:
+ inline ReferenceTypeInfo::TypeHandle GetRootHandle(ClassRoot class_root,
+ ReferenceTypeInfo::TypeHandle* cache) {
+ if (UNLIKELY(!ReferenceTypeInfo::IsValidHandle(*cache))) {
+ *cache = CreateRootHandle(handles_, class_root);
+ }
+ return *cache;
+ }
+
+ static ReferenceTypeInfo::TypeHandle CreateRootHandle(VariableSizedHandleScope* handles,
+ ClassRoot class_root);
+
+ VariableSizedHandleScope* handles_;
+
+ ReferenceTypeInfo::TypeHandle object_class_handle_;
+ ReferenceTypeInfo::TypeHandle class_class_handle_;
+ ReferenceTypeInfo::TypeHandle method_handle_class_handle_;
+ ReferenceTypeInfo::TypeHandle method_type_class_handle_;
+ ReferenceTypeInfo::TypeHandle string_class_handle_;
+ ReferenceTypeInfo::TypeHandle throwable_class_handle_;
+};
+
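
An aside on the class above: HandleCache centralizes lazily created TypeHandles for a
few boot-class-path roots, replacing the single cached inexact-Object RTI that HGraph
used to carry. A minimal standalone sketch of the same one-slot-per-root lazy-caching
pattern follows; FakeClassRoot, FakeHandle and FakeHandleCache are simplified stand-ins
invented for illustration, not ART types.

  #include <array>
  #include <cstddef>

  enum class FakeClassRoot : size_t { kObject, kString, kNumRoots };

  struct FakeHandle {
    const void* klass = nullptr;
    bool IsValid() const { return klass != nullptr; }
  };

  class FakeHandleCache {
   public:
    FakeHandle GetRootHandle(FakeClassRoot root) {
      FakeHandle& slot = cache_[static_cast<size_t>(root)];
      if (!slot.IsValid()) {
        slot = CreateRootHandle(root);  // Created on first use, then reused.
      }
      return slot;
    }

   private:
    static FakeHandle CreateRootHandle(FakeClassRoot root) {
      // Stand-in for looking up the real class root.
      static const int roots[static_cast<size_t>(FakeClassRoot::kNumRoots)] = {};
      return FakeHandle{&roots[static_cast<size_t>(root)]};
    }

    std::array<FakeHandle, static_cast<size_t>(FakeClassRoot::kNumRoots)> cache_{};
  };
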
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocGraph> {
public:
HGraph(ArenaAllocator* allocator,
ArenaStack* arena_stack,
+ VariableSizedHandleScope* handles,
const DexFile& dex_file,
uint32_t method_idx,
InstructionSet instruction_set,
InvokeType invoke_type = kInvalidInvokeType,
bool dead_reference_safe = false,
bool debuggable = false,
- bool osr = false,
- bool is_shared_jit_code = false,
- bool baseline = false,
+ CompilationKind compilation_kind = CompilationKind::kOptimized,
int start_instruction_id = 0)
: allocator_(allocator),
arena_stack_(arena_stack),
+ handle_cache_(handles),
blocks_(allocator->Adapter(kArenaAllocBlockList)),
reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
+ reachability_graph_(allocator, 0, 0, true, kArenaAllocReachabilityGraph),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -341,6 +403,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_simd_(false),
has_loops_(false),
has_irreducible_loops_(false),
+ has_direct_critical_native_call_(false),
dead_reference_safe_(dead_reference_safe),
debuggable_(debuggable),
current_instruction_id_(start_instruction_id),
@@ -357,21 +420,28 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
art_method_(nullptr),
- inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
- osr_(osr),
- baseline_(baseline),
- cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
- is_shared_jit_code_(is_shared_jit_code) {
+ compilation_kind_(compilation_kind),
+ cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
- // Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
- void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);
+ std::ostream& Dump(std::ostream& os,
+ std::optional<std::reference_wrapper<const BlockNamer>> namer = std::nullopt);
ArenaAllocator* GetAllocator() const { return allocator_; }
ArenaStack* GetArenaStack() const { return arena_stack_; }
+
+ HandleCache* GetHandleCache() { return &handle_cache_; }
+
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
+ // An iterator over only the blocks that are still actually in the graph (when
+ // blocks are removed they are replaced with 'nullptr' in GetBlocks() to
+ // simplify block-id assignment and avoid memmoves in the block list).
+ IterationRange<FilterNull<ArenaVector<HBasicBlock*>::const_iterator>> GetActiveBlocks() const {
+ return FilterOutNull(MakeIterationRange(GetBlocks()));
+ }
+
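
A hedged usage sketch of GetActiveBlocks() (illustration only, not part of this patch;
'graph' is an assumed HGraph*). The raw block list keeps nullptr holes where blocks were
removed, which the new accessor filters out:

  // Before: every walk over GetBlocks() must skip the nullptr holes.
  for (HBasicBlock* block : graph->GetBlocks()) {
    if (block == nullptr) {
      continue;
    }
    // ... visit block ...
  }
  // After: the filtering is implicit.
  for (HBasicBlock* block : graph->GetActiveBlocks()) {
    // ... visit block ...
  }
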
bool IsInSsaForm() const { return in_ssa_form_; }
void SetInSsaForm() { in_ssa_form_ = true; }
@@ -386,6 +456,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
void ComputeDominanceInformation();
void ClearDominanceInformation();
+ void ComputeReachabilityInformation();
+ void ClearReachabilityInformation();
void ClearLoopInformation();
void FindBackEdges(ArenaBitVector* visited);
GraphAnalysisResult BuildDominatorTree();
@@ -534,6 +606,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_bounds_checks_ = value;
}
+ // Returns true if dest is reachable from source, using either blocks or block-ids.
+ bool PathBetween(const HBasicBlock* source, const HBasicBlock* dest) const;
+ bool PathBetween(uint32_t source_id, uint32_t dest_id) const;
+
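
A standalone sketch of the bit-matrix idea behind PathBetween() (an assumption about the
representation; the actual implementation lives in nodes.cc and uses the ArenaBitVectorArray
declared further below). One bit row per block makes a reachability query a single bit test;
ReachabilityMatrix, MarkReachable and the word-size handling here are invented for
illustration:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  class ReachabilityMatrix {
   public:
    explicit ReachabilityMatrix(size_t num_blocks)
        : words_per_row_((num_blocks + 63) / 64),
          bits_(num_blocks * words_per_row_, 0u) {}

    // The real ComputeReachabilityInformation() would set bits for the full
    // transitive closure, not just for direct edges.
    void MarkReachable(size_t from, size_t to) {
      Row(from)[to / 64] |= uint64_t{1} << (to % 64);
    }

    bool PathBetween(size_t from, size_t to) const {
      return ((Row(from)[to / 64] >> (to % 64)) & 1u) != 0u;
    }

   private:
    uint64_t* Row(size_t i) { return bits_.data() + i * words_per_row_; }
    const uint64_t* Row(size_t i) const { return bits_.data() + i * words_per_row_; }

    size_t words_per_row_;
    std::vector<uint64_t> bits_;
  };
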
// Is the code known to be robust against eliminating dead references
// and the effects of early finalization?
bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
@@ -589,13 +665,11 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
return instruction_set_;
}
- bool IsCompilingOsr() const { return osr_; }
+ bool IsCompilingOsr() const { return compilation_kind_ == CompilationKind::kOsr; }
- bool IsCompilingBaseline() const { return baseline_; }
+ bool IsCompilingBaseline() const { return compilation_kind_ == CompilationKind::kBaseline; }
- bool IsCompilingForSharedJitCode() const {
- return is_shared_jit_code_;
- }
+ CompilationKind GetCompilationKind() const { return compilation_kind_; }
ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
return cha_single_implementation_list_;
@@ -624,6 +698,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
bool HasIrreducibleLoops() const { return has_irreducible_loops_; }
void SetHasIrreducibleLoops(bool value) { has_irreducible_loops_ = value; }
+ bool HasDirectCriticalNativeCall() const { return has_direct_critical_native_call_; }
+ void SetHasDirectCriticalNativeCall(bool value) { has_direct_critical_native_call_ = value; }
+
ArtMethod* GetArtMethod() const { return art_method_; }
void SetArtMethod(ArtMethod* method) { art_method_ = method; }
@@ -632,7 +709,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// before cursor.
HInstruction* InsertOppositeCondition(HInstruction* cond, HInstruction* cursor);
- ReferenceTypeInfo GetInexactObjectRti() const { return inexact_object_rti_; }
+ ReferenceTypeInfo GetInexactObjectRti() {
+ return ReferenceTypeInfo::Create(handle_cache_.GetObjectClassHandle(), /* is_exact= */ false);
+ }
uint32_t GetNumberOfCHAGuards() { return number_of_cha_guards_; }
void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
@@ -675,6 +754,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
ArenaAllocator* const allocator_;
ArenaStack* const arena_stack_;
+ HandleCache handle_cache_;
+
// List of blocks in insertion order.
ArenaVector<HBasicBlock*> blocks_;
@@ -685,6 +766,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// post order, this order is not incrementally kept up-to-date.
ArenaVector<HBasicBlock*> linear_order_;
+ // Reachability graph for checking connectedness between nodes. Acts as a partitioned vector where
+ // each chunk of RoundUp(blocks_.size(), BitVector::kWordBits) bits holds the reachability of one node.
+ ArenaBitVectorArray reachability_graph_;
+
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
@@ -731,6 +816,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// so there might be false positives.
bool has_irreducible_loops_;
+ // Flag whether there are any direct calls to native code registered
+ // for @CriticalNative methods.
+ bool has_direct_critical_native_call_;
+
// Is the code known to be robust against eliminating dead references
// and the effects of early finalization? If false, dead reference variables
// are kept if they might be visible to the garbage collector.
@@ -781,26 +870,15 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// (such as when the superclass could not be found).
ArtMethod* art_method_;
- // Keep the RTI of inexact Object to avoid having to pass stack handle
- // collection pointer to passes which may create NullConstant.
- ReferenceTypeInfo inexact_object_rti_;
-
- // Whether we are compiling this graph for on stack replacement: this will
- // make all loops seen as irreducible and emit special stack maps to mark
- // compiled code entries which the interpreter can directly jump to.
- const bool osr_;
-
- // Whether we are compiling baseline (not running optimizations). This affects
- // the code being generated.
- const bool baseline_;
+ // How we are compiling the graph: either optimized, osr, or baseline.
+ // For osr, we will make all loops seen as irreducible and emit special
+ // stack maps to mark compiled code entries which the interpreter can
+ // directly jump to.
+ const CompilationKind compilation_kind_;
// List of methods that are assumed to have single implementation.
ArenaSet<ArtMethod*> cha_single_implementation_list_;
- // Whether we are JIT compiling in the shared region area, putting
- // restrictions on, for example, how literals are being generated.
- bool is_shared_jit_code_;
-
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
friend class HInliner; // For the reverse post order.
@@ -808,6 +886,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
+inline std::ostream& operator<<(std::ostream& os, HGraph& graph) {
+ return graph.Dump(os);
+}
+
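
A hedged usage sketch of the new dump helpers (illustration only; 'graph' and 'instruction'
are assumed HGraph*/HInstruction* values):

  LOG(INFO) << "Graph after inlining:\n" << *graph;              // HGraph::Dump() via operator<<.
  LOG(INFO) << "Culprit: " << instruction->DumpWithArgs();       // With argument information.
  LOG(INFO) << "Kind only: " << instruction->DumpWithoutArgs();  // Without argument information.
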
class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
public:
HLoopInformation(HBasicBlock* header, HGraph* graph)
@@ -1016,6 +1098,10 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
return predecessors_;
}
+ size_t GetNumberOfPredecessors() const {
+ return GetPredecessors().size();
+ }
+
const ArenaVector<HBasicBlock*>& GetSuccessors() const {
return successors_;
}
@@ -1358,6 +1444,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
friend class HGraph;
friend class HInstruction;
+ // Allow manual control of the ordering of predecessors/successors
+ friend class OptimizingUnitTestHelper;
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
@@ -1387,7 +1475,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
DISALLOW_COPY_AND_ASSIGN(HLoopInformationOutwardIterator);
};
-#define FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
+#define FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(M) \
M(Above, Condition) \
M(AboveOrEqual, Condition) \
M(Abs, UnaryOperation) \
@@ -1422,6 +1510,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
+ M(PredicatedInstanceFieldGet, Instruction) \
M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(IntermediateAddress, Instruction) \
@@ -1477,7 +1566,9 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(TryBoundary, Instruction) \
M(TypeConversion, Instruction) \
M(UShr, BinaryOperation) \
- M(Xor, BinaryOperation) \
+ M(Xor, BinaryOperation)
+
+#define FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(M) \
M(VecReplicateScalar, VecUnaryOperation) \
M(VecExtractScalar, VecUnaryOperation) \
M(VecReduce, VecUnaryOperation) \
@@ -1507,6 +1598,13 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(VecDotProd, VecOperation) \
M(VecLoad, VecMemoryOperation) \
M(VecStore, VecMemoryOperation) \
+ M(VecPredSetAll, VecPredSetOperation) \
+ M(VecPredWhile, VecPredSetOperation) \
+ M(VecPredCondition, VecOperation) \
+
+#define FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_SCALAR_COMMON(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_VECTOR_COMMON(M)
/*
* Instructions, shared across several (not all) architectures.
@@ -1563,7 +1661,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(VecOperation, Instruction) \
M(VecUnaryOperation, VecOperation) \
M(VecBinaryOperation, VecOperation) \
- M(VecMemoryOperation, VecOperation)
+ M(VecMemoryOperation, VecOperation) \
+ M(VecPredSetOperation, VecOperation)
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1589,8 +1688,7 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
H##type& operator=(const H##type&) = delete; \
public:
-#define DEFAULT_COPY_CONSTRUCTOR(type) \
- explicit H##type(const H##type& other) = default;
+#define DEFAULT_COPY_CONSTRUCTOR(type) H##type(const H##type& other) = default;
template <typename T>
class HUseListNode : public ArenaObject<kArenaAllocUseListNode>,
@@ -2014,6 +2112,23 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
return GetParent() != nullptr;
}
+ class EnvInputSelector {
+ public:
+ explicit EnvInputSelector(const HEnvironment* e) : env_(e) {}
+ HInstruction* operator()(size_t s) const {
+ return env_->GetInstructionAt(s);
+ }
+ private:
+ const HEnvironment* env_;
+ };
+
+ using HConstEnvInputRef = TransformIterator<CountIter, EnvInputSelector>;
+ IterationRange<HConstEnvInputRef> GetEnvInputs() const {
+ IterationRange<CountIter> range(Range(Size()));
+ return MakeIterationRange(MakeTransformIterator(range.begin(), EnvInputSelector(this)),
+ MakeTransformIterator(range.end(), EnvInputSelector(this)));
+ }
+
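
A hedged usage sketch of GetEnvInputs() (illustration only; 'env' is an assumed
HEnvironment*, and treating missing vreg values as nullptr entries is an assumption):

  size_t live_values = 0;
  for (HInstruction* value : env->GetEnvInputs()) {
    if (value != nullptr) {
      ++live_values;
    }
  }
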
private:
ArenaVector<HUserRecord<HEnvironment*>> vregs_;
ArenaVector<Location> locations_;
@@ -2029,10 +2144,46 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
DISALLOW_COPY_AND_ASSIGN(HEnvironment);
};
+std::ostream& operator<<(std::ostream& os, const HInstruction& rhs);
+
+// Iterates over the chain of environments, from the innermost to the outermost.
+class HEnvironmentIterator : public ValueObject,
+ public std::iterator<std::forward_iterator_tag, HEnvironment*> {
+ public:
+ explicit HEnvironmentIterator(HEnvironment* cur) : cur_(cur) {}
+
+ HEnvironment* operator*() const {
+ return cur_;
+ }
+
+ HEnvironmentIterator& operator++() {
+ DCHECK(cur_ != nullptr);
+ cur_ = cur_->GetParent();
+ return *this;
+ }
+
+ HEnvironmentIterator operator++(int) {
+ HEnvironmentIterator prev(*this);
+ ++(*this);
+ return prev;
+ }
+
+ bool operator==(const HEnvironmentIterator& other) const {
+ return other.cur_ == cur_;
+ }
+
+ bool operator!=(const HEnvironmentIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ HEnvironment* cur_;
+};
+
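
A hedged usage sketch of the iterator above, paired with HInstruction::GetAllEnvironments()
declared further below (illustration only; 'instruction' is an assumed HInstruction*):

  // Range-based walk over the environment chain, innermost frame first.
  for (HEnvironment* env : instruction->GetAllEnvironments()) {
    // ... inspect env ...
  }
  // Manual form this replaces:
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    // ... inspect env ...
  }
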
class HInstruction : public ArenaObject<kArenaAllocInstruction> {
public:
#define DECLARE_KIND(type, super) k##type,
- enum InstructionKind {
+ enum InstructionKind { // private marker to prevent generate-operator-out.py from processing.
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_KIND)
kLastInstructionKind
};
@@ -2062,6 +2213,22 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
virtual ~HInstruction() {}
+ std::ostream& Dump(std::ostream& os, bool dump_args = false);
+
+ // Helper for dumping without argument information using operator<<
+ struct NoArgsDump {
+ const HInstruction* ins;
+ };
+ NoArgsDump DumpWithoutArgs() const {
+ return NoArgsDump{this};
+ }
+ // Helper for dumping with argument information using operator<<
+ struct ArgsDump {
+ const HInstruction* ins;
+ };
+ ArgsDump DumpWithArgs() const {
+ return ArgsDump{this};
+ }
HInstruction* GetNext() const { return next_; }
HInstruction* GetPrevious() const { return previous_; }
@@ -2131,6 +2298,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// Does the instruction always throw an exception unconditionally?
virtual bool AlwaysThrows() const { return false; }
+ // Will this instruction only cause async exceptions if it causes any at all?
+ virtual bool OnlyThrowsAsyncExceptions() const {
+ return false;
+ }
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
@@ -2179,8 +2350,9 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
DCHECK(user != nullptr);
// Note: fixup_end remains valid across push_front().
auto fixup_end = uses_.empty() ? uses_.begin() : ++uses_.begin();
+ ArenaAllocator* allocator = user->GetBlock()->GetGraph()->GetAllocator();
HUseListNode<HInstruction*>* new_node =
- new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HInstruction*>(user, index);
+ new (allocator) HUseListNode<HInstruction*>(user, index);
uses_.push_front(*new_node);
FixUpUserRecordsAfterUseInsertion(fixup_end);
}
@@ -2251,6 +2423,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
bool HasEnvironment() const { return environment_ != nullptr; }
HEnvironment* GetEnvironment() const { return environment_; }
+ IterationRange<HEnvironmentIterator> GetAllEnvironments() const {
+ return MakeIterationRange(HEnvironmentIterator(GetEnvironment()),
+ HEnvironmentIterator(nullptr));
+ }
// Set the `environment_` field. Raw because this method does not
// update the uses lists.
void SetRawEnvironment(HEnvironment* environment) {
@@ -2351,6 +2527,17 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
UNREACHABLE();
}
+ virtual bool IsFieldAccess() const {
+ return false;
+ }
+
+ virtual const FieldInfo& GetFieldInfo() const {
+ CHECK(IsFieldAccess()) << "Only callable on field accessors, not " << DebugName() << " "
+ << *this;
+ LOG(FATAL) << "Must be overridden by field accessors. Not implemented by " << *this;
+ UNREACHABLE();
+ }
+
// Return whether instruction can be cloned (copied).
virtual bool IsClonable() const { return false; }
@@ -2403,10 +2590,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return NeedsEnvironment() || IsCurrentMethod();
}
- // Returns whether the code generation of the instruction will require to have access
- // to the dex cache of the current method's declaring class via the current method.
- virtual bool NeedsDexCacheOfDeclaringClass() const { return false; }
-
// Does this instruction have any use in an environment before
// control flow hits 'other'?
bool HasAnyEnvironmentUseBefore(HInstruction* other);
@@ -2590,7 +2773,15 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
friend class HGraph;
friend class HInstructionList;
};
-std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
+
+std::ostream& operator<<(std::ostream& os, HInstruction::InstructionKind rhs);
+std::ostream& operator<<(std::ostream& os, const HInstruction::NoArgsDump rhs);
+std::ostream& operator<<(std::ostream& os, const HInstruction::ArgsDump rhs);
+std::ostream& operator<<(std::ostream& os, const HUseList<HInstruction*>& lst);
+std::ostream& operator<<(std::ostream& os, const HUseList<HEnvironment*>& lst);
+
+// Forward declarations for friends
+template <typename InnerIter> struct HSTLInstructionIterator;
// Iterates over the instructions, while preserving the next instruction
// in case the current instruction gets removed from the list by the user
@@ -2610,10 +2801,12 @@ class HInstructionIterator : public ValueObject {
}
private:
+ HInstructionIterator() : instruction_(nullptr), next_(nullptr) {}
+
HInstruction* instruction_;
HInstruction* next_;
- DISALLOW_COPY_AND_ASSIGN(HInstructionIterator);
+ friend struct HSTLInstructionIterator<HInstructionIterator>;
};
// Iterates over the instructions without saving the next instruction,
@@ -2632,9 +2825,11 @@ class HInstructionIteratorHandleChanges : public ValueObject {
}
private:
+ HInstructionIteratorHandleChanges() : instruction_(nullptr) {}
+
HInstruction* instruction_;
- DISALLOW_COPY_AND_ASSIGN(HInstructionIteratorHandleChanges);
+ friend struct HSTLInstructionIterator<HInstructionIteratorHandleChanges>;
};
@@ -2653,12 +2848,63 @@ class HBackwardInstructionIterator : public ValueObject {
}
private:
+ HBackwardInstructionIterator() : instruction_(nullptr), next_(nullptr) {}
+
HInstruction* instruction_;
HInstruction* next_;
- DISALLOW_COPY_AND_ASSIGN(HBackwardInstructionIterator);
+ friend struct HSTLInstructionIterator<HBackwardInstructionIterator>;
+};
+
+template <typename InnerIter>
+struct HSTLInstructionIterator : public ValueObject,
+ public std::iterator<std::forward_iterator_tag, HInstruction*> {
+ public:
+ static_assert(std::is_same_v<InnerIter, HBackwardInstructionIterator> ||
+ std::is_same_v<InnerIter, HInstructionIterator> ||
+ std::is_same_v<InnerIter, HInstructionIteratorHandleChanges>,
+ "Unknown wrapped iterator!");
+
+ explicit HSTLInstructionIterator(InnerIter inner) : inner_(inner) {}
+ HInstruction* operator*() const {
+ DCHECK(inner_.Current() != nullptr);
+ return inner_.Current();
+ }
+
+ HSTLInstructionIterator<InnerIter>& operator++() {
+ DCHECK(*this != HSTLInstructionIterator<InnerIter>::EndIter());
+ inner_.Advance();
+ return *this;
+ }
+
+ HSTLInstructionIterator<InnerIter> operator++(int) {
+ HSTLInstructionIterator<InnerIter> prev(*this);
+ ++(*this);
+ return prev;
+ }
+
+ bool operator==(const HSTLInstructionIterator<InnerIter>& other) const {
+ return inner_.Current() == other.inner_.Current();
+ }
+
+ bool operator!=(const HSTLInstructionIterator<InnerIter>& other) const {
+ return !(*this == other);
+ }
+
+ static HSTLInstructionIterator<InnerIter> EndIter() {
+ return HSTLInstructionIterator<InnerIter>(InnerIter());
+ }
+
+ private:
+ InnerIter inner_;
};
+template <typename InnerIter>
+IterationRange<HSTLInstructionIterator<InnerIter>> MakeSTLInstructionIteratorRange(InnerIter iter) {
+ return MakeIterationRange(HSTLInstructionIterator<InnerIter>(iter),
+ HSTLInstructionIterator<InnerIter>::EndIter());
+}
+
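
A hedged usage sketch of MakeSTLInstructionIteratorRange() (illustration only; 'block' is an
assumed HBasicBlock*, and constructing HInstructionIterator from block->GetInstructions() is
an assumption based on its existing uses in the optimizing compiler):

  for (HInstruction* insn : MakeSTLInstructionIteratorRange(
           HInstructionIterator(block->GetInstructions()))) {
    // The wrapped iterator caches the next node, so 'insn' may be removed
    // from the block while iterating.
  }
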
class HVariableInputSizeInstruction : public HInstruction {
public:
using HInstruction::GetInputRecords; // Keep the const version visible.
@@ -3615,14 +3861,14 @@ class HBinaryOperation : public HExpression<2> {
// The comparison bias applies for floating point operations and indicates how NaN
// comparisons are treated:
-enum class ComparisonBias {
+enum class ComparisonBias { // private marker to prevent generate-operator-out.py from processing.
kNoBias, // bias is not applicable (i.e. for long operation)
kGtBias, // return 1 for NaN comparisons
kLtBias, // return -1 for NaN comparisons
kLast = kLtBias
};
-std::ostream& operator<<(std::ostream& os, const ComparisonBias& rhs);
+std::ostream& operator<<(std::ostream& os, ComparisonBias rhs);
class HCondition : public HBinaryOperation {
public:
@@ -4149,8 +4395,6 @@ class HCompare final : public HBinaryOperation {
SideEffectsForArchRuntimeCalls(comparison_type),
dex_pc) {
SetPackedField<ComparisonBiasField>(bias);
- DCHECK_EQ(comparison_type, DataType::Kind(first->GetType()));
- DCHECK_EQ(comparison_type, DataType::Kind(second->GetType()));
}
template <typename T>
@@ -4237,11 +4481,16 @@ class HNewInstance final : public HExpression<1> {
dex_file_(dex_file),
entrypoint_(entrypoint) {
SetPackedFlag<kFlagFinalizable>(finalizable);
+ SetPackedFlag<kFlagPartialMaterialization>(false);
SetRawInputAt(0, cls);
}
bool IsClonable() const override { return true; }
+ void SetPartialMaterialization() {
+ SetPackedFlag<kFlagPartialMaterialization>(true);
+ }
+
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
@@ -4250,6 +4499,9 @@ class HNewInstance final : public HExpression<1> {
// Can throw errors when out-of-memory or if it's not instantiable/accessible.
bool CanThrow() const override { return true; }
+ bool OnlyThrowsAsyncExceptions() const override {
+ return !IsFinalizable() && !NeedsChecks();
+ }
bool NeedsChecks() const {
return entrypoint_ == kQuickAllocObjectWithChecks;
@@ -4259,6 +4511,10 @@ class HNewInstance final : public HExpression<1> {
bool CanBeNull() const override { return false; }
+ bool IsPartialMaterialization() const {
+ return GetPackedFlag<kFlagPartialMaterialization>();
+ }
+
QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
void SetEntrypoint(QuickEntrypointEnum entrypoint) {
@@ -4283,7 +4539,8 @@ class HNewInstance final : public HExpression<1> {
private:
static constexpr size_t kFlagFinalizable = kNumberOfGenericPackedBits;
- static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
+ static constexpr size_t kFlagPartialMaterialization = kFlagFinalizable + 1;
+ static constexpr size_t kNumberOfNewInstancePackedBits = kFlagPartialMaterialization + 1;
static_assert(kNumberOfNewInstancePackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -4292,9 +4549,9 @@ class HNewInstance final : public HExpression<1> {
QuickEntrypointEnum entrypoint_;
};
-enum IntrinsicNeedsEnvironmentOrCache {
- kNoEnvironmentOrCache, // Intrinsic does not require an environment or dex cache.
- kNeedsEnvironmentOrCache // Intrinsic requires an environment or requires a dex cache.
+enum IntrinsicNeedsEnvironment {
+ kNoEnvironment, // Intrinsic does not require an environment.
+ kNeedsEnvironment // Intrinsic requires an environment.
};
enum IntrinsicSideEffects {
@@ -4309,6 +4566,57 @@ enum IntrinsicExceptions {
kCanThrow // Intrinsic may throw exceptions.
};
+// Determines how to load an ArtMethod*.
+enum class MethodLoadKind {
+ // Use a String init ArtMethod* loaded from Thread entrypoints.
+ kStringInit,
+
+ // Use the method's own ArtMethod* loaded by the register allocator.
+ kRecursive,
+
+ // Use PC-relative boot image ArtMethod* address that will be known at link time.
+ // Used for boot image methods referenced by boot image code.
+ kBootImageLinkTimePcRelative,
+
+ // Load from an entry in the .data.bimg.rel.ro using a PC-relative load.
+ // Used for app->boot calls with relocatable image.
+ kBootImageRelRo,
+
+ // Load from an entry in the .bss section using a PC-relative load.
+ // Used for methods outside boot image referenced by AOT-compiled app and boot image code.
+ kBssEntry,
+
+ // Use ArtMethod* at a known address, embed the direct address in the code.
+ // Used for JIT-compiled calls.
+ kJitDirectAddress,
+
+ // Make a runtime call to resolve and call the method. This is the last-resort-kind
+ // used when other kinds are unimplemented on a particular architecture.
+ kRuntimeCall,
+};
+
+// Determines the location of the code pointer of an invoke.
+enum class CodePtrLocation {
+ // Recursive call, use local PC-relative call instruction.
+ kCallSelf,
+
+ // Use the native pointer from the ArtMethod*.
+ // Used for @CriticalNative to avoid going through the compiled stub. This call goes through
+ // a special resolution stub if the class is not initialized or no native code is registered.
+ kCallCriticalNative,
+
+ // Use code pointer from the ArtMethod*.
+ // Used when we don't know the target code. This is also the last-resort-kind used when
+ // other kinds are unimplemented or impractical (i.e. slow) on a particular architecture.
+ kCallArtMethod,
+};
+
+static inline bool IsPcRelativeMethodLoadKind(MethodLoadKind load_kind) {
+ return load_kind == MethodLoadKind::kBootImageLinkTimePcRelative ||
+ load_kind == MethodLoadKind::kBootImageRelRo ||
+ load_kind == MethodLoadKind::kBssEntry;
+}
+
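
A hedged example of how the two enums combine (an assumed scenario, not taken from the
sharpening code): an AOT call to a method outside the boot image would typically load the
ArtMethod* through a .bss entry and call through the method's code pointer.

  MethodLoadKind load_kind = MethodLoadKind::kBssEntry;                  // PC-relative ArtMethod* load.
  CodePtrLocation code_ptr_location = CodePtrLocation::kCallArtMethod;   // Jump via the ArtMethod*.
  DCHECK(IsPcRelativeMethodLoadKind(load_kind));
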
class HInvoke : public HVariableInputSizeInstruction {
public:
bool NeedsEnvironment() const override;
@@ -4323,8 +4631,6 @@ class HInvoke : public HVariableInputSizeInstruction {
// inputs at the end of their list of inputs.
uint32_t GetNumberOfArguments() const { return number_of_arguments_; }
- uint32_t GetDexMethodIndex() const { return dex_method_index_; }
-
InvokeType GetInvokeType() const {
return GetPackedField<InvokeTypeField>();
}
@@ -4334,7 +4640,7 @@ class HInvoke : public HVariableInputSizeInstruction {
}
void SetIntrinsic(Intrinsics intrinsic,
- IntrinsicNeedsEnvironmentOrCache needs_env_or_cache,
+ IntrinsicNeedsEnvironment needs_env,
IntrinsicSideEffects side_effects,
IntrinsicExceptions exceptions);
@@ -4367,7 +4673,13 @@ class HInvoke : public HVariableInputSizeInstruction {
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
- void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetResolvedMethod(ArtMethod* method);
+
+ MethodReference GetMethodReference() const { return method_reference_; }
+
+ const MethodReference GetResolvedMethodReference() const {
+ return resolved_method_reference_;
+ }
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -4387,8 +4699,9 @@ class HInvoke : public HVariableInputSizeInstruction {
uint32_t number_of_other_inputs,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
+ MethodReference resolved_method_reference,
InvokeType invoke_type)
: HVariableInputSizeInstruction(
kind,
@@ -4399,13 +4712,12 @@ class HInvoke : public HVariableInputSizeInstruction {
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
- dex_method_index_(dex_method_index),
+ method_reference_(method_reference),
+ resolved_method_reference_(resolved_method_reference),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
SetPackedField<InvokeTypeField>(invoke_type);
SetPackedFlag<kFlagCanThrow>(true);
- // Check mutator lock, constructors lack annotalysis support.
- Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
SetResolvedMethod(resolved_method);
}
@@ -4413,7 +4725,9 @@ class HInvoke : public HVariableInputSizeInstruction {
uint32_t number_of_arguments_;
ArtMethod* resolved_method_;
- const uint32_t dex_method_index_;
+ const MethodReference method_reference_;
+ // Cached values of the resolved method, to avoid needing the mutator lock.
+ const MethodReference resolved_method_reference_;
Intrinsics intrinsic_;
// A magic word holding optimizations for intrinsics. See intrinsics.h.
@@ -4426,7 +4740,7 @@ class HInvokeUnresolved final : public HInvoke {
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
InvokeType invoke_type)
: HInvoke(kInvokeUnresolved,
allocator,
@@ -4434,8 +4748,9 @@ class HInvokeUnresolved final : public HInvoke {
/* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
nullptr,
+ MethodReference(nullptr, 0u),
invoke_type) {
}
@@ -4453,23 +4768,34 @@ class HInvokePolymorphic final : public HInvoke {
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index)
+ MethodReference method_reference,
+ // resolved_method is the ArtMethod object corresponding to the polymorphic
+ // method (e.g. VarHandle.get), resolved using the class linker. It is needed
+ // to pass intrinsic information to the HInvokePolymorphic node.
+ ArtMethod* resolved_method,
+ MethodReference resolved_method_reference,
+ dex::ProtoIndex proto_idx)
: HInvoke(kInvokePolymorphic,
allocator,
number_of_arguments,
/* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- dex_method_index,
- nullptr,
- kVirtual) {
+ method_reference,
+ resolved_method,
+ resolved_method_reference,
+ kPolymorphic),
+ proto_idx_(proto_idx) {
}
bool IsClonable() const override { return true; }
+ dex::ProtoIndex GetProtoIndex() { return proto_idx_; }
+
DECLARE_INSTRUCTION(InvokePolymorphic);
protected:
+ dex::ProtoIndex proto_idx_;
DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
};
@@ -4479,15 +4805,17 @@ class HInvokeCustom final : public HInvoke {
uint32_t number_of_arguments,
uint32_t call_site_index,
DataType::Type return_type,
- uint32_t dex_pc)
+ uint32_t dex_pc,
+ MethodReference method_reference)
: HInvoke(kInvokeCustom,
allocator,
number_of_arguments,
/* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- /* dex_method_index= */ dex::kDexNoIndex,
+ method_reference,
/* resolved_method= */ nullptr,
+ MethodReference(nullptr, 0u),
kStatic),
call_site_index_(call_site_index) {
}
@@ -4509,53 +4837,13 @@ class HInvokeStaticOrDirect final : public HInvoke {
public:
// Requirements of this method call regarding the class
// initialization (clinit) check of its declaring class.
- enum class ClinitCheckRequirement {
+ enum class ClinitCheckRequirement { // private marker to prevent generate-operator-out.py from processing.
kNone, // Class already initialized.
kExplicit, // Static call having explicit clinit check as last input.
kImplicit, // Static call implicitly requiring a clinit check.
kLast = kImplicit
};
- // Determines how to load the target ArtMethod*.
- enum class MethodLoadKind {
- // Use a String init ArtMethod* loaded from Thread entrypoints.
- kStringInit,
-
- // Use the method's own ArtMethod* loaded by the register allocator.
- kRecursive,
-
- // Use PC-relative boot image ArtMethod* address that will be known at link time.
- // Used for boot image methods referenced by boot image code.
- kBootImageLinkTimePcRelative,
-
- // Load from an entry in the .data.bimg.rel.ro using a PC-relative load.
- // Used for app->boot calls with relocatable image.
- kBootImageRelRo,
-
- // Load from an entry in the .bss section using a PC-relative load.
- // Used for methods outside boot image referenced by AOT-compiled app and boot image code.
- kBssEntry,
-
- // Use ArtMethod* at a known address, embed the direct address in the code.
- // Used for for JIT-compiled calls.
- kJitDirectAddress,
-
- // Make a runtime call to resolve and call the method. This is the last-resort-kind
- // used when other kinds are unimplemented on a particular architecture.
- kRuntimeCall,
- };
-
- // Determines the location of the code pointer.
- enum class CodePtrLocation {
- // Recursive call, use local PC-relative call instruction.
- kCallSelf,
-
- // Use code pointer from the ArtMethod*.
- // Used when we don't know the target code. This is also the last-resort-kind used when
- // other kinds are unimplemented or impractical (i.e. slow) on a particular architecture.
- kCallArtMethod,
- };
-
struct DispatchInfo {
MethodLoadKind method_load_kind;
CodePtrLocation code_ptr_location;
@@ -4570,42 +4858,42 @@ class HInvokeStaticOrDirect final : public HInvoke {
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
DispatchInfo dispatch_info,
InvokeType invoke_type,
- MethodReference target_method,
+ MethodReference resolved_method_reference,
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(kInvokeStaticOrDirect,
allocator,
number_of_arguments,
- // There is potentially one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit.
- (NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
+ // There is potentially one extra argument for the HCurrentMethod input,
+ // and one other if the clinit check is explicit. These can be removed later.
+ (NeedsCurrentMethodInput(dispatch_info) ? 1u : 0u) +
(clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
return_type,
dex_pc,
- method_index,
+ method_reference,
resolved_method,
+ resolved_method_reference,
invoke_type),
- target_method_(target_method),
dispatch_info_(dispatch_info) {
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
bool IsClonable() const override { return true; }
- void SetDispatchInfo(const DispatchInfo& dispatch_info) {
+ void SetDispatchInfo(DispatchInfo dispatch_info) {
bool had_current_method_input = HasCurrentMethodInput();
- bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info.method_load_kind);
+ bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info);
// Using the current method is the default and once we find a better
// method load kind, we should not go back to using the current method.
DCHECK(had_current_method_input || !needs_current_method_input);
if (had_current_method_input && !needs_current_method_input) {
- DCHECK_EQ(InputAt(GetSpecialInputIndex()), GetBlock()->GetGraph()->GetCurrentMethod());
- RemoveInputAt(GetSpecialInputIndex());
+ DCHECK_EQ(InputAt(GetCurrentMethodIndex()), GetBlock()->GetGraph()->GetCurrentMethod());
+ RemoveInputAt(GetCurrentMethodIndex());
}
dispatch_info_ = dispatch_info;
}
@@ -4614,14 +4902,6 @@ class HInvokeStaticOrDirect final : public HInvoke {
return dispatch_info_;
}
- void AddSpecialInput(HInstruction* input) {
- // We allow only one special input.
- DCHECK(!IsStringInit() && !HasCurrentMethodInput());
- DCHECK(InputCount() == GetSpecialInputIndex() ||
- (InputCount() == GetSpecialInputIndex() + 1 && IsStaticWithExplicitClinitCheck()));
- InsertInputAt(GetSpecialInputIndex(), input);
- }
-
using HInstruction::GetInputRecords; // Keep the const version visible.
ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() override {
ArrayRef<HUserRecord<HInstruction*>> input_records = HInvoke::GetInputRecords();
@@ -4642,7 +4922,7 @@ class HInvokeStaticOrDirect final : public HInvoke {
}
bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const override {
- // We access the method via the dex cache so we can't do an implicit null check.
+ // We do not access the method via object reference, so we cannot do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
}
@@ -4651,35 +4931,13 @@ class HInvokeStaticOrDirect final : public HInvoke {
return GetType() == DataType::Type::kReference && !IsStringInit();
}
- // Get the index of the special input, if any.
- //
- // If the invoke HasCurrentMethodInput(), the "special input" is the current
- // method pointer; otherwise there may be one platform-specific special input,
- // such as PC-relative addressing base.
- uint32_t GetSpecialInputIndex() const { return GetNumberOfArguments(); }
- bool HasSpecialInput() const { return GetNumberOfArguments() != InputCount(); }
-
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
- bool NeedsDexCacheOfDeclaringClass() const override;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
bool HasPcRelativeMethodLoadKind() const {
- return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
- GetMethodLoadKind() == MethodLoadKind::kBootImageRelRo ||
- GetMethodLoadKind() == MethodLoadKind::kBssEntry;
- }
- bool HasCurrentMethodInput() const {
- // This function can be called only after the invoke has been fully initialized by the builder.
- if (NeedsCurrentMethodInput(GetMethodLoadKind())) {
- DCHECK(InputAt(GetSpecialInputIndex())->IsCurrentMethod());
- return true;
- } else {
- DCHECK(InputCount() == GetSpecialInputIndex() ||
- !InputAt(GetSpecialInputIndex())->IsCurrentMethod());
- return false;
- }
+ return IsPcRelativeMethodLoadKind(GetMethodLoadKind());
}
QuickEntrypointEnum GetStringInitEntryPoint() const {
@@ -4703,8 +4961,58 @@ class HInvokeStaticOrDirect final : public HInvoke {
return GetInvokeType() == kStatic;
}
- MethodReference GetTargetMethod() const {
- return target_method_;
+ // Does this method load kind need the current method as an input?
+ static bool NeedsCurrentMethodInput(DispatchInfo dispatch_info) {
+ return dispatch_info.method_load_kind == MethodLoadKind::kRecursive ||
+ dispatch_info.method_load_kind == MethodLoadKind::kRuntimeCall ||
+ dispatch_info.code_ptr_location == CodePtrLocation::kCallCriticalNative;
+ }
+
+ // Get the index of the current method input.
+ size_t GetCurrentMethodIndex() const {
+ DCHECK(HasCurrentMethodInput());
+ return GetCurrentMethodIndexUnchecked();
+ }
+ size_t GetCurrentMethodIndexUnchecked() const {
+ return GetNumberOfArguments();
+ }
+
+ // Check if the method has a current method input.
+ bool HasCurrentMethodInput() const {
+ if (NeedsCurrentMethodInput(GetDispatchInfo())) {
+ DCHECK(InputAt(GetCurrentMethodIndexUnchecked()) == nullptr || // During argument setup.
+ InputAt(GetCurrentMethodIndexUnchecked())->IsCurrentMethod());
+ return true;
+ } else {
+ DCHECK(InputCount() == GetCurrentMethodIndexUnchecked() ||
+ InputAt(GetCurrentMethodIndexUnchecked()) == nullptr || // During argument setup.
+ !InputAt(GetCurrentMethodIndexUnchecked())->IsCurrentMethod());
+ return false;
+ }
+ }
+
+ // Get the index of the special input.
+ size_t GetSpecialInputIndex() const {
+ DCHECK(HasSpecialInput());
+ return GetSpecialInputIndexUnchecked();
+ }
+ size_t GetSpecialInputIndexUnchecked() const {
+ return GetNumberOfArguments() + (HasCurrentMethodInput() ? 1u : 0u);
+ }
+
+ // Check if the method has a special input.
+ bool HasSpecialInput() const {
+ size_t other_inputs =
+ GetSpecialInputIndexUnchecked() + (IsStaticWithExplicitClinitCheck() ? 1u : 0u);
+ size_t input_count = InputCount();
+ DCHECK_LE(input_count - other_inputs, 1u) << other_inputs << " " << input_count;
+ return other_inputs != input_count;
+ }
+
+ void AddSpecialInput(HInstruction* input) {
+ // We allow only one special input.
+ DCHECK(!HasSpecialInput());
+ InsertInputAt(GetSpecialInputIndexUnchecked(), input);
}
// Remove the HClinitCheck or the replacement HLoadClass (set as last input by
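
Taken together, the index helpers above imply the following input layout for
HInvokeStaticOrDirect; this summary is inferred from those helpers, not stated explicitly
anywhere in the patch:

  // [0 .. GetNumberOfArguments())        method arguments
  // [GetCurrentMethodIndexUnchecked()]   HCurrentMethod, present only if
  //                                      NeedsCurrentMethodInput(dispatch_info_)
  // [GetSpecialInputIndexUnchecked()]    optional platform-specific special input
  //                                      (e.g. a PC-relative addressing base)
  // [last input]                         HClinitCheck / replacement HLoadClass, only for
  //                                      ClinitCheckRequirement::kExplicit
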
@@ -4734,11 +5042,6 @@ class HInvokeStaticOrDirect final : public HInvoke {
return IsStatic() && (GetClinitCheckRequirement() == ClinitCheckRequirement::kImplicit);
}
- // Does this method load kind need the current method as an input?
- static bool NeedsCurrentMethodInput(MethodLoadKind kind) {
- return kind == MethodLoadKind::kRecursive || kind == MethodLoadKind::kRuntimeCall;
- }
-
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
protected:
@@ -4756,11 +5059,10 @@ class HInvokeStaticOrDirect final : public HInvoke {
kFieldClinitCheckRequirement,
kFieldClinitCheckRequirementSize>;
- // Cached values of the resolved method, to avoid needing the mutator lock.
- const MethodReference target_method_;
DispatchInfo dispatch_info_;
};
-std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
+std::ostream& operator<<(std::ostream& os, MethodLoadKind rhs);
+std::ostream& operator<<(std::ostream& os, CodePtrLocation rhs);
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
class HInvokeVirtual final : public HInvoke {
@@ -4769,8 +5071,9 @@ class HInvokeVirtual final : public HInvoke {
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
+ MethodReference resolved_method_reference,
uint32_t vtable_index)
: HInvoke(kInvokeVirtual,
allocator,
@@ -4778,8 +5081,9 @@ class HInvokeVirtual final : public HInvoke {
0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
resolved_method,
+ resolved_method_reference,
kVirtual),
vtable_index_(vtable_index) {
}
@@ -4808,10 +5112,7 @@ class HInvokeVirtual final : public HInvoke {
}
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
- // TODO: Add implicit null checks in intrinsics.
- return (obj == InputAt(0)) && !IsIntrinsic();
- }
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override;
uint32_t GetVTableIndex() const { return vtable_index_; }
@@ -4831,19 +5132,27 @@ class HInvokeInterface final : public HInvoke {
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index,
+ MethodReference method_reference,
ArtMethod* resolved_method,
- uint32_t imt_index)
+ MethodReference resolved_method_reference,
+ uint32_t imt_index,
+ MethodLoadKind load_kind)
: HInvoke(kInvokeInterface,
allocator,
- number_of_arguments,
+ number_of_arguments + (NeedsCurrentMethod(load_kind) ? 1 : 0),
0u,
return_type,
dex_pc,
- dex_method_index,
+ method_reference,
resolved_method,
+ resolved_method_reference,
kInterface),
- imt_index_(imt_index) {
+ imt_index_(imt_index),
+ hidden_argument_load_kind_(load_kind) {
+ }
+
+ static bool NeedsCurrentMethod(MethodLoadKind load_kind) {
+ return load_kind == MethodLoadKind::kRecursive;
}
bool IsClonable() const override { return true; }
@@ -4853,12 +5162,16 @@ class HInvokeInterface final : public HInvoke {
return (obj == InputAt(0)) && !IsIntrinsic();
}
- bool NeedsDexCacheOfDeclaringClass() const override {
- // The assembly stub currently needs it.
- return true;
+ size_t GetSpecialInputIndex() const {
+ return GetNumberOfArguments();
+ }
+
+ void AddSpecialInput(HInstruction* input) {
+ InsertInputAt(GetSpecialInputIndex(), input);
}
uint32_t GetImtIndex() const { return imt_index_; }
+ MethodLoadKind GetHiddenArgumentLoadKind() const { return hidden_argument_load_kind_; }
DECLARE_INSTRUCTION(InvokeInterface);
@@ -4868,6 +5181,9 @@ class HInvokeInterface final : public HInvoke {
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t imt_index_;
+
+ // How the hidden argument (the interface method) is being loaded.
+ const MethodLoadKind hidden_argument_load_kind_;
};
class HNeg final : public HUnaryOperation {
@@ -5542,8 +5858,6 @@ class HRor final : public HBinaryOperation {
public:
HRor(DataType::Type result_type, HInstruction* value, HInstruction* distance)
: HBinaryOperation(kRor, result_type, value, distance) {
- DCHECK_EQ(result_type, DataType::Kind(value->GetType()));
- DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(distance->GetType()));
}
template <typename T>
@@ -5797,6 +6111,23 @@ class FieldInfo : public ValueObject {
const DexFile& GetDexFile() const { return dex_file_; }
bool IsVolatile() const { return is_volatile_; }
+ bool Equals(const FieldInfo& other) const {
+ return field_ == other.field_ &&
+ field_offset_ == other.field_offset_ &&
+ field_type_ == other.field_type_ &&
+ is_volatile_ == other.is_volatile_ &&
+ index_ == other.index_ &&
+ declaring_class_def_index_ == other.declaring_class_def_index_ &&
+ &dex_file_ == &other.dex_file_;
+ }
+
+ std::ostream& Dump(std::ostream& os) const {
+ os << field_ << ", off: " << field_offset_ << ", type: " << field_type_
+ << ", volatile: " << std::boolalpha << is_volatile_ << ", index_: " << std::dec << index_
+ << ", declaring_class: " << declaring_class_def_index_ << ", dex: " << dex_file_;
+ return os;
+ }
+
private:
ArtField* const field_;
const MemberOffset field_offset_;
@@ -5807,6 +6138,14 @@ class FieldInfo : public ValueObject {
const DexFile& dex_file_;
};
+inline bool operator==(const FieldInfo& a, const FieldInfo& b) {
+ return a.Equals(b);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const FieldInfo& a) {
+ return a.Dump(os);
+}
+
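
A hedged usage sketch combining the new HInstruction::IsFieldAccess()/GetFieldInfo()
virtuals with the FieldInfo equality above (illustration only; AccessSameField is an
invented helper, not part of this patch):

  // True if both instructions access the same field, regardless of which concrete
  // accessor kind (instance/static/predicated get or set) they are.
  bool AccessSameField(const HInstruction* a, const HInstruction* b) {
    return a->IsFieldAccess() && b->IsFieldAccess() && a->GetFieldInfo() == b->GetFieldInfo();
  }
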
class HInstanceFieldGet final : public HExpression<1> {
public:
HInstanceFieldGet(HInstruction* value,
@@ -5848,7 +6187,8 @@ class HInstanceFieldGet final : public HExpression<1> {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
@@ -5869,6 +6209,96 @@ class HInstanceFieldGet final : public HExpression<1> {
const FieldInfo field_info_;
};
+class HPredicatedInstanceFieldGet final : public HExpression<2> {
+ public:
+ HPredicatedInstanceFieldGet(HInstanceFieldGet* orig,
+ HInstruction* target,
+ HInstruction* default_val)
+ : HExpression(kPredicatedInstanceFieldGet,
+ orig->GetFieldType(),
+ orig->GetSideEffects(),
+ orig->GetDexPc()),
+ field_info_(orig->GetFieldInfo()) {
+ // NB: The default value is at input 0 so we can avoid doing a move.
+ SetRawInputAt(1, target);
+ SetRawInputAt(0, default_val);
+ }
+
+ HPredicatedInstanceFieldGet(HInstruction* value,
+ ArtField* field,
+ HInstruction* default_value,
+ DataType::Type field_type,
+ MemberOffset field_offset,
+ bool is_volatile,
+ uint32_t field_idx,
+ uint16_t declaring_class_def_index,
+ const DexFile& dex_file,
+ uint32_t dex_pc)
+ : HExpression(kPredicatedInstanceFieldGet,
+ field_type,
+ SideEffects::FieldReadOfType(field_type, is_volatile),
+ dex_pc),
+ field_info_(field,
+ field_offset,
+ field_type,
+ is_volatile,
+ field_idx,
+ declaring_class_def_index,
+ dex_file) {
+ SetRawInputAt(1, value);
+ SetRawInputAt(0, default_value);
+ }
+
+ bool IsClonable() const override {
+ return true;
+ }
+ bool CanBeMoved() const override {
+ return !IsVolatile();
+ }
+
+ HInstruction* GetDefaultValue() const {
+ return InputAt(0);
+ }
+ HInstruction* GetTarget() const {
+ return InputAt(1);
+ }
+
+ bool InstructionDataEquals(const HInstruction* other) const override {
+ const HPredicatedInstanceFieldGet* other_get = other->AsPredicatedInstanceFieldGet();
+ return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue() &&
+ GetDefaultValue() == other_get->GetDefaultValue();
+ }
+
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const override {
+ return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
+ }
+
+ size_t ComputeHashCode() const override {
+ return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
+ }
+
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
+ MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
+ DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
+ bool IsVolatile() const { return field_info_.IsVolatile(); }
+
+ void SetType(DataType::Type new_type) {
+ DCHECK(DataType::IsIntegralType(GetType()));
+ DCHECK(DataType::IsIntegralType(new_type));
+ DCHECK_EQ(DataType::Size(GetType()), DataType::Size(new_type));
+ SetPackedField<TypeField>(new_type);
+ }
+
+ DECLARE_INSTRUCTION(PredicatedInstanceFieldGet);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(PredicatedInstanceFieldGet);
+
+ private:
+ const FieldInfo field_info_;
+};
+
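
The intended semantics of HPredicatedInstanceFieldGet are not spelled out in this header;
based on the accessors above, a hedged pseudo-code reading (an assumption, the actual
lowering is in the code generators):

  // Assumed behaviour:
  //   result = (GetTarget() != null) ? field described by GetFieldInfo(), read from GetTarget()
  //                                  : GetDefaultValue();
  // i.e. the read is predicated on the target object actually being present, with the
  // default value used otherwise.
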
class HInstanceFieldSet final : public HExpression<2> {
public:
HInstanceFieldSet(HInstruction* object,
@@ -5892,6 +6322,7 @@ class HInstanceFieldSet final : public HExpression<2> {
declaring_class_def_index,
dex_file) {
SetPackedFlag<kFlagValueCanBeNull>(true);
+ SetPackedFlag<kFlagIsPredicatedSet>(false);
SetRawInputAt(0, object);
SetRawInputAt(1, value);
}
@@ -5902,13 +6333,16 @@ class HInstanceFieldSet final : public HExpression<2> {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
HInstruction* GetValue() const { return InputAt(1); }
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
+ bool GetIsPredicatedSet() const { return GetPackedFlag<kFlagIsPredicatedSet>(); }
+ void SetIsPredicatedSet(bool value = true) { SetPackedFlag<kFlagIsPredicatedSet>(value); }
DECLARE_INSTRUCTION(InstanceFieldSet);
@@ -5917,7 +6351,8 @@ class HInstanceFieldSet final : public HExpression<2> {
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
- static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagValueCanBeNull + 1;
+ static constexpr size_t kFlagIsPredicatedSet = kFlagValueCanBeNull + 1;
+ static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagIsPredicatedSet + 1;
static_assert(kNumberOfInstanceFieldSetPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
@@ -6284,6 +6719,21 @@ class HLoadClass final : public HInstruction {
// Used for classes outside boot image referenced by AOT-compiled app and boot image code.
kBssEntry,
+ // Load from an entry for a public class in the .bss section using a PC-relative load.
+ // Used for classes that were unresolved during AOT-compilation outside the literal
+ // package of the compiling class. Such classes are accessible only if they are public,
+ // and the .bss entry shall therefore be filled only if the resolved class is public.
+ kBssEntryPublic,
+
+ // Load from an entry for a package class in the .bss section using a PC-relative load.
+ // Used for classes that were unresolved during AOT-compilation but within the literal
+ // package of the compiling class. Such classes are accessible if they are public or
+ // in the same package which, given the literal package match, requires only a matching
+ // defining class loader; the .bss entry shall therefore be filled only if at least
+ // one of those conditions holds. Note that all code in an oat file belongs to classes
+ // with the same defining class loader.
+ kBssEntryPackage,
+
// Use a known boot image Class* address, embedded in the code by the codegen.
// Used for boot image classes referenced by apps in JIT-compiled code.
kJitBootImageAddress,
@@ -6336,7 +6786,9 @@ class HLoadClass final : public HInstruction {
bool HasPcRelativeLoadKind() const {
return GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
GetLoadKind() == LoadKind::kBootImageRelRo ||
- GetLoadKind() == LoadKind::kBssEntry;
+ GetLoadKind() == LoadKind::kBssEntry ||
+ GetLoadKind() == LoadKind::kBssEntryPublic ||
+ GetLoadKind() == LoadKind::kBssEntryPackage;
}
bool CanBeMoved() const override { return true; }
@@ -6352,9 +6804,6 @@ class HLoadClass final : public HInstruction {
}
void SetMustGenerateClinitCheck(bool generate_clinit_check) {
- // The entrypoint the code generator is going to call does not do
- // clinit of the class.
- DCHECK(!NeedsAccessCheck());
SetPackedFlag<kFlagGenerateClInitCheck>(generate_clinit_check);
}
@@ -6384,7 +6833,7 @@ class HLoadClass final : public HInstruction {
}
// Loaded class RTI is marked as valid by RTP if the klass_ is admissible.
- void SetValidLoadedClassRTI() REQUIRES_SHARED(Locks::mutator_lock_) {
+ void SetValidLoadedClassRTI() {
DCHECK(klass_ != nullptr);
SetPackedFlag<kFlagValidLoadedClassRTI>(true);
}
@@ -6392,10 +6841,6 @@ class HLoadClass final : public HInstruction {
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
- bool NeedsDexCacheOfDeclaringClass() const override {
- return GetLoadKind() == LoadKind::kRuntimeCall;
- }
-
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
}
@@ -6407,9 +6852,14 @@ class HLoadClass final : public HInstruction {
bool MustResolveTypeOnSlowPath() const {
// Check that this instruction has a slow path.
- DCHECK(GetLoadKind() != LoadKind::kRuntimeCall); // kRuntimeCall calls on main path.
- DCHECK(GetLoadKind() == LoadKind::kBssEntry || MustGenerateClinitCheck());
- return GetLoadKind() == LoadKind::kBssEntry;
+ LoadKind load_kind = GetLoadKind();
+ DCHECK(load_kind != LoadKind::kRuntimeCall); // kRuntimeCall calls on main path.
+ bool must_resolve_type_on_slow_path =
+ load_kind == LoadKind::kBssEntry ||
+ load_kind == LoadKind::kBssEntryPublic ||
+ load_kind == LoadKind::kBssEntryPackage;
+ DCHECK(must_resolve_type_on_slow_path || MustGenerateClinitCheck());
+ return must_resolve_type_on_slow_path;
}
void MarkInBootImage() {
@@ -6451,6 +6901,8 @@ class HLoadClass final : public HInstruction {
return load_kind == LoadKind::kReferrersClass ||
load_kind == LoadKind::kBootImageLinkTimePcRelative ||
load_kind == LoadKind::kBssEntry ||
+ load_kind == LoadKind::kBssEntryPublic ||
+ load_kind == LoadKind::kBssEntryPackage ||
load_kind == LoadKind::kRuntimeCall;
}
@@ -6458,14 +6910,14 @@ class HLoadClass final : public HInstruction {
// The special input is the HCurrentMethod for kRuntimeCall or kReferrersClass.
// For other load kinds it's empty or possibly some architecture-specific instruction
- // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
+ // for PC-relative loads, i.e. kBssEntry* or kBootImageLinkTimePcRelative.
HUserRecord<HInstruction*> special_input_;
// A type index and dex file where the class can be accessed. The dex file can be:
// - The compiling method's dex file if the class is defined there too.
// - The compiling method's dex file if the class is referenced there.
// - The dex file where the class is defined. When the load kind can only be
- // kBssEntry or kRuntimeCall, we cannot emit code for this `HLoadClass`.
+ // kBssEntry* or kRuntimeCall, we cannot emit code for this `HLoadClass`.
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
@@ -6494,6 +6946,8 @@ inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
GetLoadKind() == LoadKind::kBootImageRelRo ||
GetLoadKind() == LoadKind::kBssEntry ||
+ GetLoadKind() == LoadKind::kBssEntryPublic ||
+ GetLoadKind() == LoadKind::kBssEntryPackage ||
GetLoadKind() == LoadKind::kJitBootImageAddress) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
special_input_ = HUserRecord<HInstruction*>(special_input);
@@ -6593,10 +7047,6 @@ class HLoadString final : public HInstruction {
return true;
}
- bool NeedsDexCacheOfDeclaringClass() const override {
- return GetLoadKind() == LoadKind::kRuntimeCall;
- }
-
bool CanBeNull() const override { return false; }
bool CanThrow() const override { return NeedsEnvironment(); }
@@ -6833,7 +7283,8 @@ class HStaticFieldGet final : public HExpression<1> {
return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
}
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
@@ -6882,7 +7333,8 @@ class HStaticFieldSet final : public HExpression<2> {
}
bool IsClonable() const override { return true; }
- const FieldInfo& GetFieldInfo() const { return field_info_; }
+ bool IsFieldAccess() const override { return true; }
+ const FieldInfo& GetFieldInfo() const override { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
@@ -7146,7 +7598,7 @@ class HThrow final : public HExpression<1> {
* Implementation strategies for the code generator of a HInstanceOf
* or `HCheckCast`.
*/
-enum class TypeCheckKind {
+enum class TypeCheckKind { // private marker to prevent generate-operator-out.py from processing.
kUnresolvedCheck, // Check against an unresolved type.
kExactCheck, // Can do a single class compare.
kClassHierarchyCheck, // Can just walk the super class chain.
@@ -7244,7 +7696,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
}
// Target class RTI is marked as valid by RTP if the klass_ is admissible.
- void SetValidTargetClassRTI() REQUIRES_SHARED(Locks::mutator_lock_) {
+ void SetValidTargetClassRTI() {
DCHECK(klass_ != nullptr);
SetPackedFlag<kFlagValidTargetClassRTI>(true);
}
@@ -7420,7 +7872,7 @@ enum MemBarrierKind {
kNTStoreStore,
kLastBarrierKind = kNTStoreStore
};
-std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
+std::ostream& operator<<(std::ostream& os, MemBarrierKind kind);
class HMemoryBarrier final : public HExpression<0> {
public:
@@ -7801,7 +8253,7 @@ class HParallelMove final : public HExpression<0> {
DCHECK(!destination.OverlapsWith(move.GetDestination()))
<< "Overlapped destination for two moves in a parallel move: "
<< move.GetSource() << " ==> " << move.GetDestination() << " and "
- << source << " ==> " << destination;
+ << source << " ==> " << destination << " for " << SafePrint(instruction);
}
}
moves_.emplace_back(source, destination, type, instruction);
@@ -8121,10 +8573,18 @@ inline HInstruction* HuntForDeclaration(HInstruction* instruction) {
return instruction;
}
+inline bool IsAddOrSub(const HInstruction* instruction) {
+ return instruction->IsAdd() || instruction->IsSub();
+}
+
void RemoveEnvironmentUses(HInstruction* instruction);
bool HasEnvironmentUsedByOthers(HInstruction* instruction);
void ResetEnvironmentInputRecords(HInstruction* instruction);
+// Detects an instruction that is >= 0. As long as the value is carried by
+// a single instruction, arithmetic wrap-around cannot occur.
+bool IsGEZero(HInstruction* instruction);
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_H_