author     Alex Light <allight@google.com>  2021-01-22 14:05:13 +0000
committer  Alex Light <allight@google.com>  2021-01-22 07:15:51 -0800
commit     fc1ce4e8be0d977e3d41699f5ec746d68f63c024 (patch)
tree       b656aa7c9e62aa181dfbf7fd4f2a0d32b8bf0704 /compiler/optimizing/load_store_analysis.cc
parent     c6da1be58086e873c9695f8c4c1a3a8ca718696e (diff)
Revert^2 "Partial Load Store Elimination"
This reverts commit 47ac53100303e7e864b7f6d65f17b23088ccf1d6.
There was a bug in LSE where we would incorrectly record the
shadow$_monitor_ field as not having a default initial value. This
left partial LSE unable to compile the Object.identityHashCode
function, causing crashes. That issue was fixed in a parent CL. This
CL also updates all offsets in LSE_test to lie outside the object
header regardless of configuration.
Test: ./test.py --host
Bug: 67037140
Reason for revert: Fixed issue with shadow$_monitor_ field and offsets
Change-Id: I4fb2afff4d410da818db38ed833927dfc0f6be33
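
As a rough illustration of the bug's shape described in the commit message (a sketch with hypothetical types and names, not ART code): for a freshly allocated object every field, including header fields such as the monitor word backing shadow$_monitor_, starts at its default zero value, so the analysis must not record those heap locations as lacking a default.

// Hypothetical sketch, not ART code. Names (HeapLocation, kObjectHeaderSize)
// are made up for illustration only.
#include <cstddef>

struct HeapLocation {
  size_t offset;           // byte offset of the field within the object
  bool from_new_instance;  // base reference is a fresh allocation
};

constexpr size_t kObjectHeaderSize = 8;  // assumed header size for the sketch

// Buggy shape: header fields (e.g. the monitor word) are treated as having no
// known initial value, so partial LSE cannot reason about loads from them.
bool HasDefaultValueBuggy(const HeapLocation& loc) {
  return loc.from_new_instance && loc.offset >= kObjectHeaderSize;
}

// Fixed shape: a new instance starts with all fields zeroed, header included.
bool HasDefaultValueFixed(const HeapLocation& loc) {
  return loc.from_new_instance;
}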
Diffstat (limited to 'compiler/optimizing/load_store_analysis.cc')
-rw-r--r--  compiler/optimizing/load_store_analysis.cc | 43
1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/compiler/optimizing/load_store_analysis.cc b/compiler/optimizing/load_store_analysis.cc
index 3daa6472de..38ed98adaf 100644
--- a/compiler/optimizing/load_store_analysis.cc
+++ b/compiler/optimizing/load_store_analysis.cc
@@ -16,6 +16,9 @@
 #include "load_store_analysis.h"
 
+#include "base/scoped_arena_allocator.h"
+#include "optimizing/escape.h"
+
 namespace art {
 
 // A cap for the number of heap locations to prevent pathological time/space consumption.
@@ -100,14 +103,11 @@ void ReferenceInfo::PrunePartialEscapeWrites() {
       allocator_, graph->GetBlocks().size(), false, kArenaAllocLSA);
   for (const HUseListNode<HInstruction*>& use : reference_->GetUses()) {
     const HInstruction* user = use.GetUser();
-    const bool possible_exclusion =
-        !additional_exclusions.IsBitSet(user->GetBlock()->GetBlockId()) &&
-        subgraph_.ContainsBlock(user->GetBlock());
-    const bool is_written_to =
+    if (!additional_exclusions.IsBitSet(user->GetBlock()->GetBlockId()) &&
+        subgraph_.ContainsBlock(user->GetBlock()) &&
         (user->IsUnresolvedInstanceFieldSet() || user->IsUnresolvedStaticFieldSet() ||
          user->IsInstanceFieldSet() || user->IsStaticFieldSet() || user->IsArraySet()) &&
-        (reference_ == user->InputAt(0));
-    if (possible_exclusion && is_written_to &&
+        (reference_ == user->InputAt(0)) &&
         std::any_of(subgraph_.UnreachableBlocks().begin(),
                     subgraph_.UnreachableBlocks().end(),
                     [&](const HBasicBlock* excluded) -> bool {
@@ -148,6 +148,37 @@ bool HeapLocationCollector::InstructionEligibleForLSERemoval(HInstruction* inst)
   }
 }
 
+void ReferenceInfo::CollectPartialEscapes(HGraph* graph) {
+  ScopedArenaAllocator saa(graph->GetArenaStack());
+  ArenaBitVector seen_instructions(&saa, graph->GetCurrentInstructionId(), false, kArenaAllocLSA);
+  // Get regular escapes.
+  ScopedArenaVector<HInstruction*> additional_escape_vectors(saa.Adapter(kArenaAllocLSA));
+  LambdaEscapeVisitor scan_instructions([&](HInstruction* escape) -> bool {
+    HandleEscape(escape);
+    // LSE can't track heap-locations through Phi and Select instructions so we
+    // need to assume all escapes from these are escapes for the base reference.
+    if ((escape->IsPhi() || escape->IsSelect()) && !seen_instructions.IsBitSet(escape->GetId())) {
+      seen_instructions.SetBit(escape->GetId());
+      additional_escape_vectors.push_back(escape);
+    }
+    return true;
+  });
+  additional_escape_vectors.push_back(reference_);
+  while (!additional_escape_vectors.empty()) {
+    HInstruction* ref = additional_escape_vectors.back();
+    additional_escape_vectors.pop_back();
+    DCHECK(ref == reference_ || ref->IsPhi() || ref->IsSelect()) << *ref;
+    VisitEscapes(ref, scan_instructions);
+  }
+
+  // Mark irreducible loop headers as escaping since they cannot be tracked through.
+  for (HBasicBlock* blk : graph->GetActiveBlocks()) {
+    if (blk->IsLoopHeader() && blk->GetLoopInformation()->IsIrreducible()) {
+      HandleEscape(blk);
+    }
+  }
+}
+
 void HeapLocationCollector::DumpReferenceStats(OptimizingCompilerStats* stats) {
   if (stats == nullptr) {
     return;
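
The new ReferenceInfo::CollectPartialEscapes above follows escapes transitively through Phi and Select instructions with a small worklist. A minimal, self-contained sketch of the same pattern, using hypothetical types rather than ART's HInstruction/LambdaEscapeVisitor API:

// Hypothetical sketch of the worklist idea, not ART code: record every direct
// escape of a reference, and when the escaping node is Phi/Select-like, queue
// it so its own escapes are also attributed to the original reference.
#include <functional>
#include <unordered_set>
#include <vector>

struct Node {
  bool is_phi_or_select = false;
  std::vector<Node*> escapes;  // nodes through which the value escapes
};

void CollectTransitiveEscapes(Node* reference,
                              const std::function<void(Node*)>& handle_escape) {
  std::unordered_set<Node*> seen;  // avoid revisiting Phi/Select nodes
  std::vector<Node*> worklist{reference};
  while (!worklist.empty()) {
    Node* ref = worklist.back();
    worklist.pop_back();
    for (Node* escape : ref->escapes) {
      handle_escape(escape);
      // Heap locations cannot be tracked through Phi/Select, so their escapes
      // count as escapes of the original reference as well.
      if (escape->is_phi_or_select && seen.insert(escape).second) {
        worklist.push_back(escape);
      }
    }
  }
}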