Diffstat (limited to 'compiler/optimizing/execution_subgraph.h')
-rw-r--r--  compiler/optimizing/execution_subgraph.h  44
1 file changed, 44 insertions, 0 deletions
diff --git a/compiler/optimizing/execution_subgraph.h b/compiler/optimizing/execution_subgraph.h
index dac938ed62..7fabbaead1 100644
--- a/compiler/optimizing/execution_subgraph.h
+++ b/compiler/optimizing/execution_subgraph.h
@@ -27,6 +27,7 @@
#include "base/bit_vector-inl.h"
#include "base/globals.h"
#include "base/iteration_range.h"
+#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
@@ -35,6 +36,18 @@
namespace art {
+// Helper for transforming blocks to block_ids.
+class BlockToBlockIdTransformer {
+ public:
+ BlockToBlockIdTransformer(BlockToBlockIdTransformer&&) = default;
+ BlockToBlockIdTransformer(const BlockToBlockIdTransformer&) = default;
+ BlockToBlockIdTransformer() {}
+
+ inline uint32_t operator()(const HBasicBlock* b) const {
+ return b->GetBlockId();
+ }
+};
+
// Helper for transforming block ids to blocks.
class BlockIdToBlockTransformer {
public:
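
The BlockToBlockIdTransformer added above is a stateless projection functor
meant to be plugged into a transform iterator, the inverse of the existing
BlockIdToBlockTransformer it sits next to. A minimal standalone sketch of the
idiom, with a mock Block type and std::transform standing in for ART's
TransformIterator (all names in the sketch are illustrative, not ART's):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

// Stand-in for HBasicBlock: only the GetBlockId() accessor matters here.
struct Block {
  uint32_t id;
  uint32_t GetBlockId() const { return id; }
};

// Same shape as BlockToBlockIdTransformer: a stateless projection functor.
struct BlockToIdTransformer {
  uint32_t operator()(const Block* b) const { return b->GetBlockId(); }
};

int main() {
  Block b0{0}, b1{1}, b2{2};
  std::vector<const Block*> blocks = {&b2, &b0, &b1};

  // std::transform plays the role of the transform iterator here: the
  // functor turns an iteration over blocks into an iteration over their ids.
  std::vector<uint32_t> ids;
  std::transform(blocks.begin(), blocks.end(), std::back_inserter(ids),
                 BlockToIdTransformer());

  for (uint32_t id : ids) {
    std::cout << id << " ";  // Prints: 2 0 1
  }
  std::cout << "\n";
  return 0;
}
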
@@ -61,6 +74,20 @@ class BlockIdToBlockTransformer {
const HGraph* const graph_;
};
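+// Helper for filtering blocks: true if the block's id is set in the given BitVector.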
+class BlockIdFilterThunk {
+ public:
+ explicit BlockIdFilterThunk(const BitVector& i) : inner_(i) {}
+ BlockIdFilterThunk(BlockIdFilterThunk&& other) noexcept = default;
+ BlockIdFilterThunk(const BlockIdFilterThunk&) = default;
+
+ bool operator()(const HBasicBlock* b) const {
+ return inner_.IsBitSet(b->GetBlockId());
+ }
+
+ private:
+ const BitVector& inner_;
+};
+
// A representation of a particular section of the graph. The graph is split
// into an excluded and included area and is used to track escapes.
//
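
BlockIdFilterThunk, added above, is the filtering counterpart: it captures a
BitVector of block ids and answers whether a given block's id is set, so an
already-ordered range of blocks can be filtered without reordering. A
self-contained sketch of the same pattern, with std::bitset standing in for
art::BitVector and hypothetical names (Block, BlockIdFilter):

#include <bitset>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for HBasicBlock.
struct Block {
  uint32_t id;
  uint32_t GetBlockId() const { return id; }
};

// Same shape as BlockIdFilterThunk: holds a reference to a bit set and
// reports whether a block's id is a member of it.
class BlockIdFilter {
 public:
  explicit BlockIdFilter(const std::bitset<8>& ids) : ids_(ids) {}
  bool operator()(const Block* b) const { return ids_.test(b->GetBlockId()); }

 private:
  const std::bitset<8>& ids_;
};

int main() {
  Block b0{0}, b1{1}, b2{2}, b3{3};
  std::vector<const Block*> reverse_post_order = {&b0, &b2, &b1, &b3};

  std::bitset<8> entry_ids;
  entry_ids.set(1);
  entry_ids.set(3);

  // Filtering an ordered walk with the predicate keeps the walk's order,
  // which is what the EntryBlocksReversePostOrder() helper added later in
  // this diff relies on.
  BlockIdFilter is_entry(entry_ids);
  for (const Block* b : reverse_post_order) {
    if (is_entry(b)) {
      std::cout << b->GetBlockId() << " ";  // Prints: 1 3
    }
  }
  std::cout << "\n";
  return 0;
}
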
@@ -80,10 +107,18 @@ class BlockIdToBlockTransformer {
// cohort-exit block to reach any cohort-entry block. This means we can use the
// boundary between the cohort and the rest of the graph to insert
// materialization blocks for partial LSE.
+//
+// TODO We really should expand this to take into account where the object
+// allocation takes place directly. Currently we always act as though it were
+// allocated in the entry block. This is a massively simplifying assumption but
+// means we can't partially remove objects that are repeatedly allocated in a
+// loop.
class ExecutionSubgraph : public ArenaObject<kArenaAllocLSA> {
public:
using BitVecBlockRange =
IterationRange<TransformIterator<BitVector::IndexIterator, BlockIdToBlockTransformer>>;
+ using FilteredBitVecBlockRange = IterationRange<
+ FilterIterator<ArenaVector<HBasicBlock*>::const_iterator, BlockIdFilterThunk>>;
// A connected set of blocks which are removed from the
// ExecutionSubgraph. See above comment for explanation.
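
To make the cohort property in the comment above concrete: a cohort is only
usable for partial LSE if control flow that leaves it can never get back in,
since the materialization blocks sit on that one-way boundary. A toy,
self-contained check of that invariant, with blocks as plain integer ids and
the CFG as an adjacency list (CohortCannotBeReentered and the example graph
are hypothetical, not ART code):

#include <cstdint>
#include <iostream>
#include <queue>
#include <set>
#include <vector>

using Cfg = std::vector<std::vector<uint32_t>>;

bool CohortCannotBeReentered(const Cfg& succ, const std::set<uint32_t>& cohort) {
  // Seed a BFS with every block that is outside the cohort but is a direct
  // successor of a cohort block, i.e. the first block reached after leaving.
  std::queue<uint32_t> worklist;
  std::set<uint32_t> visited;
  for (uint32_t b : cohort) {
    for (uint32_t s : succ[b]) {
      if (cohort.count(s) == 0 && visited.insert(s).second) {
        worklist.push(s);
      }
    }
  }
  // If the BFS ever steps back into the cohort, the property is violated.
  while (!worklist.empty()) {
    uint32_t b = worklist.front();
    worklist.pop();
    for (uint32_t s : succ[b]) {
      if (cohort.count(s) != 0) {
        return false;
      }
      if (visited.insert(s).second) {
        worklist.push(s);
      }
    }
  }
  return true;
}

int main() {
  // 0 -> 1 -> 2 -> 3, with a back edge 3 -> 1. Cohort {1} can be re-entered
  // through the loop, so it is not a valid excluded cohort; cohort {1, 2, 3}
  // is, because its only exit leads to a block that never loops back.
  Cfg succ = {{1}, {2}, {3}, {1, 4}, {}};
  std::cout << CohortCannotBeReentered(succ, {1}) << "\n";        // Prints 0.
  std::cout << CohortCannotBeReentered(succ, {1, 2, 3}) << "\n";  // Prints 1.
  return 0;
}
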
@@ -110,6 +145,15 @@ class ExecutionSubgraph : public ArenaObject<kArenaAllocLSA> {
return BlockIterRange(entry_blocks_);
}
+ FilteredBitVecBlockRange EntryBlocksReversePostOrder() const {
+ return Filter(MakeIterationRange(graph_->GetReversePostOrder()),
+ BlockIdFilterThunk(entry_blocks_));
+ }
+
+ bool IsEntryBlock(const HBasicBlock* blk) const {
+ return entry_blocks_.IsBitSet(blk->GetBlockId());
+ }
+
// Blocks that have successors outside of the cohort. The successors of
// these blocks will need to have PHI's to restore state.
BitVecBlockRange ExitBlocks() const {