summaryrefslogtreecommitdiff
path: root/compiler/optimizing/stack_map_test.cc
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2017-01-17 09:32:18 -0800
committerMathieu Chartier <mathieuc@google.com>2017-02-15 14:46:15 -0800
commitd776ff08e07494327716f0d2ea1a774b2ebfbca9 (patch)
treecedf874dd494d881adc572a10a9d14bca852add6 /compiler/optimizing/stack_map_test.cc
parent3fb852a88d2a8ffaa87089752f4b1d5f9d6ce3c1 (diff)
Add invoke infos to stack maps
Invoke info records the invoke type and dex method index for invokes that may reach artQuickResolutionTrampoline. Having this information recorded allows the runtime to avoid reading the dex code and pulling in extra pages. Code size increase for a large app: 93886360 -> 95811480 (2.05% increase) 1/2 of the code size increase is from making less stack maps deduped. I suspect there is less deduping because of the invoke info method index. Merged disabled until we measure the RAM savings. Test: test-art-host, N6P boots Bug: 34109702 Change-Id: I6c5e4a60675a1d7c76dee0561a12909e4ab6d5d9
Diffstat (limited to 'compiler/optimizing/stack_map_test.cc')
-rw-r--r--compiler/optimizing/stack_map_test.cc45
1 file changed, 44 insertions, 1 deletion
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 041695187b..330f7f28b6 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -934,7 +934,6 @@ TEST(StackMapTest, CodeOffsetTest) {
EXPECT_EQ(offset_mips64.Uint32Value(kMips64), kMips64InstructionAlignment);
}
-
TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaPool pool;
ArenaAllocator arena(&pool);
@@ -963,4 +962,48 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
stack_map2.GetStackMaskIndex(encoding.stack_map.encoding));
}
+TEST(StackMapTest, TestInvokeInfo) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena, kRuntimeISA);
+
+ ArenaBitVector sp_mask(&arena, 0, true);
+ sp_mask.SetBit(1);
+ stream.BeginStackMapEntry(0, 4, 0x3, &sp_mask, 0, 0);
+ stream.AddInvoke(kSuper, 1);
+ stream.EndStackMapEntry();
+ stream.BeginStackMapEntry(0, 8, 0x3, &sp_mask, 0, 0);
+ stream.AddInvoke(kStatic, 3);
+ stream.EndStackMapEntry();
+ stream.BeginStackMapEntry(0, 16, 0x3, &sp_mask, 0, 0);
+ stream.AddInvoke(kDirect, 65535);
+ stream.EndStackMapEntry();
+
+ const size_t size = stream.PrepareForFillIn();
+ MemoryRegion region(arena.Alloc(size, kArenaAllocMisc), size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(3u, code_info.GetNumberOfStackMaps(encoding));
+
+ InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4, encoding));
+ InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8, encoding));
+ InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16, encoding));
+ InvokeInfo invoke_invalid(code_info.GetInvokeInfoForNativePcOffset(12, encoding));
+ EXPECT_FALSE(invoke_invalid.IsValid()); // No entry for that index.
+ EXPECT_TRUE(invoke1.IsValid());
+ EXPECT_TRUE(invoke2.IsValid());
+ EXPECT_TRUE(invoke3.IsValid());
+ EXPECT_EQ(invoke1.GetInvokeType(encoding.invoke_info.encoding), kSuper);
+ EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding), 1u);
+ EXPECT_EQ(invoke1.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 4u);
+ EXPECT_EQ(invoke2.GetInvokeType(encoding.invoke_info.encoding), kStatic);
+ EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding), 3u);
+ EXPECT_EQ(invoke2.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 8u);
+ EXPECT_EQ(invoke3.GetInvokeType(encoding.invoke_info.encoding), kDirect);
+ EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding), 65535u);
+ EXPECT_EQ(invoke3.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 16u);
+}
+
} // namespace art