summaryrefslogtreecommitdiff
path: root/compiler/optimizing/code_generator_mips.cc
diff options
context:
space:
mode:
authorVladimir Marko <vmarko@google.com>2018-01-04 18:42:57 +0000
committerVladimir Marko <vmarko@google.com>2018-01-10 14:30:26 +0000
commitdc682aa9d0eae1a851af059434adb6f6cf8f06f8 (patch)
treef93f00493ee5887b05b42a6a5dd99eb6794daad4 /compiler/optimizing/code_generator_mips.cc
parentd6b7e8c63f8eca25460f56f66dcae15eaa897ff0 (diff)
Use 28 bits for type check bit string.
And reverse the order of fields in the Class::status_. This avoids generated code size increase: - ClassStatus in high bits allows class initialization check using "status_high_byte < (kInitialized << 4)" which is unaffected by the low 4 bits of LHS instead of needing to extract the status bits, - the type check bit string in the bottom bits instead of somewhere in the middle allows the comparison on ARM to be done using the same code size as with the old layout in most cases (except when the compared value is 9-16 bits and not a modified immediate: 2 bytes less for 9-12 bits and sometimes 2 bytes more for 13-16 bits; the latter could be worked around using LDRH if the second character's boundary is at 16 bits). Add one of the extra bits to the 2nd character to push its boundary to 16 bits so that we can test an implementation using 16-bit loads in a subsequent CL, arbitrarily add the other three bits to the 3rd character. This CL is only about making those bits available and allowing testing, the determination of how to use the additional bits for the best impact (whether to have a 4th character or distribute them differently among the three characters) shall be done later. Test: m test-art-host-gtest Test: testrunner.py --host --optimizing Test: Pixel 2 XL boots. Test: testrunner.py --target --optimizing Bug: 64692057 Change-Id: I38c59837e3df3accb813fb1e04dc42e9afcd2d73
Diffstat (limited to 'compiler/optimizing/code_generator_mips.cc')
-rw-r--r--compiler/optimizing/code_generator_mips.cc10
1 files changed, 8 insertions, 2 deletions
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ebe252a9c8..c8bd5d4fc8 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1915,8 +1915,14 @@ void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool d
void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
Register class_reg) {
- __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
- __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
+ __ LoadConst32(AT, shifted_initialized_value);
__ Bltu(TMP, AT, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
__ Sync(0);