diff options
author | Ulyana Trafimovich <skvadrik@google.com> | 2020-07-16 15:09:38 +0000 |
---|---|---|
committer | Ulyana Trafimovich <skvadrik@google.com> | 2020-07-17 11:04:27 +0000 |
commit | e886d68b9c40c941d8966b9c90d0e265c75fb19e (patch) | |
tree | ad78c4e375c95a96e200baa2786e748505ab928b /simulator | |
parent | f12dd5861e0eaf1822c12137fd353b5e79761a6c (diff) |
Revert^2 "VIXL simulator for ART (Stage1)"
This reverts commit 3060bb919cd2f37c6a97e87c1581ac5294af72b3.
Reason for revert: relanding original change. The fix is setting
`device_supported: false` for libart(d)-simulator module in the .bp
file (`m checkbuild` attempted to build it for arm32 and failed).
Original commit message:
VIXL simulator for ART (Stage1)
Quick User Guide: test/README.simulator.md
This CL enables running ART run-tests in a simulator on host machine.
Some benefits of using this simulator approach:
- No need to use a target device at all.
Saves developers from dealing with device troubles: build, flash, USB,
adb, etc.
- Speed up development/debug/test cycle.
- Allows easy debugging/testing of new instruction features without real
hardware.
- Allows using a smaller AOSP Android manifest master-art.
The Stage1 CL provides support for running 30% of current run-tests.
The remaining unsupported test cases are kept in knownfailures.json.
Future work will add support for a proper stack frame layout between the
simulator and the quick entrypoints, so that stack walking,
QuickArgumentVisitor, deoptimization, etc. can work.
This CL adds the libart(d)-simulator-container library to the ART APEX.
It causes the following increase in APEX size (small, about 0.13%
for the release APEX, measured for target aosp_arm64-userdebug):
Before:
88992 com.android.art.debug.apex
51612 com.android.art.release.apex
112352 com.android.art.testing.apex
After:
89124 com.android.art.debug.apex
51680 com.android.art.release.apex
112468 com.android.art.testing.apex
Change-Id: I461c80aa9c4ce0673eef1c0254d2c539f2b6a8d5
Test: art/test.py --run-test --optimizing --simulate-arm64
Test: art/test.py --run-test --optimizing --host
Test: m test-art-host-gtest
Diffstat (limited to 'simulator')
-rw-r--r-- | simulator/Android.bp | 28 | ||||
-rw-r--r-- | simulator/code_simulator_arm64.cc | 378 | ||||
-rw-r--r-- | simulator/code_simulator_arm64.h | 18 | ||||
-rw-r--r-- | simulator/include/code_simulator.h | 13 |
4 files changed, 427 insertions, 10 deletions
diff --git a/simulator/Android.bp b/simulator/Android.bp index 1410444a3d..d71e56578c 100644 --- a/simulator/Android.bp +++ b/simulator/Android.bp @@ -18,12 +18,15 @@ cc_library_headers { name: "libart_simulator_headers", host_supported: true, export_include_dirs: ["include"], + apex_available: [ + "com.android.art.release", + "com.android.art.debug", + ], } cc_defaults { name: "libart_simulator_defaults", host_supported: true, - device_supported: false, defaults: ["art_defaults"], srcs: [ @@ -36,7 +39,12 @@ cc_defaults { ], cflags: ["-DVIXL_INCLUDE_SIMULATOR_AARCH64"], - header_libs: ["libart_simulator_headers"], + header_libs: [ + "jni_platform_headers", + "libdexfile_all_headers", + "libart_runtime_headers_ndk", + "libart_simulator_headers", + ], } art_cc_library { @@ -47,6 +55,7 @@ art_cc_library { "libartbase", "libvixl", ], + device_supported: false, } art_cc_library { @@ -60,6 +69,7 @@ art_cc_library { "libartbased", "libvixld", ], + device_supported: false, } cc_defaults { @@ -73,8 +83,12 @@ cc_defaults { shared_libs: [ "libbase", ], - - header_libs: ["libart_simulator_headers"], + header_libs: [ + "jni_platform_headers", + "libdexfile_all_headers", + "libart_runtime_headers_ndk", + "libart_simulator_headers", + ], export_include_dirs: ["."], // TODO: Consider a proper separation. 
} @@ -83,7 +97,10 @@ art_cc_library { defaults: ["libart_simulator_container_defaults"], shared_libs: [ "libartbase", - "libart", + ], + apex_available: [ + "com.android.art.release", + "com.android.art.debug", ], } @@ -95,7 +112,6 @@ art_cc_library { ], shared_libs: [ "libartbased", - "libartd", ], apex_available: [ "com.android.art.debug", diff --git a/simulator/code_simulator_arm64.cc b/simulator/code_simulator_arm64.cc index a64bd0bc0b..7521d183b8 100644 --- a/simulator/code_simulator_arm64.cc +++ b/simulator/code_simulator_arm64.cc @@ -16,13 +16,178 @@ #include "code_simulator_arm64.h" -#include <android-base/logging.h> +#include "art_method.h" +#include "base/logging.h" +#include "class_linker.h" +#include "thread.h" + +#include <string> +#include <cstring> +#include <math.h> + +static constexpr bool kEnableSimulateMethodAllowList = false; + +static const std::vector<std::string> simulate_method_allow_list = { + // Add any run test method you want to simulate here, for example: + // test/684-checker-simd-dotprod + "other.TestByte.testDotProdComplex", + "other.TestByte.testDotProdComplexSignedCastedToUnsigned", + "other.TestByte.testDotProdComplexUnsigned", + "other.TestByte.testDotProdComplexUnsignedCastedToSigned", +}; +static const std::vector<std::string> avoid_simulation_method_list = { + // For now, we can focus on simulating run test methods called by main(). + "main", + "<clinit>", + // Currently, we don't simulate Java library methods. + "java.", + "sun.", + "dalvik.", + "android.", + "libcore.", +}; using namespace vixl::aarch64; // NOLINT(build/namespaces) namespace art { namespace arm64 { + // Special registers defined in asm_support_arm64.s. + // Register holding Thread::current(). + static const unsigned kSelf = 19; + // Marking register. + static const unsigned kMR = 20; + // Frame Pointer. + static const unsigned kFp = 29; + // Stack Pointer. 
+ static const unsigned kSp = 31; + +class CustomSimulator final: public Simulator { + public: + explicit CustomSimulator(Decoder* decoder) : Simulator(decoder), qpoints_(nullptr) {} + virtual ~CustomSimulator() {} + + void SetEntryPoints(QuickEntryPoints* qpoints) { + DCHECK(qpoints_ == nullptr); + qpoints_ = qpoints; + } + + template <typename R, typename... P> + struct RuntimeCallHelper { + static void Execute(Simulator* simulator, R (*f)(P...)) { + simulator->RuntimeCallNonVoid(f); + } + }; + + // Partial specialization when the return type is `void`. + template <typename... P> + struct RuntimeCallHelper<void, P...> { + static void Execute(Simulator* simulator, void (*f)(P...)) { + simulator->RuntimeCallVoid(f); + } + }; + + // Override Simulator::VisitUnconditionalBranchToRegister to handle any runtime invokes + // which can be simulated. + void VisitUnconditionalBranchToRegister(const vixl::aarch64::Instruction* instr) override + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(qpoints_ != nullptr); + if (instr->Mask(UnconditionalBranchToRegisterMask) == BR) { + // The thunk mechansim code (LDR, BR) is generated by + // CodeGeneratorARM64::InvokeRuntime() + + // Conceptually, the control flow works as if: + // ######################################################################### + // Compiled Method (arm64) | THUNK (arm64) | Runtime Function (x86_64) + // ######################################################################### + // BL kQuickTestSuspend@thunk -> LDR x16, [...] + // BR x16 -------> art_quick_test_suspend + // ^ (x86 ret) + // | | + // +---------------------------------------------------+ + + // Actual control flow: arm64 code <-> x86_64 runtime, intercepted by simulator. + // ########################################################################## + // arm64 code in simulator | | ART Runtime (x86_64) + // ########################################################################## + // BL kQuickTestSuspend@thunk -> LDR x16, [...] 
+ // BR x16 ---> simulator ---> art_quick_test_suspend + // ^ (x86 call) (x86 ret) + // | | + // +------------------------------------- simulator <-------------+ + // (ARM ret) + // + + const void* target = reinterpret_cast<const void*>(ReadXRegister(instr->GetRn())); + auto lr = vixl::aarch64::Instruction::Cast(get_lr()); + if (target == reinterpret_cast<const void*>(qpoints_->pTestSuspend)) { + RuntimeCallHelper<void>::Execute(this, qpoints_->pTestSuspend); + } else { + // For branching to fixed addresses or labels, nothing has changed. + Simulator::VisitUnconditionalBranchToRegister(instr); + return; + } + WritePc(lr); // aarch64 return + return; + } else if (instr->Mask(UnconditionalBranchToRegisterMask) == BLR) { + const void* target = reinterpret_cast<const void*>(ReadXRegister(instr->GetRn())); + auto lr = instr->GetNextInstruction(); + if (target == reinterpret_cast<const void*>(qpoints_->pAllocObjectInitialized)) { + RuntimeCallHelper<void *, mirror::Class *>::Execute(this, qpoints_->pAllocObjectInitialized); + } else if (target == reinterpret_cast<const void*>(qpoints_->pAllocArrayResolved8) || + target == reinterpret_cast<const void*>(qpoints_->pAllocArrayResolved16) || + target == reinterpret_cast<const void*>(qpoints_->pAllocArrayResolved32) || + target == reinterpret_cast<const void*>(qpoints_->pAllocArrayResolved64)) { + RuntimeCallHelper<void *, mirror::Class *, int32_t>::Execute(this, + reinterpret_cast<void *(*)(art::mirror::Class *, int)>(const_cast<void*>(target))); + } else { + // For branching to fixed addresses or labels, nothing has changed. + Simulator::VisitUnconditionalBranchToRegister(instr); + return; + } + WritePc(lr); // aarch64 return + return; + } + Simulator::VisitUnconditionalBranchToRegister(instr); + return; + } + + // TODO(simulator): Maybe integrate these into vixl? 
+ int64_t get_sp() const { + return ReadRegister<int64_t>(kSp, Reg31IsStackPointer); + } + + int64_t get_x(int32_t n) const { + return ReadRegister<int64_t>(n, Reg31IsStackPointer); + } + + int64_t get_lr() const { + return ReadRegister<int64_t>(kLinkRegCode); + } + + int64_t get_fp() const { + return ReadXRegister(kFp); + } + + private: + QuickEntryPoints* qpoints_; +}; + +static const void* GetQuickCodeFromArtMethod(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(!method->IsAbstract()); + DCHECK(!method->IsNative()); + DCHECK(Runtime::SimulatorMode()); + DCHECK(method->CanBeSimulated()); + + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + const void* code = method->GetOatMethodQuickCode(linker->GetImagePointerSize()); + if (code != nullptr) { + return code; + } + return nullptr; +} + // VIXL has not been tested on 32bit architectures, so Simulator is not always // available. To avoid linker error on these architectures, we check if we can simulate // in the beginning of following methods, with compile time constant `kCanSimulate`. 
@@ -40,7 +205,11 @@ CodeSimulatorArm64::CodeSimulatorArm64() : CodeSimulator(), decoder_(nullptr), simulator_(nullptr) { DCHECK(kCanSimulate); decoder_ = new Decoder(); - simulator_ = new Simulator(decoder_); + simulator_ = new CustomSimulator(decoder_); + if (VLOG_IS_ON(simulator)) { + simulator_->SetColouredTrace(true); + simulator_->SetTraceParameters(LOG_DISASM | LOG_WRITE); + } } CodeSimulatorArm64::~CodeSimulatorArm64() { @@ -51,7 +220,7 @@ CodeSimulatorArm64::~CodeSimulatorArm64() { void CodeSimulatorArm64::RunFrom(intptr_t code_buffer) { DCHECK(kCanSimulate); - simulator_->RunFrom(reinterpret_cast<const Instruction*>(code_buffer)); + simulator_->RunFrom(reinterpret_cast<const vixl::aarch64::Instruction*>(code_buffer)); } bool CodeSimulatorArm64::GetCReturnBool() const { @@ -69,5 +238,208 @@ int64_t CodeSimulatorArm64::GetCReturnInt64() const { return simulator_->ReadXRegister(0); } +void CodeSimulatorArm64::Invoke(ArtMethod* method, uint32_t* args, uint32_t args_size_in_bytes, + Thread* self, JValue* result, const char* shorty, bool isStatic) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(kCanSimulate); + // ARM64 simulator only supports 64-bit host machines. Because: + // 1) vixl simulator is not tested on 32-bit host machines. + // 2) Data structures in ART have different representations for 32/64-bit machines. + DCHECK(sizeof(args) == sizeof(int64_t)); + + if (VLOG_IS_ON(simulator)) { + VLOG(simulator) << "\nVIXL_SIMULATOR simulate: " << method->PrettyMethod(); + } + + InitRegistersForInvokeStub(method, args, args_size_in_bytes, self, result, shorty, isStatic); + + int64_t quick_code = reinterpret_cast<int64_t>(GetQuickCodeFromArtMethod(method)); + RunFrom(quick_code); + + GetResultFromShorty(result, shorty); + + // Ensure simulation state is not carried over from one method to another. + simulator_->ResetState(); + + // Reset stack pointer. 
+ simulator_->WriteSp(saved_sp_); +} + +void CodeSimulatorArm64::GetResultFromShorty(JValue* result, const char* shorty) { + switch (shorty[0]) { + case 'V': + return; + case 'D': + result->SetD(simulator_->ReadDRegister(0)); + return; + case 'F': + result->SetF(simulator_->ReadSRegister(0)); + return; + default: + // Just store x0. Doesn't matter if it is 64 or 32 bits. + result->SetJ(simulator_->ReadXRegister(0)); + return; + } +} + +// Init registers for invoking art_quick_invoke_stub: +// +// extern"C" void art_quick_invoke_stub(ArtMethod *method, x0 +// uint32_t *args, x1 +// uint32_t argsize, w2 +// Thread *self, x3 +// JValue *result, x4 +// char *shorty); x5 +// +// See art/runtime/arch/arm64/quick_entrypoints_arm64.S +// +// +----------------------+ +// | | +// | C/C++ frame | +// | LR'' | +// | FP'' | <- SP' +// +----------------------+ +// +----------------------+ +// | X28 | +// | : | +// | X19 (*self) | +// | SP' | Saved registers +// | X5 (*shorty) | +// | X4 (*result) | +// | LR' | +// | FP' | <- FP +// +----------------------+ +// | uint32_t out[n-1] | +// | : : | Outs +// | uint32_t out[0] | +// | ArtMethod* | <- SP value=null +// +----------------------+ +// +// Outgoing registers: +// x0 - Current ArtMethod* +// x1-x7 - integer parameters. +// d0-d7 - Floating point parameters. +// xSELF = self +// SP = & of ArtMethod* +// x1 - "this" pointer (for non-static method) +void CodeSimulatorArm64::InitRegistersForInvokeStub(ArtMethod* method, uint32_t* args, + uint32_t args_size_in_bytes, Thread* self, + JValue* result, const char* shorty, + bool isStatic) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(kCanSimulate); + + // Set registers x0, x4, x5, and x19. 
+ simulator_->WriteXRegister(0, reinterpret_cast<int64_t>(method)); + simulator_->WriteXRegister(kSelf, reinterpret_cast<int64_t>(self)); + simulator_->WriteXRegister(4, reinterpret_cast<int64_t>(result)); + simulator_->WriteXRegister(5, reinterpret_cast<int64_t>(shorty)); + + // Stack Pointer here is not the real one in hardware. This will break stack overflow check. + // Also note that the simulator stack is limited. + saved_sp_ = simulator_->get_sp(); + // x4, x5, x19, x20 .. x28, SP, LR, FP saved (15 in total). + const int64_t regs_save_size_in_bytes = kXRegSizeInBytes * 15; + const int64_t frame_save_size = regs_save_size_in_bytes + + kXRegSizeInBytes + // ArtMethod* + static_cast<int64_t>(args_size_in_bytes); + // Comply with 16-byte alignment requirement for SP. + void** new_sp = reinterpret_cast<void**>((saved_sp_ - frame_save_size) & (~0xfUL)); + + simulator_->WriteSp(new_sp); + + // Store null into ArtMethod* at bottom of frame. + *new_sp++ = nullptr; + // Copy arguments into stack frame. + std::memcpy(new_sp, args, args_size_in_bytes * sizeof(uint32_t)); + + // Callee-saved registers. + int64_t* save_registers = reinterpret_cast<int64_t*>(saved_sp_) + 3; + save_registers[0] = simulator_->get_fp(); + save_registers[1] = simulator_->get_lr(); + save_registers[2] = simulator_->get_x(4); // X4 (*result) + save_registers[3] = simulator_->get_x(5); // X5 (*shorty) + save_registers[4] = saved_sp_; + save_registers[5] = simulator_->get_x(kSelf); // X19 (*self) + for (unsigned int i = 6; i < 15; i++) { + save_registers[i] = simulator_->get_x(i + 14); // X20 .. X28 + } + + // Use xFP (Frame Pointer) now, as it's callee-saved. + simulator_->WriteXRegister(kFp, saved_sp_ - regs_save_size_in_bytes); + + // Fill registers from args, according to shorty. + static const unsigned kRegisterIndexLimit = 8; + unsigned fpr_index = 0; + unsigned gpr_index = 1; // x1 ~ x7 integer parameters. + shorty++; // Skip the return value. 
+ // For non-static method, load "this" parameter, and increment args pointer. + if (!isStatic) { + simulator_->WriteWRegister(gpr_index++, *args++); + } + // Loop to fill registers. + for (const char* s = shorty; *s != '\0'; s++) { + switch (*s) { + case 'D': + simulator_->WriteDRegister(fpr_index++, *reinterpret_cast<double*>(args)); + args += 2; + break; + case 'J': + simulator_->WriteXRegister(gpr_index++, *reinterpret_cast<int64_t*>(args)); + args += 2; + break; + case 'F': + simulator_->WriteSRegister(fpr_index++, *reinterpret_cast<float*>(args)); + args++; + break; + default: + // Everything else takes one vReg. + simulator_->WriteWRegister(gpr_index++, *reinterpret_cast<int32_t*>(args)); + args++; + break; + } + if (gpr_index > kRegisterIndexLimit || fpr_index < kRegisterIndexLimit) { + // TODO: Handle register spill. + UNREACHABLE(); + } + } + + // REFRESH_MARKING_REGISTER + if (kUseReadBarrier) { + simulator_->WriteWRegister(kMR, self->GetIsGcMarking()); + } +} + +void CodeSimulatorArm64::InitEntryPoints(QuickEntryPoints* qpoints) { + simulator_->SetEntryPoints(qpoints); +} + +bool CodeSimulatorArm64::CanSimulate(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) { + std::string name = method->PrettyMethod(); + + // Make sure simulate methods with $simulate$ in their names. + if (name.find("$simulate$") != std::string::npos) { + return true; + } + // Simulation allow list mode, only simulate method on the allow list. + if (kEnableSimulateMethodAllowList) { + for (auto& s : simulate_method_allow_list) { + if (name.find(s) != std::string::npos) { + return true; + } + } + return false; + } + // Avoid simulating following methods. + for (auto& s : avoid_simulation_method_list) { + if (name.find(s) != std::string::npos) { + return false; + } + } + + // Try to simulate as much as we can. 
+ return true; +} + } // namespace arm64 } // namespace art diff --git a/simulator/code_simulator_arm64.h b/simulator/code_simulator_arm64.h index e726500452..ea5c95a3cc 100644 --- a/simulator/code_simulator_arm64.h +++ b/simulator/code_simulator_arm64.h @@ -27,10 +27,13 @@ #include "arch/instruction_set.h" #include "code_simulator.h" +#include "entrypoints/quick/quick_entrypoints.h" namespace art { namespace arm64 { +class CustomSimulator; + class CodeSimulatorArm64 : public CodeSimulator { public: static CodeSimulatorArm64* CreateCodeSimulatorArm64(); @@ -42,11 +45,24 @@ class CodeSimulatorArm64 : public CodeSimulator { int32_t GetCReturnInt32() const override; int64_t GetCReturnInt64() const override; + bool CanSimulate(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) override; + void Invoke(ArtMethod* method, uint32_t* args, uint32_t args_size, Thread* self, JValue* result, + const char* shorty, bool isStatic) override REQUIRES_SHARED(Locks::mutator_lock_); + + void InitEntryPoints(QuickEntryPoints* qpoints) override; + private: CodeSimulatorArm64(); + void InitRegistersForInvokeStub(ArtMethod* method, uint32_t* args, uint32_t args_size, + Thread* self, JValue* result, const char* shorty, bool isStatic) + REQUIRES_SHARED(Locks::mutator_lock_); + + void GetResultFromShorty(JValue* result, const char* shorty); + vixl::aarch64::Decoder* decoder_; - vixl::aarch64::Simulator* simulator_; + CustomSimulator* simulator_; + int64_t saved_sp_; // TODO: Enable CodeSimulatorArm64 for more host ISAs once Simulator supports them. 
static constexpr bool kCanSimulate = (kRuntimeISA == InstructionSet::kX86_64); diff --git a/simulator/include/code_simulator.h b/simulator/include/code_simulator.h index 256ab23aa4..22bac1e83f 100644 --- a/simulator/include/code_simulator.h +++ b/simulator/include/code_simulator.h @@ -18,9 +18,15 @@ #define ART_SIMULATOR_INCLUDE_CODE_SIMULATOR_H_ #include "arch/instruction_set.h" +#include "runtime.h" namespace art { +class ArtMethod; +union JValue; +class Thread; +struct QuickEntryPoints; + class CodeSimulator { public: CodeSimulator() {} @@ -35,6 +41,13 @@ class CodeSimulator { virtual int32_t GetCReturnInt32() const = 0; virtual int64_t GetCReturnInt64() const = 0; + virtual bool CanSimulate(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0; + virtual void Invoke(ArtMethod* method, uint32_t* args, uint32_t args_size, Thread* self, + JValue* result, const char* shorty, bool isStatic) + REQUIRES_SHARED(Locks::mutator_lock_) = 0; + + virtual void InitEntryPoints(QuickEntryPoints* qpoints) = 0; + private: DISALLOW_COPY_AND_ASSIGN(CodeSimulator); }; |