path: root/compiler/optimizing/code_generator.h
author     Zheng Xu <zheng.xu@arm.com>    2015-04-24 17:35:39 +0800
committer  Zheng Xu <zheng.xu@arm.com>    2015-04-24 17:38:13 +0800
commit     da40309f61f98c16d7d58e4c34cc0f5eef626f93 (patch)
tree       7525c544dc9acae0e1041757149be2eabb733dc8 /compiler/optimizing/code_generator.h
parent     021190bf584662e75b269ef47cd48e2044e34fe4 (diff)
Opt compiler: ARM64: Use ldp/stp on arm64 for slow paths.
It should be a bit faster than loading/storing single registers, and it reduces the code size.

Change-Id: I67b8302adf6174b7bb728f7c2afd2c237e34ffde
Diffstat (limited to 'compiler/optimizing/code_generator.h')
-rw-r--r--  compiler/optimizing/code_generator.h | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e536b2d0ee..9b3cf8a45f 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -77,8 +77,8 @@ class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 
   virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
 
-  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
-  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+  virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
   void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
 
   bool IsCoreRegisterSaved(int reg) const {
@@ -97,11 +97,13 @@ class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
     return saved_fpu_stack_offsets_[reg];
   }
 
- private:
+ protected:
   static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
   static constexpr uint32_t kRegisterNotSaved = -1;
   uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
   uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];
+
+ private:
   DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
 };
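
The change above makes SaveLiveRegisters()/RestoreLiveRegisters() virtual and moves the saved-offset arrays from private to protected, so a backend-specific slow path (the arm64 one, per the commit message) can replace the generic one-store-per-register loop with AArch64 stp/ldp pairs, each of which moves two 64-bit registers in a single instruction. Below is a self-contained toy model of that shape; the class names, register choice, and the printf "emitter" are illustrative assumptions, not the real ART code.

// Toy stand-in for SlowPathCode (illustrative only): the save hook is virtual
// and the recorded stack offsets are protected, so a subclass can emit its
// own save sequence from the same data.
#include <cstdint>
#include <cstdio>

class SlowPathModel {
 public:
  virtual ~SlowPathModel() {}

  // Generic backend: one str per saved register.
  virtual void SaveLiveRegisters() {
    for (int reg = 0; reg < num_saved_; ++reg) {
      std::printf("str x%d, [sp, #%u]\n", reg, saved_core_stack_offsets_[reg]);
    }
  }

 protected:
  static constexpr int kMaxRegs = 32;
  // Made-up offsets: consecutive 8-byte slots for five saved registers.
  uint32_t saved_core_stack_offsets_[kMaxRegs] = {0, 8, 16, 24, 32};
  int num_saved_ = 5;
};

// Toy arm64 backend: combine two consecutive registers into one stp and fall
// back to str for a leftover odd register.
class Arm64SlowPathModel : public SlowPathModel {
 public:
  void SaveLiveRegisters() override {
    int reg = 0;
    for (; reg + 1 < num_saved_; reg += 2) {
      std::printf("stp x%d, x%d, [sp, #%u]\n", reg, reg + 1, saved_core_stack_offsets_[reg]);
    }
    if (reg < num_saved_) {
      std::printf("str x%d, [sp, #%u]\n", reg, saved_core_stack_offsets_[reg]);
    }
  }
};

int main() {
  Arm64SlowPathModel slow_path;
  slow_path.SaveLiveRegisters();  // stp x0, x1 ...; stp x2, x3 ...; str x4 ...
  return 0;
}

With the five example slots this prints two stp and one str where the base version prints five str, which is the speed and code-size win the commit message refers to.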