| author | Treehugger Robot <treehugger-gerrit@google.com> | 2016-09-05 18:12:13 +0000 |
|---|---|---|
| committer | Gerrit Code Review <noreply-gerritcodereview@google.com> | 2016-09-05 18:12:13 +0000 |
| commit | 86dc59ebe25cfe36d4edb39c2b7b2653f079448b (patch) | |
| tree | 446d80551ab72701914ed08ac18fcaeffd6555b1 /compiler/optimizing/code_generator.h | |
| parent | b0f443217306b8a307f73d35cb27c1cac2e1c360 (diff) | |
| parent | 70e97462116a47ef2e582ea29a037847debcc029 (diff) | |
Merge "Avoid excessive spill slots for slow paths."
Diffstat (limited to 'compiler/optimizing/code_generator.h')
| -rw-r--r-- | compiler/optimizing/code_generator.h | 28 |
|---|---|---|

1 file changed, 26 insertions, 2 deletions
```diff
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 78a8afb156..00a779ad95 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -22,6 +22,7 @@
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/bit_field.h"
+#include "base/bit_utils.h"
 #include "base/enums.h"
 #include "globals.h"
 #include "graph_visualizer.h"
@@ -211,8 +212,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   virtual size_t GetFloatingPointSpillSlotSize() const = 0;
   virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
   void InitializeCodeGeneration(size_t number_of_spill_slots,
-                                size_t maximum_number_of_live_core_registers,
-                                size_t maximum_number_of_live_fpu_registers,
+                                size_t maximum_safepoint_spill_size,
                                 size_t number_of_out_slots,
                                 const ArenaVector<HBasicBlock*>& block_order);
   // Backends can override this as necessary. For most, no special alignment is required.
@@ -278,6 +278,30 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
     return (fpu_callee_save_mask_ & (1 << reg)) != 0;
   }
 
+  uint32_t GetSlowPathSpills(LocationSummary* locations, bool core_registers) const {
+    DCHECK(locations->OnlyCallsOnSlowPath() ||
+           (locations->Intrinsified() && locations->CallsOnMainAndSlowPath() &&
+            !locations->HasCustomSlowPathCallingConvention()));
+    uint32_t live_registers = core_registers
+        ? locations->GetLiveRegisters()->GetCoreRegisters()
+        : locations->GetLiveRegisters()->GetFloatingPointRegisters();
+    if (locations->HasCustomSlowPathCallingConvention()) {
+      // Save only the live registers that the custom calling convention wants us to save.
+      uint32_t caller_saves = core_registers
+          ? locations->GetCustomSlowPathCallerSaves().GetCoreRegisters()
+          : locations->GetCustomSlowPathCallerSaves().GetFloatingPointRegisters();
+      return live_registers & caller_saves;
+    } else {
+      // Default ABI, we need to spill non-callee-save live registers.
+      uint32_t callee_saves = core_registers ? core_callee_save_mask_ : fpu_callee_save_mask_;
+      return live_registers & ~callee_saves;
+    }
+  }
+
+  size_t GetNumberOfSlowPathSpills(LocationSummary* locations, bool core_registers) const {
+    return POPCOUNT(GetSlowPathSpills(locations, core_registers));
+  }
+
   // Record native to dex mapping for a suspend point. Required by runtime.
   void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
   // Check whether we have already recorded mapping at this PC.
```
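For readers without the full tree at hand, here is a minimal, self-contained sketch of the mask arithmetic the new `GetSlowPathSpills()`/`GetNumberOfSlowPathSpills()` helpers perform. The register masks, the `Popcount` helper, and the 16-register target below are invented for illustration; the real helpers take a `LocationSummary`, consult the per-backend `core_callee_save_mask_`/`fpu_callee_save_mask_`, and use ART's `POPCOUNT` from `base/bit_utils.h`.

```cpp
#include <bitset>
#include <cstdint>
#include <cstdio>

// Stand-in for ART's POPCOUNT (base/bit_utils.h): number of set bits in a mask.
static int Popcount(uint32_t mask) {
  return static_cast<int>(std::bitset<32>(mask).count());
}

// Hypothetical callee-save mask for an imaginary 16-register target:
// r4..r11 survive calls (callee-save), the rest are caller-save.
static constexpr uint32_t kCalleeSaveMask = 0x0FF0u;

// Registers an invented custom slow-path calling convention clobbers.
static constexpr uint32_t kCustomCallerSaves = 0x0001u;

// Mirrors the mask arithmetic of the new GetSlowPathSpills():
// keep only the live registers that the slow-path call can actually clobber.
uint32_t GetSlowPathSpills(uint32_t live_registers, bool has_custom_convention) {
  if (has_custom_convention) {
    // Save only the live registers the custom convention wants saved.
    return live_registers & kCustomCallerSaves;
  }
  // Default ABI: callee-saves survive the call, so spill only the others.
  return live_registers & ~kCalleeSaveMask;
}

int main() {
  // r0, r1 and r4..r7 live at the safepoint: six registers in total.
  uint32_t live = 0x00F3u;

  // Sizing the frame for every live register would reserve 6 slots here.
  std::printf("naive (all live):  %d slots\n", Popcount(live));
  // The mask-based count reserves slots only for clobbered registers.
  std::printf("default ABI:       %d slots\n",
              Popcount(GetSlowPathSpills(live, /*has_custom_convention=*/false)));
  std::printf("custom convention: %d slots\n",
              Popcount(GetSlowPathSpills(live, /*has_custom_convention=*/true)));
  return 0;
}
```

In this toy setup the naive count reserves 6 slots while the mask-based count needs only 2 (default ABI) or 1 (custom convention). That is the point of the signature change in the diff: `InitializeCodeGeneration()` now receives a single `maximum_safepoint_spill_size` derived from these per-safepoint spill masks, rather than sizing the frame from separate maxima of live core and FPU registers.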