path: root/compiler/optimizing/code_generator_arm.cc
author    Vladimir Marko <vmarko@google.com>  2015-11-20 15:08:11 +0000
committer Vladimir Marko <vmarko@google.com>  2015-11-20 16:18:39 +0000
commit    f9d741e32c6f1629ce70eefc68d3363fa1cfd696 (patch)
tree      409005e5b1d01d2830c20421f8466125e110d6af /compiler/optimizing/code_generator_arm.cc
parent    beb709a2607a00b5df33f0235f22ccdd876cee22 (diff)
Optimizing/ARM: Improve long shifts by 1.
Implement long Shl(x,1) as LSLS+ADC, Shr(x,1) as ASRS+RRX and UShr(x,1) as LSRS+RRX. The flag-setting shift leaves the bit pushed out of one word in the carry flag, and the ADC or RRX then folds it into the other word.

Remove the simplification substituting Shl(x,1) with ADD(x,x), as it interferes with some other optimizations instead of helping them. And since it didn't help 64-bit architectures anyway, codegen is the correct place for it. This is now implemented for ARM and x86, so only mips32 remains to be improved.

Change-Id: Idd14f23292198b2260189e1497ca5411b21743b3
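For illustration, a minimal C++ sketch of what the new two-instruction sequences compute on a 32-bit register pair. LongPair, Shl1, Shr1 and UShr1 are hypothetical names, not part of the commit; the explicit carry variable models the ARM carry flag that LSLS/ASRS/LSRS set and ADC/RRX consume:

#include <cstdint>

// Hypothetical model of a long held in two 32-bit registers (low, high).
struct LongPair { uint32_t low; uint32_t high; };

// Shl(x,1): LSLS o_l, low, #1 ; ADC o_h, high, high.
LongPair Shl1(LongPair x) {
  uint32_t carry = x.low >> 31;      // bit shifted out of the low word
  return { x.low << 1, x.high + x.high + carry };
}

// Shr(x,1): ASRS o_h, high, #1 ; RRX o_l, low.
LongPair Shr1(LongPair x) {
  uint32_t carry = x.high & 1;       // bit shifted out of the high word
  // Arithmetic shift of the high word (sign bit replicated, as on ARM).
  uint32_t new_high = static_cast<uint32_t>(static_cast<int32_t>(x.high) >> 1);
  return { (x.low >> 1) | (carry << 31), new_high };
}

// UShr(x,1): LSRS o_h, high, #1 ; RRX o_l, low.
LongPair UShr1(LongPair x) {
  uint32_t carry = x.high & 1;
  return { (x.low >> 1) | (carry << 31), x.high >> 1 };
}

Either way the by-1 shift costs two instructions, whereas the generic sub-32 path in the hunk below has to merge bits across the two words with shift+orr sequences.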
Diffstat (limited to 'compiler/optimizing/code_generator_arm.cc')
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 12ab68e8cd..c7e412c8e3 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3361,7 +3361,19 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) {
             __ mov(o_l, ShifterOperand(high));
             __ LoadImmediate(o_h, 0);
           }
-        } else {  // shift_value < 32
+        } else if (shift_value == 1) {
+          if (op->IsShl()) {
+            __ Lsls(o_l, low, 1);
+            __ adc(o_h, high, ShifterOperand(high));
+          } else if (op->IsShr()) {
+            __ Asrs(o_h, high, 1);
+            __ Rrx(o_l, low);
+          } else {
+            __ Lsrs(o_h, high, 1);
+            __ Rrx(o_l, low);
+          }
+        } else {
+          DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
           if (op->IsShl()) {
             __ Lsl(o_h, high, shift_value);
             __ orr(o_h, o_h, ShifterOperand(low, LSR, 32 - shift_value));
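For comparison, the generic path above assembles the high word from both halves. A sketch of the Shl case for 2 <= n < 32, reusing the hypothetical LongPair model from the sketch above; ShlN is likewise a hypothetical name, and the low word is assumed to be low << n:

// Shl(x,n) for 2 <= n < 32, mirroring the emitted sequence:
//   Lsl o_h, high, n ; orr o_h, o_h, (low LSR (32 - n))
LongPair ShlN(LongPair x, uint32_t n) {
  // The top (32 - n) bits shifted out of the low word land in the high word.
  return { x.low << n, (x.high << n) | (x.low >> (32 - n)) };
}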