author     Vladimir Marko <vmarko@google.com>  2020-08-05 12:20:28 +0100
committer  Treehugger Robot <treehugger-gerrit@google.com>  2020-08-10 09:17:34 +0000
commit     d3e9c62976780e830da79ae32be4192dee196db2 (patch)
tree       bf7855545f49ea039c6824d340ce2a162ad40ebd /compiler/optimizing/optimizing_compiler.cc
parent     60ef3997cbcd866c505e51ecde7f06a0535110a0 (diff)
ARM: Allow FP args in core regs for @CriticalNative.
If a float or double argument needs to be passed in a core register to a @CriticalNative method due to the soft-float native ABI, insert a fake call to Float.floatToRawIntBits() or Double.doubleToRawLongBits() to satisfy type checks in the compiler.

We cannot do that for intrinsics that expect those inputs in actual FP registers, so we still prevent such intrinsics from using `kCallCriticalNative`. This should be irrelevant when an actual intrinsic implementation is emitted. Two currently unimplemented intrinsics are affected by this carve-out, namely MathRoundDouble and FP16ToHalf, as are four intrinsics implemented only when ARMv8-A is supported, namely MathRint, MathRoundFloat, MathCeil and MathFloor.

Test: testrunner.py --target --32 -t 178-app-image-native-method
Bug: 112189621
Change-Id: Id14ef4f49f8a0e6489f97dc9588c0e6a5c122632
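For context, the inserted call is a bit-level reinterpretation, not a numeric conversion. Below is a minimal standalone C++ sketch of what Float.floatToRawIntBits() and Double.doubleToRawLongBits() compute; the helper names are illustrative, not ART code.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the raw IEEE-754 bits of a float as a 32-bit integer,
// mirroring Float.floatToRawIntBits(): no value conversion, NaN bits kept.
static int32_t FloatToRawIntBits(float value) {
  int32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // well-defined type punning
  return bits;
}

// Same for double, mirroring Double.doubleToRawLongBits().
static int64_t DoubleToRawLongBits(double value) {
  int64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  // Under the soft-float AAPCS, these integer results travel in core
  // registers (r0-r3), which is where a @CriticalNative callee expects
  // its FP-typed arguments.
  std::printf("bits(1.0f)  = 0x%08x\n",
              static_cast<unsigned>(FloatToRawIntBits(1.0f)));   // 0x3f800000
  std::printf("bits(1.0)   = 0x%016llx\n",
              static_cast<unsigned long long>(DoubleToRawLongBits(1.0)));
  return 0;
}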
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f201f806d6..169f8b6446 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -468,6 +468,19 @@ bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer) const {
switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#if defined(ART_ENABLE_CODEGEN_arm)
+ case InstructionSet::kThumb2:
+ case InstructionSet::kArm: {
+ OptimizationDef arm_optimizations[] = {
+ OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ arm_optimizations);
+ }
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
OptimizationDef x86_optimizations[] = {
@@ -501,6 +514,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
OptDef(OptimizationPass::kInstructionSimplifierArm),
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+ OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
OptDef(OptimizationPass::kScheduling)
};
return RunOptimizations(graph,
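
Note that kCriticalNativeAbiFixupArm is wired in twice: as the only arch-specific baseline pass for kThumb2/kArm in the first hunk, and in the full RunArchOptimizations pipeline after GVN and before kScheduling in the second, presumably so that scheduling already sees the rewritten integer-typed inputs. The following toy C++ sketch shows what such a fixup conceptually does to a call's inputs; Node and Type here are hypothetical stand-ins, not ART's HGraph API.

#include <memory>
#include <vector>

enum class Type { kFloat32, kFloat64, kInt32, kInt64 };

// Toy IR node: a type plus a list of input nodes.
struct Node {
  Type type;
  std::vector<Node*> inputs;
};

// Insert a fake "to raw bits" node in front of `arg`, mirroring the fake
// Float.floatToRawIntBits()/Double.doubleToRawLongBits() calls the commit
// message describes: the value is unchanged, only its type becomes integral.
static Node* WrapInRawBits(std::vector<std::unique_ptr<Node>>& arena, Node* arg) {
  Type int_type = (arg->type == Type::kFloat32) ? Type::kInt32 : Type::kInt64;
  arena.push_back(std::make_unique<Node>(Node{int_type, {arg}}));
  return arena.back().get();
}

// Rewrite every FP-typed input of a @CriticalNative call so the graph's
// type checks see integer values, matching the soft-float ABI assignment
// of those arguments to core registers.
static void FixupCriticalNativeCall(std::vector<std::unique_ptr<Node>>& arena,
                                    Node* call) {
  for (Node*& input : call->inputs) {
    if (input->type == Type::kFloat32 || input->type == Type::kFloat64) {
      input = WrapInRawBits(arena, input);
    }
  }
}

int main() {
  std::vector<std::unique_ptr<Node>> arena;
  arena.push_back(std::make_unique<Node>(Node{Type::kFloat32, {}}));
  Node* float_arg = arena.back().get();
  arena.push_back(std::make_unique<Node>(Node{Type::kInt32, {float_arg}}));
  Node* call = arena.back().get();  // stand-in for the @CriticalNative call
  FixupCriticalNativeCall(arena, call);
  // call->inputs[0]->type is now Type::kInt32: the float travels as raw bits.
  return 0;
}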