author    Mark Mendell <mark.p.mendell@intel.com>  2015-03-31 22:16:59 -0400
committer Mark Mendell <mark.p.mendell@intel.com>  2015-04-01 08:45:38 -0400
commit    fb8d279bc011b31d0765dc7ca59afea324fd0d0c (patch)
tree      01b21964ce0516bda835faa15b260ac290714fe0 /compiler/optimizing/codegen_test.cc
parent    dcff612c3a6e1427749771c4559f198fa480f709 (diff)
[optimizing] Implement x86/x86_64 math intrinsics
Implement floor/ceil/round/RoundFloat on x86 and x86_64.
Implement RoundDouble on x86_64.

Add support for roundss and roundsd on both architectures. Support them
in the disassembler as well.

Add the instruction set features for x86, as the 'round' instruction is
only supported if SSE4.1 is supported.

Fix the tests to handle the addition of passing the instruction set
features to x86 and x86_64.

Add assembler tests for roundsd and roundss to x86_64 assembler tests.

Change-Id: I9742d5930befb0bbc23f3d6c83ce0183ed9fe04f
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
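For background, the floor/ceil lowering relies on the immediate rounding-mode
field of the SSE4.1 round instructions. Below is a minimal standalone sketch of
those semantics using the standard compiler intrinsics rather than ART's
assembler; it is illustrative example code, not part of this commit, and
assumes a toolchain with SSE4.1 enabled (e.g. -msse4.1):

// Standalone illustration of roundsd's immediate rounding modes via the
// SSE4.1 compiler intrinsics. Not ART code; build with -msse4.1.
#include <smmintrin.h>  // SSE4.1: _mm_round_sd and friends
#include <cstdio>

static double FloorSse41(double v) {
  __m128d x = _mm_set_sd(v);
  // Immediate selects round-toward-negative-infinity, suppressing
  // precision exceptions: this is exactly what Math.floor needs.
  return _mm_cvtsd_f64(_mm_round_sd(x, x, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
}

static double CeilSse41(double v) {
  __m128d x = _mm_set_sd(v);
  // Immediate selects round-toward-positive-infinity for Math.ceil.
  return _mm_cvtsd_f64(_mm_round_sd(x, x, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC));
}

int main() {
  std::printf("floor(2.5) = %f, ceil(2.5) = %f\n", FloorSse41(2.5), CeilSse41(2.5));
  return 0;
}

Note that Java's Math.round is defined as floor(x + 0.5), not round-to-nearest-even,
so the Round intrinsics cannot simply reuse the nearest rounding mode of roundsd.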
Diffstat (limited to 'compiler/optimizing/codegen_test.cc')
-rw-r--r--  compiler/optimizing/codegen_test.cc | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 6053ad51f4..2be117bf38 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -19,6 +19,8 @@
 #include "arch/instruction_set.h"
 #include "arch/arm/instruction_set_features_arm.h"
 #include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/x86/instruction_set_features_x86.h"
+#include "arch/x86_64/instruction_set_features_x86_64.h"
 #include "base/macros.h"
 #include "builder.h"
 #include "code_generator_arm.h"
@@ -108,7 +110,9 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
   InternalCodeAllocator allocator;
 
   CompilerOptions compiler_options;
-  x86::CodeGeneratorX86 codegenX86(graph, compiler_options);
+  std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+      X86InstructionSetFeatures::FromCppDefines());
+  x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
   // We avoid doing a stack overflow check that requires the runtime being setup,
   // by making sure the compiler knows the methods we are running are leaf methods.
   codegenX86.CompileBaseline(&allocator, true);
@@ -124,7 +128,9 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
     Run(allocator, codegenARM, has_result, expected);
   }
 
-  x86_64::CodeGeneratorX86_64 codegenX86_64(graph, compiler_options);
+  std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
+      X86_64InstructionSetFeatures::FromCppDefines());
+  x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
   codegenX86_64.CompileBaseline(&allocator, true);
   if (kRuntimeISA == kX86_64) {
     Run(allocator, codegenX86_64, has_result, expected);
@@ -175,10 +181,14 @@ static void RunCodeOptimized(HGraph* graph,
                                            compiler_options);
     RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kX86) {
-    x86::CodeGeneratorX86 codegenX86(graph, compiler_options);
+    std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+        X86InstructionSetFeatures::FromCppDefines());
+    x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
     RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kX86_64) {
-    x86_64::CodeGeneratorX86_64 codegenX86_64(graph, compiler_options);
+    std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
+        X86_64InstructionSetFeatures::FromCppDefines());
+    x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
     RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
   }
 }
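All four test-harness changes above follow one pattern: build the
per-architecture InstructionSetFeatures object from compile-time defines and
pass it into the code generator, which can then gate 'round' emission on
SSE4.1. The following is a hedged sketch of what a FromCppDefines-style
factory boils down to; X86FeaturesSketch is a hypothetical stand-in, not ART's
real X86InstructionSetFeatures, which tracks more than this one feature bit:

#include <memory>

// Hypothetical stand-in illustrating how a FromCppDefines() factory can
// derive the SSE4.1 bit from the compiler's predefined macros.
class X86FeaturesSketch {
 public:
  static std::unique_ptr<const X86FeaturesSketch> FromCppDefines() {
#if defined(__SSE4_1__)
    const bool has_sse4_1 = true;   // compiler was invoked with SSE4.1 enabled
#else
    const bool has_sse4_1 = false;  // conservative default: no roundss/roundsd
#endif
    return std::unique_ptr<const X86FeaturesSketch>(
        new X86FeaturesSketch(has_sse4_1));
  }

  bool HasSSE4_1() const { return has_sse4_1_; }

 private:
  explicit X86FeaturesSketch(bool has_sse4_1) : has_sse4_1_(has_sse4_1) {}
  const bool has_sse4_1_;
};

A code generator holding such an object would presumably consult HasSSE4_1()
before emitting roundss/roundsd, and otherwise leave the intrinsic
unimplemented so the Math call falls back to the runtime.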