path: root/compiler/utils/x86_64
author     Mark Mendell <mark.p.mendell@intel.com>  2015-03-31 22:16:59 -0400
committer  Mark Mendell <mark.p.mendell@intel.com>  2015-04-01 08:45:38 -0400
commit     fb8d279bc011b31d0765dc7ca59afea324fd0d0c (patch)
tree       01b21964ce0516bda835faa15b260ac290714fe0 /compiler/utils/x86_64
parent     dcff612c3a6e1427749771c4559f198fa480f709 (diff)
[optimizing] Implement x86/x86_64 math intrinsics
Implement floor/ceil/round/RoundFloat on x86 and x86_64, and RoundDouble on x86_64. Add support for the roundss and roundsd instructions on both architectures, and support them in the disassembler as well. Add instruction set features for x86, since the 'round' instruction is only available when SSE4.1 is supported. Fix the tests to handle passing the instruction set features to x86 and x86_64, and add assembler tests for roundsd and roundss to the x86_64 assembler tests.

Change-Id: I9742d5930befb0bbc23f3d6c83ce0183ed9fe04f
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
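For reference, roundss/roundsd take an 8-bit immediate whose low two bits select the rounding mode (0 = round to nearest even, 1 = toward -inf, 2 = toward +inf, 3 = truncate), which is why floor and ceil can each be expressed with a single instruction. A minimal sketch of how a code generator might invoke the new assembler entry points follows; the 'assembler', 'out' and 'in' names are placeholders for illustration, not identifiers introduced by this change:

    // Hedged sketch only: register choice and surrounding state are illustrative.
    x86_64::XmmRegister out(x86_64::XMM0);
    x86_64::XmmRegister in(x86_64::XMM1);
    assembler->roundsd(out, in, x86_64::Immediate(1));  // 1 = round toward -inf (floor)
    assembler->roundsd(out, in, x86_64::Immediate(2));  // 2 = round toward +inf (ceil)
    assembler->roundss(out, in, x86_64::Immediate(0));  // 0 = round to nearest even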
Diffstat (limited to 'compiler/utils/x86_64')
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc       | 24
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h        |  3
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc  |  8
3 files changed, 35 insertions, 0 deletions
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index bd155ed..e82d90c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -796,6 +796,30 @@ void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) {
}
+void X86_64Assembler::roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x3A);
+ EmitUint8(0x0B);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+ EmitUint8(imm.value());
+}
+
+
+void X86_64Assembler::roundss(XmmRegister dst, XmmRegister src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x3A);
+ EmitUint8(0x0A);
+ EmitXmmRegisterOperand(dst.LowBits(), src);
+ EmitUint8(imm.value());
+}
+
+
void X86_64Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
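For anyone checking the bytes emitted above: the helpers follow the SSE4.1 encodings 66 0F 3A 0B /r ib (roundsd) and 66 0F 3A 0A /r ib (roundss), with EmitOptionalRex32 inserting a REX prefix only when xmm8-xmm15 are involved. A rough sanity-check sketch for the no-REX case (a standalone illustration, not part of the patch):

    // roundsd %xmm1, %xmm0 with immediate 1, low registers so no REX byte:
    // prefix 0x66, opcode 0x0F 0x3A 0x0B, ModRM 0xC1 (mod=11, reg=xmm0, rm=xmm1), imm8.
    const uint8_t expected_roundsd[] = {0x66, 0x0F, 0x3A, 0x0B, 0xC1, 0x01};
    // roundss differs only in the final opcode byte (0x0A instead of 0x0B).
    const uint8_t expected_roundss[] = {0x66, 0x0F, 0x3A, 0x0A, 0xC1, 0x01};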
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 495f74f..39f781c 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -353,6 +353,9 @@ class X86_64Assembler FINAL : public Assembler {
void ucomiss(XmmRegister a, XmmRegister b);
void ucomisd(XmmRegister a, XmmRegister b);
+ void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
+ void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
+
void sqrtsd(XmmRegister dst, XmmRegister src);
void sqrtss(XmmRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 00f508b..4402dfc 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -692,6 +692,14 @@ TEST_F(AssemblerX86_64Test, Sqrtsd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::sqrtsd, "sqrtsd %{reg2}, %{reg1}"), "sqrtsd");
}
+TEST_F(AssemblerX86_64Test, Roundss) {
+ DriverStr(RepeatFFI(&x86_64::X86_64Assembler::roundss, 1, "roundss ${imm}, %{reg2}, %{reg1}"), "roundss");
+}
+
+TEST_F(AssemblerX86_64Test, Roundsd) {
+ DriverStr(RepeatFFI(&x86_64::X86_64Assembler::roundsd, 1, "roundsd ${imm}, %{reg2}, %{reg1}"), "roundsd");
+}
+
TEST_F(AssemblerX86_64Test, Xorps) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::xorps, "xorps %{reg2}, %{reg1}"), "xorps");
}
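The RepeatFFI driver used in the new tests expands the assembler call over all XMM register pairs and over every immediate value that fits in the given byte width, then matches the output against the format string (AT&T syntax, so the immediate comes first). A hedged single-case sketch of what one such expansion corresponds to; the test name is hypothetical and the fixture helpers are assumed to come from the existing AssemblerX86_64Test base:

    TEST_F(AssemblerX86_64Test, RoundssSingleCase) {  // illustrative only, not in the patch
      GetAssembler()->roundss(x86_64::XmmRegister(x86_64::XMM0),
                              x86_64::XmmRegister(x86_64::XMM1),
                              x86_64::Immediate(0));
      DriverStr("roundss $0, %xmm1, %xmm0\n", "roundss_single");
    }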