author    Mark Mendell <mark.p.mendell@intel.com>  2015-05-07 11:25:03 -0400
committer Mark Mendell <mark.p.mendell@intel.com>  2015-05-07 12:33:00 -0400
commit    92e83bf8c0b2df8c977ffbc527989631d94b1819 (patch)
tree      fb23a1d027549a68a9ea88bf0eb63f85bc116aee /compiler/optimizing/intrinsics_x86_64.cc
parent    dceab011395333520959cf743d680bcf5dc78797 (diff)
[optimizing] Tune some x86_64 moves
Generate Moves of constant FP values by loading from the constant table.

Use 'movl' to load a 64-bit register for positive 32-bit values, saving a
byte in the generated code by taking advantage of the implicit
zero-extension.

Change a couple of xorq(reg, reg) instructions to xorl to (potentially)
save a byte of code per xor.

Change-Id: I5b2a807f0d3b29294fd4e7b8ef6d654491fa0b01
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
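Background on the 'movl' trick (an illustrative sketch, not code from this
patch): on x86-64, writing a 32-bit register implicitly zero-extends the
result into the full 64-bit register, so a non-negative immediate that fits
in 32 bits can be loaded with 'movl' instead of 'movq', dropping the REX.W
prefix. The helper below is hypothetical, not ART's actual Assembler API:

#include <cstdint>

// Hypothetical helper (not ART's real API): true when a 64-bit immediate
// can be materialized with the shorter 'movl' encoding. Since a 32-bit
// register write zero-extends on x86-64, any value in [0, 0xFFFFFFFF]
// loaded via 'movl' leaves the 64-bit register holding exactly that value.
static bool CanLoadWithMovl(int64_t imm) {
  return imm >= 0 && imm <= INT64_C(0xFFFFFFFF);
}

// A code generator could then pick the encoding roughly like this
// (pseudo-calls for illustration only):
//
//   if (CanLoadWithMovl(value)) {
//     __ movl(reg, Immediate(static_cast<int32_t>(value)));  // zero-extends
//   } else {
//     __ movq(reg, Immediate(value));
//   }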
Diffstat (limited to 'compiler/optimizing/intrinsics_x86_64.cc')
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1fc5432..3dbcd4c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -783,7 +783,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
   __ Bind(&nan);
   // output = 0
-  __ xorq(out, out);
+  __ xorl(out, out);
   __ Bind(&done);
 }
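Why the xorq -> xorl change is safe, and why the saving is only "potential"
(an explanatory note, not part of the patch): clearing a register through
its 32-bit alias zero-extends, so 'xorl eax, eax' leaves the same state as
'xorq rax, rax' while omitting the REX.W prefix. For the extended registers
r8d-r15d a REX.B prefix is still required, so the encoding size is unchanged
there. A small standalone C++ program emulating the register semantics:

#include <cstdint>
#include <cstdio>

int main() {
  // Emulate 'xorl eax, eax' applied to a 64-bit register whose upper half
  // is nonzero. The 32-bit result is implicitly zero-extended, so the full
  // 64-bit register ends up zero, exactly as 'xorq rax, rax' would leave
  // it, while the xorl encoding (31 C0) is a byte shorter than the xorq
  // encoding (48 31 C0).
  uint64_t rax = 0xDEADBEEF00000001ULL;

  uint32_t eax = static_cast<uint32_t>(rax);  // low 32 bits of the register
  eax ^= eax;                                 // xorl eax, eax
  rax = eax;                                  // implicit zero-extension

  printf("rax = %#llx\n", static_cast<unsigned long long>(rax));  // rax = 0
  return 0;
}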