author    Chitti Babu Theegala <ctheegal@codeaurora.org>    2011-11-18 10:25:58 +0530
committer Ricardo Cerqueira <cyanogenmod@cerqueira.org>    2012-07-10 20:32:25 +0100
commit    f9089b10e367465c3c61832431cf7ddd4e33d47f (patch)
tree      d8228a85c53cb49b5d3d973f634c5517ed342265 /libc/arch-arm
parent    467dcf56ba73e66ce4e73c055fa042de5c11633a (diff)
msm7627A: Enable 7627A-specific memcpy routine

The cache line size for the 7627A is 32 bytes, and the existing memcpy
routine gives sub-optimal performance for that line size. The routine has
been optimized with this in mind. The 7627A is currently the only ARMv7
target with a 32-byte cache line, so the optimized code is featurized
under CORTEX_CACHE_LINE_32 in memcpy.S and memset.S, and can be enabled
by defining TARGET_CORTEX_CACHE_LINE_32 in BoardConfig.mk. This change
also adds the corresponding cflag definition in Android.mk.

Change-Id: Idea0d1b977f60e0a690ddee36b1d9c67d3c241ef
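As a rough illustration of the wiring described above (the Android.mk hunk
is not shown in this diff, so the variable names below are assumptions
rather than the commit's actual code), the toggle would look something
like this:

    # device/<vendor>/<board>/BoardConfig.mk -- opt the board in
    TARGET_CORTEX_CACHE_LINE_32 := true

    # libc/Android.mk -- hypothetical sketch: translate the board flag
    # into the CORTEX_CACHE_LINE_32 define that memcpy.S and memset.S
    # are featurized under
    ifeq ($(TARGET_CORTEX_CACHE_LINE_32),true)
      libc_common_cflags += -DCORTEX_CACHE_LINE_32
    endif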
Diffstat (limited to 'libc/arch-arm')
-rw-r--r--   libc/arch-arm/bionic/memcpy.S   130
-rw-r--r--   libc/arch-arm/bionic/memset.S     4
2 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/libc/arch-arm/bionic/memcpy.S b/libc/arch-arm/bionic/memcpy.S
index 3d7446c..4a9aac8 100644
--- a/libc/arch-arm/bionic/memcpy.S
+++ b/libc/arch-arm/bionic/memcpy.S
@@ -140,6 +140,134 @@ memcpy:
.endfunc
.end
#elif !defined(ARCH_ARM_USE_NON_NEON_MEMCPY) /* !SCORPION_NEON_OPTIMIZATION */
+#if defined(CORTEX_CACHE_LINE_32)
+ /*
+  * This can be enabled by setting the flag
+  * TARGET_CORTEX_CACHE_LINE_32 in
+  * device/<vendor>/<board>/BoardConfig.mk
+  */
+ .text
+ .fpu neon
+
+ .global memcpy
+ .type memcpy, %function
+ .align 4
+
+/* a prefetch distance of 4 cache lines works best experimentally */
+#define CACHE_LINE_SIZE 32
+memcpy:
+ .fnstart
+ .save {r0, lr}
+ stmfd sp!, {r0, lr}
+
+ /* start preloading as early as possible */
+ pld [r1, #(CACHE_LINE_SIZE*0)]
+ pld [r1, #(CACHE_LINE_SIZE*1)]
+
+ /* do we have at least 16 bytes to copy? (needed for the alignment code below) */
+ cmp r2, #16
+ blo 5f
+
+ /* align destination to half cache-line for the write-buffer */
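+ /* r3 = (0 - r0) & 0xF: the byte count needed to 16-byte-align r0 */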
+ rsb r3, r0, #0
+ ands r3, r3, #0xF
+ beq 0f
+
+ /* copy up to 15 bytes (count in r3) */
+ sub r2, r2, r3
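+ /* lsl #31 moves bit 0 of r3 into N and bit 1 into C:
+  * mi => copy 1 byte, cs => copy 2 bytes */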
+ movs ip, r3, lsl #31
+ ldrmib lr, [r1], #1
+ strmib lr, [r0], #1
+ ldrcsb ip, [r1], #1
+ ldrcsb lr, [r1], #1
+ strcsb ip, [r0], #1
+ strcsb lr, [r0], #1
+ movs ip, r3, lsl #29
+ bge 1f
+ // copies 4 bytes, destination 32-bits aligned
+ vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
+1: bcc 2f
+ // copies 8 bytes, destination 64-bits aligned
+ vld1.8 {d0}, [r1]!
+ vst1.8 {d0}, [r0, :64]!
+2:
+
+0: /* immediately preload the next cache line, which we may need */
+ pld [r1, #(CACHE_LINE_SIZE*0)]
+ pld [r1, #(CACHE_LINE_SIZE*1)]
+
+ /* make sure we have at least 128 bytes to copy */
+ subs r2, r2, #128
+ blo 2f
+
+ /* preload all the cache lines we need.
+ * NOTE: the number of pld instructions below depends on the prefetch
+ * distance (4 cache lines, per the comment above); ideally we would
+ * increase the distance in the main loop to avoid the goofy code
+ * below. In practice this doesn't seem to make a big difference.
+ */
+ pld [r1, #(CACHE_LINE_SIZE*2)]
+ pld [r1, #(CACHE_LINE_SIZE*3)]
+ pld [r1, #(CACHE_LINE_SIZE*4)]
+
+ .align 3
+1: /* The main loop copies 128 bytes at a time */
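+ /* (four 32-byte cache lines per iteration; the interleaved plds
+  * keep fetching ahead of the loads) */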
+ subs r2, r2, #128
+ vld1.8 {d0 - d3}, [r1]!
+ vld1.8 {d4 - d7}, [r1]!
+ pld [r1, #(CACHE_LINE_SIZE*1)]
+ pld [r1, #(CACHE_LINE_SIZE*2)]
+ vld1.8 {d16 - d19}, [r1]!
+ vld1.8 {d20 - d23}, [r1]!
+ pld [r1, #(CACHE_LINE_SIZE*1)]
+ pld [r1, #(CACHE_LINE_SIZE*2)]
+ vst1.8 {d0 - d3}, [r0, :128]!
+ vst1.8 {d4 - d7}, [r0, :128]!
+ vst1.8 {d16 - d19}, [r0, :128]!
+ vst1.8 {d20 - d23}, [r0, :128]!
+ bhs 1b
+
+2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
+ add r2, r2, #128
+ subs r2, r2, #32
+ blo 4f
+
+3: /* 32 bytes at a time. These cache lines were already preloaded */
+ vld1.8 {d0 - d3}, [r1]!
+ subs r2, r2, #32
+ vst1.8 {d0 - d3}, [r0, :128]!
+ bhs 3b
+
+4: /* less than 32 left */
+ add r2, r2, #32
+ tst r2, #0x10
+ beq 5f
+ // copies 16 bytes, 128-bits aligned
+ vld1.8 {d0, d1}, [r1]!
+ vst1.8 {d0, d1}, [r0, :128]!
+
+5: /* copy up to 15 bytes (count in r2) */
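+ /* lsl #29: cs => bit 3 set, copy 8 bytes; mi => bit 2 set, copy 4 bytes */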
+ movs ip, r2, lsl #29
+ bcc 1f
+ vld1.8 {d0}, [r1]!
+ vst1.8 {d0}, [r0]!
+1: bge 2f
+ vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+ vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
+2: movs ip, r2, lsl #31
+ ldrmib r3, [r1], #1
+ ldrcsb ip, [r1], #1
+ ldrcsb lr, [r1], #1
+ strmib r3, [r0], #1
+ strcsb ip, [r0], #1
+ strcsb lr, [r0], #1
+
+ ldmfd sp!, {r0, lr}
+ bx lr
+ .fnend
+#else /* !CORTEX_CACHE_LINE_32 */
+
.text
.fpu neon
@@ -249,7 +377,7 @@ ENTRY(memcpy)
ldmfd sp!, {r0, lr}
bx lr
END(memcpy)
-
+#endif /* CORTEX_CACHE_LINE_32 */
#endif /* !SCORPION_NEON_OPTIMIZATION */
#else /* __ARM_ARCH__ < 7 */
diff --git a/libc/arch-arm/bionic/memset.S b/libc/arch-arm/bionic/memset.S
index 3ea5aef..c386e7e 100644
--- a/libc/arch-arm/bionic/memset.S
+++ b/libc/arch-arm/bionic/memset.S
@@ -30,7 +30,7 @@
#include <machine/asm.h>
-#if defined(SCORPION_NEON_OPTIMIZATION)
+#if (defined(SCORPION_NEON_OPTIMIZATION) || defined(CORTEX_CACHE_LINE_32))
.code 32
.align 8
.global memset
@@ -111,7 +111,7 @@ memset:
bx lr
.end
-#else /* !SCORPION_NEON_OPTIMIZATION */
+#else /* !(SCORPION_NEON_OPTIMIZATION || CORTEX_CACHE_LINE_32) */
/*