| author | Prachee Ramsinghani <pracheer@codeaurora.org> | 2011-05-23 17:33:15 +0530 |
|---|---|---|
| committer | Prachee Ramsinghani <pracheer@codeaurora.org> | 2011-06-01 18:11:17 +0530 |
| commit | 628a9221c0d10574d55dd02ea19f284139b706d8 (patch) | |
| tree | d0ee5a22d18dbedae3f73d8e2d5716bfa80606eb | |
| parent | 1c7a9c9c8fccb2636593a752205f6d3e8d37ce01 (diff) | |
msm7627A: Enable 7627A specific memcpy routine
The cache line size on the 7627A is 32 bytes, and the existing memcpy
routine gives sub-optimal performance for that line size. The routine
has therefore been optimized with a 32-byte cache line in mind. Since
the 7627A is currently the only ARMv7 target with a 32-byte cache line,
the optimized code is guarded by CORTEX_CACHE_LINE_32 in memcpy.S and
can be enabled by defining TARGET_CORTEX_CACHE_LINE_32 in
BoardConfig.mk (see the sketch below). This change also adds the
corresponding cflag definition in Android.mk.
Change-Id: I984e8fbb6fa7f32ccfa2264809f1668f7633cb99
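
A minimal sketch of how a device would opt in, assuming a board config
at device/<vendor>/<board>/BoardConfig.mk (the path named in the
memcpy.S comment below); the variable name comes from the Android.mk
hunk, which compares it against "true":

    # Hypothetical BoardConfig.mk fragment for a 7627A-based device;
    # Android.mk checks this variable and adds -DCORTEX_CACHE_LINE_32.
    TARGET_CORTEX_CACHE_LINE_32 := true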
-rw-r--r-- | libc/Android.mk | 4 |
-rw-r--r-- | libc/arch-arm/bionic/memcpy.S | 129 |
2 files changed, 132 insertions, 1 deletion
diff --git a/libc/Android.mk b/libc/Android.mk
index dcd7ba4..6a8034a 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -511,6 +511,10 @@ ifeq ($(TARGET_ARCH),arm)
     libc_common_cflags += -DPLDSIZE=$(TARGET_SCORPION_BIONIC_PLDSIZE)
   endif
 endif
+
+ifeq ($(TARGET_CORTEX_CACHE_LINE_32),true)
+  libc_common_cflags += -DCORTEX_CACHE_LINE_32
+endif
 else # !arm
 ifeq ($(TARGET_ARCH),x86)
     libc_crt_target_cflags := -m32
diff --git a/libc/arch-arm/bionic/memcpy.S b/libc/arch-arm/bionic/memcpy.S
index e92ff5e..0494216 100644
--- a/libc/arch-arm/bionic/memcpy.S
+++ b/libc/arch-arm/bionic/memcpy.S
@@ -139,6 +139,133 @@ memcpy:
         .endfunc
         .end
 #else   /* !SCORPION_NEON_OPTIMIZATION */
+#if defined(CORTEX_CACHE_LINE_32)
+        /*
+         * This can be enabled by setting flag
+         * TARGET_CORTEX_CACHE_LINE_32 in
+         * device/<vendor>/<board>/BoardConfig.mk
+         */
+        .text
+        .fpu    neon
+
+        .global memcpy
+        .type memcpy, %function
+        .align 4
+
+/* a prefetch distance of 4 cache-lines works best experimentally */
+#define CACHE_LINE_SIZE     32
+memcpy:
+        .fnstart
+        .save       {r0, lr}
+        stmfd       sp!, {r0, lr}
+
+        /* start preloading as early as possible */
+        pld         [r1, #(CACHE_LINE_SIZE*0)]
+        pld         [r1, #(CACHE_LINE_SIZE*1)]
+
+        /* do we have at least 16-bytes to copy (needed for alignment below) */
+        cmp         r2, #16
+        blo         5f
+
+        /* align destination to half cache-line for the write-buffer */
+        rsb         r3, r0, #0
+        ands        r3, r3, #0xF
+        beq         0f
+
+        /* copy up to 15-bytes (count in r3) */
+        sub         r2, r2, r3
+        movs        ip, r3, lsl #31
+        ldrmib      lr, [r1], #1
+        strmib      lr, [r0], #1
+        ldrcsb      ip, [r1], #1
+        ldrcsb      lr, [r1], #1
+        strcsb      ip, [r0], #1
+        strcsb      lr, [r0], #1
+        movs        ip, r3, lsl #29
+        bge         1f
+        // copies 4 bytes, destination 32-bits aligned
+        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
+1:      bcc         2f
+        // copies 8 bytes, destination 64-bits aligned
+        vld1.8      {d0}, [r1]!
+        vst1.8      {d0}, [r0, :64]!
+2:
+
+0:      /* preload immediately the next cache line, which we may need */
+        pld         [r1, #(CACHE_LINE_SIZE*0)]
+        pld         [r1, #(CACHE_LINE_SIZE*1)]
+
+        /* make sure we have at least 128 bytes to copy */
+        subs        r2, r2, #128
+        blo         2f
+
+        /* preload all the cache lines we need.
+         * NOTE: the number of pld below depends on PREFETCH_DISTANCE;
+         * ideally we would increase the distance in the main loop to
+         * avoid the goofy code below. In practice this doesn't seem to make
+         * a big difference.
+         */
+        pld         [r1, #(CACHE_LINE_SIZE*2)]
+        pld         [r1, #(CACHE_LINE_SIZE*3)]
+        pld         [r1, #(CACHE_LINE_SIZE*4)]
+
+        .align 3
+1:      /* The main loop copies 128 bytes at a time */
+        subs        r2, r2, #128
+        vld1.8      {d0  - d3},   [r1]!
+        vld1.8      {d4  - d7},   [r1]!
+        pld         [r1, #(CACHE_LINE_SIZE*1)]
+        pld         [r1, #(CACHE_LINE_SIZE*2)]
+        vld1.8      {d16 - d19},  [r1]!
+        vld1.8      {d20 - d23},  [r1]!
+        pld         [r1, #(CACHE_LINE_SIZE*1)]
+        pld         [r1, #(CACHE_LINE_SIZE*2)]
+        vst1.8      {d0  - d3},   [r0, :128]!
+        vst1.8      {d4  - d7},   [r0, :128]!
+        vst1.8      {d16 - d19},  [r0, :128]!
+        vst1.8      {d20 - d23},  [r0, :128]!
+        bhs         1b
+
+2:      /* fix-up the remaining count and make sure we have >= 32 bytes left */
+        add         r2, r2, #128
+        subs        r2, r2, #32
+        blo         4f
+
+3:      /* 32 bytes at a time. These cache lines were already preloaded */
+        vld1.8      {d0 - d3},  [r1]!
+        subs        r2, r2, #32
+        vst1.8      {d0 - d3},  [r0, :128]!
+        bhs         3b
+
+4:      /* less than 32 left */
+        add         r2, r2, #32
+        tst         r2, #0x10
+        beq         5f
+        // copies 16 bytes, 128-bits aligned
+        vld1.8      {d0, d1}, [r1]!
+        vst1.8      {d0, d1}, [r0, :128]!
+
+5:      /* copy up to 15-bytes (count in r2) */
+        movs        ip, r2, lsl #29
+        bcc         1f
+        vld1.8      {d0}, [r1]!
+        vst1.8      {d0}, [r0]!
+1:      bge         2f
+        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0]!
+2:      movs        ip, r2, lsl #31
+        ldrmib      r3, [r1], #1
+        ldrcsb      ip, [r1], #1
+        ldrcsb      lr, [r1], #1
+        strmib      r3, [r0], #1
+        strcsb      ip, [r0], #1
+        strcsb      lr, [r0], #1
+
+        ldmfd       sp!, {r0, lr}
+        bx          lr
+        .fnend
+#else   /* !CORTEX_CACHE_LINE_32 */
 
         .text
         .fpu    neon
@@ -253,7 +380,7 @@ memcpy:
         ldmfd       sp!, {r0, lr}
         bx          lr
         .fnend
-
+#endif
 #endif  /* !SCORPION_NEON_OPTIMIZATION */
 
 #else   /* __ARM_ARCH__ < 7 */
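
For readers less comfortable with NEON assembly, here is a rough C
sketch of the strategy the new routine follows: stream 128 bytes (four
32-byte cache lines) per main-loop iteration while prefetching ahead,
then drain 32-byte and sub-32-byte tails. This is an illustration under
stated assumptions, not the shipped code: sketch_memcpy is a
hypothetical name, and the GCC/Clang builtin __builtin_prefetch plus
small memcpy calls stand in for the pld and vld1.8/vst1.8 instructions.
The destination-alignment prologue of the real routine is omitted.

    /*
     * Rough C sketch of the CORTEX_CACHE_LINE_32 memcpy strategy above.
     * Illustration only: sketch_memcpy is a hypothetical name, and
     * __builtin_prefetch/memcpy stand in for the pld and NEON
     * vld1/vst1 instructions in the real routine.
     */
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define CACHE_LINE_SIZE 32  /* 7627A cache line, per the commit message */

    void *sketch_memcpy(void *dst, const void *src, size_t n)
    {
        uint8_t *d = dst;
        const uint8_t *s = src;

        /* Main loop: 128 bytes (four cache lines) per iteration while
         * prefetching ahead; the asm keeps a distance of four cache
         * lines because that worked best experimentally. */
        while (n >= 128) {
            __builtin_prefetch(s + CACHE_LINE_SIZE * 4);
            memcpy(d, s, 128);  /* 16 NEON d-registers in the asm */
            d += 128; s += 128; n -= 128;
        }

        /* Drain 32-byte chunks (labels 3:/4: in the asm)... */
        while (n >= CACHE_LINE_SIZE) {
            memcpy(d, s, CACHE_LINE_SIZE);
            d += CACHE_LINE_SIZE; s += CACHE_LINE_SIZE; n -= CACHE_LINE_SIZE;
        }

        /* ...then the final 0-31 bytes (labels 5:/1:/2:). */
        if (n)
            memcpy(d, s, n);
        return dst;
    }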