diff options
author | Henrik Smiding <henrik.smiding@stericsson.com> | 2010-09-15 16:08:03 +0200 |
---|---|---|
committer | Christian Bejram <christian.bejram@stericsson.com> | 2012-05-07 14:18:02 +0200 |
commit | fe6338da9168330d44b409b2ee36103e8bfe6697 (patch) | |
tree | 904bc5df5c840b96996259fb9658b492161117e3 | |
parent | f7db5ecc4d662da8368f31d57401ca1a39690e40 (diff) | |
download | bionic-fe6338da9168330d44b409b2ee36103e8bfe6697.zip bionic-fe6338da9168330d44b409b2ee36103e8bfe6697.tar.gz bionic-fe6338da9168330d44b409b2ee36103e8bfe6697.tar.bz2 |
Adjust memcpy for ARM Cortex A9 cache line size
ARM Cortex A8 uses a 64-byte cache line and ARM Cortex A9 uses a 32-byte
cache line.
The following patch:
Adds code to adjust memcpy cache line size to match A9 cache line
size.
Adds a flag to select between 32-byte and 64-byte cache line
sizes.
Copyright (C) ST-Ericsson SA 2010
Modified neon implementation to fit Cortex A9 cache line size
Author: Henrik Smiding henrik.smiding@stericsson.com for
ST-Ericsson.
Change-Id: I8a55946bfb074e6ec0a14805ed65f73fcd0984a3
Signed-off-by: Christian Bejram <christian.bejram@stericsson.com>
-rw-r--r-- | libc/Android.mk | 8 | ||||
-rw-r--r-- | libc/arch-arm/bionic/memcpy.S | 33 |
2 files changed, 37 insertions, 4 deletions
diff --git a/libc/Android.mk b/libc/Android.mk index 9881d59..2628507 100644 --- a/libc/Android.mk +++ b/libc/Android.mk @@ -471,6 +471,14 @@ ifeq ($(TARGET_ARCH),arm) ifeq ($(ARCH_ARM_HAVE_TLS_REGISTER),true) libc_common_cflags += -DHAVE_ARM_TLS_REGISTER endif + # + # Define HAVE_32_BYTE_CACHE_LINES to indicate to C + # library it should use the 32-byte version of memcpy, and not + # the 64-byte version. + # + ifeq ($(ARCH_ARM_HAVE_32_BYTE_CACHE_LINES),true) + libc_common_cflags += -DHAVE_32_BYTE_CACHE_LINE + endif else # !arm ifeq ($(TARGET_ARCH),x86) libc_crt_target_cflags := diff --git a/libc/arch-arm/bionic/memcpy.S b/libc/arch-arm/bionic/memcpy.S index 438fa00..815b5f6 100644 --- a/libc/arch-arm/bionic/memcpy.S +++ b/libc/arch-arm/bionic/memcpy.S @@ -34,23 +34,28 @@ .text .fpu neon +#ifdef HAVE_32_BYTE_CACHE_LINE +/* a prefetch distance of 2 cache-lines */ +#define CACHE_LINE_SIZE 32 +#define PREFETCH_DISTANCE (CACHE_LINE_SIZE*2) +#else /* a prefetch distance of 4 cache-lines works best experimentally */ #define CACHE_LINE_SIZE 64 #define PREFETCH_DISTANCE (CACHE_LINE_SIZE*4) +#endif ENTRY(memcpy) .save {r0, lr} - stmfd sp!, {r0, lr} - /* start preloading as early as possible */ pld [r1, #(CACHE_LINE_SIZE*0)] + stmfd sp!, {r0, lr} pld [r1, #(CACHE_LINE_SIZE*1)] /* do we have at least 16-bytes to copy (needed for alignment below) */ cmp r2, #16 blo 5f - /* align destination to half cache-line for the write-buffer */ + /* align destination to cache-line for the write-buffer */ rsb r3, r0, #0 ands r3, r3, #0xF beq 0f @@ -79,6 +84,26 @@ ENTRY(memcpy) pld [r1, #(CACHE_LINE_SIZE*0)] pld [r1, #(CACHE_LINE_SIZE*1)] +#ifdef HAVE_32_BYTE_CACHE_LINE + /* make sure we have at least 32 bytes to copy */ + subs r2, r2, #32 + blo 4f + + /* preload all the cache lines we need. + * NOTE: the number of pld below depends on PREFETCH_DISTANCE, + * ideally we would increase the distance in the main loop to + * avoid the goofy code below. In practice this doesn't seem to make + * a big difference. */ + pld [r1, #(PREFETCH_DISTANCE)] + +1: /* The main loop copies 32 bytes at a time */ + vld1.8 {d0 - d3}, [r1]! + pld [r1, #(PREFETCH_DISTANCE)] + subs r2, r2, #32 + vst1.8 {d0 - d3}, [r0, :128]! + bhs 1b +#else /* make sure we have at least 64 bytes to copy */ subs r2, r2, #64 blo 2f @@ -112,7 +137,7 @@ ENTRY(memcpy) subs r2, r2, #32 vst1.8 {d0 - d3}, [r0, :128]! bhs 3b - +#endif 4: /* less than 32 left */ add r2, r2, #32 tst r2, #0x10 |