author     Dirk Rettschlag <dirk.rettschlag@gmail.com>  2013-02-21 13:56:50 +0100
committer  Steve Kondik <shade@chemlab.org>             2013-02-21 22:42:08 -0800
commit     5b11f41dcefa665fff2764c0e7d32b4354db677b (patch)
tree       73b2242632873c5aeff49cc32622ea918324569f
parent     46876cad1e6a4c9b15f1f85883409b6f1f3aea4d (diff)
libc: Add optimized strcmp
This version is from Newlib. Original comment of the author:

The attached patch provides a new implementation of strcmp for ARM, using LDRD instead of LDR whenever possible. For older architectures that do not support LDRD, this implementation uses the same algorithm as before. This patch replaces strcmp.c with strcmp.S. The huge inline assembly from strcmp.c was converted into plain assembly and included in strcmp.S under the appropriate predefines.

Testing and benchmarking:

* Validation: successfully passes a test that compares different strings of length 1-128 and offsets 0-8 from a word boundary. Checked on qemu/A15/A9, ARM/Thumb mode, big/little endian. This test is also added to the newlib testsuite as part of this patch.

* Integration with gcc: no regression on qemu for arm-none-eabi --with-cpu a15/a9 --with-mode arm/thumb.

* Performance (relative to the current strcmp in newlib, ARM mode only): on Dhrystone, the new implementation (ldrd) is 22% faster on Cortex-A15 FPGA and 16% faster on Cortex-A9 VE2. On synthetic benchmarks, which measure the average number of cycles for strcmp on equal strings of length 4-128K at offsets 0, 1, 2, 3, 4, and 8 from a word boundary, the new implementation is three times faster for long strings when the input strings have the same offset from a word boundary, and up to 30% faster in other cases, on both A15 FPGA and A9 VE2.

Change-Id: I7a39770cad48c2ecc6b6d375165652cd4f0c0619
Signed-off-by: Dirk Rettschlag <dirk.rettschlag@gmail.com>
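Both the byte loops and the word loops in this patch hinge on the classic branch-free test for a zero byte inside a loaded word. A minimal C sketch of that test (illustrative only, not part of the patch):

    #include <stdint.h>

    /* Nonzero iff some byte of w is 0x00: subtracting 0x01010101 sets the
       top bit of each byte that was zero (borrows from lower bytes can set
       extra bits), and "& ~w" clears the top bits of bytes >= 0x80.  As a
       yes/no test this is exact; individual syndrome bits past the first
       zero byte can be spurious, which is what the "0x01 0x00" comments in
       the assembly below refer to. */
    static int has_zero_byte(uint32_t w) {
        return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
    }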
-rw-r--r--  libc/arch-arm/bionic/strcmp.S  | 941
-rw-r--r--  tests/string_benchmark.cpp     |  21
2 files changed, 727 insertions, 235 deletions
diff --git a/libc/arch-arm/bionic/strcmp.S b/libc/arch-arm/bionic/strcmp.S
index 764a531..31f6123 100644
--- a/libc/arch-arm/bionic/strcmp.S
+++ b/libc/arch-arm/bionic/strcmp.S
@@ -1,6 +1,5 @@
/*
- * Copyright (c) 2011 The Android Open Source Project
- * Copyright (c) 2008 ARM Ltd
+ * Copyright (c) 2012 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,288 +29,760 @@
#include <machine/cpu-features.h>
#include <machine/asm.h>
- .text
-
#ifdef __ARMEB__
-#define SHFT2LSB lsl
-#define SHFT2LSBEQ lsleq
-#define SHFT2MSB lsr
-#define SHFT2MSBEQ lsreq
+#define S2LOMEM lsl
+#define S2LOMEMEQ lsleq
+#define S2HIMEM lsr
#define MSB 0x000000ff
#define LSB 0xff000000
-#else
-#define SHFT2LSB lsr
-#define SHFT2LSBEQ lsreq
-#define SHFT2MSB lsl
-#define SHFT2MSBEQ lsleq
+#define BYTE0_OFFSET 24
+#define BYTE1_OFFSET 16
+#define BYTE2_OFFSET 8
+#define BYTE3_OFFSET 0
+#else /* not __ARMEB__ */
+#define S2LOMEM lsr
+#define S2LOMEMEQ lsreq
+#define S2HIMEM lsl
+#define BYTE0_OFFSET 0
+#define BYTE1_OFFSET 8
+#define BYTE2_OFFSET 16
+#define BYTE3_OFFSET 24
#define MSB 0xff000000
#define LSB 0x000000ff
+#endif /* not __ARMEB__ */
+
+.syntax unified
+
+#if defined (__thumb__)
+ .thumb
+ .thumb_func
#endif
+ .global strcmp
+ .type strcmp, %function
+strcmp:
+
+#if (defined (__thumb__) && !defined (__thumb2__))
+1:
+ ldrb r2, [r0]
+ ldrb r3, [r1]
+ adds r0, r0, #1
+ adds r1, r1, #1
+ cmp r2, #0
+ beq 2f
+ cmp r2, r3
+ beq 1b
+2:
+ subs r0, r2, r3
+ bx lr
+#elif (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
+1:
+ ldrb r2, [r0], #1
+ ldrb r3, [r1], #1
+ cmp r2, #1
+ it cs
+ cmpcs r2, r3
+ beq 1b
+ subs r0, r2, r3
+ RETURN
+
+
+#elif (__ARM_ARCH__ >= 6)
+ /* Use LDRD whenever possible. */
+
+/* The main thing to look out for when comparing large blocks is that
+ the loads do not cross a page boundary when loading past the index
+ of the byte with the first difference or the first string-terminator.
+
+ For example, if the strings are identical and the string-terminator
+ is at index k, byte by byte comparison will not load beyond address
+ s1+k and s2+k; word by word comparison may load up to 3 bytes beyond
+ k; double word - up to 7 bytes. If the load of these bytes crosses
+ a page boundary, it might cause a memory fault (if the page is not mapped)
+ that would not have happened in byte by byte comparison.
+
+ If an address is (double) word aligned, then a load of a (double) word
+ from that address will not cross a page boundary.
+ Therefore, the algorithm below considers word and double-word alignment
+ of strings separately. */
+
+/* High-level description of the algorithm.
+
+ * The fast path: if both strings are double-word aligned,
+ use LDRD to load two words from each string in every loop iteration.
+ * If the strings have the same offset from a word boundary,
+ use LDRB to load and compare byte by byte until
+ the first string is aligned to a word boundary (at most 3 bytes).
+ This is optimized for quick return on short unaligned strings.
+ * If the strings have the same offset from a double-word boundary,
+ use LDRD to load two words from each string in every loop iteration, as in the fast path.
+ * If the strings do not have the same offset from a double-word boundary,
+ load a word from the second string before the loop to initialize the queue.
+ Use LDRD to load two words from each string in every loop iteration.
+ Inside the loop, load the second word from the second string only after comparing
+ the first word, using the queued value, to guarantee safety across page boundaries.
+ * If the strings do not have the same offset from a word boundary,
+ use LDR and a shift queue. Order of loads and comparisons matters,
+ similarly to the previous case.
+
+ * Use UADD8 and SEL to compare words, and use REV and CLZ to compute the return value.
+ * The only difference between ARM and Thumb modes is the use of CBZ instruction.
+ * The only difference between big and little endian is the use of REV in little endian
+ to compute the return value, instead of MOV.
+ * No preload. [TODO.]
+*/
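In C, the dispatch just described is roughly the following (a sketch, not part of the patch; the compare_* helpers are hypothetical names for the labeled blocks below):

    #include <stdint.h>

    int strcmp_sketch(const char* s1, const char* s2) {
        uintptr_t a1 = (uintptr_t) s1, a2 = (uintptr_t) s2;
        if (((a1 | a2) & 7) == 0)
            return compare_doubleword_loop(s1, s2);   /* fast path: LDRD */
        while ((uintptr_t) s1 & 3) {                  /* at most 3 bytes */
            if (*s1 == '\0' || *s1 != *s2)
                return (unsigned char) *s1 - (unsigned char) *s2;
            s1++, s2++;
        }
        if ((uintptr_t) s2 & 3)
            return compare_shift_queue(s1, s2);       /* miscmp_word cases */
        return compare_doubleword_queued(s1, s2);     /* LDRD; queue a word
                                                         if 8-byte phases differ */
    }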
+
+ .macro m_cbz reg label
+#ifdef __thumb2__
+ cbz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ beq \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbz */
+
+ .macro m_cbnz reg label
+#ifdef __thumb2__
+ cbnz \reg, \label
+#else /* not defined __thumb2__ */
+ cmp \reg, #0
+ bne \label
+#endif /* not defined __thumb2__ */
+ .endm /* m_cbnz */
+
+ .macro init
+ /* Macro to save temporary registers and prepare magic values. */
+ subs sp, sp, #16
+ strd r4, r5, [sp, #8]
+ strd r6, r7, [sp]
+ mvn r6, #0 /* all F */
+ mov r7, #0 /* all 0 */
+ .endm /* init */
+
+ .macro magic_compare_and_branch w1 w2 label
+ /* Macro to compare registers w1 and w2 and conditionally branch to label. */
+ cmp \w1, \w2 /* Are w1 and w2 the same? */
+ magic_find_zero_bytes \w1
+ it eq
+ cmpeq ip, #0 /* Is there a zero byte in w1? */
+ bne \label
+ .endm /* magic_compare_and_branch */
+
+ .macro magic_find_zero_bytes w1
+ /* Macro to find all-zero bytes in w1, result is in ip. */
+#if (defined (__ARM_FEATURE_DSP))
+ uadd8 ip, \w1, r6
+ sel ip, r7, r6
+#else /* not defined (__ARM_FEATURE_DSP) */
+ /* __ARM_FEATURE_DSP is not defined for some Cortex-M processors.
+ Coincidentally, these processors only have Thumb-2 mode, where we can use
+ the (large) magic constant available directly as an immediate in instructions.
+ Note that we cannot use the magic constant in ARM mode, where we need
+ to create the constant in a register. */
+ sub ip, \w1, #0x01010101
+ bic ip, ip, \w1
+ and ip, ip, #0x80808080
+#endif /* not defined (__ARM_FEATURE_DSP) */
+ .endm /* magic_find_zero_bytes */
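The DSP variant leans on UADD8/SEL: UADD8 adds 0xFF to every byte and sets the per-byte GE flags on carry-out, which happens exactly for nonzero bytes; SEL then assembles a result taking r7 (0x00) where GE is set and r6 (0xFF) where it is not. A per-byte C model of the mask this produces (a sketch, not part of the patch):

    #include <stdint.h>

    /* Models "uadd8 ip, w, r6; sel ip, r7, r6" with r6 = 0xFFFFFFFF and
       r7 = 0: the result has 0xFF in each byte of w that is zero. */
    static uint32_t zero_byte_mask(uint32_t w) {
        uint32_t mask = 0;
        for (int i = 0; i < 4; i++) {
            if (((w >> (8 * i)) & 0xFFu) == 0)
                mask |= 0xFFu << (8 * i);
        }
        return mask;
    }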
+
+ .macro setup_return w1 w2
+#ifdef __ARMEB__
+ mov r1, \w1
+ mov r2, \w2
+#else /* not __ARMEB__ */
+ rev r1, \w1
+ rev r2, \w2
+#endif /* not __ARMEB__ */
+ .endm /* setup_return */
+
+ /*
+ optpld r0, #0
+ optpld r1, #0
+ */
+
+ /* Are both strings double-word aligned? */
+ orr ip, r0, r1
+ tst ip, #7
+ bne do_align
+
+ /* Fast path. */
+ init
+
+doubleword_aligned:
+
+ /* Get here when the strings to compare are double-word aligned. */
+ /* Compare two words in every iteration. */
+ .p2align 2
+2:
+ /*
+ optpld r0, #16
+ optpld r1, #16
+ */
+
+ /* Load the next double-word from each string. */
+ ldrd r2, r3, [r0], #8
+ ldrd r4, r5, [r1], #8
+
+ magic_compare_and_branch w1=r2, w2=r4, label=return_24
+ magic_compare_and_branch w1=r3, w2=r5, label=return_35
+ b 2b
+
+do_align:
+ /* Is the first string word-aligned? */
+ ands ip, r0, #3
+ beq word_aligned_r0
+
+ /* Fast compare byte by byte until the first string is word-aligned. */
+ /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
+ to read until the next word boundary is 4-ip. */
+ bic r0, r0, #3
+ ldr r2, [r0], #4
+ lsls ip, ip, #31
+ beq byte2
+ bcs byte3
+
+byte1:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE1_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte2:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE2_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbz reg=r3, label=fast_return
+
+byte3:
+ ldrb ip, [r1], #1
+ uxtb r3, r2, ror #BYTE3_OFFSET
+ subs ip, r3, ip
+ bne fast_return
+ m_cbnz reg=r3, label=word_aligned_r0
+
+fast_return:
+ mov r0, ip
+ bx lr
+
+word_aligned_r0:
+ init
+ /* The first string is word-aligned. */
+ /* Is the second string word-aligned? */
+ ands ip, r1, #3
+ bne strcmp_unaligned
+
+word_aligned:
+ /* The strings are word-aligned. */
+ /* Is the first string double-word aligned? */
+ tst r0, #4
+ beq doubleword_aligned_r0
+
+ /* If r0 is not double-word aligned yet, align it by loading
+ and comparing the next word from each string. */
+ ldr r2, [r0], #4
+ ldr r4, [r1], #4
+ magic_compare_and_branch w1=r2 w2=r4 label=return_24
+
+doubleword_aligned_r0:
+ /* Get here when r0 is double-word aligned. */
+ /* Is r1 doubleword_aligned? */
+ tst r1, #4
+ beq doubleword_aligned
+
+ /* Get here when the strings to compare are word-aligned,
+ r0 is double-word aligned, but r1 is not double-word aligned. */
+ /* Initialize the queue. */
+ ldr r5, [r1], #4
+
+ /* Compare two words in every iteration. */
+ .p2align 2
+3:
+ /*
+ optpld r0, #16
+ optpld r1, #16
+ */
+
+ /* Load the next double-word from each string and compare. */
+ ldrd r2, r3, [r0], #8
+ magic_compare_and_branch w1=r2 w2=r5 label=return_25
+ ldrd r4, r5, [r1], #8
+ magic_compare_and_branch w1=r3 w2=r4 label=return_34
+ b 3b
+
+ .macro miscmp_word offsetlo offsethi
+ /* Macro to compare misaligned strings. */
+ /* r0, r1 are word-aligned, and at least one of the strings
+ is not double-word aligned. */
+ /* Compare one word in every loop iteration. */
+ /* OFFSETLO is the original bit-offset of r1 from a word-boundary,
+ OFFSETHI is 32 - OFFSETLO (i.e., offset from the next word). */
+
+ /* Initialize the shift queue. */
+ ldr r5, [r1], #4
+
+ /* Compare one word from each string in every loop iteration. */
+ .p2align 2
+7:
+ ldr r3, [r0], #4
+ S2LOMEM r5, r5, #\offsetlo
+ magic_find_zero_bytes w1=r3
+ cmp r7, ip, S2HIMEM #\offsetlo
+ and r2, r3, r6, S2LOMEM #\offsetlo
+ it eq
+ cmpeq r2, r5
+ bne return_25
+ ldr r5, [r1], #4
+ cmp ip, #0
+ eor r3, r2, r3
+ S2HIMEM r2, r5, #\offsethi
+ it eq
+ cmpeq r3, r2
+ bne return_32
+ b 7b
+ .endm /* miscmp_word */
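The queue holds the word of the second string loaded on the previous iteration, so each round compares one aligned word of the first string against a spliced word of the second, and the next load is issued only after the bytes already in hand have been checked; that ordering is what keeps the loop from loading past the terminator's page. In C, roughly (a little-endian sketch using has_zero_byte from the earlier sketch, not the macro's exact schedule):

    /* wp1, wp2: word pointers; offsetlo = 8 * (s2 & 3), offsethi = 32 - offsetlo. */
    uint32_t queued = *wp2++;                   /* prime the queue */
    for (;;) {
        uint32_t w1 = *wp1++;
        uint32_t lo = queued >> offsetlo;       /* s2 bytes already in hand */
        if (((w1 ^ lo) & (0xFFFFFFFFu >> offsetlo)) != 0 || has_zero_byte(w1))
            break;                              /* decided without a new load */
        queued = *wp2++;                        /* safe: no terminator in w1 */
        if (w1 != (lo | (queued << offsethi)))
            break;                              /* difference in the high part */
    }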
+
+strcmp_unaligned:
+ /* r0 is word-aligned, r1 is at offset ip from a word. */
+ /* Align r1 to the (previous) word-boundary. */
+ bic r1, r1, #3
+
+ /* Unaligned comparison word by word using LDRs. */
+ cmp ip, #2
+ beq miscmp_word_16 /* If ip == 2. */
+ bge miscmp_word_24 /* If ip == 3. */
+ miscmp_word offsetlo=8 offsethi=24 /* If ip == 1. */
+miscmp_word_16: miscmp_word offsetlo=16 offsethi=16
+miscmp_word_24: miscmp_word offsetlo=24 offsethi=8
+
+
+return_32:
+ setup_return w1=r3, w2=r2
+ b do_return
+return_34:
+ setup_return w1=r3, w2=r4
+ b do_return
+return_25:
+ setup_return w1=r2, w2=r5
+ b do_return
+return_35:
+ setup_return w1=r3, w2=r5
+ b do_return
+return_24:
+ setup_return w1=r2, w2=r4
+
+do_return:
+
+#ifdef __ARMEB__
+ mov r0, ip
+#else /* not __ARMEB__ */
+ rev r0, ip
+#endif /* not __ARMEB__ */
+
+ /* Restore temporaries early, before computing the return value. */
+ ldrd r6, r7, [sp]
+ ldrd r4, r5, [sp, #8]
+ adds sp, sp, #16
+
+ /* There is a zero or a different byte between r1 and r2. */
+ /* r0 contains a mask of all-zero bytes in r1. */
+ /* Using r0 and not ip here because cbz requires low register. */
+ m_cbz reg=r0, label=compute_return_value
+ clz r0, r0
+ /* r0 contains the number of bits on the left of the first all-zero byte in r1. */
+ rsb r0, r0, #24
+ /* Here, r0 contains the number of bits on the right of the first all-zero byte in r1. */
+ lsr r1, r1, r0
+ lsr r2, r2, r0
+
+compute_return_value:
+ movs r0, #1
+ cmp r1, r2
+ /* The return value is computed as follows.
+ If r1>r2 then (C==1 and Z==0) and LS doesn't hold and r0 is #1 at return.
+ If r1<r2 then (C==0 and Z==0) and we execute SBC with carry_in=0,
+ which means r0:=r0-r0-1 and r0 is #-1 at return.
+ If r1=r2 then (C==1 and Z==1) and we execute SBC with carry_in=1,
+ which means r0:=r0-r0 and r0 is #0 at return.
+ (C==0 and Z==1) cannot happen because the carry bit is "not borrow". */
+ it ls
+ sbcls r0, r0, r0
+ bx lr
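The MOVS/CMP/SBCLS tail maps the three possible outcomes onto {1, 0, -1} without branching. A C model of the flag arithmetic (illustration only, not part of the patch):

    #include <stdint.h>

    /* After "movs r0, #1; cmp r1, r2": C = (r1 >= r2), Z = (r1 == r2).
       LS holds when C is clear or Z is set, and "sbc r0, r0, r0"
       computes r0 - r0 - (1 - C). */
    static int model_return_value(uint32_t r1, uint32_t r2) {
        int r0 = 1;
        int C = (r1 >= r2);
        int Z = (r1 == r2);
        if (!C || Z)                  /* condition LS */
            r0 = 0 - (1 - C);         /* sbc with both operands equal */
        return r0;                    /* 1 if r1 > r2, 0 if equal, -1 if r1 < r2 */
    }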
+
+
+#else /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) ||
+ defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
+ (defined (__thumb__) && !defined (__thumb2__))) */
+
+ /* Use LDR whenever possible. */
+
+#ifdef __thumb2__
+#define magic1(REG) 0x01010101
+#define magic2(REG) 0x80808080
+#else
#define magic1(REG) REG
#define magic2(REG) REG, lsl #7
+#endif
-ENTRY(strcmp)
- PLD(r0, #0)
- PLD(r1, #0)
- eor r2, r0, r1
- tst r2, #3
-
- /* Strings not at same byte offset from a word boundary. */
- bne .Lstrcmp_unaligned
- ands r2, r0, #3
- bic r0, r0, #3
- bic r1, r1, #3
- ldr ip, [r0], #4
- it eq
- ldreq r3, [r1], #4
- beq 1f
-
- /* Although s1 and s2 have identical initial alignment, they are
- * not currently word aligned. Rather than comparing bytes,
- * make sure that any bytes fetched from before the addressed
- * bytes are forced to 0xff. Then they will always compare
- * equal.
- */
- eor r2, r2, #3
- lsl r2, r2, #3
- mvn r3, #MSB
- SHFT2LSB r2, r3, r2
- ldr r3, [r1], #4
- orr ip, ip, r2
- orr r3, r3, r2
+ optpld r0
+ optpld r1
+ eor r2, r0, r1
+ tst r2, #3
+ /* Strings not at same byte offset from a word boundary. */
+ bne strcmp_unaligned
+ ands r2, r0, #3
+ bic r0, r0, #3
+ bic r1, r1, #3
+ ldr ip, [r0], #4
+ it eq
+ ldreq r3, [r1], #4
+ beq 1f
+ /* Although s1 and s2 have identical initial alignment, they are
+ not currently word aligned. Rather than comparing bytes,
+ make sure that any bytes fetched from before the addressed
+ bytes are forced to 0xff. Then they will always compare
+ equal. */
+ eor r2, r2, #3
+ lsl r2, r2, #3
+ mvn r3, MSB
+ S2LOMEM r2, r3, r2
+ ldr r3, [r1], #4
+ orr ip, ip, r2
+ orr r3, r3, r2
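Forcing the stray leading bytes to 0xFF, instead of comparing them, works because a 0xFF byte is then identical in both words and can never be mistaken for a terminator. For the little-endian case, in C (a sketch, not part of the patch; off is the shared byte offset, 1 to 3):

    uint32_t mask = 0xFFFFFFFFu >> (8 * (4 - off));  /* covers the off junk bytes */
    uint32_t w1 = *wp1++ | mask;
    uint32_t w2 = *wp2++ | mask;                     /* junk now 0xFF in both */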
1:
- /* Load the 'magic' constant 0x01010101. */
- str r4, [sp, #-4]!
- mov r4, #1
- orr r4, r4, r4, lsl #8
- orr r4, r4, r4, lsl #16
- .p2align 2
+#ifndef __thumb2__
+ /* Load the 'magic' constant 0x01010101. */
+ str r4, [sp, #-4]!
+ mov r4, #1
+ orr r4, r4, r4, lsl #8
+ orr r4, r4, r4, lsl #16
+#endif
+ .p2align 2
4:
- PLD(r0, #8)
- PLD(r1, #8)
- sub r2, ip, magic1(r4)
- cmp ip, r3
- itttt eq
-
- /* check for any zero bytes in first word */
- biceq r2, r2, ip
- tsteq r2, magic2(r4)
- ldreq ip, [r0], #4
- ldreq r3, [r1], #4
- beq 4b
+ optpld r0, #8
+ optpld r1, #8
+ sub r2, ip, magic1(r4)
+ cmp ip, r3
+ itttt eq
+ /* check for any zero bytes in first word */
+ biceq r2, r2, ip
+ tsteq r2, magic2(r4)
+ ldreq ip, [r0], #4
+ ldreq r3, [r1], #4
+ beq 4b
2:
- /* There's a zero or a different byte in the word */
- SHFT2MSB r0, ip, #24
- SHFT2LSB ip, ip, #8
- cmp r0, #1
- it cs
- cmpcs r0, r3, SHFT2MSB #24
- it eq
- SHFT2LSBEQ r3, r3, #8
- beq 2b
- /* On a big-endian machine, r0 contains the desired byte in bits
- * 0-7; on a little-endian machine they are in bits 24-31. In
- * both cases the other bits in r0 are all zero. For r3 the
- * interesting byte is at the other end of the word, but the
- * other bits are not necessarily zero. We need a signed result
- * representing the differnece in the unsigned bytes, so for the
- * little-endian case we can't just shift the interesting bits up.
- */
+ /* There's a zero or a different byte in the word */
+ S2HIMEM r0, ip, #24
+ S2LOMEM ip, ip, #8
+ cmp r0, #1
+ it cs
+ cmpcs r0, r3, S2HIMEM #24
+ it eq
+ S2LOMEMEQ r3, r3, #8
+ beq 2b
+ /* On a big-endian machine, r0 contains the desired byte in bits
+ 0-7; on a little-endian machine they are in bits 24-31. In
+ both cases the other bits in r0 are all zero. For r3 the
+ interesting byte is at the other end of the word, but the
+ other bits are not necessarily zero. We need a signed result
+ representing the difference in the unsigned bytes, so for the
+ little-endian case we can't just shift the interesting bits
+ up. */
#ifdef __ARMEB__
- sub r0, r0, r3, lsr #24
+ sub r0, r0, r3, lsr #24
#else
- and r3, r3, #255
- /* No RSB instruction in Thumb2 */
+ and r3, r3, #255
#ifdef __thumb2__
- lsr r0, r0, #24
- sub r0, r0, r3
+ /* No RSB instruction in Thumb2 */
+ lsr r0, r0, #24
+ sub r0, r0, r3
#else
- rsb r0, r3, r0, lsr #24
+ rsb r0, r3, r0, lsr #24
#endif
#endif
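Either branch ends up applying the usual strcmp contract to the first differing or terminating byte; with c1 and c2 standing for the bytes extracted into r0 and r3, that is, in C:

    return (int)(unsigned char) c1 - (int)(unsigned char) c2;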
- ldr r4, [sp], #4
- bx lr
-
-.Lstrcmp_unaligned:
- wp1 .req r0
- wp2 .req r1
- b1 .req r2
- w1 .req r4
- w2 .req r5
- t1 .req ip
- @ r3 is scratch
-
- /* First of all, compare bytes until wp1(sp1) is word-aligned. */
+#ifndef __thumb2__
+ ldr r4, [sp], #4
+#endif
+ RETURN
+
+
+strcmp_unaligned:
+
+#if 0
+ /* The assembly code below is based on the following algorithm. */
+#ifdef __ARMEB__
+#define RSHIFT <<
+#define LSHIFT >>
+#else
+#define RSHIFT >>
+#define LSHIFT <<
+#endif
+
+#define body(shift) \
+ mask = 0xffffffffU RSHIFT shift; \
+ w1 = *wp1++; \
+ w2 = *wp2++; \
+ do \
+ { \
+ t1 = w1 & mask; \
+ if (__builtin_expect(t1 != w2 RSHIFT shift, 0)) \
+ { \
+ w2 RSHIFT= shift; \
+ break; \
+ } \
+ if (__builtin_expect(((w1 - b1) & ~w1) & (b1 << 7), 0)) \
+ { \
+ /* See comment in assembler below re syndrome on big-endian */\
+ if ((((w1 - b1) & ~w1) & (b1 << 7)) & mask) \
+ w2 RSHIFT= shift; \
+ else \
+ { \
+ w2 = *wp2; \
+ t1 = w1 RSHIFT (32 - shift); \
+ w2 = (w2 LSHIFT (32 - shift)) RSHIFT (32 - shift); \
+ } \
+ break; \
+ } \
+ w2 = *wp2++; \
+ t1 ^= w1; \
+ if (__builtin_expect(t1 != w2 LSHIFT (32 - shift), 0)) \
+ { \
+ t1 = w1 >> (32 - shift); \
+ w2 = (w2 << (32 - shift)) RSHIFT (32 - shift); \
+ break; \
+ } \
+ w1 = *wp1++; \
+ } while (1)
+
+ const unsigned* wp1;
+ const unsigned* wp2;
+ unsigned w1, w2;
+ unsigned mask;
+ unsigned shift;
+ unsigned b1 = 0x01010101;
+ char c1, c2;
+ unsigned t1;
+
+ while (((unsigned) s1) & 3)
+ {
+ c1 = *s1++;
+ c2 = *s2++;
+ if (c1 == 0 || c1 != c2)
+ return c1 - (int)c2;
+ }
+ wp1 = (unsigned*) (((unsigned)s1) & ~3);
+ wp2 = (unsigned*) (((unsigned)s2) & ~3);
+ t1 = ((unsigned) s2) & 3;
+ if (t1 == 1)
+ {
+ body(8);
+ }
+ else if (t1 == 2)
+ {
+ body(16);
+ }
+ else
+ {
+ body (24);
+ }
+
+ do
+ {
+#ifdef __ARMEB__
+ c1 = (char) (t1 >> 24);
+ c2 = (char) (w2 >> 24);
+#else /* not __ARMEB__ */
+ c1 = (char) t1;
+ c2 = (char) w2;
+#endif /* not __ARMEB__ */
+ t1 RSHIFT= 8;
+ w2 RSHIFT= 8;
+ } while (c1 != 0 && c1 == c2);
+ return c1 - c2;
+#endif /* 0 */
+
+
+ wp1 .req r0
+ wp2 .req r1
+ b1 .req r2
+ w1 .req r4
+ w2 .req r5
+ t1 .req ip
+ @ r3 is scratch
+
+ /* First of all, compare bytes until wp1(sp1) is word-aligned. */
1:
- tst wp1, #3
- beq 2f
- ldrb r2, [wp1], #1
- ldrb r3, [wp2], #1
- cmp r2, #1
- it cs
- cmpcs r2, r3
- beq 1b
- sub r0, r2, r3
- bx lr
+ tst wp1, #3
+ beq 2f
+ ldrb r2, [wp1], #1
+ ldrb r3, [wp2], #1
+ cmp r2, #1
+ it cs
+ cmpcs r2, r3
+ beq 1b
+ sub r0, r2, r3
+ RETURN
2:
- str r5, [sp, #-4]!
- str r4, [sp, #-4]!
- mov b1, #1
- orr b1, b1, b1, lsl #8
- orr b1, b1, b1, lsl #16
-
- and t1, wp2, #3
- bic wp2, wp2, #3
- ldr w1, [wp1], #4
- ldr w2, [wp2], #4
- cmp t1, #2
- beq 2f
- bhi 3f
-
- /* Critical inner Loop: Block with 3 bytes initial overlap */
- .p2align 2
+ str r5, [sp, #-4]!
+ str r4, [sp, #-4]!
+ //stmfd sp!, {r4, r5}
+ mov b1, #1
+ orr b1, b1, b1, lsl #8
+ orr b1, b1, b1, lsl #16
+
+ and t1, wp2, #3
+ bic wp2, wp2, #3
+ ldr w1, [wp1], #4
+ ldr w2, [wp2], #4
+ cmp t1, #2
+ beq 2f
+ bhi 3f
+
+ /* Critical inner Loop: Block with 3 bytes initial overlap */
+ .p2align 2
1:
- bic t1, w1, #MSB
- cmp t1, w2, SHFT2LSB #8
- sub r3, w1, b1
- bic r3, r3, w1
- bne 4f
- ands r3, r3, b1, lsl #7
- it eq
- ldreq w2, [wp2], #4
- bne 5f
- eor t1, t1, w1
- cmp t1, w2, SHFT2MSB #24
- bne 6f
- ldr w1, [wp1], #4
- b 1b
+ bic t1, w1, MSB
+ cmp t1, w2, S2LOMEM #8
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #24
+ bne 6f
+ ldr w1, [wp1], #4
+ b 1b
4:
- SHFT2LSB w2, w2, #8
- b 8f
+ S2LOMEM w2, w2, #8
+ b 8f
5:
#ifdef __ARMEB__
- /* The syndrome value may contain false ones if the string ends
- * with the bytes 0x01 0x00
- */
- tst w1, #0xff000000
- itt ne
- tstne w1, #0x00ff0000
- tstne w1, #0x0000ff00
- beq 7f
+ /* The syndrome value may contain false ones if the string ends
+ with the bytes 0x01 0x00 */
+ tst w1, #0xff000000
+ itt ne
+ tstne w1, #0x00ff0000
+ tstne w1, #0x0000ff00
+ beq 7f
#else
- bics r3, r3, #0xff000000
- bne 7f
+ bics r3, r3, #0xff000000
+ bne 7f
#endif
- ldrb w2, [wp2]
- SHFT2LSB t1, w1, #24
+ ldrb w2, [wp2]
+ S2LOMEM t1, w1, #24
#ifdef __ARMEB__
- lsl w2, w2, #24
+ lsl w2, w2, #24
#endif
- b 8f
+ b 8f
6:
- SHFT2LSB t1, w1, #24
- and w2, w2, #LSB
- b 8f
+ S2LOMEM t1, w1, #24
+ and w2, w2, LSB
+ b 8f
- /* Critical inner Loop: Block with 2 bytes initial overlap */
- .p2align 2
+ /* Critical inner Loop: Block with 2 bytes initial overlap */
+ .p2align 2
2:
- SHFT2MSB t1, w1, #16
- sub r3, w1, b1
- SHFT2LSB t1, t1, #16
- bic r3, r3, w1
- cmp t1, w2, SHFT2LSB #16
- bne 4f
- ands r3, r3, b1, lsl #7
- it eq
- ldreq w2, [wp2], #4
- bne 5f
- eor t1, t1, w1
- cmp t1, w2, SHFT2MSB #16
- bne 6f
- ldr w1, [wp1], #4
- b 2b
+ S2HIMEM t1, w1, #16
+ sub r3, w1, b1
+ S2LOMEM t1, t1, #16
+ bic r3, r3, w1
+ cmp t1, w2, S2LOMEM #16
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #16
+ bne 6f
+ ldr w1, [wp1], #4
+ b 2b
5:
#ifdef __ARMEB__
- /* The syndrome value may contain false ones if the string ends
- * with the bytes 0x01 0x00
- */
- tst w1, #0xff000000
- it ne
- tstne w1, #0x00ff0000
- beq 7f
+ /* The syndrome value may contain false ones if the string ends
+ with the bytes 0x01 0x00 */
+ tst w1, #0xff000000
+ it ne
+ tstne w1, #0x00ff0000
+ beq 7f
#else
- lsls r3, r3, #16
- bne 7f
+ lsls r3, r3, #16
+ bne 7f
#endif
- ldrh w2, [wp2]
- SHFT2LSB t1, w1, #16
+ ldrh w2, [wp2]
+ S2LOMEM t1, w1, #16
#ifdef __ARMEB__
- lsl w2, w2, #16
+ lsl w2, w2, #16
#endif
- b 8f
+ b 8f
6:
- SHFT2MSB w2, w2, #16
- SHFT2LSB t1, w1, #16
+ S2HIMEM w2, w2, #16
+ S2LOMEM t1, w1, #16
4:
- SHFT2LSB w2, w2, #16
- b 8f
+ S2LOMEM w2, w2, #16
+ b 8f
- /* Critical inner Loop: Block with 1 byte initial overlap */
- .p2align 2
+ /* Critical inner Loop: Block with 1 byte initial overlap */
+ .p2align 2
3:
- and t1, w1, #LSB
- cmp t1, w2, SHFT2LSB #24
- sub r3, w1, b1
- bic r3, r3, w1
- bne 4f
- ands r3, r3, b1, lsl #7
- it eq
- ldreq w2, [wp2], #4
- bne 5f
- eor t1, t1, w1
- cmp t1, w2, SHFT2MSB #8
- bne 6f
- ldr w1, [wp1], #4
- b 3b
+ and t1, w1, LSB
+ cmp t1, w2, S2LOMEM #24
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, S2HIMEM #8
+ bne 6f
+ ldr w1, [wp1], #4
+ b 3b
4:
- SHFT2LSB w2, w2, #24
- b 8f
+ S2LOMEM w2, w2, #24
+ b 8f
5:
- /* The syndrome value may contain false ones if the string ends
- * with the bytes 0x01 0x00
- */
- tst w1, #LSB
- beq 7f
- ldr w2, [wp2], #4
+ /* The syndrome value may contain false ones if the string ends
+ with the bytes 0x01 0x00 */
+ tst w1, LSB
+ beq 7f
+ ldr w2, [wp2], #4
6:
- SHFT2LSB t1, w1, #8
- bic w2, w2, #MSB
- b 8f
+ S2LOMEM t1, w1, #8
+ bic w2, w2, MSB
+ b 8f
7:
- mov r0, #0
- ldr r4, [sp], #4
- ldr r5, [sp], #4
- bx lr
-
+ mov r0, #0
+ //ldmfd sp!, {r4, r5}
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ RETURN
8:
- and r2, t1, #LSB
- and r0, w2, #LSB
- cmp r0, #1
- it cs
- cmpcs r0, r2
- itt eq
- SHFT2LSBEQ t1, t1, #8
- SHFT2LSBEQ w2, w2, #8
- beq 8b
- sub r0, r2, r0
- ldr r4, [sp], #4
- ldr r5, [sp], #4
- bx lr
-END(strcmp)
+ and r2, t1, LSB
+ and r0, w2, LSB
+ cmp r0, #1
+ it cs
+ cmpcs r0, r2
+ itt eq
+ S2LOMEMEQ t1, t1, #8
+ S2LOMEMEQ w2, w2, #8
+ beq 8b
+ sub r0, r2, r0
+ //ldmfd sp!, {r4, r5}
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ RETURN
+
+#endif /* !(defined (_ISA_THUMB_2) || defined (_ISA_ARM_6) ||
+ defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED) ||
+ (defined (__thumb__) && !defined (__thumb2__))) */
diff --git a/tests/string_benchmark.cpp b/tests/string_benchmark.cpp
index 7ac8e98..eab31ac 100644
--- a/tests/string_benchmark.cpp
+++ b/tests/string_benchmark.cpp
@@ -110,3 +110,24 @@ static void BM_strlen(int iters, int nbytes) {
delete[] s;
}
BENCHMARK(BM_strlen)->AT_COMMON_SIZES;
+
+static void BM_strcmp(int iters, int nbytes) {
+ StopBenchmarkTiming();
+ char* src = new char[nbytes];
+ char* dst = new char[nbytes];
+ memset(src, 'x', nbytes);
+ memset(dst, 'x', nbytes);
+ src[nbytes - 1] = '\0'; // Terminate both buffers so strcmp stays in bounds.
+ dst[nbytes - 1] = '\0';
+ StartBenchmarkTiming();
+
+ volatile int c __attribute__((unused)) = 0;
+ for (int i = 0; i < iters; i++) {
+ c += strcmp(dst, src);
+ }
+
+ StopBenchmarkTiming();
+ SetBenchmarkBytesProcessed(int64_t(iters) * int64_t(nbytes));
+ delete[] src;
+ delete[] dst;
+}
+BENCHMARK(BM_strcmp)->AT_COMMON_SIZES;