author     Christopher Ferris <cferris@google.com>   2013-08-21 09:41:12 -0700
committer  Christopher Ferris <cferris@google.com>   2013-08-28 15:42:05 -0700
commit     05332f2ce7e542d32ff4d5cd9f60248ad71fbf0d
tree       fd733a2427cea53f12496696b9d0a046af583b63   /libc/arch-arm/cortex-a15
parent     26ac803232540fc074709c75d17bc3ec0c03f2c4
Fix all debug directives.
The backtrace when a fortify check failed was not correct. This change
adds all of the necessary directives to get a correct backtrace.
Fix the strcmp directives and change all labels to local labels.
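To illustrate the pattern this change applies, here is a minimal sketch (example_func is hypothetical, not code from the patch): the directives describing saved registers must come after the push that saves them, and labels take the .L_ prefix so they stay local to the object file instead of becoming symbols.

    // Minimal sketch of the corrected pattern; example_func is hypothetical.
    ENTRY(example_func)
        .cfi_startproc
        push    {r0, lr}            // Save the registers first...
        .save   {r0, lr}            // ...then record the save for the EHABI unwinder
        .cfi_def_cfa_offset 8       // and tell DWARF the CFA moved by 8 bytes:
        .cfi_rel_offset r0, 0       // r0 is saved at CFA+0,
        .cfi_rel_offset lr, 4       // lr at CFA+4.

    .L_done:                        // .L_ labels never become symbols, so any
        pop     {r0, pc}            // fault in the body unwinds as example_func.
        .cfi_endproc
    END(example_func)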
Testing:
- Verify that the runtime can decode the stack for __memcpy_chk, __memset_chk,
__strcpy_chk, __strcat_chk fortify failures.
- Verify that gdb can decode the stack properly when hitting a fortify check.
- Verify that the runtime can decode the stack for a seg fault for all of the
_chk functions and for memcpy/memset.
- Verify that gdb can decode the stack for a seg fault for all of the _chk
functions and for memcpy/memset.
- Verify that the runtime can decode the stack for a seg fault for strcmp.
- Verify that gdb can decode the stack for a seg fault in strcmp.
Bug: 10342460
Bug: 10345269
Change-Id: I1dedadfee207dce4a285e17a21e8952bbc63786a
Diffstat (limited to 'libc/arch-arm/cortex-a15')
-rw-r--r--   libc/arch-arm/cortex-a15/bionic/__strcat_chk.S   29
-rw-r--r--   libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S   21
-rw-r--r--   libc/arch-arm/cortex-a15/bionic/memcpy.S         15
-rw-r--r--   libc/arch-arm/cortex-a15/bionic/memcpy_base.S    40
-rw-r--r--   libc/arch-arm/cortex-a15/bionic/strcmp.S         95
5 files changed, 122 insertions, 78 deletions
diff --git a/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S b/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S
index 4b125c8..4693600 100644
--- a/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S
+++ b/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S
@@ -40,13 +40,13 @@ ENTRY(__strcat_chk)
     .cfi_startproc
     pld     [r0, #0]
-    .save   {r0, lr}
     push    {r0, lr}
+    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
-    .save   {r4, r5}
     push    {r4, r5}
+    .save   {r4, r5}
     .cfi_adjust_cfa_offset 8
     .cfi_rel_offset r4, 0
     .cfi_rel_offset r5, 4
@@ -180,22 +180,31 @@ ENTRY(__strcat_chk)
 .L_strlen_done:
     add     r2, r3, r4
     cmp     r2, lr
-    bgt     .L_fortify_check_failed
+    bgt     __strcat_chk_failed

     // Set up the registers for the memcpy code.
     mov     r1, r5
     pld     [r1, #64]
     mov     r2, r4
     add     r0, r0, r3
-    .pad    #-8
     pop     {r4, r5}
-    .cfi_adjust_cfa_offset -8
-    .cfi_restore r4
-    .cfi_restore r5

-    #include "memcpy_base.S"
+    .cfi_endproc
+END(__strcat_chk)
+
+#define MEMCPY_BASE         __strcat_chk_memcpy_base
+#define MEMCPY_BASE_ALIGNED __strcat_chk_memcpy_base_aligned
+
+#include "memcpy_base.S"

-.L_fortify_check_failed:
+ENTRY(__strcat_chk_failed)
+    .cfi_startproc
+    .save   {r0, lr}
+    .save   {r4, r5}
+
+    .cfi_def_cfa_offset 8
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset lr, 4
     .cfi_adjust_cfa_offset 8
     .cfi_rel_offset r4, 0
     .cfi_rel_offset r5, 4
@@ -211,7 +220,7 @@ error_message:
     .word   error_string-(1b+4)
     .cfi_endproc
-END(__strcat_chk)
+END(__strcat_chk_failed)

     .data
 error_string:
diff --git a/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S b/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S
index a045816..1224b49 100644
--- a/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S
+++ b/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S
@@ -39,8 +39,8 @@ ENTRY(__strcpy_chk)
     .cfi_startproc
     pld     [r0, #0]
-    .save   {r0, lr}
     push    {r0, lr}
+    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
@@ -151,14 +151,25 @@ ENTRY(__strcpy_chk)
     pld     [r1, #64]
     ldr     r0, [sp]
     cmp     r3, lr
-    bge     .L_fortify_check_failed
+    bge     __strcpy_chk_failed

     // Add 1 for copy length to get the string terminator.
     add     r2, r3, #1

-    #include "memcpy_base.S"
+    .cfi_endproc
+END(__strcpy_chk)
+
+#define MEMCPY_BASE         __strcpy_chk_memcpy_base
+#define MEMCPY_BASE_ALIGNED __strcpy_chk_memcpy_base_aligned
+#include "memcpy_base.S"
+
+ENTRY(__strcpy_chk_failed)
+    .cfi_startproc
+    .save   {r0, lr}
+    .cfi_def_cfa_offset 8
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset lr, 4

-.L_fortify_check_failed:
     ldr     r0, error_message
     ldr     r1, error_code
1:
@@ -170,7 +181,7 @@ error_message:
     .word   error_string-(1b+4)
     .cfi_endproc
-END(__strcpy_chk)
+END(__strcpy_chk_failed)

     .data
 error_string:
diff --git a/libc/arch-arm/cortex-a15/bionic/memcpy.S b/libc/arch-arm/cortex-a15/bionic/memcpy.S
index 16881d4..a300e43 100644
--- a/libc/arch-arm/cortex-a15/bionic/memcpy.S
+++ b/libc/arch-arm/cortex-a15/bionic/memcpy.S
@@ -74,23 +74,24 @@ END(__memcpy_chk)
 ENTRY(memcpy)
     .cfi_startproc
     pld     [r1, #64]
-    .save   {r0, lr}
     push    {r0, lr}
+    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4

-    #include "memcpy_base.S"
-
     .cfi_endproc
 END(memcpy)

-    .fnstart
+#define MEMCPY_BASE         __memcpy_base
+#define MEMCPY_BASE_ALIGNED __memcpy_base_aligned
+#include "memcpy_base.S"
+
+ENTRY(__memcpy_chk_fail)
     .cfi_startproc
-__memcpy_chk_fail:
     // Preserve lr for backtrace.
-    .save   {lr}
     push    {lr}
+    .save   {lr}
     .cfi_def_cfa_offset 4
     .cfi_rel_offset lr, 0
@@ -104,7 +105,7 @@ error_code:
 error_message:
     .word   error_string-(1b+8)
     .cfi_endproc
-    .fnend
+END(__memcpy_chk_fail)

     .data
 error_string:
diff --git a/libc/arch-arm/cortex-a15/bionic/memcpy_base.S b/libc/arch-arm/cortex-a15/bionic/memcpy_base.S
index 647e065..0154676 100644
--- a/libc/arch-arm/cortex-a15/bionic/memcpy_base.S
+++ b/libc/arch-arm/cortex-a15/bionic/memcpy_base.S
@@ -53,6 +53,13 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

+ENTRY(MEMCPY_BASE)
+    .cfi_startproc
+    .save   {r0, lr}
+    .cfi_def_cfa_offset 8
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset lr, 4
+
 // Assumes that n >= 0, and dst, src are valid pointers.
 // For any sizes less than 832 use the neon code that doesn't
 // care about the src alignment. This avoids any checks
@@ -162,20 +169,34 @@
         ands    r3, r3, #0x3
         bne     .L_copy_unknown_alignment

+        .cfi_endproc
+END(MEMCPY_BASE)
+
+ENTRY(MEMCPY_BASE_ALIGNED)
+        .cfi_startproc
+        .save   {r0, lr}
+        .cfi_def_cfa_offset 8
+        .cfi_rel_offset r0, 0
+        .cfi_rel_offset lr, 4
+
         // To try and improve performance, stack layout changed,
         // i.e., not keeping the stack looking like users expect
         // (highest numbered register at highest address).
-        // TODO: Add debug frame directives.
-        // We don't need exception unwind directives, because the code below
-        // does not throw any exceptions and does not call any other functions.
-        // Generally, newlib functions like this lack debug information for
-        // assembler source.
-        .save   {r4, r5}
         strd    r4, r5, [sp, #-8]!
-        .save   {r6, r7}
+        .save   {r4, r5}
+        .cfi_adjust_cfa_offset 8
+        .cfi_rel_offset r4, 0
+        .cfi_rel_offset r5, 4
         strd    r6, r7, [sp, #-8]!
-        .save   {r8, r9}
+        .save   {r6, r7}
+        .cfi_adjust_cfa_offset 8
+        .cfi_rel_offset r6, 0
+        .cfi_rel_offset r7, 0
         strd    r8, r9, [sp, #-8]!
+        .save   {r8, r9}
+        .cfi_adjust_cfa_offset 8
+        .cfi_rel_offset r8, 0
+        .cfi_rel_offset r9, 4

         // Optimized for already aligned dst code.
         ands    ip, r0, #3
@@ -301,3 +322,6 @@

         // Src is guaranteed to be at least word aligned by this point.
         b       .L_word_aligned
+
+        .cfi_endproc
+END(MEMCPY_BASE_ALIGNED)
diff --git a/libc/arch-arm/cortex-a15/bionic/strcmp.S b/libc/arch-arm/cortex-a15/bionic/strcmp.S
index 2719bf7..13b329f 100644
--- a/libc/arch-arm/cortex-a15/bionic/strcmp.S
+++ b/libc/arch-arm/cortex-a15/bionic/strcmp.S
@@ -122,7 +122,6 @@ ENTRY(strcmp)

 .macro init
         /* Macro to save temporary registers and prepare magic values.  */
-        .save {r4-r7}
         subs    sp, sp, #16
         .cfi_def_cfa_offset 16
         strd    r4, r5, [sp, #8]
@@ -178,12 +177,13 @@ ENTRY(strcmp)
         /* Are both strings double-word aligned?  */
         orr     ip, r0, r1
         tst     ip, #7
-        bne     do_align
+        bne     .L_do_align

         /* Fast path.  */
+        .save   {r4-r7}
         init

-doubleword_aligned:
+.L_doubleword_aligned:

         /* Get here when the strings to compare are double-word aligned.  */
         /* Compare two words in every iteration.  */
@@ -196,14 +196,14 @@ doubleword_aligned:
         ldrd    r2, r3, [r0], #8
         ldrd    r4, r5, [r1], #8

-        magic_compare_and_branch w1=r2, w2=r4, label=return_24
-        magic_compare_and_branch w1=r3, w2=r5, label=return_35
+        magic_compare_and_branch w1=r2, w2=r4, label=.L_return_24
+        magic_compare_and_branch w1=r3, w2=r5, label=.L_return_35
         b       2b

-do_align:
+.L_do_align:
         /* Is the first string word-aligned?  */
         ands    ip, r0, #3
-        beq     word_aligned_r0
+        beq     .L_word_aligned_r0

         /* Fast compare byte by byte until the first string is word-aligned.  */
         /* The offset of r0 from a word boundary is in ip. Thus, the number of bytes
@@ -211,58 +211,58 @@ do_align:
         bic     r0, r0, #3
         ldr     r2, [r0], #4
         lsls    ip, ip, #31
-        beq     byte2
-        bcs     byte3
+        beq     .L_byte2
+        bcs     .L_byte3

-byte1:
+.L_byte1:
         ldrb    ip, [r1], #1
         uxtb    r3, r2, ror #BYTE1_OFFSET
         subs    ip, r3, ip
-        bne     fast_return
-        m_cbz   reg=r3, label=fast_return
+        bne     .L_fast_return
+        m_cbz   reg=r3, label=.L_fast_return

-byte2:
+.L_byte2:
         ldrb    ip, [r1], #1
         uxtb    r3, r2, ror #BYTE2_OFFSET
         subs    ip, r3, ip
-        bne     fast_return
-        m_cbz   reg=r3, label=fast_return
+        bne     .L_fast_return
+        m_cbz   reg=r3, label=.L_fast_return

-byte3:
+.L_byte3:
         ldrb    ip, [r1], #1
         uxtb    r3, r2, ror #BYTE3_OFFSET
         subs    ip, r3, ip
-        bne     fast_return
-        m_cbnz  reg=r3, label=word_aligned_r0
+        bne     .L_fast_return
+        m_cbnz  reg=r3, label=.L_word_aligned_r0

-fast_return:
+.L_fast_return:
         mov     r0, ip
         bx      lr

-word_aligned_r0:
+.L_word_aligned_r0:
         init
         /* The first string is word-aligned.  */
         /* Is the second string word-aligned?  */
         ands    ip, r1, #3
-        bne     strcmp_unaligned
+        bne     .L_strcmp_unaligned

-word_aligned:
+.L_word_aligned:
         /* The strings are word-aligned. */
         /* Is the first string double-word aligned?  */
         tst     r0, #4
-        beq     doubleword_aligned_r0
+        beq     .L_doubleword_aligned_r0

         /* If r0 is not double-word aligned yet, align it by loading
         and comparing the next word from each string.  */
         ldr     r2, [r0], #4
         ldr     r4, [r1], #4
-        magic_compare_and_branch w1=r2 w2=r4 label=return_24
+        magic_compare_and_branch w1=r2 w2=r4 label=.L_return_24

-doubleword_aligned_r0:
+.L_doubleword_aligned_r0:
         /* Get here when r0 is double-word aligned.  */
         /* Is r1 doubleword_aligned?  */
         tst     r1, #4
-        beq     doubleword_aligned
+        beq     .L_doubleword_aligned

         /* Get here when the strings to compare are word-aligned,
         r0 is double-word aligned, but r1 is not double-word aligned.  */
@@ -278,9 +278,9 @@ doubleword_aligned_r0:

         /* Load the next double-word from each string and compare.  */
         ldrd    r2, r3, [r0], #8
-        magic_compare_and_branch w1=r2 w2=r5 label=return_25
+        magic_compare_and_branch w1=r2 w2=r5 label=.L_return_25
         ldrd    r4, r5, [r1], #8
-        magic_compare_and_branch w1=r3 w2=r4 label=return_34
+        magic_compare_and_branch w1=r3 w2=r4 label=.L_return_34
         b       3b

 .macro miscmp_word offsetlo offsethi
@@ -304,47 +304,47 @@ doubleword_aligned_r0:
         and     r2, r3, r6, S2LOMEM #\offsetlo
         it      eq
         cmpeq   r2, r5
-        bne     return_25
+        bne     .L_return_25
         ldr     r5, [r1], #4
         cmp     ip, #0
         eor     r3, r2, r3
         S2HIMEM r2, r5, #\offsethi
         it      eq
         cmpeq   r3, r2
-        bne     return_32
+        bne     .L_return_32
         b       7b
 .endm /* miscmp_word */

-strcmp_unaligned:
+.L_strcmp_unaligned:
         /* r0 is word-aligned, r1 is at offset ip from a word.  */
         /* Align r1 to the (previous) word-boundary.  */
         bic     r1, r1, #3

         /* Unaligned comparison word by word using LDRs.  */
         cmp     ip, #2
-        beq     miscmp_word_16                    /* If ip == 2.  */
-        bge     miscmp_word_24                    /* If ip == 3.  */
+        beq     .L_miscmp_word_16                 /* If ip == 2.  */
+        bge     .L_miscmp_word_24                 /* If ip == 3.  */
         miscmp_word offsetlo=8 offsethi=24        /* If ip == 1.  */
-miscmp_word_16:  miscmp_word offsetlo=16 offsethi=16
-miscmp_word_24:  miscmp_word offsetlo=24 offsethi=8
+.L_miscmp_word_16:  miscmp_word offsetlo=16 offsethi=16
+.L_miscmp_word_24:  miscmp_word offsetlo=24 offsethi=8

-return_32:
+.L_return_32:
         setup_return w1=r3, w2=r2
-        b       do_return
-return_34:
+        b       .L_do_return
+.L_return_34:
         setup_return w1=r3, w2=r4
-        b       do_return
-return_25:
+        b       .L_do_return
+.L_return_25:
         setup_return w1=r2, w2=r5
-        b       do_return
-return_35:
+        b       .L_do_return
+.L_return_35:
         setup_return w1=r3, w2=r5
-        b       do_return
-return_24:
+        b       .L_do_return
+.L_return_24:
         setup_return w1=r2, w2=r4

-do_return:
+.L_do_return:

 #ifdef __ARMEB__
         mov     r0, ip
@@ -355,7 +355,6 @@ do_return:
         /* Restore temporaries early, before computing the return value.  */
         ldrd    r6, r7, [sp]
         ldrd    r4, r5, [sp, #8]
-        .pad    #-16
         adds    sp, sp, #16
         .cfi_def_cfa_offset 0
         .cfi_restore r4
@@ -366,7 +365,7 @@ do_return:
         /* There is a zero or a different byte between r1 and r2.  */
         /* r0 contains a mask of all-zero bytes in r1.  */
         /* Using r0 and not ip here because cbz requires low register.  */
-        m_cbz   reg=r0, label=compute_return_value
+        m_cbz   reg=r0, label=.L_compute_return_value
         clz     r0, r0
         /* r0 contains the number of bits on the left of the first all-zero byte in r1.  */
         rsb     r0, r0, #24
@@ -374,7 +373,7 @@ do_return:
         lsr     r1, r1, r0
         lsr     r2, r2, r0

-compute_return_value:
+.L_compute_return_value:
         movs    r0, #1
         cmp     r1, r2
         /* The return value is computed as follows.
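A note on the mechanism behind the memcpy_base.S restructuring, restated as a sketch (the names are the ones the patch uses for the __strcat_chk copy): the file now defines complete functions between ENTRY/END, and each includer #defines the function names before including it, stamping out a uniquely named copy that carries its own unwind information.

    // How one includer stamps out its copy (names from __strcat_chk.S above).
    #define MEMCPY_BASE         __strcat_chk_memcpy_base
    #define MEMCPY_BASE_ALIGNED __strcat_chk_memcpy_base_aligned
    #include "memcpy_base.S"
    // The include then expands to roughly:
    //   ENTRY(__strcat_chk_memcpy_base)   // a real symbol with its own
    //       .cfi_startproc                // .cfi_startproc/.cfi_endproc, so a
    //       ...                           // crash inside the copied code
    //       .cfi_endproc                  // unwinds to a correct backtrace
    //   END(__strcat_chk_memcpy_base)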