author | Varvara Rainchik <varvara.rainchik@intel.com> | 2014-05-27 12:41:55 +0400
committer | Christopher Ferris <cferris@google.com> | 2014-06-05 11:08:09 -0700
commit | fce861498c8c4720c6ad2475a73bb4c3e55d6948 (patch)
tree | 4496b97573a1d8c1e1361a76aabcac63c90513c6 /libc/arch-x86_64/string/sse2-memmove-slm.S
parent | 431aa4dc9282e23231ba9243f43fd3d49b5f88b3 (diff)
Fix for slm-tuned memmove (both 32- and 64-bit).
Introduce a test for memmove that catches a fault.
Fix both 32- and 64-bit versions of slm-tuned memmove.
Change-Id: Ib416def2610a0972e32c3b9b6055b54967643dc3
Signed-off-by: Varvara Rainchik <varvara.rainchik@intel.com>
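
The test referred to above lives in bionic's test suite and is not part of the diff shown below, which is limited to the 64-bit assembly file. As a rough, hypothetical sketch only (none of these names come from the commit), a guard-page arrangement like the following is one way an overlapping memmove that reads or writes out of bounds can be made to fault immediately instead of corrupting memory silently:

    /* Hypothetical sketch, not the test added by this change: place the end
     * of the copy right against an unmapped guard page so that any access
     * past the buffer faults at once. */
    #include <assert.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        /* Two pages: the first is readable/writable, the second is a guard. */
        char *base = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED) return 1;
        if (mprotect(base + page, page, PROT_NONE) != 0) return 1;

        /* Overlapping copies whose destination ends exactly at the guard page. */
        for (size_t len = 1; len <= 256; ++len) {
            for (size_t overlap = 1; overlap < len; ++overlap) {
                char *dst = base + page - len;   /* dst[len-1] is the last valid byte */
                char *src = dst - overlap;       /* source overlaps the destination */
                for (size_t i = 0; i < len + overlap; ++i)
                    src[i] = (char)(i & 0xff);
                memmove(dst, src, len);          /* an over-read/over-write would fault */
                for (size_t i = 0; i < len; ++i)
                    assert(dst[i] == (char)(i & 0xff));
            }
        }
        munmap(base, 2 * page);
        return 0;
    }

This loop only drives the backward-copy path (destination above the source); a mirrored loop with the source above the destination would exercise the forward path as well.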
Diffstat (limited to 'libc/arch-x86_64/string/sse2-memmove-slm.S')
-rw-r--r-- | libc/arch-x86_64/string/sse2-memmove-slm.S | 330
1 file changed, 104 insertions(+), 226 deletions(-)
diff --git a/libc/arch-x86_64/string/sse2-memmove-slm.S b/libc/arch-x86_64/string/sse2-memmove-slm.S
index ee8440e..0dbffad 100644
--- a/libc/arch-x86_64/string/sse2-memmove-slm.S
+++ b/libc/arch-x86_64/string/sse2-memmove-slm.S
@@ -99,7 +99,7 @@ ENTRY (MEMMOVE)
/* Check whether we should copy backward or forward. */
  cmp %rsi, %rdi
  je L(mm_return)
- ja L(mm_len_0_or_more_backward)
+ jg L(mm_len_0_or_more_backward)
/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
  separately. */
@@ -107,7 +107,7 @@ ENTRY (MEMMOVE)
  jbe L(mm_len_0_16_bytes_forward)
  cmp $32, %rdx
- jg L(mm_len_32_or_more_forward)
+ ja L(mm_len_32_or_more_forward)
/* Copy [0..32] and return. */
  movdqu (%rsi), %xmm0
@@ -118,7 +118,7 @@ L(mm_len_32_or_more_forward):
  cmp $64, %rdx
- jg L(mm_len_64_or_more_forward)
+ ja L(mm_len_64_or_more_forward)
/* Copy [0..64] and return. */
  movdqu (%rsi), %xmm0
@@ -133,7 +133,7 @@ L(mm_len_32_or_more_forward):
  cmp $128, %rdx
- jg L(mm_len_128_or_more_forward)
+ ja L(mm_len_128_or_more_forward)
/* Copy [0..128] and return. */
  movdqu (%rsi), %xmm0
@@ -155,13 +155,6 @@ L(mm_len_64_or_more_forward):
  jmp L(mm_return)
L(mm_len_128_or_more_forward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %rdx
- jae L(mm_large_page_forward)
-
- mov %rsi, %r8 // copy src to r8
- mov %rdi, %r9 // copy dst to r9
-
/* Aligning the address of destination. */
/* save first unaligned 64 bytes */
  movdqu (%rsi), %xmm0
@@ -169,56 +162,57 @@ L(mm_len_128_or_more_forward):
  movdqu 32(%rsi), %xmm2
  movdqu 48(%rsi), %xmm3
- lea 64(%r9), %rdi
- and $-64, %rdi /* rdi now aligned to next 64 byte boundary */
+ lea 64(%rdi), %r8
+ and $-64, %r8 /* r8 now aligned to next 64 byte boundary */
+ sub %rdi, %rsi /* rsi = src - dst = diff */
- sub %r9, %rsi /* rsi = src - dst = diff */
+ movdqu (%r8, %rsi), %xmm4
+ movdqu 16(%r8, %rsi), %xmm5
+ movdqu 32(%r8, %rsi), %xmm6
+ movdqu 48(%r8, %rsi), %xmm7
- movdqu (%rdi, %rsi), %xmm4
- movdqu 16(%rdi, %rsi), %xmm5
- movdqu 32(%rdi, %rsi), %xmm6
- movdqu 48(%rdi, %rsi), %xmm7
-
- movdqu %xmm0, (%r9)
- movdqu %xmm1, 16(%r9)
- movdqu %xmm2, 32(%r9)
- movdqu %xmm3, 48(%r9)
- movdqa %xmm4, (%rdi)
- movdqa %xmm5, 16(%rdi)
- movdqa %xmm6, 32(%rdi)
- movdqa %xmm7, 48(%rdi)
- add $64, %rdi
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, 16(%rdi)
+ movdqu %xmm2, 32(%rdi)
+ movdqu %xmm3, 48(%rdi)
+ movdqa %xmm4, (%r8)
+ movaps %xmm5, 16(%r8)
+ movaps %xmm6, 32(%r8)
+ movaps %xmm7, 48(%r8)
+ add $64, %r8
- lea (%r9, %rdx), %rbx
+ lea (%rdi, %rdx), %rbx
  and $-64, %rbx
-
- cmp %rdi, %rbx
+ cmp %r8, %rbx
  jbe L(mm_copy_remaining_forward)
+ cmp $SHARED_CACHE_SIZE_HALF, %rdx
+ jae L(mm_large_page_loop_forward)
+
  .p2align 4
L(mm_main_loop_forward):
- prefetcht0 128(%rdi, %rsi)
-
- movdqu (%rdi, %rsi), %xmm0
- movdqu 16(%rdi, %rsi), %xmm1
- movdqu 32(%rdi, %rsi), %xmm2
- movdqu 48(%rdi, %rsi), %xmm3
- movdqa %xmm0, (%rdi)
- movdqa %xmm1, 16(%rdi)
- movdqa %xmm2, 32(%rdi)
- movdqa %xmm3, 48(%rdi)
- lea 64(%rdi), %rdi
- cmp %rdi, %rbx
+ prefetcht0 128(%r8, %rsi)
+
+ movdqu (%r8, %rsi), %xmm0
+ movdqu 16(%r8, %rsi), %xmm1
+ movdqu 32(%r8, %rsi), %xmm2
+ movdqu 48(%r8, %rsi), %xmm3
+ movdqa %xmm0, (%r8)
+ movaps %xmm1, 16(%r8)
+ movaps %xmm2, 32(%r8)
+ movaps %xmm3, 48(%r8)
+ lea 64(%r8), %r8
+ cmp %r8, %rbx
  ja L(mm_main_loop_forward)
L(mm_copy_remaining_forward):
- add %r9, %rdx
- sub %rdi, %rdx
+ add %rdi, %rdx
+ sub %r8, %rdx
/* We copied all up till %rdi position in the dst.
  In %rdx now is how many bytes are left to copy.
  Now we need to advance %r8.
*/
- lea (%rdi, %rsi), %r8
+ lea (%r8, %rsi), %r9
L(mm_remaining_0_64_bytes_forward):
  cmp $32, %rdx
@@ -237,49 +231,49 @@ L(mm_remaining_0_64_bytes_forward):
  cmpb $2, %dl
  .p2align 4,,1
  ja L(mm_remaining_3_4_bytes_forward)
- movzbl -1(%r8,%rdx), %esi
- movzbl (%r8), %ebx
- movb %sil, -1(%rdi,%rdx)
- movb %bl, (%rdi)
+ movzbl -1(%r9,%rdx), %esi
+ movzbl (%r9), %ebx
+ movb %sil, -1(%r8,%rdx)
+ movb %bl, (%r8)
  jmp L(mm_return)
L(mm_remaining_33_64_bytes_forward):
- movdqu (%r8), %xmm0
- movdqu 16(%r8), %xmm1
- movdqu -32(%r8, %rdx), %xmm2
- movdqu -16(%r8, %rdx), %xmm3
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, 16(%rdi)
- movdqu %xmm2, -32(%rdi, %rdx)
- movdqu %xmm3, -16(%rdi, %rdx)
+ movdqu (%r9), %xmm0
+ movdqu 16(%r9), %xmm1
+ movdqu -32(%r9, %rdx), %xmm2
+ movdqu -16(%r9, %rdx), %xmm3
+ movdqu %xmm0, (%r8)
+ movdqu %xmm1, 16(%r8)
+ movdqu %xmm2, -32(%r8, %rdx)
+ movdqu %xmm3, -16(%r8, %rdx)
  jmp L(mm_return)
L(mm_remaining_17_32_bytes_forward):
- movdqu (%r8), %xmm0
- movdqu -16(%r8, %rdx), %xmm1
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, -16(%rdi, %rdx)
- jmp L(mm_return)
-
-L(mm_remaining_3_4_bytes_forward):
- movzwl -2(%r8,%rdx), %esi
- movzwl (%r8), %ebx
- movw %si, -2(%rdi,%rdx)
- movw %bx, (%rdi)
+ movdqu (%r9), %xmm0
+ movdqu -16(%r9, %rdx), %xmm1
+ movdqu %xmm0, (%r8)
+ movdqu %xmm1, -16(%r8, %rdx)
  jmp L(mm_return)
L(mm_remaining_5_8_bytes_forward):
- movl (%r8), %esi
- movl -4(%r8,%rdx), %ebx
- movl %esi, (%rdi)
- movl %ebx, -4(%rdi,%rdx)
+ movl (%r9), %esi
+ movl -4(%r9,%rdx), %ebx
+ movl %esi, (%r8)
+ movl %ebx, -4(%r8,%rdx)
  jmp L(mm_return)
L(mm_remaining_9_16_bytes_forward):
- mov (%r8), %rsi
- mov -8(%r8, %rdx), %rbx
- mov %rsi, (%rdi)
- mov %rbx, -8(%rdi, %rdx)
+ mov (%r9), %rsi
+ mov -8(%r9, %rdx), %rbx
+ mov %rsi, (%r8)
+ mov %rbx, -8(%r8, %rdx)
+ jmp L(mm_return)
+
+L(mm_remaining_3_4_bytes_forward):
+ movzwl -2(%r9,%rdx), %esi
+ movzwl (%r9), %ebx
+ movw %si, -2(%r8,%rdx)
+ movw %bx, (%r8)
  jmp L(mm_return)
L(mm_len_0_16_bytes_forward):
@@ -321,16 +315,21 @@ L(mm_len_9_16_bytes_forward):
  mov %rsi, -8(%rdi, %rdx)
  jmp L(mm_return)
+L(mm_recalc_len):
+/* Compute in %rdx how many bytes are left to copy after
+ the main loop stops. */
+ mov %rbx, %rdx
+ sub %rdi, %rdx
/* The code for copying backwards. */
L(mm_len_0_or_more_backward):
-/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+/* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
  separately. */
  cmp $16, %rdx
  jbe L(mm_len_0_16_bytes_backward)
  cmp $32, %rdx
- jg L(mm_len_32_or_more_backward)
+ ja L(mm_len_32_or_more_backward)
/* Copy [0..32] and return. */
  movdqu (%rsi), %xmm0
@@ -341,7 +340,7 @@ L(mm_len_0_or_more_backward):
L(mm_len_32_or_more_backward):
  cmp $64, %rdx
- jg L(mm_len_64_or_more_backward)
+ ja L(mm_len_64_or_more_backward)
/* Copy [0..64] and return. */
  movdqu (%rsi), %xmm0
@@ -356,7 +355,7 @@ L(mm_len_32_or_more_backward):
  cmp $128, %rdx
- jg L(mm_len_128_or_more_backward)
+ ja L(mm_len_128_or_more_backward)
/* Copy [0..128] and return. */
  movdqu (%rsi), %xmm0
@@ -378,10 +377,6 @@ L(mm_len_64_or_more_backward):
  jmp L(mm_return)
L(mm_len_128_or_more_backward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %rdx
- jae L(mm_large_page_backward)
-
/* Aligning the address of destination. We need to save
  16 bits from the source in order not to overwrite them.
*/
  movdqu -16(%rsi, %rdx), %xmm0
@@ -405,22 +400,19 @@ L(mm_len_128_or_more_backward):
  movdqu %xmm2, -48(%rdi, %rdx)
  movdqu %xmm3, -64(%rdi, %rdx)
  movdqa %xmm4, -16(%r9)
- movdqa %xmm5, -32(%r9)
- movdqa %xmm6, -48(%r9)
- movdqa %xmm7, -64(%r9)
+ movaps %xmm5, -32(%r9)
+ movaps %xmm6, -48(%r9)
+ movaps %xmm7, -64(%r9)
  lea -64(%r9), %r9
  lea 64(%rdi), %rbx
  and $-64, %rbx
-/* Compute in %rdx how many bytes are left to copy after
- the main loop stops. */
- mov %rbx, %rdx
- sub %rdi, %rdx
-
  cmp %r9, %rbx
- jb L(mm_main_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ jae L(mm_recalc_len)
+
+ cmp $SHARED_CACHE_SIZE_HALF, %rdx
+ jae L(mm_large_page_loop_backward)
  .p2align 4
L(mm_main_loop_backward):
@@ -432,13 +424,13 @@ L(mm_main_loop_backward):
  movdqu -32(%r9, %r8), %xmm2
  movdqu -16(%r9, %r8), %xmm3
  movdqa %xmm0, -64(%r9)
- movdqa %xmm1, -48(%r9)
- movdqa %xmm2, -32(%r9)
- movdqa %xmm3, -16(%r9)
+ movaps %xmm1, -48(%r9)
+ movaps %xmm2, -32(%r9)
+ movaps %xmm3, -16(%r9)
  lea -64(%r9), %r9
  cmp %r9, %rbx
  jb L(mm_main_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
/* Copy [0..16] and return. */
L(mm_len_0_16_bytes_backward):
@@ -485,138 +477,23 @@ L(mm_return):
/* Big length copy forward part. */
-L(mm_large_page_forward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- mov %rsi, %r8
- mov %rdi, %r9
-
- movdqu (%rsi), %xmm0
- movdqu 16(%rsi), %xmm1
- movdqu 32(%rsi), %xmm2
- movdqu 48(%rsi), %xmm3
-
- lea 64(%r9), %rdi
- and $-64, %rdi /* rdi = aligned dst */
-
- sub %r9, %rsi /* rsi = diff */
-
- movdqu (%rdi, %rsi), %xmm4
- movdqu 16(%rdi, %rsi), %xmm5
- movdqu 32(%rdi, %rsi), %xmm6
- movdqu 48(%rdi, %rsi), %xmm7
-
- movdqu %xmm0, (%r9)
- movdqu %xmm1, 16(%r9)
- movdqu %xmm2, 32(%r9)
- movdqu %xmm3, 48(%r9)
- movntdq %xmm4, (%rdi)
- movntdq %xmm5, 16(%rdi)
- movntdq %xmm6, 32(%rdi)
- movntdq %xmm7, 48(%rdi)
- add $64, %rdi
-
- lea (%r9, %rdx), %rbx
- and $-128, %rbx
-
- cmp %rdi, %rbx
- jbe L(mm_copy_remaining_forward)
-
  .p2align 4
L(mm_large_page_loop_forward):
- movdqu (%rdi, %rsi), %xmm0
- movdqu 16(%rdi, %rsi), %xmm1
- movdqu 32(%rdi, %rsi), %xmm2
- movdqu 48(%rdi, %rsi), %xmm3
- movdqu 64(%rdi, %rsi), %xmm4
- movdqu 80(%rdi, %rsi), %xmm5
- movdqu 96(%rdi, %rsi), %xmm6
- movdqu 112(%rdi, %rsi), %xmm7
- movntdq %xmm0, (%rdi)
- movntdq %xmm1, 16(%rdi)
- movntdq %xmm2, 32(%rdi)
- movntdq %xmm3, 48(%rdi)
- movntdq %xmm4, 64(%rdi)
- movntdq %xmm5, 80(%rdi)
- movntdq %xmm6, 96(%rdi)
- movntdq %xmm7, 112(%rdi)
- lea 128(%rdi), %rdi
- cmp %rdi, %rbx
+ movdqu (%r8, %rsi), %xmm0
+ movdqu 16(%r8, %rsi), %xmm1
+ movdqu 32(%r8, %rsi), %xmm2
+ movdqu 48(%r8, %rsi), %xmm3
+ movntdq %xmm0, (%r8)
+ movntdq %xmm1, 16(%r8)
+ movntdq %xmm2, 32(%r8)
+ movntdq %xmm3, 48(%r8)
+ lea 64(%r8), %r8
+ cmp %r8, %rbx
  ja L(mm_large_page_loop_forward)
  sfence
-
- add %r9, %rdx
- sub %rdi, %rdx
-/* We copied all up till %rdi position in the dst.
- In %rdx now is how many bytes are left to copy.
- Now we need to advance %r8.
*/
- lea (%rdi, %rsi), %r8
-
- cmp $64, %rdx
- jb L(mm_remaining_0_64_bytes_forward)
-
- movdqu (%r8), %xmm0
- movdqu 16(%r8), %xmm1
- movdqu 32(%r8), %xmm2
- movdqu 48(%r8), %xmm3
- movdqu -64(%r8, %rdx), %xmm4
- movdqu -48(%r8, %rdx), %xmm5
- movdqu -32(%r8, %rdx), %xmm6
- movdqu -16(%r8, %rdx), %xmm7
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, 16(%rdi)
- movdqu %xmm2, 32(%rdi)
- movdqu %xmm3, 48(%rdi)
- movdqu %xmm4, -64(%rdi, %rdx)
- movdqu %xmm5, -48(%rdi, %rdx)
- movdqu %xmm6, -32(%rdi, %rdx)
- movdqu %xmm7, -16(%rdi, %rdx)
- jmp L(mm_return)
-
+ jmp L(mm_copy_remaining_forward)
/* Big length copy backward part. */
-L(mm_large_page_backward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- movdqu -16(%rsi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- movdqu -48(%rsi, %rdx), %xmm2
- movdqu -64(%rsi, %rdx), %xmm3
-
- lea (%rdi, %rdx), %r9
- and $-64, %r9
-
- mov %rsi, %r8
- sub %rdi, %r8
-
- movdqu -16(%r9, %r8), %xmm4
- movdqu -32(%r9, %r8), %xmm5
- movdqu -48(%r9, %r8), %xmm6
- movdqu -64(%r9, %r8), %xmm7
-
- movdqu %xmm0, -16(%rdi, %rdx)
- movdqu %xmm1, -32(%rdi, %rdx)
- movdqu %xmm2, -48(%rdi, %rdx)
- movdqu %xmm3, -64(%rdi, %rdx)
- movntdq %xmm4, -16(%r9)
- movntdq %xmm5, -32(%r9)
- movntdq %xmm6, -48(%r9)
- movntdq %xmm7, -64(%r9)
- lea -64(%r9), %r9
-
- lea 128(%rdi), %rbx
- and $-64, %rbx
-
-/* Compute in %rdx how many bytes are left to copy after
- the main loop stops. */
- mov %rbx, %rdx
- sub %rdi, %rdx
-
- cmp %r9, %rbx
- jae L(mm_len_0_or_more_backward)
-
  .p2align 4
L(mm_large_page_loop_backward):
  movdqu -64(%r9, %r8), %xmm0
@@ -630,6 +507,7 @@ L(mm_large_page_loop_backward):
  lea -64(%r9), %r9
  cmp %r9, %rbx
  jb L(mm_large_page_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ sfence
+ jmp L(mm_recalc_len)
END (MEMMOVE)
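
A side note on the recurring jump-condition changes in the hunks above (editor commentary, not part of the patch): the length in %rdx is an unsigned size_t, so the size checks move from jg, which treats the comparison as signed, to ja, which treats it as unsigned, while the dst/src pointer check goes the other way. A minimal C sketch of the signed-versus-unsigned distinction that jg and ja encode:

    /* Illustration only; not part of the commit. It mirrors the jg (signed)
     * vs ja (unsigned) distinction for a size_t length on a typical
     * two's-complement 64-bit system. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        size_t len = (size_t)1 << 63;   /* length with the high bit set */

        /* Unsigned compare, the semantics of `ja`: the length is huge. */
        printf("unsigned: len > 128 -> %d\n", len > 128);           /* prints 1 */

        /* Signed compare, the semantics of `jg`: the same bits look negative,
         * so a "small copy" branch would be taken by mistake. */
        printf("signed:   len > 128 -> %d\n", (int64_t)len > 128);  /* prints 0 */
        return 0;
    }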