author     Adam Langley <agl@google.com>   2015-05-11 17:20:37 -0700
committer  Kenny Root <kroot@google.com>   2015-05-12 23:06:14 +0000
commit     e9ada863a7b3e81f5d2b1e3bdd2305da902a87f5 (patch)
tree       6e43e34595ecf887c26c32b86d8ab097fe8cac64 /linux-arm/crypto/sha
parent     b3106a0cc1493bbe0505c0ec0ce3da4ca90a29ae (diff)
download   external_boringssl-e9ada863a7b3e81f5d2b1e3bdd2305da902a87f5.zip
           external_boringssl-e9ada863a7b3e81f5d2b1e3bdd2305da902a87f5.tar.gz
           external_boringssl-e9ada863a7b3e81f5d2b1e3bdd2305da902a87f5.tar.bz2
external/boringssl: bump revision.
This change bumps the BoringSSL revision to the current tip-of-tree.

Change-Id: I91d5bf467e16e8d86cb19a4de873985f524e5faa
Diffstat (limited to 'linux-arm/crypto/sha')
-rw-r--r--  linux-arm/crypto/sha/sha1-armv4-large.S   248
-rw-r--r--  linux-arm/crypto/sha/sha256-armv4.S       422
-rw-r--r--  linux-arm/crypto/sha/sha512-armv4.S      1457
3 files changed, 1171 insertions, 956 deletions
diff --git a/linux-arm/crypto/sha/sha1-armv4-large.S b/linux-arm/crypto/sha/sha1-armv4-large.S
index 5abc328..52c99bf 100644
--- a/linux-arm/crypto/sha/sha1-armv4-large.S
+++ b/linux-arm/crypto/sha/sha1-armv4-large.S
@@ -3,7 +3,7 @@
.text
.code 32
-.global sha1_block_data_order
+.globl sha1_block_data_order
.type sha1_block_data_order,%function
.align 5
@@ -12,12 +12,15 @@ sha1_block_data_order:
sub r3,pc,#8 @ sha1_block_data_order
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
+#ifdef __APPLE__
+ ldr r12,[r12]
+#endif
tst r12,#ARMV8_SHA1
bne .LARMv8
tst r12,#ARMV7_NEON
bne .LNEON
#endif
- stmdb sp!,{r4-r12,lr}
+ stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
@@ -193,7 +196,7 @@ sha1_block_data_order:
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r3,r10,ror#2 @ F_xx_xx
+ and r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
@@ -210,7 +213,7 @@ sha1_block_data_order:
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r7,r10,ror#2 @ F_xx_xx
+ and r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
@@ -227,7 +230,7 @@ sha1_block_data_order:
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r6,r10,ror#2 @ F_xx_xx
+ and r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
@@ -244,7 +247,7 @@ sha1_block_data_order:
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r5,r10,ror#2 @ F_xx_xx
+ and r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
@@ -265,7 +268,7 @@ sha1_block_data_order:
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- eor r10,r4,r10,ror#2 @ F_xx_xx
+ eor r10,r4,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_20_39(B,C,D)
@@ -281,7 +284,7 @@ sha1_block_data_order:
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- eor r10,r3,r10,ror#2 @ F_xx_xx
+ eor r10,r3,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_20_39(B,C,D)
@@ -297,7 +300,7 @@ sha1_block_data_order:
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- eor r10,r7,r10,ror#2 @ F_xx_xx
+ eor r10,r7,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_20_39(B,C,D)
@@ -313,7 +316,7 @@ sha1_block_data_order:
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- eor r10,r6,r10,ror#2 @ F_xx_xx
+ eor r10,r6,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_20_39(B,C,D)
@@ -329,7 +332,7 @@ sha1_block_data_order:
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- eor r10,r5,r10,ror#2 @ F_xx_xx
+ eor r10,r5,r10,ror#2 @ F_xx_xx
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
@@ -352,8 +355,8 @@ sha1_block_data_order:
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r4,r10,ror#2 @ F_xx_xx
- and r11,r5,r6 @ F_xx_xx
+ and r10,r4,r10,ror#2 @ F_xx_xx
+ and r11,r5,r6 @ F_xx_xx
add r7,r7,r9 @ E+=X[i]
add r7,r7,r10 @ E+=F_40_59(B,C,D)
add r7,r7,r11,ror#2
@@ -369,8 +372,8 @@ sha1_block_data_order:
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r3,r10,ror#2 @ F_xx_xx
- and r11,r4,r5 @ F_xx_xx
+ and r10,r3,r10,ror#2 @ F_xx_xx
+ and r11,r4,r5 @ F_xx_xx
add r6,r6,r9 @ E+=X[i]
add r6,r6,r10 @ E+=F_40_59(B,C,D)
add r6,r6,r11,ror#2
@@ -386,8 +389,8 @@ sha1_block_data_order:
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r7,r10,ror#2 @ F_xx_xx
- and r11,r3,r4 @ F_xx_xx
+ and r10,r7,r10,ror#2 @ F_xx_xx
+ and r11,r3,r4 @ F_xx_xx
add r5,r5,r9 @ E+=X[i]
add r5,r5,r10 @ E+=F_40_59(B,C,D)
add r5,r5,r11,ror#2
@@ -403,8 +406,8 @@ sha1_block_data_order:
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r6,r10,ror#2 @ F_xx_xx
- and r11,r7,r3 @ F_xx_xx
+ and r10,r6,r10,ror#2 @ F_xx_xx
+ and r11,r7,r3 @ F_xx_xx
add r4,r4,r9 @ E+=X[i]
add r4,r4,r10 @ E+=F_40_59(B,C,D)
add r4,r4,r11,ror#2
@@ -420,8 +423,8 @@ sha1_block_data_order:
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
eor r9,r9,r11,ror#31
str r9,[r14,#-4]!
- and r10,r5,r10,ror#2 @ F_xx_xx
- and r11,r6,r7 @ F_xx_xx
+ and r10,r5,r10,ror#2 @ F_xx_xx
+ and r11,r6,r7 @ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
@@ -445,25 +448,26 @@ sha1_block_data_order:
bne .Lloop @ [+18], total 1307
#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
- ldmia sp!,{r4-r12,lr}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha1_block_data_order,.-sha1_block_data_order
.align 5
-.LK_00_19: .word 0x5a827999
-.LK_20_39: .word 0x6ed9eba1
-.LK_40_59: .word 0x8f1bbcdc
-.LK_60_79: .word 0xca62c1d6
+.LK_00_19:.word 0x5a827999
+.LK_20_39:.word 0x6ed9eba1
+.LK_40_59:.word 0x8f1bbcdc
+.LK_60_79:.word 0xca62c1d6
#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-sha1_block_data_order
#endif
-.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.align 2
.align 5
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
@@ -473,7 +477,7 @@ sha1_block_data_order:
.align 4
sha1_block_data_order_neon:
.LNEON:
- stmdb sp!,{r4-r12,lr}
+ stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
@ dmb @ errata #451034 on early Cortex A8
@ vstmdb sp!,{d8-d15} @ ABI specification says so
@@ -485,21 +489,21 @@ sha1_block_data_order_neon:
ldmia r0,{r3,r4,r5,r6,r7} @ load context
mov r12,sp
- vld1.8 {q0-q1},[r1]! @ handles unaligned
- veor q15,q15,q15
- vld1.8 {q2-q3},[r1]!
- vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
+ vld1.8 {q0,q1},[r1]! @ handles unaligned
+ veor q15,q15,q15
+ vld1.8 {q2,q3},[r1]!
+ vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
vrev32.8 q0,q0 @ yes, even on
vrev32.8 q1,q1 @ big-endian...
vrev32.8 q2,q2
vadd.i32 q8,q0,q14
vrev32.8 q3,q3
vadd.i32 q9,q1,q14
- vst1.32 {q8},[r12,:128]!
+ vst1.32 {q8},[r12,:128]!
vadd.i32 q10,q2,q14
- vst1.32 {q9},[r12,:128]!
- vst1.32 {q10},[r12,:128]!
- ldr r9,[sp] @ big RAW stall
+ vst1.32 {q9},[r12,:128]!
+ vst1.32 {q10},[r12,:128]!
+ ldr r9,[sp] @ big RAW stall
.Loop_neon:
vext.8 q8,q0,q1,#8
@@ -1177,10 +1181,10 @@ sha1_block_data_order_neon:
teq r1,r2
sub r8,r8,#16
subeq r1,r1,#64
- vld1.8 {q0-q1},[r1]!
+ vld1.8 {q0,q1},[r1]!
ldr r9,[sp,#4]
eor r11,r10,r6
- vld1.8 {q2-q3},[r1]!
+ vld1.8 {q2,q3},[r1]!
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
@@ -1313,7 +1317,7 @@ sha1_block_data_order_neon:
bne .Loop_neon
@ vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r12,pc}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
#endif
#if __ARM_MAX_ARCH__>=7
@@ -1321,7 +1325,7 @@ sha1_block_data_order_neon:
.align 5
sha1_block_data_order_armv8:
.LARMv8:
- vstmdb sp!,{d8-d15} @ ABI specification says so
+ vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
veor q1,q1,q1
adr r3,.LK_00_19
@@ -1334,119 +1338,119 @@ sha1_block_data_order_armv8:
vld1.32 {d22[],d23[]},[r3,:32]
.Loop_v8:
- vld1.8 {q4-q5},[r1]!
- vld1.8 {q6-q7},[r1]!
+ vld1.8 {q4,q5},[r1]!
+ vld1.8 {q6,q7},[r1]!
vrev32.8 q4,q4
vrev32.8 q5,q5
vadd.i32 q12,q8,q4
vrev32.8 q6,q6
- vmov q14,q0 @ offload
- subs r2,r2,#1
+ vmov q14,q0 @ offload
+ subs r2,r2,#1
vadd.i32 q13,q8,q5
vrev32.8 q7,q7
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 0
- .byte 0x68,0x0c,0x02,0xf2 @ sha1c q0,q1,q12
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 0
+.byte 0x68,0x0c,0x02,0xf2 @ sha1c q0,q1,q12
vadd.i32 q12,q8,q6
- .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 1
- .byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
+.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 1
+.byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
vadd.i32 q13,q8,q7
- .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
- .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 2
- .byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
+.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 2
+.byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
vadd.i32 q12,q8,q4
- .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
- .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 3
- .byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
+.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 3
+.byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
vadd.i32 q13,q9,q5
- .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
- .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 4
- .byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
+.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 4
+.byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
vadd.i32 q12,q9,q6
- .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
- .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 5
- .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 5
+.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
vadd.i32 q13,q9,q7
- .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
- .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 6
- .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 6
+.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
vadd.i32 q12,q9,q4
- .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
- .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 7
- .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 7
+.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
vadd.i32 q13,q9,q5
- .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
- .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 8
- .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 8
+.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
vadd.i32 q12,q10,q6
- .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
- .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 9
- .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 9
+.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
vadd.i32 q13,q10,q7
- .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
- .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 10
- .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
+.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 10
+.byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
vadd.i32 q12,q10,q4
- .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
- .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 11
- .byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
+.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 11
+.byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
vadd.i32 q13,q10,q5
- .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
- .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 12
- .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
+.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 12
+.byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
vadd.i32 q12,q10,q6
- .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
- .byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 13
- .byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
+.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 13
+.byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
vadd.i32 q13,q11,q7
- .byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
- .byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 14
- .byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
+.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
+.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 14
+.byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
vadd.i32 q12,q11,q4
- .byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
- .byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 15
- .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
+.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 15
+.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
vadd.i32 q13,q11,q5
- .byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
- .byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 16
- .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
+.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 16
+.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
vadd.i32 q12,q11,q6
- .byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 17
- .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 17
+.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
vadd.i32 q13,q11,q7
- .byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 18
- .byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
+.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 18
+.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
- .byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 19
- .byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
+.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 19
+.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
vadd.i32 q1,q1,q2
vadd.i32 q0,q0,q14
- bne .Loop_v8
+ bne .Loop_v8
- vst1.32 {q0},[r0]!
- vst1.32 {d2[0]},[r0]
+ vst1.32 {q0},[r0]!
+ vst1.32 {d2[0]},[r0]
- vldmia sp!,{d8-d15}
+ vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
bx lr @ bx lr
.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
#endif
diff --git a/linux-arm/crypto/sha/sha256-armv4.S b/linux-arm/crypto/sha/sha256-armv4.S
index 3c41010..ba37795 100644
--- a/linux-arm/crypto/sha/sha256-armv4.S
+++ b/linux-arm/crypto/sha/sha256-armv4.S
@@ -1,7 +1,60 @@
-#include "arm_arch.h"
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Permission to use under GPL terms is granted.
+@ ====================================================================
+
+@ SHA256 block procedure for ARMv4. May 2007.
+
+@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
+@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
+@ byte [on single-issue Xscale PXA250 core].
+
+@ July 2010.
+@
+@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
+@ Cortex A8 core and ~20 cycles per processed byte.
+
+@ February 2011.
+@
+@ Profiler-assisted and platform-specific optimization resulted in 16%
+@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+@ September 2013.
+@
+@ Add NEON implementation. On Cortex A8 it was measured to process one
+@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+@ code (meaning that latter performs sub-optimally, nothing was done
+@ about it).
+
+@ May 2014.
+@
+@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
.text
+#if __ARM_ARCH__<7
.code 32
+#else
+.syntax unified
+# if defined(__thumb2__) && !defined(__APPLE__)
+# define adrl adr
+.thumb
+# else
+.code 32
+# endif
+#endif
.type K256,%object
.align 5
@@ -24,25 +77,33 @@ K256:
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size K256,.-K256
.word 0 @ terminator
-#if __ARM_MAX_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_block_data_order
+.word OPENSSL_armcap_P-.Lsha256_block_data_order
#endif
.align 5
-.global sha256_block_data_order
+.globl sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
+#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-#if __ARM_MAX_ARCH__>=7
+#else
+ adr r3,sha256_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
+#ifdef __APPLE__
+ ldr r12,[r12]
+#endif
tst r12,#ARMV8_SHA256
bne .LARMv8
tst r12,#ARMV7_NEON
bne .LNEON
#endif
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
stmdb sp!,{r0,r1,r2,r4-r11,lr}
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r14,r3,#256+32 @ K256
@@ -63,7 +124,9 @@ sha256_block_data_order:
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 0
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
@@ -119,7 +182,9 @@ sha256_block_data_order:
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 1
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
@@ -175,7 +240,9 @@ sha256_block_data_order:
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 2
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
@@ -231,7 +298,9 @@ sha256_block_data_order:
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 3
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
@@ -287,7 +356,9 @@ sha256_block_data_order:
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 4
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
@@ -343,7 +414,9 @@ sha256_block_data_order:
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
@@ -399,7 +472,9 @@ sha256_block_data_order:
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 6
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
@@ -455,7 +530,9 @@ sha256_block_data_order:
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
@@ -511,7 +588,9 @@ sha256_block_data_order:
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 8
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
@@ -567,7 +646,9 @@ sha256_block_data_order:
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 9
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
@@ -623,7 +704,9 @@ sha256_block_data_order:
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 10
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
@@ -679,7 +762,9 @@ sha256_block_data_order:
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 11
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
@@ -735,7 +820,9 @@ sha256_block_data_order:
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 12
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
@@ -791,7 +878,9 @@ sha256_block_data_order:
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 13
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
@@ -847,7 +936,9 @@ sha256_block_data_order:
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 14
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
@@ -903,7 +994,9 @@ sha256_block_data_order:
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
+# ifndef __ARMEB__
rev r2,r2
+# endif
#else
@ ldrb r2,[r1,#3] @ 15
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
@@ -1736,6 +1829,9 @@ sha256_block_data_order:
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
+#if __ARM_ARCH__>=7
+ ite eq @ Thumb2 thing, sanity check in ARM
+#endif
ldreq r3,[sp,#16*4] @ pull ctx
bne .Lrounds_16_xx
@@ -1765,61 +1861,64 @@ sha256_block_data_order:
add sp,sp,#19*4 @ destroy frame
#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r11,pc}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
- ldmia sp!,{r4-r11,lr}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha256_block_data_order,.-sha256_block_data_order
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
+.globl sha256_block_data_order_neon
.type sha256_block_data_order_neon,%function
.align 4
sha256_block_data_order_neon:
.LNEON:
- stmdb sp!,{r4-r12,lr}
+ stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
+ sub r11,sp,#16*4+16
+ adr r14,K256
+ bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
- sub sp,sp,#16*4+16 @ alloca
- sub r14,r3,#256+32 @ K256
- bic sp,sp,#15 @ align for 128-bit stores
+ mov sp,r11 @ alloca
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
- vld1.8 {q0},[r1]!
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- vld1.32 {q8},[r14,:128]!
- vld1.32 {q9},[r14,:128]!
- vld1.32 {q10},[r14,:128]!
- vld1.32 {q11},[r14,:128]!
+ vld1.8 {q0},[r1]!
+ vld1.8 {q1},[r1]!
+ vld1.8 {q2},[r1]!
+ vld1.8 {q3},[r1]!
+ vld1.32 {q8},[r14,:128]!
+ vld1.32 {q9},[r14,:128]!
+ vld1.32 {q10},[r14,:128]!
+ vld1.32 {q11},[r14,:128]!
vrev32.8 q0,q0 @ yes, even on
- str r0,[sp,#64]
+ str r0,[sp,#64]
vrev32.8 q1,q1 @ big-endian
- str r1,[sp,#68]
- mov r1,sp
+ str r1,[sp,#68]
+ mov r1,sp
vrev32.8 q2,q2
- str r2,[sp,#72]
+ str r2,[sp,#72]
vrev32.8 q3,q3
- str r12,[sp,#76] @ save original sp
+ str r12,[sp,#76] @ save original sp
vadd.i32 q8,q8,q0
vadd.i32 q9,q9,q1
- vst1.32 {q8},[r1,:128]!
+ vst1.32 {q8},[r1,:128]!
vadd.i32 q10,q10,q2
- vst1.32 {q9},[r1,:128]!
+ vst1.32 {q9},[r1,:128]!
vadd.i32 q11,q11,q3
- vst1.32 {q10},[r1,:128]!
- vst1.32 {q11},[r1,:128]!
+ vst1.32 {q10},[r1,:128]!
+ vst1.32 {q11},[r1,:128]!
- ldmia r0,{r4-r11}
- sub r1,r1,#64
- ldr r2,[sp,#0]
- eor r12,r12,r12
- eor r3,r5,r6
- b .L_00_48
+ ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+ sub r1,r1,#64
+ ldr r2,[sp,#0]
+ eor r12,r12,r12
+ eor r3,r5,r6
+ b .L_00_48
.align 4
.L_00_48:
@@ -2220,17 +2319,19 @@ sha256_block_data_order_neon:
sub r1,r1,#64
bne .L_00_48
- ldr r1,[sp,#68]
- ldr r0,[sp,#72]
- sub r14,r14,#256 @ rewind r14
- teq r1,r0
- subeq r1,r1,#64 @ avoid SEGV
- vld1.8 {q0},[r1]! @ load next input block
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- strne r1,[sp,#68]
- mov r1,sp
+ ldr r1,[sp,#68]
+ ldr r0,[sp,#72]
+ sub r14,r14,#256 @ rewind r14
+ teq r1,r0
+ it eq
+ subeq r1,r1,#64 @ avoid SEGV
+ vld1.8 {q0},[r1]! @ load next input block
+ vld1.8 {q1},[r1]!
+ vld1.8 {q2},[r1]!
+ vld1.8 {q3},[r1]!
+ it ne
+ strne r1,[sp,#68]
+ mov r1,sp
add r11,r11,r2
eor r2,r9,r10
eor r0,r8,r8,ror#5
@@ -2540,157 +2641,176 @@ sha256_block_data_order_neon:
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
- stmia r2,{r8-r11}
+ stmia r2,{r8,r9,r10,r11}
+ ittte ne
movne r1,sp
ldrne r2,[sp,#0]
eorne r12,r12,r12
ldreq sp,[sp,#76] @ restore original sp
+ itt ne
eorne r3,r5,r6
bne .L_00_48
- ldmia sp!,{r4-r12,pc}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
#endif
-#if __ARM_MAX_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+
+# if defined(__thumb2__) && !defined(__APPLE__)
+# define INST(a,b,c,d) .byte c,d|0xc,a,b
+# else
+# define INST(a,b,c,d) .byte a,b,c,d
+# endif
+
.type sha256_block_data_order_armv8,%function
.align 5
sha256_block_data_order_armv8:
.LARMv8:
vld1.32 {q0,q1},[r0]
- sub r3,r3,#sha256_block_data_order-K256
+# ifdef __APPLE__
+ sub r3,r3,#256+32
+# elif defined(__thumb2__)
+ adr r3,.LARMv8
+ sub r3,r3,#.LARMv8-K256
+# else
+ adrl r3,K256
+# endif
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
.Loop_v8:
- vld1.8 {q8-q9},[r1]!
- vld1.8 {q10-q11},[r1]!
- vld1.32 {q12},[r3]!
+ vld1.8 {q8,q9},[r1]!
+ vld1.8 {q10,q11},[r1]!
+ vld1.32 {q12},[r3]!
vrev32.8 q8,q8
vrev32.8 q9,q9
vrev32.8 q10,q10
vrev32.8 q11,q11
- vmov q14,q0 @ offload
- vmov q15,q1
- teq r1,r2
- vld1.32 {q13},[r3]!
+ vmov q14,q0 @ offload
+ vmov q15,q1
+ teq r1,r2
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
- .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
- .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
+ INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
- .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
- .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
+ INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q10
- .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
- .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
+ INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q11
- .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
- .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
+ INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
- .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
- .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
+ INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
- .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
- .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
+ INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q10
- .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
- .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
+ INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q11
- .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
- .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
+ INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
- .byte 0xe2,0x03,0xfa,0xf3 @ sha256su0 q8,q9
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
- .byte 0xe6,0x0c,0x64,0xf3 @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
+ INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
- .byte 0xe4,0x23,0xfa,0xf3 @ sha256su0 q9,q10
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
- .byte 0xe0,0x2c,0x66,0xf3 @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
+ INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q10
- .byte 0xe6,0x43,0xfa,0xf3 @ sha256su0 q10,q11
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
- .byte 0xe2,0x4c,0x60,0xf3 @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
+ INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
+ INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q11
- .byte 0xe0,0x63,0xfa,0xf3 @ sha256su0 q11,q8
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
- .byte 0xe4,0x6c,0x62,0xf3 @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
+ INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
+ INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
+ vld1.32 {q13},[r3]!
vadd.i32 q12,q12,q8
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- vld1.32 {q12},[r3]!
+ vld1.32 {q12},[r3]!
vadd.i32 q13,q13,q9
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- vld1.32 {q13},[r3]
+ vld1.32 {q13},[r3]
vadd.i32 q12,q12,q10
- sub r3,r3,#256-16 @ rewind
- vmov q2,q0
- .byte 0x68,0x0c,0x02,0xf3 @ sha256h q0,q1,q12
- .byte 0x68,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q12
+ sub r3,r3,#256-16 @ rewind
+ vmov q2,q0
+ INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
+ INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
vadd.i32 q13,q13,q11
- vmov q2,q0
- .byte 0x6a,0x0c,0x02,0xf3 @ sha256h q0,q1,q13
- .byte 0x6a,0x2c,0x14,0xf3 @ sha256h2 q1,q2,q13
+ vmov q2,q0
+ INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
+ INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
vadd.i32 q0,q0,q14
vadd.i32 q1,q1,q15
- bne .Loop_v8
+ it ne
+ bne .Loop_v8
- vst1.32 {q0,q1},[r0]
+ vst1.32 {q0,q1},[r0]
bx lr @ bx lr
.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
#endif
-.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
-#if __ARM_MAX_ARCH__>=7
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
+.align 2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm OPENSSL_armcap_P,4,4
+.hidden OPENSSL_armcap_P
#endif
diff --git a/linux-arm/crypto/sha/sha512-armv4.S b/linux-arm/crypto/sha/sha512-armv4.S
index 37cfca3..1a3d467 100644
--- a/linux-arm/crypto/sha/sha512-armv4.S
+++ b/linux-arm/crypto/sha/sha512-armv4.S
@@ -1,4 +1,61 @@
-#include "arm_arch.h"
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Permission to use under GPL terms is granted.
+@ ====================================================================
+
+@ SHA512 block procedure for ARMv4. September 2007.
+
+@ This code is ~4.5 (four and a half) times faster than code generated
+@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+@ Xscale PXA250 core].
+@
+@ July 2010.
+@
+@ Rescheduling for dual-issue pipeline resulted in 6% improvement on
+@ Cortex A8 core and ~40 cycles per processed byte.
+
+@ February 2011.
+@
+@ Profiler-assisted and platform-specific optimization resulted in 7%
+@ improvement on Cortex A8 core and ~38 cycles per byte.
+
+@ March 2011.
+@
+@ Add NEON implementation. On Cortex A8 it was measured to process
+@ one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+@ August 2012.
+@
+@ Improve NEON performance by 12% on Snapdragon S4. In absolute
+@ terms it's 22.6 cycles per byte, which is disappointing result.
+@ Technical writers asserted that 3-way S4 pipeline can sustain
+@ multiple NEON instructions per cycle, but dual NEON issue could
+@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
+@ for further details. On side note Cortex-A15 processes one byte in
+@ 16 cycles.
+
+@ Byte order [in]dependence. =========================================
+@
+@ Originally caller was expected to maintain specific *dword* order in
+@ h[0-7], namely with most significant dword at *lower* address, which
+@ was reflected in below two parameters as 0 and 4. Now caller is
+@ expected to maintain native byte order for whole 64-bit values.
+#ifndef __KERNEL__
+# include "arm_arch.h"
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+#endif
+
#ifdef __ARMEL__
# define LO 0
# define HI 4
@@ -10,71 +67,90 @@
#endif
.text
+#if __ARM_ARCH__<7 || defined(__APPLE__)
+.code 32
+#else
+.syntax unified
+# ifdef __thumb2__
+# define adrl adr
+.thumb
+# else
.code 32
+# endif
+#endif
+
.type K512,%object
.align 5
K512:
-WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
-WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
-WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
-WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
-WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
-WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
-WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
-WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
-WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
-WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
-WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
-WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
-WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
-WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
-WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
-WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
-WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
-WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
-WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
-WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
-WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
-WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
-WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
-WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
-WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
-WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
-WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
-WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
-WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
-WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
-WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
-WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
-WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
-WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
-WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
-WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
-WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
-WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
-WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
-WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+ WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+ WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+ WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+ WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+ WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+ WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+ WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+ WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+ WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+ WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+ WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+ WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+ WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+ WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+ WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+ WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+ WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+ WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+ WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+ WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+ WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+ WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+ WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+ WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+ WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+ WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+ WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+ WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+ WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+ WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+ WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+ WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+ WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+ WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+ WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+ WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+ WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+ WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+ WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
-#if __ARM_MAX_ARCH__>=7
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha512_block_data_order
+.word OPENSSL_armcap_P-.Lsha512_block_data_order
.skip 32-4
#else
.skip 32
#endif
-.global sha512_block_data_order
+.globl sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
+#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
-#if __ARM_MAX_ARCH__>=7
+#else
+ adr r3,sha512_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
+#ifdef __APPLE__
+ ldr r12,[r12]
+#endif
tst r12,#1
bne .LNEON
#endif
- stmdb sp!,{r4-r12,lr}
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+ stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r14,r3,#672 @ K512
sub sp,sp,#9*8
@@ -186,6 +262,9 @@ sha512_block_data_order:
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
+#if __ARM_ARCH__>=7
+ it eq @ Thumb2 thing, sanity check in ARM
+#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@@ -323,6 +402,9 @@ sha512_block_data_order:
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
+#if __ARM_ARCH__>=7
+ it eq @ Thumb2 thing, sanity check in ARM
+#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@@ -357,6 +439,9 @@ sha512_block_data_order:
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
+#if __ARM_ARCH__>=7
+ ittt eq @ Thumb2 thing, sanity check in ARM
+#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
@@ -434,1343 +519,1349 @@ sha512_block_data_order:
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
- ldmia sp!,{r4-r12,lr}
+ ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
+.size sha512_block_data_order,.-sha512_block_data_order
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
+.globl sha512_block_data_order_neon
+.type sha512_block_data_order_neon,%function
.align 4
+sha512_block_data_order_neon:
.LNEON:
- dmb @ errata #451034 on early Cortex A8
- vstmdb sp!,{d8-d15} @ ABI specification says so
- sub r3,r3,#672 @ K512
- vldmia r0,{d16-d23} @ load context
+ dmb @ errata #451034 on early Cortex A8
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+ adr r3,K512
+ VFP_ABI_PUSH
+ vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
- vld1.64 {d0},[r1]! @ handles unaligned
+ vld1.64 {d0},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 0>0
- vadd.i64 d16,d30 @ h+=Maj from the past
+ vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
#if 0<16 && defined(__ARMEL__)
vrev64.8 d0,d0
#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
+ vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 1
#if 1<16
- vld1.64 {d1},[r1]! @ handles unaligned
+ vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 1>0
- vadd.i64 d23,d30 @ h+=Maj from the past
+ vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
#if 1<16 && defined(__ARMEL__)
vrev64.8 d1,d1
#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
+ vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 2
#if 2<16
- vld1.64 {d2},[r1]! @ handles unaligned
+ vld1.64 {d2},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 2>0
- vadd.i64 d22,d30 @ h+=Maj from the past
+ vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
#if 2<16 && defined(__ARMEL__)
vrev64.8 d2,d2
#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
+ vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 3
#if 3<16
- vld1.64 {d3},[r1]! @ handles unaligned
+ vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 3>0
- vadd.i64 d21,d30 @ h+=Maj from the past
+ vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
#if 3<16 && defined(__ARMEL__)
vrev64.8 d3,d3
#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
+ vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 4
#if 4<16
- vld1.64 {d4},[r1]! @ handles unaligned
+ vld1.64 {d4},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 4>0
- vadd.i64 d20,d30 @ h+=Maj from the past
+ vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
#if 4<16 && defined(__ARMEL__)
vrev64.8 d4,d4
#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
+ vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 5
#if 5<16
- vld1.64 {d5},[r1]! @ handles unaligned
+ vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 5>0
- vadd.i64 d19,d30 @ h+=Maj from the past
+ vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
#if 5<16 && defined(__ARMEL__)
vrev64.8 d5,d5
#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
+ vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 6
#if 6<16
- vld1.64 {d6},[r1]! @ handles unaligned
+ vld1.64 {d6},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 6>0
- vadd.i64 d18,d30 @ h+=Maj from the past
+ vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
#if 6<16 && defined(__ARMEL__)
vrev64.8 d6,d6
#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
+ vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 7
#if 7<16
- vld1.64 {d7},[r1]! @ handles unaligned
+ vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 7>0
- vadd.i64 d17,d30 @ h+=Maj from the past
+ vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
#if 7<16 && defined(__ARMEL__)
vrev64.8 d7,d7
#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
+ vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 d24,d20,#14 @ 8
#if 8<16
- vld1.64 {d8},[r1]! @ handles unaligned
+ vld1.64 {d8},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d20,#18
#if 8>0
- vadd.i64 d16,d30 @ h+=Maj from the past
+ vadd.i64 d16,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
#if 8<16 && defined(__ARMEL__)
vrev64.8 d8,d8
#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
+ vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 9
#if 9<16
- vld1.64 {d9},[r1]! @ handles unaligned
+ vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 9>0
- vadd.i64 d23,d30 @ h+=Maj from the past
+ vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
#if 9<16 && defined(__ARMEL__)
vrev64.8 d9,d9
#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
+ vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 d24,d18,#14 @ 10
#if 10<16
- vld1.64 {d10},[r1]! @ handles unaligned
+ vld1.64 {d10},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d18,#18
#if 10>0
- vadd.i64 d22,d30 @ h+=Maj from the past
+ vadd.i64 d22,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
#if 10<16 && defined(__ARMEL__)
vrev64.8 d10,d10
#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
+ vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 11
#if 11<16
- vld1.64 {d11},[r1]! @ handles unaligned
+ vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 11>0
- vadd.i64 d21,d30 @ h+=Maj from the past
+ vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
#if 11<16 && defined(__ARMEL__)
vrev64.8 d11,d11
#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
+ vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 d24,d16,#14 @ 12
#if 12<16
- vld1.64 {d12},[r1]! @ handles unaligned
+ vld1.64 {d12},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d16,#18
#if 12>0
- vadd.i64 d20,d30 @ h+=Maj from the past
+ vadd.i64 d20,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
#if 12<16 && defined(__ARMEL__)
vrev64.8 d12,d12
#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
+ vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 13
#if 13<16
- vld1.64 {d13},[r1]! @ handles unaligned
+ vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 13>0
- vadd.i64 d19,d30 @ h+=Maj from the past
+ vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
#if 13<16 && defined(__ARMEL__)
vrev64.8 d13,d13
#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
+ vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 d24,d22,#14 @ 14
#if 14<16
- vld1.64 {d14},[r1]! @ handles unaligned
+ vld1.64 {d14},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d22,#18
#if 14>0
- vadd.i64 d18,d30 @ h+=Maj from the past
+ vadd.i64 d18,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
#if 14<16 && defined(__ARMEL__)
vrev64.8 d14,d14
#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
+ vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 15
#if 15<16
- vld1.64 {d15},[r1]! @ handles unaligned
+ vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 15>0
- vadd.i64 d17,d30 @ h+=Maj from the past
+ vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
#if 15<16 && defined(__ARMEL__)
vrev64.8 d15,d15
#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
+ vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
- mov r12,#4
+ mov r12,#4
.L16_79_neon:
- subs r12,#1
+ subs r12,#1
vshr.u64 q12,q7,#19
vshr.u64 q13,q7,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
+ vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q7,#6
- vsli.64 q12,q7,#45
- vext.8 q14,q0,q1,#8 @ X[i+1]
- vsli.64 q13,q7,#3
- veor q15,q12
+ vsli.64 q12,q7,#45
+ vext.8 q14,q0,q1,#8 @ X[i+1]
+ vsli.64 q13,q7,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q0,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q4,q5,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q4,q5,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q0,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q0,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
#if 16<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
+ vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 17
#if 17<16
- vld1.64 {d1},[r1]! @ handles unaligned
+ vld1.64 {d1},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 17>0
- vadd.i64 d23,d30 @ h+=Maj from the past
+ vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
#if 17<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
+ vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q0,#19
vshr.u64 q13,q0,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
+ vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q0,#6
- vsli.64 q12,q0,#45
- vext.8 q14,q1,q2,#8 @ X[i+1]
- vsli.64 q13,q0,#3
- veor q15,q12
+ vsli.64 q12,q0,#45
+ vext.8 q14,q1,q2,#8 @ X[i+1]
+ vsli.64 q13,q0,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q1,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q5,q6,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q5,q6,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q1,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q1,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
#if 18<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
+ vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 19
#if 19<16
- vld1.64 {d3},[r1]! @ handles unaligned
+ vld1.64 {d3},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 19>0
- vadd.i64 d21,d30 @ h+=Maj from the past
+ vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
#if 19<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
+ vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q1,#19
vshr.u64 q13,q1,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
+ vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q1,#6
- vsli.64 q12,q1,#45
- vext.8 q14,q2,q3,#8 @ X[i+1]
- vsli.64 q13,q1,#3
- veor q15,q12
+ vsli.64 q12,q1,#45
+ vext.8 q14,q2,q3,#8 @ X[i+1]
+ vsli.64 q13,q1,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q2,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q6,q7,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q6,q7,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q2,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q2,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
#if 20<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
+ vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 21
#if 21<16
- vld1.64 {d5},[r1]! @ handles unaligned
+ vld1.64 {d5},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 21>0
- vadd.i64 d19,d30 @ h+=Maj from the past
+ vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
#if 21<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
+ vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q2,#19
vshr.u64 q13,q2,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
+ vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q2,#6
- vsli.64 q12,q2,#45
- vext.8 q14,q3,q4,#8 @ X[i+1]
- vsli.64 q13,q2,#3
- veor q15,q12
+ vsli.64 q12,q2,#45
+ vext.8 q14,q3,q4,#8 @ X[i+1]
+ vsli.64 q13,q2,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q3,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q7,q0,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q7,q0,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q3,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q3,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
#if 22<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
+ vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 23
#if 23<16
- vld1.64 {d7},[r1]! @ handles unaligned
+ vld1.64 {d7},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 23>0
- vadd.i64 d17,d30 @ h+=Maj from the past
+ vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
#if 23<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
+ vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
vshr.u64 q12,q3,#19
vshr.u64 q13,q3,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
+ vadd.i64 d16,d30 @ h+=Maj from the past
vshr.u64 q15,q3,#6
- vsli.64 q12,q3,#45
- vext.8 q14,q4,q5,#8 @ X[i+1]
- vsli.64 q13,q3,#3
- veor q15,q12
+ vsli.64 q12,q3,#45
+ vext.8 q14,q4,q5,#8 @ X[i+1]
+ vsli.64 q13,q3,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q4,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q0,q1,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q0,q1,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d20,#14 @ from NEON_00_15
vadd.i64 q4,q14
vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d20,#41 @ from NEON_00_15
vadd.i64 q4,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
#if 24<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d23
vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
+ vsli.64 d24,d16,#36
vadd.i64 d27,d26
vshr.u64 d26,d16,#39
vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
vadd.i64 d19,d27
vadd.i64 d30,d27
@ vadd.i64 d23,d30
vshr.u64 d24,d19,#14 @ 25
#if 25<16
- vld1.64 {d9},[r1]! @ handles unaligned
+ vld1.64 {d9},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d19,#18
#if 25>0
- vadd.i64 d23,d30 @ h+=Maj from the past
+ vadd.i64 d23,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
#if 25<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d22
vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
+ vsli.64 d24,d23,#36
vadd.i64 d27,d26
vshr.u64 d26,d23,#39
vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
vadd.i64 d18,d27
vadd.i64 d30,d27
@ vadd.i64 d22,d30
vshr.u64 q12,q4,#19
vshr.u64 q13,q4,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
+ vadd.i64 d22,d30 @ h+=Maj from the past
vshr.u64 q15,q4,#6
- vsli.64 q12,q4,#45
- vext.8 q14,q5,q6,#8 @ X[i+1]
- vsli.64 q13,q4,#3
- veor q15,q12
+ vsli.64 q12,q4,#45
+ vext.8 q14,q5,q6,#8 @ X[i+1]
+ vsli.64 q13,q4,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q5,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q1,q2,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q1,q2,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d18,#14 @ from NEON_00_15
vadd.i64 q5,q14
vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d18,#41 @ from NEON_00_15
vadd.i64 q5,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
#if 26<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d21
vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
+ vsli.64 d24,d22,#36
vadd.i64 d27,d26
vshr.u64 d26,d22,#39
vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
vadd.i64 d17,d27
vadd.i64 d30,d27
@ vadd.i64 d21,d30
vshr.u64 d24,d17,#14 @ 27
#if 27<16
- vld1.64 {d11},[r1]! @ handles unaligned
+ vld1.64 {d11},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d17,#18
#if 27>0
- vadd.i64 d21,d30 @ h+=Maj from the past
+ vadd.i64 d21,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
#if 27<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d20
vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
+ vsli.64 d24,d21,#36
vadd.i64 d27,d26
vshr.u64 d26,d21,#39
vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
vadd.i64 d16,d27
vadd.i64 d30,d27
@ vadd.i64 d20,d30
vshr.u64 q12,q5,#19
vshr.u64 q13,q5,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
+ vadd.i64 d20,d30 @ h+=Maj from the past
vshr.u64 q15,q5,#6
- vsli.64 q12,q5,#45
- vext.8 q14,q6,q7,#8 @ X[i+1]
- vsli.64 q13,q5,#3
- veor q15,q12
+ vsli.64 q12,q5,#45
+ vext.8 q14,q6,q7,#8 @ X[i+1]
+ vsli.64 q13,q5,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q6,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q2,q3,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q2,q3,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d16,#14 @ from NEON_00_15
vadd.i64 q6,q14
vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d16,#41 @ from NEON_00_15
vadd.i64 q6,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
#if 28<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d19
vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
+ vsli.64 d24,d20,#36
vadd.i64 d27,d26
vshr.u64 d26,d20,#39
vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
vadd.i64 d23,d27
vadd.i64 d30,d27
@ vadd.i64 d19,d30
vshr.u64 d24,d23,#14 @ 29
#if 29<16
- vld1.64 {d13},[r1]! @ handles unaligned
+ vld1.64 {d13},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d23,#18
#if 29>0
- vadd.i64 d19,d30 @ h+=Maj from the past
+ vadd.i64 d19,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
#if 29<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d18
vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
+ vsli.64 d24,d19,#36
vadd.i64 d27,d26
vshr.u64 d26,d19,#39
vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
vadd.i64 d22,d27
vadd.i64 d30,d27
@ vadd.i64 d18,d30
vshr.u64 q12,q6,#19
vshr.u64 q13,q6,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
+ vadd.i64 d18,d30 @ h+=Maj from the past
vshr.u64 q15,q6,#6
- vsli.64 q12,q6,#45
- vext.8 q14,q7,q0,#8 @ X[i+1]
- vsli.64 q13,q6,#3
- veor q15,q12
+ vsli.64 q12,q6,#45
+ vext.8 q14,q7,q0,#8 @ X[i+1]
+ vsli.64 q13,q6,#3
+ veor q15,q12
vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
+ veor q15,q13 @ sigma1(X[i+14])
vshr.u64 q13,q14,#8
vadd.i64 q7,q15
vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q3,q4,#8 @ X[i+9]
- veor q15,q12
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q3,q4,#8 @ X[i+9]
+ veor q15,q12
vshr.u64 d24,d22,#14 @ from NEON_00_15
vadd.i64 q7,q14
vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
+ veor q15,q13 @ sigma0(X[i+1])
vshr.u64 d26,d22,#41 @ from NEON_00_15
vadd.i64 q7,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
#if 30<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d17
vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
+ vsli.64 d24,d18,#36
vadd.i64 d27,d26
vshr.u64 d26,d18,#39
vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
vadd.i64 d21,d27
vadd.i64 d30,d27
@ vadd.i64 d17,d30
vshr.u64 d24,d21,#14 @ 31
#if 31<16
- vld1.64 {d15},[r1]! @ handles unaligned
+ vld1.64 {d15},[r1]! @ handles unaligned
#endif
vshr.u64 d25,d21,#18
#if 31>0
- vadd.i64 d17,d30 @ h+=Maj from the past
+ vadd.i64 d17,d30 @ h+=Maj from the past
#endif
vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
#if 31<16 && defined(__ARMEL__)
vrev64.8 ,
#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
+ veor d26,d25 @ Sigma1(e)
vadd.i64 d27,d29,d16
vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
+ vsli.64 d24,d17,#36
vadd.i64 d27,d26
vshr.u64 d26,d17,#39
vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
vadd.i64 d20,d27
vadd.i64 d30,d27
@ vadd.i64 d16,d30
- bne .L16_79_neon
+ bne .L16_79_neon
- vadd.i64 d16,d30 @ h+=Maj from the past
- vldmia r0,{d24-d31} @ load context to temp
+ vadd.i64 d16,d30 @ h+=Maj from the past
+ vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
- vstmia r0,{d16-d23} @ save context
- teq r1,r2
- sub r3,#640 @ rewind K512
- bne .Loop_neon
+ vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
+ teq r1,r2
+ sub r3,#640 @ rewind K512
+ bne .Loop_neon
- vldmia sp!,{d8-d15} @ epilogue
+ VFP_ABI_POP
bx lr @ .word 0xe12fff1e
+.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
-.size sha512_block_data_order,.-sha512_block_data_order
-.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
-#if __ARM_MAX_ARCH__>=7
+.align 2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
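
Note for readers tracing the NEON rounds above: the vshr.u64/vsli.64 pairs build 64-bit rotates, vbsl performs the bitwise select used for Ch and Maj, and veor folds the partial results together. Below is a minimal C sketch (illustrative only, not part of this diff; the names ror64, Ch, Maj, Sigma0, Sigma1, sigma0 and sigma1 are mine) of the scalar SHA-512 primitives the assembly implements. The rotation amounts correspond to the #14/#18/#41, #28/#34/#39, #1/#8/#7 and #19/#61/#6 shift counts visible in the listing.

#include <stdint.h>

/* Rotate right by n bits; the assembly builds this with vshr.u64 #n
 * followed by vsli.64 #(64-n). */
static inline uint64_t ror64(uint64_t x, unsigned n) {
    return (x >> n) | (x << (64 - n));
}

/* Choice and majority functions, computed in the listing with vbsl. */
static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)  { return (e & f) ^ (~e & g); }
static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c) { return (a & b) ^ (a & c) ^ (b & c); }

/* Big sigmas used in every round (Sigma1 on e, Sigma0 on a). */
static inline uint64_t Sigma1(uint64_t e) { return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41); }
static inline uint64_t Sigma0(uint64_t a) { return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39); }

/* Small sigmas used in the .L16_79_neon message-schedule expansion. */
static inline uint64_t sigma0(uint64_t x) { return ror64(x, 1)  ^ ror64(x, 8)  ^ (x >> 7); }
static inline uint64_t sigma1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }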