author     Brian Carlstrom <bdc@google.com>    2010-11-16 11:19:35 -0800
committer  Brian Carlstrom <bdc@google.com>    2010-11-16 11:21:11 -0800
commit     43c12e3d4f9bbbbd4a8ba7b149686437514bc6b6 (patch)
tree       520464b8c2f2e19d00e6ea143c6e1e9523b3367a /crypto/aes
parent     8be882eb81101ceede7641e88ccbdaded610ff5f (diff)
Upgrade to openssl-1.0.0b
Bug: 3201137
Change-Id: I20cd6bed7717e5982abc3734e9a6522067f2908e
Diffstat (limited to 'crypto/aes')
-rw-r--r--  crypto/aes/aes_wrap.c        |  12
-rw-r--r--  crypto/aes/asm/aes-armv4.pl  | 397
-rw-r--r--  crypto/aes/asm/aes-armv4.s   | 386
3 files changed, 392 insertions, 403 deletions
diff --git a/crypto/aes/aes_wrap.c b/crypto/aes/aes_wrap.c
index 9feacd6..e2d73d3 100644
--- a/crypto/aes/aes_wrap.c
+++ b/crypto/aes/aes_wrap.c
@@ -85,9 +85,9 @@ int AES_wrap_key(AES_KEY *key, const unsigned char *iv,
 		A[7] ^= (unsigned char)(t & 0xff);
 		if (t > 0xff)
 			{
-			A[6] ^= (unsigned char)((t & 0xff) >> 8);
-			A[5] ^= (unsigned char)((t & 0xff) >> 16);
-			A[4] ^= (unsigned char)((t & 0xff) >> 24);
+			A[6] ^= (unsigned char)((t >> 8) & 0xff);
+			A[5] ^= (unsigned char)((t >> 16) & 0xff);
+			A[4] ^= (unsigned char)((t >> 24) & 0xff);
 			}
 		memcpy(R, B + 8, 8);
 		}
@@ -119,9 +119,9 @@ int AES_unwrap_key(AES_KEY *key, const unsigned char *iv,
 		A[7] ^= (unsigned char)(t & 0xff);
 		if (t > 0xff)
 			{
-			A[6] ^= (unsigned char)((t & 0xff) >> 8);
-			A[5] ^= (unsigned char)((t & 0xff) >> 16);
-			A[4] ^= (unsigned char)((t & 0xff) >> 24);
+			A[6] ^= (unsigned char)((t >> 8) & 0xff);
+			A[5] ^= (unsigned char)((t >> 16) & 0xff);
+			A[4] ^= (unsigned char)((t >> 24) & 0xff);
 			}
 		memcpy(B + 8, R, 8);
 		AES_decrypt(B, B, key);
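The aes_wrap.c hunks fix a byte-extraction bug in the RFC 3394 key-wrap counter t: the old expression (t & 0xff) >> 8 masks t down to its low byte before shifting, so any right shift by 8 or more always yields zero and A[4]..A[6] were never actually modified. Shifting first and masking afterwards, (t >> 8) & 0xff, extracts the intended byte. Since the code sits behind the guard if (t > 0xff), the bug only bites when more than 255 eight-byte blocks are wrapped. A minimal standalone C sketch (illustration only, not part of the patch) showing the two expressions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t t = 0x12345678;              /* sample counter value */
    /* old: mask first, then shift -- the mask keeps only bits 0-7,
     * so shifting right by 8/16/24 always produces 0 */
    printf("old: %x %x %x\n",
           (unsigned)((t & 0xff) >> 8),
           (unsigned)((t & 0xff) >> 16),
           (unsigned)((t & 0xff) >> 24));  /* prints: old: 0 0 0 */
    /* new: shift first, then mask -- extracts bytes 1, 2 and 3 */
    printf("new: %x %x %x\n",
           (unsigned)((t >> 8) & 0xff),
           (unsigned)((t >> 16) & 0xff),
           (unsigned)((t >> 24) & 0xff));  /* prints: new: 56 34 12 */
    return 0;
}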
diff --git a/crypto/aes/asm/aes-armv4.pl b/crypto/aes/asm/aes-armv4.pl
index 6902441..c51ee1f 100644
--- a/crypto/aes/asm/aes-armv4.pl
+++ b/crypto/aes/asm/aes-armv4.pl
@@ -16,12 +16,20 @@
 # allows to merge logical or arithmetic operation with shift or rotate
 # in one instruction and emit combined result every cycle. The module
 # is endian-neutral. The performance is ~42 cycles/byte for 128-bit
-# key.
+# key [on single-issue Xscale PXA250 core].

 # May 2007.
 #
 # AES_set_[en|de]crypt_key is added.

+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 12% improvement on
+# Cortex A8 core and ~25 cycles per byte processed with 128-bit key.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
 $s0="r0";
 $s1="r1";
 $s2="r2";
@@ -164,24 +172,24 @@ AES_encrypt:
 ldrb $t2,[$rounds,#1]
 ldrb $t3,[$rounds,#0]
 orr $s0,$s0,$t1,lsl#8
- orr $s0,$s0,$t2,lsl#16
- orr $s0,$s0,$t3,lsl#24
 ldrb $s1,[$rounds,#7]
+ orr $s0,$s0,$t2,lsl#16
 ldrb $t1,[$rounds,#6]
+ orr $s0,$s0,$t3,lsl#24
 ldrb $t2,[$rounds,#5]
 ldrb $t3,[$rounds,#4]
 orr $s1,$s1,$t1,lsl#8
- orr $s1,$s1,$t2,lsl#16
- orr $s1,$s1,$t3,lsl#24
 ldrb $s2,[$rounds,#11]
+ orr $s1,$s1,$t2,lsl#16
 ldrb $t1,[$rounds,#10]
+ orr $s1,$s1,$t3,lsl#24
 ldrb $t2,[$rounds,#9]
 ldrb $t3,[$rounds,#8]
 orr $s2,$s2,$t1,lsl#8
- orr $s2,$s2,$t2,lsl#16
- orr $s2,$s2,$t3,lsl#24
 ldrb $s3,[$rounds,#15]
+ orr $s2,$s2,$t2,lsl#16
 ldrb $t1,[$rounds,#14]
+ orr $s2,$s2,$t3,lsl#24
 ldrb $t2,[$rounds,#13]
 ldrb $t3,[$rounds,#12]
 orr $s3,$s3,$t1,lsl#8
@@ -196,24 +204,24 @@ AES_encrypt:
 mov $t3,$s0,lsr#8
 strb $t1,[$rounds,#0]
 strb $t2,[$rounds,#1]
- strb $t3,[$rounds,#2]
- strb $s0,[$rounds,#3]
 mov $t1,$s1,lsr#24
+ strb $t3,[$rounds,#2]
 mov $t2,$s1,lsr#16
+ strb $s0,[$rounds,#3]
 mov $t3,$s1,lsr#8
 strb $t1,[$rounds,#4]
 strb $t2,[$rounds,#5]
- strb $t3,[$rounds,#6]
- strb $s1,[$rounds,#7]
 mov $t1,$s2,lsr#24
+ strb $t3,[$rounds,#6]
 mov $t2,$s2,lsr#16
+ strb $s1,[$rounds,#7]
 mov $t3,$s2,lsr#8
 strb $t1,[$rounds,#8]
 strb $t2,[$rounds,#9]
- strb $t3,[$rounds,#10]
- strb $s2,[$rounds,#11]
 mov $t1,$s3,lsr#24
+ strb $t3,[$rounds,#10]
 mov $t2,$s3,lsr#16
+ strb $s2,[$rounds,#11]
 mov $t3,$s3,lsr#8
 strb $t1,[$rounds,#12]
 strb $t2,[$rounds,#13]
@@ -230,141 +238,137 @@ AES_encrypt:
 .align 2
 _armv4_AES_encrypt:
 str lr,[sp,#-4]! @ push lr
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- ldr $rounds,[$key,#240-16]
+ ldmia $key!,{$t1-$i1}
 eor $s0,$s0,$t1
+ ldr $rounds,[$key,#240-16]
 eor $s1,$s1,$t2
 eor $s2,$s2,$t3
 eor $s3,$s3,$i1
 sub $rounds,$rounds,#1
 mov lr,#255

-.Lenc_loop:
+ and $i1,lr,$s0
 and $i2,lr,$s0,lsr#8
 and $i3,lr,$s0,lsr#16
- and $i1,lr,$s0
 mov $s0,$s0,lsr#24
+.Lenc_loop:
 ldr $t1,[$tbl,$i1,lsl#2] @ Te3[s0>>0]
- ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
- ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
- ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
-
 and $i1,lr,$s1,lsr#16 @ i0
+ ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
 and $i2,lr,$s1
+ ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
 and $i3,lr,$s1,lsr#8
+ ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
 mov $s1,$s1,lsr#24
+
 ldr $i1,[$tbl,$i1,lsl#2] @ Te1[s1>>16]
- ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
 ldr $i2,[$tbl,$i2,lsl#2] @ Te3[s1>>0]
 ldr $i3,[$tbl,$i3,lsl#2] @ Te2[s1>>8]
 eor $s0,$s0,$i1,ror#8
- eor $s1,$s1,$t1,ror#24
- eor $t2,$t2,$i2,ror#8
- eor $t3,$t3,$i3,ror#8
-
+ ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
 and $i1,lr,$s2,lsr#8 @ i0
+ eor $t2,$t2,$i2,ror#8
 and $i2,lr,$s2,lsr#16 @ i1
+ eor $t3,$t3,$i3,ror#8
 and $i3,lr,$s2
- mov $s2,$s2,lsr#24
+ eor $s1,$s1,$t1,ror#24
 ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8]
+ mov $s2,$s2,lsr#24
+
 ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
- ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
 ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0]
 eor $s0,$s0,$i1,ror#16
- eor $s1,$s1,$i2,ror#8
- eor $s2,$s2,$t2,ror#16
- eor $t3,$t3,$i3,ror#16
-
+ ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
 and $i1,lr,$s3 @ i0
+ eor $s1,$s1,$i2,ror#8
 and $i2,lr,$s3,lsr#8 @ i1
+ eor $t3,$t3,$i3,ror#16
 and $i3,lr,$s3,lsr#16 @ i2
- mov $s3,$s3,lsr#24
+ eor $s2,$s2,$t2,ror#16
 ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0]
+ mov $s3,$s3,lsr#24
+
 ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
 ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16]
- ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
 eor $s0,$s0,$i1,ror#24
+ ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
 eor $s1,$s1,$i2,ror#16
+ ldr $i1,[$key],#16
 eor $s2,$s2,$i3,ror#8
+ ldr $t1,[$key,#-12]
 eor $s3,$s3,$t3,ror#8
-
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ ldr $t2,[$key,#-8]
+ eor $s0,$s0,$i1
+ ldr $t3,[$key,#-4]
+ and $i1,lr,$s0
+ eor $s1,$s1,$t1
+ and $i2,lr,$s0,lsr#8
+ eor $s2,$s2,$t2
+ and $i3,lr,$s0,lsr#16
+ eor $s3,$s3,$t3
+ mov $s0,$s0,lsr#24

 subs $rounds,$rounds,#1
 bne .Lenc_loop

 add $tbl,$tbl,#2

- and $i1,lr,$s0
- and $i2,lr,$s0,lsr#8
- and $i3,lr,$s0,lsr#16
- mov $s0,$s0,lsr#24
 ldrb $t1,[$tbl,$i1,lsl#2] @ Te4[s0>>0]
- ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
- ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
- ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
-
 and $i1,lr,$s1,lsr#16 @ i0
+ ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
 and $i2,lr,$s1
+ ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
 and $i3,lr,$s1,lsr#8
+ ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
 mov $s1,$s1,lsr#24
+
 ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s1>>16]
- ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
 ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s1>>0]
 ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s1>>8]
 eor $s0,$i1,$s0,lsl#8
- eor $s1,$t1,$s1,lsl#24
- eor $t2,$i2,$t2,lsl#8
- eor $t3,$i3,$t3,lsl#8
-
+ ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
 and $i1,lr,$s2,lsr#8 @ i0
+ eor $t2,$i2,$t2,lsl#8
 and $i2,lr,$s2,lsr#16 @ i1
+ eor $t3,$i3,$t3,lsl#8
 and $i3,lr,$s2
- mov $s2,$s2,lsr#24
+ eor $s1,$t1,$s1,lsl#24
 ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8]
+ mov $s2,$s2,lsr#24
+
 ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
- ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
 ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0]
 eor $s0,$i1,$s0,lsl#8
- eor $s1,$s1,$i2,lsl#16
- eor $s2,$t2,$s2,lsl#24
- eor $t3,$i3,$t3,lsl#8
-
+ ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
 and $i1,lr,$s3 @ i0
+ eor $s1,$s1,$i2,lsl#16
 and $i2,lr,$s3,lsr#8 @ i1
+ eor $t3,$i3,$t3,lsl#8
 and $i3,lr,$s3,lsr#16 @ i2
- mov $s3,$s3,lsr#24
+ eor $s2,$t2,$s2,lsl#24
 ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0]
+ mov $s3,$s3,lsr#24
+
 ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
 ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16]
- ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
 eor $s0,$i1,$s0,lsl#8
+ ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
+ ldr $i1,[$key,#0]
 eor $s1,$s1,$i2,lsl#8
+ ldr $t1,[$key,#4]
 eor $s2,$s2,$i3,lsl#16
+ ldr $t2,[$key,#8]
 eor $s3,$t3,$s3,lsl#24
+ ldr $t3,[$key,#12]

- ldr lr,[sp],#4 @ pop lr
- ldr $t1,[$key,#0]
- ldr $t2,[$key,#4]
- ldr $t3,[$key,#8]
- ldr $i1,[$key,#12]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ eor $s0,$s0,$i1
+ eor $s1,$s1,$t1
+ eor $s2,$s2,$t2
+ eor $s3,$s3,$t3

 sub $tbl,$tbl,#2
- mov pc,lr @ return
+ ldr pc,[sp],#4 @ pop and return
 .size _armv4_AES_encrypt,.-_armv4_AES_encrypt

 .global AES_set_encrypt_key
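The rescheduled _armv4_AES_encrypt loop still computes an ordinary table-driven AES round; the patch only reorders instructions so that loads and ALU operations can pair on a dual-issue pipeline such as Cortex A8. For reference, a hedged C sketch (not from the patch) of the round the assembly implements; Te0..Te3 name the usual 1KB encryption tables as in OpenSSL's aes_core.c, while the ARM code keeps a single table and synthesizes the other three with its ror#8/ror#16/ror#24 operands:

#include <stdint.h>

/* assumed to be the standard AES T-tables; extern because this is a sketch */
extern const uint32_t Te0[256], Te1[256], Te2[256], Te3[256];

/* one inner round over state words s[0..3] with round key rk[0..3] */
static void aes_enc_round(uint32_t s[4], const uint32_t rk[4])
{
    uint32_t t[4];
    int i;
    for (i = 0; i < 4; i++)
        t[i] = Te0[ s[i] >> 24               ] ^
               Te1[(s[(i + 1) & 3] >> 16) & 0xff] ^
               Te2[(s[(i + 2) & 3] >>  8) & 0xff] ^
               Te3[ s[(i + 3) & 3]        & 0xff] ^ rk[i];
    for (i = 0; i < 4; i++)
        s[i] = t[i];
}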
@@ -399,31 +403,31 @@ AES_set_encrypt_key:
 ldrb $t2,[$rounds,#1]
 ldrb $t3,[$rounds,#0]
 orr $s0,$s0,$t1,lsl#8
- orr $s0,$s0,$t2,lsl#16
- orr $s0,$s0,$t3,lsl#24
 ldrb $s1,[$rounds,#7]
+ orr $s0,$s0,$t2,lsl#16
 ldrb $t1,[$rounds,#6]
+ orr $s0,$s0,$t3,lsl#24
 ldrb $t2,[$rounds,#5]
 ldrb $t3,[$rounds,#4]
 orr $s1,$s1,$t1,lsl#8
- orr $s1,$s1,$t2,lsl#16
- orr $s1,$s1,$t3,lsl#24
 ldrb $s2,[$rounds,#11]
+ orr $s1,$s1,$t2,lsl#16
 ldrb $t1,[$rounds,#10]
+ orr $s1,$s1,$t3,lsl#24
 ldrb $t2,[$rounds,#9]
 ldrb $t3,[$rounds,#8]
 orr $s2,$s2,$t1,lsl#8
- orr $s2,$s2,$t2,lsl#16
- orr $s2,$s2,$t3,lsl#24
 ldrb $s3,[$rounds,#15]
+ orr $s2,$s2,$t2,lsl#16
 ldrb $t1,[$rounds,#14]
+ orr $s2,$s2,$t3,lsl#24
 ldrb $t2,[$rounds,#13]
 ldrb $t3,[$rounds,#12]
 orr $s3,$s3,$t1,lsl#8
- orr $s3,$s3,$t2,lsl#16
- orr $s3,$s3,$t3,lsl#24
 str $s0,[$key],#16
+ orr $s3,$s3,$t2,lsl#16
 str $s1,[$key,#-12]
+ orr $s3,$s3,$t3,lsl#24
 str $s2,[$key,#-8]
 str $s3,[$key,#-4]
@@ -437,27 +441,26 @@ AES_set_encrypt_key:
 .L128_loop:
 and $t2,lr,$s3,lsr#24
 and $i1,lr,$s3,lsr#16
- and $i2,lr,$s3,lsr#8
- and $i3,lr,$s3
 ldrb $t2,[$tbl,$t2]
+ and $i2,lr,$s3,lsr#8
 ldrb $i1,[$tbl,$i1]
+ and $i3,lr,$s3
 ldrb $i2,[$tbl,$i2]
- ldrb $i3,[$tbl,$i3]
- ldr $t1,[$t3],#4 @ rcon[i++]
 orr $t2,$t2,$i1,lsl#24
+ ldrb $i3,[$tbl,$i3]
 orr $t2,$t2,$i2,lsl#16
+ ldr $t1,[$t3],#4 @ rcon[i++]
 orr $t2,$t2,$i3,lsl#8
 eor $t2,$t2,$t1
 eor $s0,$s0,$t2 @ rk[4]=rk[0]^...
 eor $s1,$s1,$s0 @ rk[5]=rk[1]^rk[4]
- eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
- eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
 str $s0,[$key],#16
+ eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
 str $s1,[$key,#-12]
+ eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
 str $s2,[$key,#-8]
- str $s3,[$key,#-4]
 subs $rounds,$rounds,#1
+ str $s3,[$key,#-4]
 bne .L128_loop

 sub r2,$key,#176
 b .Ldone
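The .L128_loop hunk is one AES-128 key-expansion step; again only the schedule changes. A hedged C equivalent of a single iteration (not part of the patch; Sbox stands for the byte table the code indexes through $tbl, and words are held MSB-first as the load code above arranges them):

#include <stdint.h>

extern const uint8_t Sbox[256];  /* assumed AES S-box table */

/* next[0..3] = the four new round-key words derived from prev[0..3] */
static void expand128_step(const uint32_t prev[4], uint32_t next[4],
                           uint32_t rcon)
{
    uint32_t t = prev[3];
    /* RotWord + SubWord, matching the four ldrb/orr lines above */
    t = ((uint32_t)Sbox[(t >> 16) & 0xff] << 24) |
        ((uint32_t)Sbox[(t >>  8) & 0xff] << 16) |
        ((uint32_t)Sbox[ t        & 0xff] <<  8) |
         (uint32_t)Sbox[(t >> 24) & 0xff];
    t ^= rcon;
    next[0] = prev[0] ^ t;        /* rk[4]=rk[0]^...      */
    next[1] = prev[1] ^ next[0];  /* rk[5]=rk[1]^rk[4]    */
    next[2] = prev[2] ^ next[1];  /* rk[6]=rk[2]^rk[5]    */
    next[3] = prev[3] ^ next[2];  /* rk[7]=rk[3]^rk[6]    */
}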
@@ -468,16 +471,16 @@ AES_set_encrypt_key:
 ldrb $t2,[$rounds,#17]
 ldrb $t3,[$rounds,#16]
 orr $i2,$i2,$t1,lsl#8
- orr $i2,$i2,$t2,lsl#16
- orr $i2,$i2,$t3,lsl#24
 ldrb $i3,[$rounds,#23]
+ orr $i2,$i2,$t2,lsl#16
 ldrb $t1,[$rounds,#22]
+ orr $i2,$i2,$t3,lsl#24
 ldrb $t2,[$rounds,#21]
 ldrb $t3,[$rounds,#20]
 orr $i3,$i3,$t1,lsl#8
 orr $i3,$i3,$t2,lsl#16
- orr $i3,$i3,$t3,lsl#24
 str $i2,[$key],#8
+ orr $i3,$i3,$t3,lsl#24
 str $i3,[$key,#-4]

 teq lr,#192
@@ -491,27 +494,26 @@ AES_set_encrypt_key:
 .L192_loop:
 and $t2,lr,$i3,lsr#24
 and $i1,lr,$i3,lsr#16
- and $i2,lr,$i3,lsr#8
- and $i3,lr,$i3
 ldrb $t2,[$tbl,$t2]
+ and $i2,lr,$i3,lsr#8
 ldrb $i1,[$tbl,$i1]
+ and $i3,lr,$i3
 ldrb $i2,[$tbl,$i2]
- ldrb $i3,[$tbl,$i3]
- ldr $t1,[$t3],#4 @ rcon[i++]
 orr $t2,$t2,$i1,lsl#24
+ ldrb $i3,[$tbl,$i3]
 orr $t2,$t2,$i2,lsl#16
+ ldr $t1,[$t3],#4 @ rcon[i++]
 orr $t2,$t2,$i3,lsl#8
 eor $i3,$t2,$t1
 eor $s0,$s0,$i3 @ rk[6]=rk[0]^...
 eor $s1,$s1,$s0 @ rk[7]=rk[1]^rk[6]
- eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
- eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
 str $s0,[$key],#24
+ eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
 str $s1,[$key,#-20]
+ eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
 str $s2,[$key,#-16]
- str $s3,[$key,#-12]
 subs $rounds,$rounds,#1
+ str $s3,[$key,#-12]
 subeq r2,$key,#216
 beq .Ldone
@@ -529,16 +531,16 @@ AES_set_encrypt_key:
 ldrb $t2,[$rounds,#25]
 ldrb $t3,[$rounds,#24]
 orr $i2,$i2,$t1,lsl#8
- orr $i2,$i2,$t2,lsl#16
- orr $i2,$i2,$t3,lsl#24
 ldrb $i3,[$rounds,#31]
+ orr $i2,$i2,$t2,lsl#16
 ldrb $t1,[$rounds,#30]
+ orr $i2,$i2,$t3,lsl#24
 ldrb $t2,[$rounds,#29]
 ldrb $t3,[$rounds,#28]
 orr $i3,$i3,$t1,lsl#8
 orr $i3,$i3,$t2,lsl#16
- orr $i3,$i3,$t3,lsl#24
 str $i2,[$key],#8
+ orr $i3,$i3,$t3,lsl#24
 str $i3,[$key,#-4]

 mov $rounds,#14
@@ -550,52 +552,51 @@ AES_set_encrypt_key:
 .L256_loop:
 and $t2,lr,$i3,lsr#24
 and $i1,lr,$i3,lsr#16
- and $i2,lr,$i3,lsr#8
- and $i3,lr,$i3
 ldrb $t2,[$tbl,$t2]
+ and $i2,lr,$i3,lsr#8
 ldrb $i1,[$tbl,$i1]
+ and $i3,lr,$i3
 ldrb $i2,[$tbl,$i2]
- ldrb $i3,[$tbl,$i3]
- ldr $t1,[$t3],#4 @ rcon[i++]
 orr $t2,$t2,$i1,lsl#24
+ ldrb $i3,[$tbl,$i3]
 orr $t2,$t2,$i2,lsl#16
+ ldr $t1,[$t3],#4 @ rcon[i++]
 orr $t2,$t2,$i3,lsl#8
 eor $i3,$t2,$t1
 eor $s0,$s0,$i3 @ rk[8]=rk[0]^...
 eor $s1,$s1,$s0 @ rk[9]=rk[1]^rk[8]
- eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
- eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
 str $s0,[$key],#32
+ eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
 str $s1,[$key,#-28]
+ eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
 str $s2,[$key,#-24]
- str $s3,[$key,#-20]
 subs $rounds,$rounds,#1
+ str $s3,[$key,#-20]
 subeq r2,$key,#256
 beq .Ldone

 and $t2,lr,$s3
 and $i1,lr,$s3,lsr#8
- and $i2,lr,$s3,lsr#16
- and $i3,lr,$s3,lsr#24
 ldrb $t2,[$tbl,$t2]
+ and $i2,lr,$s3,lsr#16
 ldrb $i1,[$tbl,$i1]
+ and $i3,lr,$s3,lsr#24
 ldrb $i2,[$tbl,$i2]
- ldrb $i3,[$tbl,$i3]
 orr $t2,$t2,$i1,lsl#8
+ ldrb $i3,[$tbl,$i3]
 orr $t2,$t2,$i2,lsl#16
+ ldr $t1,[$key,#-48]
 orr $t2,$t2,$i3,lsl#24
- ldr $t1,[$key,#-48]
 ldr $i1,[$key,#-44]
 ldr $i2,[$key,#-40]
- ldr $i3,[$key,#-36]
 eor $t1,$t1,$t2 @ rk[12]=rk[4]^...
+ ldr $i3,[$key,#-36]
 eor $i1,$i1,$t1 @ rk[13]=rk[5]^rk[12]
- eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
- eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
 str $t1,[$key,#-16]
+ eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
 str $i1,[$key,#-12]
+ eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
 str $i2,[$key,#-8]
 str $i3,[$key,#-4]
 b .L256_loop
@@ -816,24 +817,24 @@ AES_decrypt:
 ldrb $t2,[$rounds,#1]
 ldrb $t3,[$rounds,#0]
 orr $s0,$s0,$t1,lsl#8
- orr $s0,$s0,$t2,lsl#16
- orr $s0,$s0,$t3,lsl#24
 ldrb $s1,[$rounds,#7]
+ orr $s0,$s0,$t2,lsl#16
 ldrb $t1,[$rounds,#6]
+ orr $s0,$s0,$t3,lsl#24
 ldrb $t2,[$rounds,#5]
 ldrb $t3,[$rounds,#4]
 orr $s1,$s1,$t1,lsl#8
- orr $s1,$s1,$t2,lsl#16
- orr $s1,$s1,$t3,lsl#24
 ldrb $s2,[$rounds,#11]
+ orr $s1,$s1,$t2,lsl#16
 ldrb $t1,[$rounds,#10]
+ orr $s1,$s1,$t3,lsl#24
 ldrb $t2,[$rounds,#9]
 ldrb $t3,[$rounds,#8]
 orr $s2,$s2,$t1,lsl#8
- orr $s2,$s2,$t2,lsl#16
- orr $s2,$s2,$t3,lsl#24
 ldrb $s3,[$rounds,#15]
+ orr $s2,$s2,$t2,lsl#16
 ldrb $t1,[$rounds,#14]
+ orr $s2,$s2,$t3,lsl#24
 ldrb $t2,[$rounds,#13]
 ldrb $t3,[$rounds,#12]
 orr $s3,$s3,$t1,lsl#8
@@ -848,24 +849,24 @@ AES_decrypt:
 mov $t3,$s0,lsr#8
 strb $t1,[$rounds,#0]
 strb $t2,[$rounds,#1]
- strb $t3,[$rounds,#2]
- strb $s0,[$rounds,#3]
 mov $t1,$s1,lsr#24
+ strb $t3,[$rounds,#2]
 mov $t2,$s1,lsr#16
+ strb $s0,[$rounds,#3]
 mov $t3,$s1,lsr#8
 strb $t1,[$rounds,#4]
 strb $t2,[$rounds,#5]
- strb $t3,[$rounds,#6]
- strb $s1,[$rounds,#7]
 mov $t1,$s2,lsr#24
+ strb $t3,[$rounds,#6]
 mov $t2,$s2,lsr#16
+ strb $s1,[$rounds,#7]
 mov $t3,$s2,lsr#8
 strb $t1,[$rounds,#8]
 strb $t2,[$rounds,#9]
- strb $t3,[$rounds,#10]
- strb $s2,[$rounds,#11]
 mov $t1,$s3,lsr#24
+ strb $t3,[$rounds,#10]
 mov $t2,$s3,lsr#16
+ strb $s2,[$rounds,#11]
 mov $t3,$s3,lsr#8
 strb $t1,[$rounds,#12]
 strb $t2,[$rounds,#13]
@@ -882,146 +883,143 @@ AES_decrypt:
 .align 2
 _armv4_AES_decrypt:
 str lr,[sp,#-4]! @ push lr
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- ldr $rounds,[$key,#240-16]
+ ldmia $key!,{$t1-$i1}
 eor $s0,$s0,$t1
+ ldr $rounds,[$key,#240-16]
 eor $s1,$s1,$t2
 eor $s2,$s2,$t3
 eor $s3,$s3,$i1
 sub $rounds,$rounds,#1
 mov lr,#255

-.Ldec_loop:
 and $i1,lr,$s0,lsr#16
 and $i2,lr,$s0,lsr#8
 and $i3,lr,$s0
 mov $s0,$s0,lsr#24
+.Ldec_loop:
 ldr $t1,[$tbl,$i1,lsl#2] @ Td1[s0>>16]
- ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
- ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
- ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
-
 and $i1,lr,$s1 @ i0
+ ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
 and $i2,lr,$s1,lsr#16
+ ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
 and $i3,lr,$s1,lsr#8
+ ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
 mov $s1,$s1,lsr#24
+
 ldr $i1,[$tbl,$i1,lsl#2] @ Td3[s1>>0]
- ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
 ldr $i2,[$tbl,$i2,lsl#2] @ Td1[s1>>16]
 ldr $i3,[$tbl,$i3,lsl#2] @ Td2[s1>>8]
 eor $s0,$s0,$i1,ror#24
- eor $s1,$s1,$t1,ror#8
- eor $t2,$i2,$t2,ror#8
- eor $t3,$i3,$t3,ror#8
-
+ ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
 and $i1,lr,$s2,lsr#8 @ i0
+ eor $t2,$i2,$t2,ror#8
 and $i2,lr,$s2 @ i1
+ eor $t3,$i3,$t3,ror#8
 and $i3,lr,$s2,lsr#16
- mov $s2,$s2,lsr#24
+ eor $s1,$s1,$t1,ror#8
 ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8]
+ mov $s2,$s2,lsr#24
+
 ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
- ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
 ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16]
 eor $s0,$s0,$i1,ror#16
- eor $s1,$s1,$i2,ror#24
- eor $s2,$s2,$t2,ror#8
- eor $t3,$i3,$t3,ror#8
-
+ ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
 and $i1,lr,$s3,lsr#16 @ i0
+ eor $s1,$s1,$i2,ror#24
 and $i2,lr,$s3,lsr#8 @ i1
+ eor $t3,$i3,$t3,ror#8
 and $i3,lr,$s3 @ i2
- mov $s3,$s3,lsr#24
+ eor $s2,$s2,$t2,ror#8
 ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16]
+ mov $s3,$s3,lsr#24
+
 ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
 ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0]
- ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
 eor $s0,$s0,$i1,ror#8
+ ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
 eor $s1,$s1,$i2,ror#16
 eor $s2,$s2,$i3,ror#24
+ ldr $i1,[$key],#16
 eor $s3,$s3,$t3,ror#8
-
- ldr $t1,[$key],#16
- ldr $t2,[$key,#-12]
- ldr $t3,[$key,#-8]
- ldr $i1,[$key,#-4]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ ldr $t1,[$key,#-12]
+ ldr $t2,[$key,#-8]
+ eor $s0,$s0,$i1
+ ldr $t3,[$key,#-4]
+ and $i1,lr,$s0,lsr#16
+ eor $s1,$s1,$t1
+ and $i2,lr,$s0,lsr#8
+ eor $s2,$s2,$t2
+ and $i3,lr,$s0
+ eor $s3,$s3,$t3
+ mov $s0,$s0,lsr#24

 subs $rounds,$rounds,#1
 bne .Ldec_loop

 add $tbl,$tbl,#1024

- ldr $t1,[$tbl,#0] @ prefetch Td4
- ldr $t2,[$tbl,#32]
- ldr $t3,[$tbl,#64]
- ldr $i1,[$tbl,#96]
- ldr $i2,[$tbl,#128]
- ldr $i3,[$tbl,#160]
- ldr $t1,[$tbl,#192]
- ldr $t2,[$tbl,#224]
+ ldr $t2,[$tbl,#0] @ prefetch Td4
+ ldr $t3,[$tbl,#32]
+ ldr $t1,[$tbl,#64]
+ ldr $t2,[$tbl,#96]
+ ldr $t3,[$tbl,#128]
+ ldr $t1,[$tbl,#160]
+ ldr $t2,[$tbl,#192]
+ ldr $t3,[$tbl,#224]

- and $i1,lr,$s0,lsr#16
- and $i2,lr,$s0,lsr#8
- and $i3,lr,$s0
- ldrb $s0,[$tbl,$s0,lsr#24] @ Td4[s0>>24]
+ ldrb $s0,[$tbl,$s0] @ Td4[s0>>24]
 ldrb $t1,[$tbl,$i1] @ Td4[s0>>16]
- ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
- ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
-
 and $i1,lr,$s1 @ i0
+ ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
 and $i2,lr,$s1,lsr#16
+ ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
 and $i3,lr,$s1,lsr#8
+
 ldrb $i1,[$tbl,$i1] @ Td4[s1>>0]
 ldrb $s1,[$tbl,$s1,lsr#24] @ Td4[s1>>24]
 ldrb $i2,[$tbl,$i2] @ Td4[s1>>16]
- ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
 eor $s0,$i1,$s0,lsl#24
+ ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
 eor $s1,$t1,$s1,lsl#8
- eor $t2,$t2,$i2,lsl#8
- eor $t3,$t3,$i3,lsl#8
-
 and $i1,lr,$s2,lsr#8 @ i0
+ eor $t2,$t2,$i2,lsl#8
 and $i2,lr,$s2 @ i1
- and $i3,lr,$s2,lsr#16
+ eor $t3,$t3,$i3,lsl#8
 ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
+ and $i3,lr,$s2,lsr#16
+
 ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
 ldrb $s2,[$tbl,$s2,lsr#24] @ Td4[s2>>24]
- ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
 eor $s0,$s0,$i1,lsl#8
+ ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
 eor $s1,$i2,$s1,lsl#16
- eor $s2,$t2,$s2,lsl#16
- eor $t3,$t3,$i3,lsl#16
-
 and $i1,lr,$s3,lsr#16 @ i0
+ eor $s2,$t2,$s2,lsl#16
 and $i2,lr,$s3,lsr#8 @ i1
- and $i3,lr,$s3 @ i2
+ eor $t3,$t3,$i3,lsl#16
 ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
+ and $i3,lr,$s3 @ i2
+
 ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
 ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
 ldrb $s3,[$tbl,$s3,lsr#24] @ Td4[s3>>24]
 eor $s0,$s0,$i1,lsl#16
+ ldr $i1,[$key,#0]
 eor $s1,$s1,$i2,lsl#8
+ ldr $t1,[$key,#4]
 eor $s2,$i3,$s2,lsl#8
+ ldr $t2,[$key,#8]
 eor $s3,$t3,$s3,lsl#24
+ ldr $t3,[$key,#12]

- ldr lr,[sp],#4 @ pop lr
- ldr $t1,[$key,#0]
- ldr $t2,[$key,#4]
- ldr $t3,[$key,#8]
- ldr $i1,[$key,#12]
- eor $s0,$s0,$t1
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
+ eor $s0,$s0,$i1
+ eor $s1,$s1,$t1
+ eor $s2,$s2,$t2
+ eor $s3,$s3,$t3

 sub $tbl,$tbl,#1024
- mov pc,lr @ return
+ ldr pc,[sp],#4 @ pop and return
 .size _armv4_AES_decrypt,.-_armv4_AES_decrypt
 .asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
 .align 2
@@ -1029,3 +1027,4 @@
 ___
 $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
 print $code;
+close STDOUT; # enforce flush
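The ldrb/orr sequences that the patch interleaves at the top of AES_encrypt, AES_decrypt and AES_set_encrypt_key implement an endian-neutral, byte-at-a-time big-endian load; the rescheduling changes only the instruction order, never the value computed. A C equivalent (illustration only):

#include <stdint.h>

/* assemble a 32-bit word MSB-first, one byte at a time, so the result
 * is identical on little- and big-endian ARM */
static uint32_t load_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}

The remaining file, aes-armv4.s, is the assembler output emitted by the perl script above (print $code), so its diff repeats the same changes with the symbolic $s0..$s3/$t1..$t3/$i1..$i3/$tbl/$key/$rounds names replaced by r0-r12.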
diff --git a/crypto/aes/asm/aes-armv4.s b/crypto/aes/asm/aes-armv4.s
index e340023..27c681c 100644
--- a/crypto/aes/asm/aes-armv4.s
+++ b/crypto/aes/asm/aes-armv4.s
@@ -124,24 +124,24 @@ AES_encrypt:
 ldrb r5,[r12,#1]
 ldrb r6,[r12,#0]
 orr r0,r0,r4,lsl#8
- orr r0,r0,r5,lsl#16
- orr r0,r0,r6,lsl#24
 ldrb r1,[r12,#7]
+ orr r0,r0,r5,lsl#16
 ldrb r4,[r12,#6]
+ orr r0,r0,r6,lsl#24
 ldrb r5,[r12,#5]
 ldrb r6,[r12,#4]
 orr r1,r1,r4,lsl#8
- orr r1,r1,r5,lsl#16
- orr r1,r1,r6,lsl#24
 ldrb r2,[r12,#11]
+ orr r1,r1,r5,lsl#16
 ldrb r4,[r12,#10]
+ orr r1,r1,r6,lsl#24
 ldrb r5,[r12,#9]
 ldrb r6,[r12,#8]
 orr r2,r2,r4,lsl#8
- orr r2,r2,r5,lsl#16
- orr r2,r2,r6,lsl#24
 ldrb r3,[r12,#15]
+ orr r2,r2,r5,lsl#16
 ldrb r4,[r12,#14]
+ orr r2,r2,r6,lsl#24
 ldrb r5,[r12,#13]
 ldrb r6,[r12,#12]
 orr r3,r3,r4,lsl#8
@@ -156,24 +156,24 @@ AES_encrypt:
 mov r6,r0,lsr#8
 strb r4,[r12,#0]
 strb r5,[r12,#1]
- strb r6,[r12,#2]
- strb r0,[r12,#3]
 mov r4,r1,lsr#24
+ strb r6,[r12,#2]
 mov r5,r1,lsr#16
+ strb r0,[r12,#3]
 mov r6,r1,lsr#8
 strb r4,[r12,#4]
 strb r5,[r12,#5]
- strb r6,[r12,#6]
- strb r1,[r12,#7]
 mov r4,r2,lsr#24
+ strb r6,[r12,#6]
 mov r5,r2,lsr#16
+ strb r1,[r12,#7]
 mov r6,r2,lsr#8
 strb r4,[r12,#8]
 strb r5,[r12,#9]
- strb r6,[r12,#10]
- strb r2,[r12,#11]
 mov r4,r3,lsr#24
+ strb r6,[r12,#10]
 mov r5,r3,lsr#16
+ strb r2,[r12,#11]
 mov r6,r3,lsr#8
 strb r4,[r12,#12]
 strb r5,[r12,#13]
@@ -190,141 +190,137 @@ AES_encrypt:
 .align 2
 _armv4_AES_encrypt:
 str lr,[sp,#-4]! @ push lr
- ldr r4,[r11],#16
- ldr r5,[r11,#-12]
- ldr r6,[r11,#-8]
- ldr r7,[r11,#-4]
- ldr r12,[r11,#240-16]
+ ldmia r11!,{r4-r7}
 eor r0,r0,r4
+ ldr r12,[r11,#240-16]
 eor r1,r1,r5
 eor r2,r2,r6
 eor r3,r3,r7
 sub r12,r12,#1
 mov lr,#255

-.Lenc_loop:
+ and r7,lr,r0
 and r8,lr,r0,lsr#8
 and r9,lr,r0,lsr#16
- and r7,lr,r0
 mov r0,r0,lsr#24
+.Lenc_loop:
 ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0]
- ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24]
- ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
- ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
-
 and r7,lr,r1,lsr#16 @ i0
+ ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
 and r8,lr,r1
+ ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
 and r9,lr,r1,lsr#8
+ ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24]
 mov r1,r1,lsr#24
+
 ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16]
- ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24]
 ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
 ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8]
 eor r0,r0,r7,ror#8
- eor r1,r1,r4,ror#24
- eor r5,r5,r8,ror#8
- eor r6,r6,r9,ror#8
-
+ ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24]
 and r7,lr,r2,lsr#8 @ i0
+ eor r5,r5,r8,ror#8
 and r8,lr,r2,lsr#16 @ i1
+ eor r6,r6,r9,ror#8
 and r9,lr,r2
- mov r2,r2,lsr#24
+ eor r1,r1,r4,ror#24
 ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8]
+ mov r2,r2,lsr#24
+
 ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16]
- ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24]
 ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0]
 eor r0,r0,r7,ror#16
- eor r1,r1,r8,ror#8
- eor r2,r2,r5,ror#16
- eor r6,r6,r9,ror#16
-
+ ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24]
 and r7,lr,r3 @ i0
+ eor r1,r1,r8,ror#8
 and r8,lr,r3,lsr#8 @ i1
+ eor r6,r6,r9,ror#16
 and r9,lr,r3,lsr#16 @ i2
- mov r3,r3,lsr#24
+ eor r2,r2,r5,ror#16
 ldr r7,[r10,r7,lsl#2] @ Te3[s3>>0]
+ mov r3,r3,lsr#24
+
 ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8]
 ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16]
- ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24]
 eor r0,r0,r7,ror#24
+ ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24]
 eor r1,r1,r8,ror#16
+ ldr r7,[r11],#16
 eor r2,r2,r9,ror#8
+ ldr r4,[r11,#-12]
 eor r3,r3,r6,ror#8
-
- ldr r4,[r11],#16
- ldr r5,[r11,#-12]
- ldr r6,[r11,#-8]
- ldr r7,[r11,#-4]
- eor r0,r0,r4
- eor r1,r1,r5
- eor r2,r2,r6
- eor r3,r3,r7
+ ldr r5,[r11,#-8]
+ eor r0,r0,r7
+ ldr r6,[r11,#-4]
+ and r7,lr,r0
+ eor r1,r1,r4
+ and r8,lr,r0,lsr#8
+ eor r2,r2,r5
+ and r9,lr,r0,lsr#16
+ eor r3,r3,r6
+ mov r0,r0,lsr#24

 subs r12,r12,#1
 bne .Lenc_loop

 add r10,r10,#2

- and r7,lr,r0
- and r8,lr,r0,lsr#8
- and r9,lr,r0,lsr#16
- mov r0,r0,lsr#24
 ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0]
- ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24]
- ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8]
- ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16]
-
 and r7,lr,r1,lsr#16 @ i0
+ ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8]
 and r8,lr,r1
+ ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16]
 and r9,lr,r1,lsr#8
+ ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24]
 mov r1,r1,lsr#24
+
 ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16]
- ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24]
 ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0]
 ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8]
 eor r0,r7,r0,lsl#8
- eor r1,r4,r1,lsl#24
- eor r5,r8,r5,lsl#8
- eor r6,r9,r6,lsl#8
-
+ ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24]
 and r7,lr,r2,lsr#8 @ i0
+ eor r5,r8,r5,lsl#8
 and r8,lr,r2,lsr#16 @ i1
+ eor r6,r9,r6,lsl#8
 and r9,lr,r2
- mov r2,r2,lsr#24
+ eor r1,r4,r1,lsl#24
 ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8]
+ mov r2,r2,lsr#24
+
 ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16]
- ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24]
 ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0]
 eor r0,r7,r0,lsl#8
- eor r1,r1,r8,lsl#16
- eor r2,r5,r2,lsl#24
- eor r6,r9,r6,lsl#8
-
+ ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24]
 and r7,lr,r3 @ i0
+ eor r1,r1,r8,lsl#16
 and r8,lr,r3,lsr#8 @ i1
+ eor r6,r9,r6,lsl#8
 and r9,lr,r3,lsr#16 @ i2
- mov r3,r3,lsr#24
+ eor r2,r5,r2,lsl#24
 ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0]
+ mov r3,r3,lsr#24
+
 ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8]
 ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16]
- ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24]
 eor r0,r7,r0,lsl#8
+ ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24]
+ ldr r7,[r11,#0]
 eor r1,r1,r8,lsl#8
+ ldr r4,[r11,#4]
 eor r2,r2,r9,lsl#16
+ ldr r5,[r11,#8]
 eor r3,r6,r3,lsl#24
+ ldr r6,[r11,#12]

- ldr lr,[sp],#4 @ pop lr
- ldr r4,[r11,#0]
- ldr r5,[r11,#4]
- ldr r6,[r11,#8]
- ldr r7,[r11,#12]
- eor r0,r0,r4
- eor r1,r1,r5
- eor r2,r2,r6
- eor r3,r3,r7
+ eor r0,r0,r7
+ eor r1,r1,r4
+ eor r2,r2,r5
+ eor r3,r3,r6

 sub r10,r10,#2
- mov pc,lr @ return
+ ldr pc,[sp],#4 @ pop and return
 .size _armv4_AES_encrypt,.-_armv4_AES_encrypt

 .global AES_set_encrypt_key
@@ -359,31 +355,31 @@ AES_set_encrypt_key:
 ldrb r5,[r12,#1]
 ldrb r6,[r12,#0]
 orr r0,r0,r4,lsl#8
- orr r0,r0,r5,lsl#16
- orr r0,r0,r6,lsl#24
 ldrb r1,[r12,#7]
+ orr r0,r0,r5,lsl#16
 ldrb r4,[r12,#6]
+ orr r0,r0,r6,lsl#24
 ldrb r5,[r12,#5]
 ldrb r6,[r12,#4]
 orr r1,r1,r4,lsl#8
- orr r1,r1,r5,lsl#16
- orr r1,r1,r6,lsl#24
 ldrb r2,[r12,#11]
+ orr r1,r1,r5,lsl#16
 ldrb r4,[r12,#10]
+ orr r1,r1,r6,lsl#24
 ldrb r5,[r12,#9]
 ldrb r6,[r12,#8]
 orr r2,r2,r4,lsl#8
- orr r2,r2,r5,lsl#16
- orr r2,r2,r6,lsl#24
 ldrb r3,[r12,#15]
+ orr r2,r2,r5,lsl#16
 ldrb r4,[r12,#14]
+ orr r2,r2,r6,lsl#24
 ldrb r5,[r12,#13]
 ldrb r6,[r12,#12]
 orr r3,r3,r4,lsl#8
- orr r3,r3,r5,lsl#16
- orr r3,r3,r6,lsl#24
 str r0,[r11],#16
+ orr r3,r3,r5,lsl#16
 str r1,[r11,#-12]
+ orr r3,r3,r6,lsl#24
 str r2,[r11,#-8]
 str r3,[r11,#-4]
@@ -397,27 +393,26 @@ AES_set_encrypt_key:
 .L128_loop:
 and r5,lr,r3,lsr#24
 and r7,lr,r3,lsr#16
- and r8,lr,r3,lsr#8
- and r9,lr,r3
 ldrb r5,[r10,r5]
+ and r8,lr,r3,lsr#8
 ldrb r7,[r10,r7]
+ and r9,lr,r3
 ldrb r8,[r10,r8]
- ldrb r9,[r10,r9]
- ldr r4,[r6],#4 @ rcon[i++]
 orr r5,r5,r7,lsl#24
+ ldrb r9,[r10,r9]
 orr r5,r5,r8,lsl#16
+ ldr r4,[r6],#4 @ rcon[i++]
 orr r5,r5,r9,lsl#8
 eor r5,r5,r4
 eor r0,r0,r5 @ rk[4]=rk[0]^...
 eor r1,r1,r0 @ rk[5]=rk[1]^rk[4]
- eor r2,r2,r1 @ rk[6]=rk[2]^rk[5]
- eor r3,r3,r2 @ rk[7]=rk[3]^rk[6]
 str r0,[r11],#16
+ eor r2,r2,r1 @ rk[6]=rk[2]^rk[5]
 str r1,[r11,#-12]
+ eor r3,r3,r2 @ rk[7]=rk[3]^rk[6]
 str r2,[r11,#-8]
- str r3,[r11,#-4]
 subs r12,r12,#1
+ str r3,[r11,#-4]
 bne .L128_loop

 sub r2,r11,#176
 b .Ldone
@@ -428,16 +423,16 @@ AES_set_encrypt_key:
 ldrb r5,[r12,#17]
 ldrb r6,[r12,#16]
 orr r8,r8,r4,lsl#8
- orr r8,r8,r5,lsl#16
- orr r8,r8,r6,lsl#24
 ldrb r9,[r12,#23]
+ orr r8,r8,r5,lsl#16
 ldrb r4,[r12,#22]
+ orr r8,r8,r6,lsl#24
 ldrb r5,[r12,#21]
 ldrb r6,[r12,#20]
 orr r9,r9,r4,lsl#8
 orr r9,r9,r5,lsl#16
- orr r9,r9,r6,lsl#24
 str r8,[r11],#8
+ orr r9,r9,r6,lsl#24
 str r9,[r11,#-4]

 teq lr,#192
@@ -451,27 +446,26 @@ AES_set_encrypt_key:
 .L192_loop:
 and r5,lr,r9,lsr#24
 and r7,lr,r9,lsr#16
- and r8,lr,r9,lsr#8
- and r9,lr,r9
 ldrb r5,[r10,r5]
+ and r8,lr,r9,lsr#8
 ldrb r7,[r10,r7]
+ and r9,lr,r9
 ldrb r8,[r10,r8]
- ldrb r9,[r10,r9]
- ldr r4,[r6],#4 @ rcon[i++]
 orr r5,r5,r7,lsl#24
+ ldrb r9,[r10,r9]
 orr r5,r5,r8,lsl#16
+ ldr r4,[r6],#4 @ rcon[i++]
 orr r5,r5,r9,lsl#8
 eor r9,r5,r4
 eor r0,r0,r9 @ rk[6]=rk[0]^...
 eor r1,r1,r0 @ rk[7]=rk[1]^rk[6]
- eor r2,r2,r1 @ rk[8]=rk[2]^rk[7]
- eor r3,r3,r2 @ rk[9]=rk[3]^rk[8]
 str r0,[r11],#24
+ eor r2,r2,r1 @ rk[8]=rk[2]^rk[7]
 str r1,[r11,#-20]
+ eor r3,r3,r2 @ rk[9]=rk[3]^rk[8]
 str r2,[r11,#-16]
- str r3,[r11,#-12]
 subs r12,r12,#1
+ str r3,[r11,#-12]
 subeq r2,r11,#216
 beq .Ldone
@@ -489,16 +483,16 @@ AES_set_encrypt_key:
 ldrb r5,[r12,#25]
 ldrb r6,[r12,#24]
 orr r8,r8,r4,lsl#8
- orr r8,r8,r5,lsl#16
- orr r8,r8,r6,lsl#24
 ldrb r9,[r12,#31]
+ orr r8,r8,r5,lsl#16
 ldrb r4,[r12,#30]
+ orr r8,r8,r6,lsl#24
 ldrb r5,[r12,#29]
 ldrb r6,[r12,#28]
 orr r9,r9,r4,lsl#8
 orr r9,r9,r5,lsl#16
- orr r9,r9,r6,lsl#24
 str r8,[r11],#8
+ orr r9,r9,r6,lsl#24
 str r9,[r11,#-4]

 mov r12,#14
@@ -510,52 +504,51 @@ AES_set_encrypt_key:
 .L256_loop:
 and r5,lr,r9,lsr#24
 and r7,lr,r9,lsr#16
- and r8,lr,r9,lsr#8
- and r9,lr,r9
 ldrb r5,[r10,r5]
+ and r8,lr,r9,lsr#8
 ldrb r7,[r10,r7]
+ and r9,lr,r9
 ldrb r8,[r10,r8]
- ldrb r9,[r10,r9]
- ldr r4,[r6],#4 @ rcon[i++]
 orr r5,r5,r7,lsl#24
+ ldrb r9,[r10,r9]
 orr r5,r5,r8,lsl#16
+ ldr r4,[r6],#4 @ rcon[i++]
 orr r5,r5,r9,lsl#8
 eor r9,r5,r4
 eor r0,r0,r9 @ rk[8]=rk[0]^...
 eor r1,r1,r0 @ rk[9]=rk[1]^rk[8]
- eor r2,r2,r1 @ rk[10]=rk[2]^rk[9]
- eor r3,r3,r2 @ rk[11]=rk[3]^rk[10]
 str r0,[r11],#32
+ eor r2,r2,r1 @ rk[10]=rk[2]^rk[9]
 str r1,[r11,#-28]
+ eor r3,r3,r2 @ rk[11]=rk[3]^rk[10]
 str r2,[r11,#-24]
- str r3,[r11,#-20]
 subs r12,r12,#1
+ str r3,[r11,#-20]
 subeq r2,r11,#256
 beq .Ldone

 and r5,lr,r3
 and r7,lr,r3,lsr#8
- and r8,lr,r3,lsr#16
- and r9,lr,r3,lsr#24
 ldrb r5,[r10,r5]
+ and r8,lr,r3,lsr#16
 ldrb r7,[r10,r7]
+ and r9,lr,r3,lsr#24
 ldrb r8,[r10,r8]
- ldrb r9,[r10,r9]
 orr r5,r5,r7,lsl#8
+ ldrb r9,[r10,r9]
 orr r5,r5,r8,lsl#16
+ ldr r4,[r11,#-48]
 orr r5,r5,r9,lsl#24
- ldr r4,[r11,#-48]
 ldr r7,[r11,#-44]
 ldr r8,[r11,#-40]
- ldr r9,[r11,#-36]
 eor r4,r4,r5 @ rk[12]=rk[4]^...
+ ldr r9,[r11,#-36]
 eor r7,r7,r4 @ rk[13]=rk[5]^rk[12]
- eor r8,r8,r7 @ rk[14]=rk[6]^rk[13]
- eor r9,r9,r8 @ rk[15]=rk[7]^rk[14]
 str r4,[r11,#-16]
+ eor r8,r8,r7 @ rk[14]=rk[6]^rk[13]
 str r7,[r11,#-12]
+ eor r9,r9,r8 @ rk[15]=rk[7]^rk[14]
 str r8,[r11,#-8]
 str r9,[r11,#-4]
 b .L256_loop
@@ -771,24 +764,24 @@ AES_decrypt:
 ldrb r5,[r12,#1]
 ldrb r6,[r12,#0]
 orr r0,r0,r4,lsl#8
- orr r0,r0,r5,lsl#16
- orr r0,r0,r6,lsl#24
 ldrb r1,[r12,#7]
+ orr r0,r0,r5,lsl#16
 ldrb r4,[r12,#6]
+ orr r0,r0,r6,lsl#24
 ldrb r5,[r12,#5]
 ldrb r6,[r12,#4]
 orr r1,r1,r4,lsl#8
- orr r1,r1,r5,lsl#16
- orr r1,r1,r6,lsl#24
 ldrb r2,[r12,#11]
+ orr r1,r1,r5,lsl#16
 ldrb r4,[r12,#10]
+ orr r1,r1,r6,lsl#24
 ldrb r5,[r12,#9]
 ldrb r6,[r12,#8]
 orr r2,r2,r4,lsl#8
- orr r2,r2,r5,lsl#16
- orr r2,r2,r6,lsl#24
 ldrb r3,[r12,#15]
+ orr r2,r2,r5,lsl#16
 ldrb r4,[r12,#14]
+ orr r2,r2,r6,lsl#24
 ldrb r5,[r12,#13]
 ldrb r6,[r12,#12]
 orr r3,r3,r4,lsl#8
@@ -803,24 +796,24 @@ AES_decrypt:
 mov r6,r0,lsr#8
 strb r4,[r12,#0]
 strb r5,[r12,#1]
- strb r6,[r12,#2]
- strb r0,[r12,#3]
 mov r4,r1,lsr#24
+ strb r6,[r12,#2]
 mov r5,r1,lsr#16
+ strb r0,[r12,#3]
 mov r6,r1,lsr#8
 strb r4,[r12,#4]
 strb r5,[r12,#5]
- strb r6,[r12,#6]
- strb r1,[r12,#7]
 mov r4,r2,lsr#24
+ strb r6,[r12,#6]
 mov r5,r2,lsr#16
+ strb r1,[r12,#7]
 mov r6,r2,lsr#8
 strb r4,[r12,#8]
 strb r5,[r12,#9]
- strb r6,[r12,#10]
- strb r2,[r12,#11]
 mov r4,r3,lsr#24
+ strb r6,[r12,#10]
 mov r5,r3,lsr#16
+ strb r2,[r12,#11]
 mov r6,r3,lsr#8
 strb r4,[r12,#12]
 strb r5,[r12,#13]
@@ -837,146 +830,143 @@ AES_decrypt:
 .align 2
 _armv4_AES_decrypt:
 str lr,[sp,#-4]! @ push lr
- ldr r4,[r11],#16
- ldr r5,[r11,#-12]
- ldr r6,[r11,#-8]
- ldr r7,[r11,#-4]
- ldr r12,[r11,#240-16]
+ ldmia r11!,{r4-r7}
 eor r0,r0,r4
+ ldr r12,[r11,#240-16]
 eor r1,r1,r5
 eor r2,r2,r6
 eor r3,r3,r7
 sub r12,r12,#1
 mov lr,#255

-.Ldec_loop:
 and r7,lr,r0,lsr#16
 and r8,lr,r0,lsr#8
 and r9,lr,r0
 mov r0,r0,lsr#24
+.Ldec_loop:
 ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16]
- ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24]
- ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8]
- ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0]
-
 and r7,lr,r1 @ i0
+ ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8]
 and r8,lr,r1,lsr#16
+ ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0]
 and r9,lr,r1,lsr#8
+ ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24]
 mov r1,r1,lsr#24
+
 ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0]
- ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24]
 ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16]
 ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8]
 eor r0,r0,r7,ror#24
- eor r1,r1,r4,ror#8
- eor r5,r8,r5,ror#8
- eor r6,r9,r6,ror#8
-
+ ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24]
 and r7,lr,r2,lsr#8 @ i0
+ eor r5,r8,r5,ror#8
 and r8,lr,r2 @ i1
+ eor r6,r9,r6,ror#8
 and r9,lr,r2,lsr#16
- mov r2,r2,lsr#24
+ eor r1,r1,r4,ror#8
 ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8]
+ mov r2,r2,lsr#24
+
 ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0]
- ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24]
 ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16]
 eor r0,r0,r7,ror#16
- eor r1,r1,r8,ror#24
- eor r2,r2,r5,ror#8
- eor r6,r9,r6,ror#8
-
+ ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24]
 and r7,lr,r3,lsr#16 @ i0
+ eor r1,r1,r8,ror#24
 and r8,lr,r3,lsr#8 @ i1
+ eor r6,r9,r6,ror#8
 and r9,lr,r3 @ i2
- mov r3,r3,lsr#24
+ eor r2,r2,r5,ror#8
 ldr r7,[r10,r7,lsl#2] @ Td1[s3>>16]
+ mov r3,r3,lsr#24
+
 ldr r8,[r10,r8,lsl#2] @ Td2[s3>>8]
 ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0]
- ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24]
 eor r0,r0,r7,ror#8
+ ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24]
 eor r1,r1,r8,ror#16
 eor r2,r2,r9,ror#24
+ ldr r7,[r11],#16
 eor r3,r3,r6,ror#8
-
- ldr r4,[r11],#16
- ldr r5,[r11,#-12]
- ldr r6,[r11,#-8]
- ldr r7,[r11,#-4]
- eor r0,r0,r4
- eor r1,r1,r5
- eor r2,r2,r6
- eor r3,r3,r7
+ ldr r4,[r11,#-12]
+ ldr r5,[r11,#-8]
+ eor r0,r0,r7
+ ldr r6,[r11,#-4]
+ and r7,lr,r0,lsr#16
+ eor r1,r1,r4
+ and r8,lr,r0,lsr#8
+ eor r2,r2,r5
+ and r9,lr,r0
+ eor r3,r3,r6
+ mov r0,r0,lsr#24

 subs r12,r12,#1
 bne .Ldec_loop

 add r10,r10,#1024

- ldr r4,[r10,#0] @ prefetch Td4
- ldr r5,[r10,#32]
- ldr r6,[r10,#64]
- ldr r7,[r10,#96]
- ldr r8,[r10,#128]
- ldr r9,[r10,#160]
- ldr r4,[r10,#192]
- ldr r5,[r10,#224]
+ ldr r5,[r10,#0] @ prefetch Td4
+ ldr r6,[r10,#32]
+ ldr r4,[r10,#64]
+ ldr r5,[r10,#96]
+ ldr r6,[r10,#128]
+ ldr r4,[r10,#160]
+ ldr r5,[r10,#192]
+ ldr r6,[r10,#224]

- and r7,lr,r0,lsr#16
- and r8,lr,r0,lsr#8
- and r9,lr,r0
- ldrb r0,[r10,r0,lsr#24] @ Td4[s0>>24]
+ ldrb r0,[r10,r0] @ Td4[s0>>24]
 ldrb r4,[r10,r7] @ Td4[s0>>16]
- ldrb r5,[r10,r8] @ Td4[s0>>8]
- ldrb r6,[r10,r9] @ Td4[s0>>0]
-
 and r7,lr,r1 @ i0
+ ldrb r5,[r10,r8] @ Td4[s0>>8]
 and r8,lr,r1,lsr#16
+ ldrb r6,[r10,r9] @ Td4[s0>>0]
 and r9,lr,r1,lsr#8
+
 ldrb r7,[r10,r7] @ Td4[s1>>0]
 ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24]
 ldrb r8,[r10,r8] @ Td4[s1>>16]
- ldrb r9,[r10,r9] @ Td4[s1>>8]
 eor r0,r7,r0,lsl#24
+ ldrb r9,[r10,r9] @ Td4[s1>>8]
 eor r1,r4,r1,lsl#8
- eor r5,r5,r8,lsl#8
- eor r6,r6,r9,lsl#8
-
 and r7,lr,r2,lsr#8 @ i0
+ eor r5,r5,r8,lsl#8
 and r8,lr,r2 @ i1
- and r9,lr,r2,lsr#16
+ eor r6,r6,r9,lsl#8
 ldrb r7,[r10,r7] @ Td4[s2>>8]
+ and r9,lr,r2,lsr#16
+
 ldrb r8,[r10,r8] @ Td4[s2>>0]
 ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24]
- ldrb r9,[r10,r9] @ Td4[s2>>16]
 eor r0,r0,r7,lsl#8
+ ldrb r9,[r10,r9] @ Td4[s2>>16]
 eor r1,r8,r1,lsl#16
- eor r2,r5,r2,lsl#16
- eor r6,r6,r9,lsl#16
-
 and r7,lr,r3,lsr#16 @ i0
+ eor r2,r5,r2,lsl#16
 and r8,lr,r3,lsr#8 @ i1
- and r9,lr,r3 @ i2
+ eor r6,r6,r9,lsl#16
 ldrb r7,[r10,r7] @ Td4[s3>>16]
+ and r9,lr,r3 @ i2
+
 ldrb r8,[r10,r8] @ Td4[s3>>8]
 ldrb r9,[r10,r9] @ Td4[s3>>0]
 ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24]
 eor r0,r0,r7,lsl#16
+ ldr r7,[r11,#0]
 eor r1,r1,r8,lsl#8
+ ldr r4,[r11,#4]
 eor r2,r9,r2,lsl#8
+ ldr r5,[r11,#8]
 eor r3,r6,r3,lsl#24
+ ldr r6,[r11,#12]

- ldr lr,[sp],#4 @ pop lr
- ldr r4,[r11,#0]
- ldr r5,[r11,#4]
- ldr r6,[r11,#8]
- ldr r7,[r11,#12]
- eor r0,r0,r4
- eor r1,r1,r5
- eor r2,r2,r6
- eor r3,r3,r7
+ eor r0,r0,r7
+ eor r1,r1,r4
+ eor r2,r2,r5
+ eor r3,r3,r6

 sub r10,r10,#1024
- mov pc,lr @ return
+ ldr pc,[sp],#4 @ pop and return
 .size _armv4_AES_decrypt,.-_armv4_AES_decrypt
 .asciz "AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align 2
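Finally, a short usage sketch for the RFC 3394 key-wrap API patched in aes_wrap.c above (illustration only, not part of the commit). A NULL iv selects the default integrity value A6A6A6A6A6A6A6A6; wrapping uses the encrypt schedule and unwrapping the decrypt schedule, and the fixed code path only changes behavior once more than 255 eight-byte blocks (2040 bytes) are wrapped, because it sits behind if (t > 0xff):

#include <string.h>
#include <openssl/aes.h>

int main(void)
{
    static const unsigned char kek[16] = {0};   /* sample 128-bit KEK */
    unsigned char in[16] = "0123456789abcde";   /* 16 bytes of key data */
    unsigned char wrapped[24], out[16];         /* output = input + 8 */
    AES_KEY wctx, uctx;

    AES_set_encrypt_key(kek, 128, &wctx);
    if (AES_wrap_key(&wctx, NULL, wrapped, in, sizeof(in)) <= 0)
        return 1;                               /* wrap failed */
    AES_set_decrypt_key(kek, 128, &uctx);
    if (AES_unwrap_key(&uctx, NULL, out, wrapped, sizeof(wrapped)) <= 0)
        return 1;                               /* integrity check failed */
    return memcmp(in, out, sizeof(in)) != 0;    /* 0 on round-trip success */
}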