author     David Benjamin <davidben@google.com>    2016-10-10 12:01:24 -0400
committer  Matt Caswell <matt@openssl.org>         2016-10-10 23:36:21 +0100
commit     609b0852e4d50251857dbbac3141ba042e35a9ae (patch)
tree       ee559ebc14734fdf2a273f845cb98d8d8f93eb7d /crypto
parent     11542af65a82242b47e97506695fa0d306d24fb6 (diff)
Remove trailing whitespace from some files.
The prevailing style seems to not have trailing whitespace, but a few
lines do. This is mostly in the perlasm files, but a few C files got
them after the reformat. This is the result of:

  find . -name '*.pl' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
  find . -name '*.c' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
  find . -name '*.h' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'

Then bn_prime.h was excluded since this is a generated file.

Note mkerr.pl has some changes in a heredoc for some help output, but
other lines there lack trailing whitespace too.

Reviewed-by: Kurt Roeckx <kurt@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
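[Editor's note: the commit itself used the BSD find/sed pipeline quoted above. Purely as an illustration, a roughly equivalent, self-contained Perl pass might look like the sketch below; the file selection and the bn_prime.h exclusion are taken from the message, everything else is an assumption, not the tooling actually used.]

  #!/usr/bin/env perl
  # Sketch: strip trailing spaces/tabs from *.pl, *.c and *.h files,
  # skipping the generated bn_prime.h (mirrors the commit message above).
  use strict;
  use warnings;
  use File::Find;

  find(sub {
      return unless -f $_ && /\.(?:pl|c|h)$/;
      return if $_ eq 'bn_prime.h';     # generated file, excluded in the commit
      local @ARGV = ($_);
      local $^I   = '';                 # edit in place, no backup file
      while (<>) {
          s/[ \t]+$//;                  # drop trailing whitespace
          print;
      }
  }, '.');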
Diffstat (limited to 'crypto')
-rwxr-xr-x  crypto/aes/asm/aes-586.pl | 12
-rw-r--r--  crypto/aes/asm/aes-ppc.pl | 8
-rw-r--r--  crypto/aes/asm/aes-s390x.pl | 16
-rwxr-xr-x  crypto/aes/asm/aes-x86_64.pl | 4
-rw-r--r--  crypto/aes/asm/aesni-mb-x86_64.pl | 28
-rw-r--r--  crypto/aes/asm/aesni-sha1-x86_64.pl | 2
-rw-r--r--  crypto/aes/asm/aesni-sha256-x86_64.pl | 2
-rw-r--r--  crypto/aes/asm/aesni-x86.pl | 2
-rw-r--r--  crypto/aes/asm/aesni-x86_64.pl | 10
-rwxr-xr-x  crypto/aes/asm/aesp8-ppc.pl | 2
-rwxr-xr-x  crypto/aes/asm/aesv8-armx.pl | 6
-rw-r--r--  crypto/aes/asm/bsaes-armv7.pl | 2
-rw-r--r--  crypto/aes/asm/bsaes-x86_64.pl | 4
-rwxr-xr-x  crypto/aes/asm/vpaes-armv8.pl | 8
-rw-r--r--  crypto/aes/asm/vpaes-ppc.pl | 8
-rw-r--r--  crypto/aes/asm/vpaes-x86.pl | 8
-rw-r--r--  crypto/aes/asm/vpaes-x86_64.pl | 20
-rw-r--r--  crypto/bn/asm/armv4-gf2m.pl | 2
-rw-r--r--  crypto/bn/asm/armv4-mont.pl | 2
-rw-r--r--  crypto/bn/asm/bn-586.pl | 24
-rw-r--r--  crypto/bn/asm/co-586.pl | 12
-rw-r--r--  crypto/bn/asm/ia64-mont.pl | 4
-rw-r--r--  crypto/bn/asm/mips.pl | 6
-rw-r--r--  crypto/bn/asm/parisc-mont.pl | 4
-rw-r--r--  crypto/bn/asm/ppc-mont.pl | 6
-rw-r--r--  crypto/bn/asm/ppc.pl | 264
-rwxr-xr-x  crypto/bn/asm/rsaz-avx2.pl | 8
-rwxr-xr-x  crypto/bn/asm/rsaz-x86_64.pl | 36
-rw-r--r--  crypto/bn/asm/s390x-gf2m.pl | 2
-rw-r--r--  crypto/bn/asm/via-mont.pl | 2
-rwxr-xr-x  crypto/bn/asm/x86-mont.pl | 2
-rwxr-xr-x  crypto/bn/asm/x86_64-mont5.pl | 10
-rw-r--r--  crypto/camellia/asm/cmll-x86.pl | 6
-rw-r--r--  crypto/cast/asm/cast-586.pl | 6
-rwxr-xr-x  crypto/chacha/asm/chacha-armv4.pl | 4
-rwxr-xr-x  crypto/chacha/asm/chacha-armv8.pl | 4
-rwxr-xr-x  crypto/chacha/asm/chacha-ppc.pl | 4
-rw-r--r--  crypto/des/asm/crypt586.pl | 4
-rw-r--r--  crypto/des/asm/des-586.pl | 6
-rw-r--r--  crypto/des/asm/desboth.pl | 2
-rw-r--r--  crypto/ec/asm/ecp_nistz256-armv8.pl | 2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-sparcv9.pl | 2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86.pl | 2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86_64.pl | 16
-rw-r--r--  crypto/md5/asm/md5-586.pl | 2
-rw-r--r--  crypto/md5/asm/md5-sparcv9.pl | 2
-rw-r--r--  crypto/mips_arch.h | 2
-rw-r--r--  crypto/modes/asm/ghash-armv4.pl | 4
-rw-r--r--  crypto/modes/asm/ghash-s390x.pl | 2
-rw-r--r--  crypto/modes/asm/ghash-x86.pl | 6
-rw-r--r--  crypto/modes/asm/ghash-x86_64.pl | 8
-rw-r--r--  crypto/perlasm/cbc.pl | 10
-rwxr-xr-x  crypto/perlasm/ppc-xlate.pl | 2
-rw-r--r--  crypto/perlasm/sparcv9_modes.pl | 16
-rwxr-xr-x  crypto/perlasm/x86_64-xlate.pl | 12
-rw-r--r--  crypto/perlasm/x86nasm.pl | 2
-rw-r--r--  crypto/rc4/asm/rc4-c64xplus.pl | 2
-rw-r--r--  crypto/rc4/asm/rc4-md5-x86_64.pl | 4
-rw-r--r--  crypto/rc4/asm/rc4-parisc.pl | 4
-rwxr-xr-x  crypto/rc4/asm/rc4-x86_64.pl | 2
-rw-r--r--  crypto/ripemd/asm/rmd-586.pl | 12
-rw-r--r--  crypto/sha/asm/sha1-586.pl | 2
-rw-r--r--  crypto/sha/asm/sha1-mb-x86_64.pl | 4
-rw-r--r--  crypto/sha/asm/sha1-sparcv9.pl | 2
-rw-r--r--  crypto/sha/asm/sha1-sparcv9a.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha1-x86_64.pl | 2
-rw-r--r--  crypto/sha/asm/sha256-586.pl | 6
-rw-r--r--  crypto/sha/asm/sha256-mb-x86_64.pl | 2
-rw-r--r--  crypto/sha/asm/sha512-586.pl | 2
-rw-r--r--  crypto/sha/asm/sha512-armv8.pl | 2
-rwxr-xr-x  crypto/sha/asm/sha512-parisc.pl | 4
-rw-r--r--  crypto/sha/asm/sha512-s390x.pl | 2
-rw-r--r--  crypto/sha/asm/sha512-sparcv9.pl | 4
-rwxr-xr-x  crypto/sha/asm/sha512-x86_64.pl | 2
-rw-r--r--  crypto/ts/ts_rsp_verify.c | 2
-rw-r--r--  crypto/whrlpool/asm/wp-mmx.pl | 4
-rw-r--r--  crypto/x509v3/v3_enum.c | 2
-rw-r--r--  crypto/x509v3/v3_skey.c | 2
-rw-r--r--  crypto/x86cpuid.pl | 2
79 files changed, 362 insertions(+), 362 deletions(-)
diff --git a/crypto/aes/asm/aes-586.pl b/crypto/aes/asm/aes-586.pl
index 1ba356508a..61bdce865c 100755
--- a/crypto/aes/asm/aes-586.pl
+++ b/crypto/aes/asm/aes-586.pl
@@ -123,7 +123,7 @@
# words every cache-line is *guaranteed* to be accessed within ~50
# cycles window. Why just SSE? Because it's needed on hyper-threading
# CPU! Which is also why it's prefetched with 64 byte stride. Best
-# part is that it has no negative effect on performance:-)
+# part is that it has no negative effect on performance:-)
#
# Version 4.3 implements switch between compact and non-compact block
# functions in AES_cbc_encrypt depending on how much data was asked
@@ -585,7 +585,7 @@ sub enctransform()
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | mm4 | mm0 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# | s3 | s2 | s1 | s0 |
+# | s3 | s2 | s1 | s0 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
@@ -805,7 +805,7 @@ sub encstep()
if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
- else { &mov ($tmp,$s[3]);
+ else { &mov ($tmp,$s[3]);
&shr ($tmp,24) }
&xor ($out,&DWP(1,$te,$tmp,8));
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
@@ -1558,7 +1558,7 @@ sub sse_deccompact()
&pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp4
&pshufw ("mm3","mm1",0xb1); &pshufw ("mm7","mm5",0xb1);
&pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp4
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16)
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16)
&pxor ("mm3","mm3"); &pxor ("mm7","mm7");
&pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5");
@@ -2028,7 +2028,7 @@ sub declast()
{
# stack frame layout
# -4(%esp) # return address 0(%esp)
-# 0(%esp) # s0 backing store 4(%esp)
+# 0(%esp) # s0 backing store 4(%esp)
# 4(%esp) # s1 backing store 8(%esp)
# 8(%esp) # s2 backing store 12(%esp)
# 12(%esp) # s3 backing store 16(%esp)
@@ -2738,7 +2738,7 @@ sub enckey()
&mov (&DWP(80,"edi"),10); # setup number of rounds
&xor ("eax","eax");
&jmp (&label("exit"));
-
+
&set_label("12rounds");
&mov ("eax",&DWP(0,"esi")); # copy first 6 dwords
&mov ("ebx",&DWP(4,"esi"));
diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl
index 1558d8e454..184c28a291 100644
--- a/crypto/aes/asm/aes-ppc.pl
+++ b/crypto/aes/asm/aes-ppc.pl
@@ -1433,10 +1433,10 @@ $code.=<<___;
xor $s1,$s1,$acc05
xor $s2,$s2,$acc06
xor $s3,$s3,$acc07
- xor $s0,$s0,$acc08 # ^= ROTATE(r8,8)
- xor $s1,$s1,$acc09
- xor $s2,$s2,$acc10
- xor $s3,$s3,$acc11
+ xor $s0,$s0,$acc08 # ^= ROTATE(r8,8)
+ xor $s1,$s1,$acc09
+ xor $s2,$s2,$acc10
+ xor $s3,$s3,$acc11
b Ldec_compact_loop
.align 4
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index a93d601913..9c17f0e5c2 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -404,7 +404,7 @@ _s390x_AES_encrypt:
or $s1,$t1
or $t2,$i2
or $t3,$i3
-
+
srlg $i1,$s2,`8-3` # i0
srlg $i2,$s2,`16-3` # i1
nr $i1,$mask
@@ -457,7 +457,7 @@ _s390x_AES_encrypt:
x $s2,24($key)
x $s3,28($key)
- br $ra
+ br $ra
.size _s390x_AES_encrypt,.-_s390x_AES_encrypt
___
@@ -779,7 +779,7 @@ _s390x_AES_decrypt:
x $s2,24($key)
x $s3,28($key)
- br $ra
+ br $ra
.size _s390x_AES_decrypt,.-_s390x_AES_decrypt
___
@@ -1297,7 +1297,7 @@ $code.=<<___;
.Lcbc_enc_done:
l${g} $ivp,6*$SIZE_T($sp)
st $s0,0($ivp)
- st $s1,4($ivp)
+ st $s1,4($ivp)
st $s2,8($ivp)
st $s3,12($ivp)
@@ -1635,7 +1635,7 @@ $code.=<<___ if(1);
llgc $len,2*$SIZE_T-1($sp)
nill $len,0x0f # $len%=16
br $ra
-
+
.align 16
.Lxts_km_vanilla:
___
@@ -1862,7 +1862,7 @@ $code.=<<___;
xgr $s1,%r1
lrvgr $s1,$s1 # flip byte order
lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
stg $s1,$tweak+0($sp) # save the tweak
llgfr $s1,$s1
srlg $s2,$s3,32
@@ -1913,7 +1913,7 @@ $code.=<<___;
xgr $s1,%r1
lrvgr $s1,$s1 # flip byte order
lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
stg $s1,$tweak+0($sp) # save the tweak
llgfr $s1,$s1
srlg $s2,$s3,32
@@ -2105,7 +2105,7 @@ $code.=<<___;
xgr $s1,%r1
lrvgr $s1,$s1 # flip byte order
lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
stg $s1,$tweak+0($sp) # save the tweak
llgfr $s1,$s1
srlg $s2,$s3,32
diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl
index ce4ca30b1a..ae7fde20fe 100755
--- a/crypto/aes/asm/aes-x86_64.pl
+++ b/crypto/aes/asm/aes-x86_64.pl
@@ -1298,7 +1298,7 @@ $code.=<<___;
AES_set_encrypt_key:
push %rbx
push %rbp
- push %r12 # redundant, but allows to share
+ push %r12 # redundant, but allows to share
push %r13 # exception handler...
push %r14
push %r15
@@ -1424,7 +1424,7 @@ $code.=<<___;
xor %rax,%rax
jmp .Lexit
-.L14rounds:
+.L14rounds:
mov 0(%rsi),%rax # copy first 8 dwords
mov 8(%rsi),%rbx
mov 16(%rsi),%rcx
diff --git a/crypto/aes/asm/aesni-mb-x86_64.pl b/crypto/aes/asm/aesni-mb-x86_64.pl
index aa2735e06a..fcef7c62fa 100644
--- a/crypto/aes/asm/aesni-mb-x86_64.pl
+++ b/crypto/aes/asm/aesni-mb-x86_64.pl
@@ -134,7 +134,7 @@ $code.=<<___ if ($win64);
movaps %xmm10,0x40(%rsp)
movaps %xmm11,0x50(%rsp)
movaps %xmm12,0x60(%rsp)
- movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
+ movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
movaps %xmm14,-0x58(%rax)
movaps %xmm15,-0x48(%rax)
___
@@ -308,9 +308,9 @@ $code.=<<___;
movups @out[0],-16(@outptr[0],$offset)
pxor @inp[0],@out[0]
- movups @out[1],-16(@outptr[1],$offset)
+ movups @out[1],-16(@outptr[1],$offset)
pxor @inp[1],@out[1]
- movups @out[2],-16(@outptr[2],$offset)
+ movups @out[2],-16(@outptr[2],$offset)
pxor @inp[2],@out[2]
movups @out[3],-16(@outptr[3],$offset)
pxor @inp[3],@out[3]
@@ -393,7 +393,7 @@ $code.=<<___ if ($win64);
movaps %xmm10,0x40(%rsp)
movaps %xmm11,0x50(%rsp)
movaps %xmm12,0x60(%rsp)
- movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
+ movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
movaps %xmm14,-0x58(%rax)
movaps %xmm15,-0x48(%rax)
___
@@ -563,10 +563,10 @@ $code.=<<___;
movups @out[0],-16(@outptr[0],$offset)
movdqu (@inptr[0],$offset),@out[0]
- movups @out[1],-16(@outptr[1],$offset)
+ movups @out[1],-16(@outptr[1],$offset)
movdqu (@inptr[1],$offset),@out[1]
pxor $zero,@out[0]
- movups @out[2],-16(@outptr[2],$offset)
+ movups @out[2],-16(@outptr[2],$offset)
movdqu (@inptr[2],$offset),@out[2]
pxor $zero,@out[1]
movups @out[3],-16(@outptr[3],$offset)
@@ -835,10 +835,10 @@ $code.=<<___;
vmovups @out[0],-16(@ptr[0]) # write output
sub $offset,@ptr[0] # switch to input
vpxor 0x00($offload),@out[0],@out[0]
- vmovups @out[1],-16(@ptr[1])
+ vmovups @out[1],-16(@ptr[1])
sub `64+1*8`(%rsp),@ptr[1]
vpxor 0x10($offload),@out[1],@out[1]
- vmovups @out[2],-16(@ptr[2])
+ vmovups @out[2],-16(@ptr[2])
sub `64+2*8`(%rsp),@ptr[2]
vpxor 0x20($offload),@out[2],@out[2]
vmovups @out[3],-16(@ptr[3])
@@ -847,10 +847,10 @@ $code.=<<___;
vmovups @out[4],-16(@ptr[4])
sub `64+4*8`(%rsp),@ptr[4]
vpxor @inp[0],@out[4],@out[4]
- vmovups @out[5],-16(@ptr[5])
+ vmovups @out[5],-16(@ptr[5])
sub `64+5*8`(%rsp),@ptr[5]
vpxor @inp[1],@out[5],@out[5]
- vmovups @out[6],-16(@ptr[6])
+ vmovups @out[6],-16(@ptr[6])
sub `64+6*8`(%rsp),@ptr[6]
vpxor @inp[2],@out[6],@out[6]
vmovups @out[7],-16(@ptr[7])
@@ -1128,12 +1128,12 @@ $code.=<<___;
sub $offset,@ptr[0] # switch to input
vmovdqu 128+0(%rsp),@out[0]
vpxor 0x70($offload),@out[7],@out[7]
- vmovups @out[1],-16(@ptr[1])
+ vmovups @out[1],-16(@ptr[1])
sub `64+1*8`(%rsp),@ptr[1]
vmovdqu @out[0],0x00($offload)
vpxor $zero,@out[0],@out[0]
vmovdqu 128+16(%rsp),@out[1]
- vmovups @out[2],-16(@ptr[2])
+ vmovups @out[2],-16(@ptr[2])
sub `64+2*8`(%rsp),@ptr[2]
vmovdqu @out[1],0x10($offload)
vpxor $zero,@out[1],@out[1]
@@ -1149,11 +1149,11 @@ $code.=<<___;
vpxor $zero,@out[3],@out[3]
vmovdqu @inp[0],0x40($offload)
vpxor @inp[0],$zero,@out[4]
- vmovups @out[5],-16(@ptr[5])
+ vmovups @out[5],-16(@ptr[5])
sub `64+5*8`(%rsp),@ptr[5]
vmovdqu @inp[1],0x50($offload)
vpxor @inp[1],$zero,@out[5]
- vmovups @out[6],-16(@ptr[6])
+ vmovups @out[6],-16(@ptr[6])
sub `64+6*8`(%rsp),@ptr[6]
vmovdqu @inp[2],0x60($offload)
vpxor @inp[2],$zero,@out[6]
diff --git a/crypto/aes/asm/aesni-sha1-x86_64.pl b/crypto/aes/asm/aesni-sha1-x86_64.pl
index 4b979a7346..a50795e263 100644
--- a/crypto/aes/asm/aesni-sha1-x86_64.pl
+++ b/crypto/aes/asm/aesni-sha1-x86_64.pl
@@ -793,7 +793,7 @@ sub body_00_19_dec () { # ((c^d)&b)^d
sub body_20_39_dec () { # b^d^c
# on entry @T[0]=b^d
return &body_40_59_dec() if ($rx==39);
-
+
my @r=@body_20_39;
unshift (@r,@aes256_dec[$rx]) if (@aes256_dec[$rx]);
diff --git a/crypto/aes/asm/aesni-sha256-x86_64.pl b/crypto/aes/asm/aesni-sha256-x86_64.pl
index a5fde2e4d1..ba4964a850 100644
--- a/crypto/aes/asm/aesni-sha256-x86_64.pl
+++ b/crypto/aes/asm/aesni-sha256-x86_64.pl
@@ -884,7 +884,7 @@ if ($avx>1) {{
######################################################################
# AVX2+BMI code path
#
-my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
+my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
my $PUSH8=8*2*$SZ;
use integer;
diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl
index ed1a47c30c..c34d9bf4af 100644
--- a/crypto/aes/asm/aesni-x86.pl
+++ b/crypto/aes/asm/aesni-x86.pl
@@ -1051,7 +1051,7 @@ if ($PREFIX eq "aesni") {
&set_label("ctr32_one_shortcut",16);
&movups ($inout0,&QWP(0,$rounds_)); # load ivec
&mov ($rounds,&DWP(240,$key));
-
+
&set_label("ctr32_one");
if ($inline)
{ &aesni_inline_generate1("enc"); }
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
index 25dd120dd2..397e82f8c7 100644
--- a/crypto/aes/asm/aesni-x86_64.pl
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -34,7 +34,7 @@
# ECB 4.25/4.25 1.38/1.38 1.28/1.28 1.26/1.26 1.26/1.26
# CTR 5.42/5.42 1.92/1.92 1.44/1.44 1.28/1.28 1.26/1.26
# CBC 4.38/4.43 4.15/1.43 4.07/1.32 4.07/1.29 4.06/1.28
-# CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07
+# CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07
# OFB 5.42/5.42 4.64/4.64 4.44/4.44 4.39/4.39 4.38/4.38
# CFB 5.73/5.85 5.56/5.62 5.48/5.56 5.47/5.55 5.47/5.55
#
@@ -118,7 +118,7 @@
# performance is achieved by interleaving instructions working on
# independent blocks. In which case asymptotic limit for such modes
# can be obtained by dividing above mentioned numbers by AES
-# instructions' interleave factor. Westmere can execute at most 3
+# instructions' interleave factor. Westmere can execute at most 3
# instructions at a time, meaning that optimal interleave factor is 3,
# and that's where the "magic" number of 1.25 come from. "Optimal
# interleave factor" means that increase of interleave factor does
@@ -312,7 +312,7 @@ ___
# on 2x subroutine on Atom Silvermont account. For processors that
# can schedule aes[enc|dec] every cycle optimal interleave factor
# equals to corresponding instructions latency. 8x is optimal for
-# * Bridge and "super-optimal" for other Intel CPUs...
+# * Bridge and "super-optimal" for other Intel CPUs...
sub aesni_generate2 {
my $dir=shift;
@@ -1271,7 +1271,7 @@ $code.=<<___;
lea 7($ctr),%r9
mov %r10d,0x60+12(%rsp)
bswap %r9d
- mov OPENSSL_ia32cap_P+4(%rip),%r10d
+ mov OPENSSL_ia32cap_P+4(%rip),%r10d
xor $key0,%r9d
and \$`1<<26|1<<22`,%r10d # isolate XSAVE+MOVBE
mov %r9d,0x70+12(%rsp)
@@ -1551,7 +1551,7 @@ $code.=<<___;
.Lctr32_tail:
# note that at this point $inout0..5 are populated with
- # counter values xor-ed with 0-round key
+ # counter values xor-ed with 0-round key
lea 16($key),$key
cmp \$4,$len
jb .Lctr32_loop3
diff --git a/crypto/a