author     David Benjamin <davidben@google.com>   2016-10-10 12:01:24 -0400
committer  Matt Caswell <matt@openssl.org>        2016-10-10 23:36:21 +0100
commit     609b0852e4d50251857dbbac3141ba042e35a9ae (patch)
tree       ee559ebc14734fdf2a273f845cb98d8d8f93eb7d
parent     11542af65a82242b47e97506695fa0d306d24fb6 (diff)
Remove trailing whitespace from some files.
The prevailing style seems to not have trailing whitespace, but a few
lines do. This is mostly in the perlasm files, but a few C files got
them after the reformat. This is the result of:

    find . -name '*.pl' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
    find . -name '*.c' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
    find . -name '*.h' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'

Then bn_prime.h was excluded since this is a generated file.

Note mkerr.pl has some changes in a heredoc for some help output, but
other lines there lack trailing whitespace too.

Reviewed-by: Kurt Roeckx <kurt@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
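As an aside, the sed invocation above uses the BSD form "sed -i ''"; GNU sed
takes -i with no argument. A minimal equivalent sketch for GNU sed, shown
purely as an illustration and not part of this commit, might look like:

    # Strip trailing spaces and tabs from .pl, .c and .h files (GNU sed),
    # skipping the generated bn_prime.h as the commit message describes.
    find . \( -name '*.pl' -o -name '*.c' -o -name '*.h' \) ! -name bn_prime.h -print0 |
        xargs -0 sed -i -e 's/[ \t]*$//'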
-rw-r--r--  apps/cms.c  2
-rw-r--r--  apps/smime.c  2
-rw-r--r--  apps/speed.c  4
-rwxr-xr-x  crypto/aes/asm/aes-586.pl  12
-rw-r--r--  crypto/aes/asm/aes-ppc.pl  8
-rw-r--r--  crypto/aes/asm/aes-s390x.pl  16
-rwxr-xr-x  crypto/aes/asm/aes-x86_64.pl  4
-rw-r--r--  crypto/aes/asm/aesni-mb-x86_64.pl  28
-rw-r--r--  crypto/aes/asm/aesni-sha1-x86_64.pl  2
-rw-r--r--  crypto/aes/asm/aesni-sha256-x86_64.pl  2
-rw-r--r--  crypto/aes/asm/aesni-x86.pl  2
-rw-r--r--  crypto/aes/asm/aesni-x86_64.pl  10
-rwxr-xr-x  crypto/aes/asm/aesp8-ppc.pl  2
-rwxr-xr-x  crypto/aes/asm/aesv8-armx.pl  6
-rw-r--r--  crypto/aes/asm/bsaes-armv7.pl  2
-rw-r--r--  crypto/aes/asm/bsaes-x86_64.pl  4
-rwxr-xr-x  crypto/aes/asm/vpaes-armv8.pl  8
-rw-r--r--  crypto/aes/asm/vpaes-ppc.pl  8
-rw-r--r--  crypto/aes/asm/vpaes-x86.pl  8
-rw-r--r--  crypto/aes/asm/vpaes-x86_64.pl  20
-rw-r--r--  crypto/bn/asm/armv4-gf2m.pl  2
-rw-r--r--  crypto/bn/asm/armv4-mont.pl  2
-rw-r--r--  crypto/bn/asm/bn-586.pl  24
-rw-r--r--  crypto/bn/asm/co-586.pl  12
-rw-r--r--  crypto/bn/asm/ia64-mont.pl  4
-rw-r--r--  crypto/bn/asm/mips.pl  6
-rw-r--r--  crypto/bn/asm/parisc-mont.pl  4
-rw-r--r--  crypto/bn/asm/ppc-mont.pl  6
-rw-r--r--  crypto/bn/asm/ppc.pl  264
-rwxr-xr-x  crypto/bn/asm/rsaz-avx2.pl  8
-rwxr-xr-x  crypto/bn/asm/rsaz-x86_64.pl  36
-rw-r--r--  crypto/bn/asm/s390x-gf2m.pl  2
-rw-r--r--  crypto/bn/asm/via-mont.pl  2
-rwxr-xr-x  crypto/bn/asm/x86-mont.pl  2
-rwxr-xr-x  crypto/bn/asm/x86_64-mont5.pl  10
-rw-r--r--  crypto/camellia/asm/cmll-x86.pl  6
-rw-r--r--  crypto/cast/asm/cast-586.pl  6
-rwxr-xr-x  crypto/chacha/asm/chacha-armv4.pl  4
-rwxr-xr-x  crypto/chacha/asm/chacha-armv8.pl  4
-rwxr-xr-x  crypto/chacha/asm/chacha-ppc.pl  4
-rw-r--r--  crypto/des/asm/crypt586.pl  4
-rw-r--r--  crypto/des/asm/des-586.pl  6
-rw-r--r--  crypto/des/asm/desboth.pl  2
-rw-r--r--  crypto/ec/asm/ecp_nistz256-armv8.pl  2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-sparcv9.pl  2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86.pl  2
-rwxr-xr-x  crypto/ec/asm/ecp_nistz256-x86_64.pl  16
-rw-r--r--  crypto/md5/asm/md5-586.pl  2
-rw-r--r--  crypto/md5/asm/md5-sparcv9.pl  2
-rw-r--r--  crypto/mips_arch.h  2
-rw-r--r--  crypto/modes/asm/ghash-armv4.pl  4
-rw-r--r--  crypto/modes/asm/ghash-s390x.pl  2
-rw-r--r--  crypto/modes/asm/ghash-x86.pl  6
-rw-r--r--  crypto/modes/asm/ghash-x86_64.pl  8
-rw-r--r--  crypto/perlasm/cbc.pl  10
-rwxr-xr-x  crypto/perlasm/ppc-xlate.pl  2
-rw-r--r--  crypto/perlasm/sparcv9_modes.pl  16
-rwxr-xr-x  crypto/perlasm/x86_64-xlate.pl  12
-rw-r--r--  crypto/perlasm/x86nasm.pl  2
-rw-r--r--  crypto/rc4/asm/rc4-c64xplus.pl  2
-rw-r--r--  crypto/rc4/asm/rc4-md5-x86_64.pl  4
-rw-r--r--  crypto/rc4/asm/rc4-parisc.pl  4
-rwxr-xr-x  crypto/rc4/asm/rc4-x86_64.pl  2
-rw-r--r--  crypto/ripemd/asm/rmd-586.pl  12
-rw-r--r--  crypto/sha/asm/sha1-586.pl  2
-rw-r--r--  crypto/sha/asm/sha1-mb-x86_64.pl  4
-rw-r--r--  crypto/sha/asm/sha1-sparcv9.pl  2
-rw-r--r--  crypto/sha/asm/sha1-sparcv9a.pl  2
-rwxr-xr-x  crypto/sha/asm/sha1-x86_64.pl  2
-rw-r--r--  crypto/sha/asm/sha256-586.pl  6
-rw-r--r--  crypto/sha/asm/sha256-mb-x86_64.pl  2
-rw-r--r--  crypto/sha/asm/sha512-586.pl  2
-rw-r--r--  crypto/sha/asm/sha512-armv8.pl  2
-rwxr-xr-x  crypto/sha/asm/sha512-parisc.pl  4
-rw-r--r--  crypto/sha/asm/sha512-s390x.pl  2
-rw-r--r--  crypto/sha/asm/sha512-sparcv9.pl  4
-rwxr-xr-x  crypto/sha/asm/sha512-x86_64.pl  2
-rw-r--r--  crypto/ts/ts_rsp_verify.c  2
-rw-r--r--  crypto/whrlpool/asm/wp-mmx.pl  4
-rw-r--r--  crypto/x509v3/v3_enum.c  2
-rw-r--r--  crypto/x509v3/v3_skey.c  2
-rw-r--r--  crypto/x86cpuid.pl  2
-rw-r--r--  engines/asm/e_padlock-x86_64.pl  2
-rw-r--r--  include/openssl/x509.h  2
-rw-r--r--  ssl/packet.c  2
-rw-r--r--  ssl/packet_locl.h  2
-rw-r--r--  test/pkits-test.pl  2
-rw-r--r--  test/recipes/tconversion.pl  2
-rw-r--r--  test/wpackettest.c  2
-rwxr-xr-x  util/ck_errf.pl  2
-rw-r--r--  util/copy.pl  4
-rw-r--r--  util/fipslink.pl  2
-rwxr-xr-x  util/mkdef.pl  8
-rw-r--r--  util/mkerr.pl  16
-rw-r--r--  util/su-filter.pl  2
95 files changed, 390 insertions, 390 deletions
diff --git a/apps/cms.c b/apps/cms.c
index 133dc021ae..21f096192a 100644
--- a/apps/cms.c
+++ b/apps/cms.c
@@ -146,7 +146,7 @@ OPTIONS cms_options[] = {
"Do not load certificates from the default certificates directory"},
{"content", OPT_CONTENT, '<',
"Supply or override content for detached signature"},
- {"print", OPT_PRINT, '-',
+ {"print", OPT_PRINT, '-',
"For the -cmsout operation print out all fields of the CMS structure"},
{"secretkey", OPT_SECRETKEY, 's'},
{"secretkeyid", OPT_SECRETKEYID, 's'},
diff --git a/apps/smime.c b/apps/smime.c
index 1f4091fa99..0c8660f537 100644
--- a/apps/smime.c
+++ b/apps/smime.c
@@ -89,7 +89,7 @@ OPTIONS smime_options[] = {
{"no-CApath", OPT_NOCAPATH, '-',
"Do not load certificates from the default certificates directory"},
{"resign", OPT_RESIGN, '-', "Resign a signed message"},
- {"nochain", OPT_NOCHAIN, '-',
+ {"nochain", OPT_NOCHAIN, '-',
"set PKCS7_NOCHAIN so certificates contained in the message are not used as untrusted CAs" },
{"nosmimecap", OPT_NOSMIMECAP, '-', "Omit the SMIMECapabilities attribute"},
{"stream", OPT_STREAM, '-', "Enable CMS streaming" },
diff --git a/apps/speed.c b/apps/speed.c
index e6bdc5dd2f..e9dc8a9fa7 100644
--- a/apps/speed.c
+++ b/apps/speed.c
@@ -1187,8 +1187,8 @@ static int run_benchmark(int async_jobs,
continue;
#endif
- ret = ASYNC_start_job(&loopargs[i].inprogress_job,
- loopargs[i].wait_ctx, &job_op_count, loop_function,
+ ret = ASYNC_start_job(&loopargs[i].inprogress_job,
+ loopargs[i].wait_ctx, &job_op_count, loop_function,
(void *)(loopargs + i), sizeof(loopargs_t));
switch (ret) {
case ASYNC_PAUSE:
diff --git a/crypto/aes/asm/aes-586.pl b/crypto/aes/asm/aes-586.pl
index 1ba356508a..61bdce865c 100755
--- a/crypto/aes/asm/aes-586.pl
+++ b/crypto/aes/asm/aes-586.pl
@@ -123,7 +123,7 @@
# words every cache-line is *guaranteed* to be accessed within ~50
# cycles window. Why just SSE? Because it's needed on hyper-threading
# CPU! Which is also why it's prefetched with 64 byte stride. Best
-# part is that it has no negative effect on performance:-)
+# part is that it has no negative effect on performance:-)
#
# Version 4.3 implements switch between compact and non-compact block
# functions in AES_cbc_encrypt depending on how much data was asked
@@ -585,7 +585,7 @@ sub enctransform()
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | mm4 | mm0 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# | s3 | s2 | s1 | s0 |
+# | s3 | s2 | s1 | s0 |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
@@ -805,7 +805,7 @@ sub encstep()
if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
- else { &mov ($tmp,$s[3]);
+ else { &mov ($tmp,$s[3]);
&shr ($tmp,24) }
&xor ($out,&DWP(1,$te,$tmp,8));
if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
@@ -1558,7 +1558,7 @@ sub sse_deccompact()
&pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp4
&pshufw ("mm3","mm1",0xb1); &pshufw ("mm7","mm5",0xb1);
&pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp4
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16)
+ &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16)
&pxor ("mm3","mm3"); &pxor ("mm7","mm7");
&pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5");
@@ -2028,7 +2028,7 @@ sub declast()
{
# stack frame layout
# -4(%esp) # return address 0(%esp)
-# 0(%esp) # s0 backing store 4(%esp)
+# 0(%esp) # s0 backing store 4(%esp)
# 4(%esp) # s1 backing store 8(%esp)
# 8(%esp) # s2 backing store 12(%esp)
# 12(%esp) # s3 backing store 16(%esp)
@@ -2738,7 +2738,7 @@ sub enckey()
&mov (&DWP(80,"edi"),10); # setup number of rounds
&xor ("eax","eax");
&jmp (&label("exit"));
-
+
&set_label("12rounds");
&mov ("eax",&DWP(0,"esi")); # copy first 6 dwords
&mov ("ebx",&DWP(4,"esi"));
diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl
index 1558d8e454..184c28a291 100644
--- a/crypto/aes/asm/aes-ppc.pl
+++ b/crypto/aes/asm/aes-ppc.pl
@@ -1433,10 +1433,10 @@ $code.=<<___;
xor $s1,$s1,$acc05
xor $s2,$s2,$acc06
xor $s3,$s3,$acc07
- xor $s0,$s0,$acc08 # ^= ROTATE(r8,8)
- xor $s1,$s1,$acc09
- xor $s2,$s2,$acc10
- xor $s3,$s3,$acc11
+ xor $s0,$s0,$acc08 # ^= ROTATE(r8,8)
+ xor $s1,$s1,$acc09
+ xor $s2,$s2,$acc10
+ xor $s3,$s3,$acc11
b Ldec_compact_loop
.align 4
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index a93d601913..9c17f0e5c2 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -404,7 +404,7 @@ _s390x_AES_encrypt:
or $s1,$t1
or $t2,$i2
or $t3,$i3
-
+
srlg $i1,$s2,`8-3` # i0
srlg $i2,$s2,`16-3` # i1
nr $i1,$mask
@@ -457,7 +457,7 @@ _s390x_AES_encrypt:
x $s2,24($key)
x $s3,28($key)
- br $ra
+ br $ra
.size _s390x_AES_encrypt,.-_s390x_AES_encrypt
___
@@ -779,7 +779,7 @@ _s390x_AES_decrypt:
x $s2,24($key)
x $s3,28($key)
- br $ra
+ br $ra
.size _s390x_AES_decrypt,.-_s390x_AES_decrypt
___
@@ -1297,7 +1297,7 @@ $code.=<<___;
.Lcbc_enc_done:
l${g} $ivp,6*$SIZE_T($sp)
st $s0,0($ivp)
- st $s1,4($ivp)
+ st $s1,4($ivp)
st $s2,8($ivp)
st $s3,12($ivp)
@@ -1635,7 +1635,7 @@ $code.=<<___ if(1);
llgc $len,2*$SIZE_T-1($sp)
nill $len,0x0f # $len%=16
br $ra
-
+
.align 16
.Lxts_km_vanilla:
___
@@ -1862,7 +1862,7 @@ $code.=<<___;
xgr $s1,%r1
lrvgr $s1,$s1 # flip byte order
lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
stg $s1,$tweak+0($sp) # save the tweak
llgfr $s1,$s1
srlg $s2,$s3,32
@@ -1913,7 +1913,7 @@ $code.=<<___;
xgr $s1,%r1
lrvgr $s1,$s1 # flip byte order
lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
stg $s1,$tweak+0($sp) # save the tweak
llgfr $s1,$s1
srlg $s2,$s3,32
@@ -2105,7 +2105,7 @@ $code.=<<___;
xgr $s1,%r1
lrvgr $s1,$s1 # flip byte order
lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
+ srlg $s0,$s1,32 # smash the tweak to 4x32-bits
stg $s1,$tweak+0($sp) # save the tweak
llgfr $s1,$s1
srlg $s2,$s3,32
diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl
index ce4ca30b1a..ae7fde20fe 100755
--- a/crypto/aes/asm/aes-x86_64.pl
+++ b/crypto/aes/asm/aes-x86_64.pl
@@ -1298,7 +1298,7 @@ $code.=<<___;
AES_set_encrypt_key:
push %rbx
push %rbp
- push %r12 # redundant, but allows to share
+ push %r12 # redundant, but allows to share
push %r13 # exception handler...
push %r14
push %r15
@@ -1424,7 +1424,7 @@ $code.=<<___;
xor %rax,%rax
jmp .Lexit
-.L14rounds:
+.L14rounds:
mov 0(%rsi),%rax # copy first 8 dwords
mov 8(%rsi),%rbx
mov 16(%rsi),%rcx
diff --git a/crypto/aes/asm/aesni-mb-x86_64.pl b/crypto/aes/asm/aesni-mb-x86_64.pl
index aa2735e06a..fcef7c62fa 100644
--- a/crypto/aes/asm/aesni-mb-x86_64.pl
+++ b/crypto/aes/asm/aesni-mb-x86_64.pl
@@ -134,7 +134,7 @@ $code.=<<___ if ($win64);
movaps %xmm10,0x40(%rsp)
movaps %xmm11,0x50(%rsp)
movaps %xmm12,0x60(%rsp)
- movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
+ movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
movaps %xmm14,-0x58(%rax)
movaps %xmm15,-0x48(%rax)
___
@@ -308,9 +308,9 @@ $code.=<<___;
movups @out[0],-16(@outptr[0],$offset)
pxor @inp[0],@out[0]
- movups @out[1],-16(@outptr[1],$offset)
+ movups @out[1],-16(@outptr[1],$offset)
pxor @inp[1],@out[1]
- movups @out[2],-16(@outptr[2],$offset)
+ movups @out[2],-16(@outptr[2],$offset)
pxor @inp[2],@out[2]
movups @out[3],-16(@outptr[3],$offset)
pxor @inp[3],@out[3]
@@ -393,7 +393,7 @@ $code.=<<___ if ($win64);
movaps %xmm10,0x40(%rsp)
movaps %xmm11,0x50(%rsp)
movaps %xmm12,0x60(%rsp)
- movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
+ movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler
movaps %xmm14,-0x58(%rax)
movaps %xmm15,-0x48(%rax)
___
@@ -563,10 +563,10 @@ $code.=<<___;
movups @out[0],-16(@outptr[0],$offset)
movdqu (@inptr[0],$offset),@out[0]
- movups @out[1],-16(@outptr[1],$offset)
+ movups @out[1],-16(@outptr[1],$offset)
movdqu (@inptr[1],$offset),@out[1]
pxor $zero,@out[0]
- movups @out[2],-16(@outptr[2],$offset)
+ movups @out[2],-16(@outptr[2],$offset)
movdqu (@inptr[2],$offset),@out[2]
pxor