author     David Benjamin <davidben@google.com>       2016-10-10 12:01:24 -0400
committer  Matt Caswell <matt@openssl.org>            2016-10-10 23:36:21 +0100
commit     609b0852e4d50251857dbbac3141ba042e35a9ae (patch)
tree       ee559ebc14734fdf2a273f845cb98d8d8f93eb7d /crypto/sha
parent     11542af65a82242b47e97506695fa0d306d24fb6 (diff)
Remove trailing whitespace from some files.
The prevailing style seems to not have trailing whitespace, but a few
lines do. This is mostly in the perlasm files, but a few C files got
them after the reformat. This is the result of:

    find . -name '*.pl' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
    find . -name '*.c' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
    find . -name '*.h' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'

Then bn_prime.h was excluded since this is a generated file.

Note mkerr.pl has some changes in a heredoc for some help output, but
other lines there lack trailing whitespace too.

Reviewed-by: Kurt Roeckx <kurt@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
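As a quick follow-up check (not part of the commit), one could confirm that the sweep left no trailing whitespace behind. A minimal sketch, assuming a POSIX-ish shell with find, xargs, and grep available; it prints any remaining file:line matches:

    # List lines that still end in spaces or tabs across the same file set.
    # bn_prime.h may still match, since the commit deliberately left it alone.
    find . \( -name '*.pl' -o -name '*.c' -o -name '*.h' \) -print0 \
        | xargs -0 grep -nE '[[:blank:]]+$'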
Diffstat (limited to 'crypto/sha')
-rw-r--r--  crypto/sha/asm/sha1-586.pl          | 2
-rw-r--r--  crypto/sha/asm/sha1-mb-x86_64.pl    | 4
-rw-r--r--  crypto/sha/asm/sha1-sparcv9.pl      | 2
-rw-r--r--  crypto/sha/asm/sha1-sparcv9a.pl     | 2
-rwxr-xr-x  crypto/sha/asm/sha1-x86_64.pl       | 2
-rw-r--r--  crypto/sha/asm/sha256-586.pl        | 6
-rw-r--r--  crypto/sha/asm/sha256-mb-x86_64.pl  | 2
-rw-r--r--  crypto/sha/asm/sha512-586.pl        | 2
-rw-r--r--  crypto/sha/asm/sha512-armv8.pl      | 2
-rwxr-xr-x  crypto/sha/asm/sha512-parisc.pl     | 4
-rw-r--r--  crypto/sha/asm/sha512-s390x.pl      | 2
-rw-r--r--  crypto/sha/asm/sha512-sparcv9.pl    | 4
-rwxr-xr-x  crypto/sha/asm/sha512-x86_64.pl     | 2
13 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
index 0efed70a3e..3bf8200dbb 100644
--- a/crypto/sha/asm/sha1-586.pl
+++ b/crypto/sha/asm/sha1-586.pl
@@ -133,7 +133,7 @@ $ymm=1 if ($xmm &&
=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
$1>=2.19); # first version supporting AVX
-$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
$1>=2.03); # first version supporting AVX
diff --git a/crypto/sha/asm/sha1-mb-x86_64.pl b/crypto/sha/asm/sha1-mb-x86_64.pl
index 51c73c05ac..2f6b35f355 100644
--- a/crypto/sha/asm/sha1-mb-x86_64.pl
+++ b/crypto/sha/asm/sha1-mb-x86_64.pl
@@ -95,7 +95,7 @@ $K="%xmm15";
if (1) {
# Atom-specific optimization aiming to eliminate pshufb with high
- # registers [and thus get rid of 48 cycles accumulated penalty]
+ # registers [and thus get rid of 48 cycles accumulated penalty]
@Xi=map("%xmm$_",(0..4));
($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9));
@V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14));
@@ -126,7 +126,7 @@ my $k=$i+2;
# ...
# $i==13: 14,15,15,15,
# $i==14: 15
-#
+#
# Then at $i==15 Xupdate is applied one iteration in advance...
$code.=<<___ if ($i==0);
movd (@ptr[0]),@Xi[0]
diff --git a/crypto/sha/asm/sha1-sparcv9.pl b/crypto/sha/asm/sha1-sparcv9.pl
index 7437ff4f05..cdd5b9afc5 100644
--- a/crypto/sha/asm/sha1-sparcv9.pl
+++ b/crypto/sha/asm/sha1-sparcv9.pl
@@ -227,7 +227,7 @@ sha1_block_data_order:
ldd [%o1 + 0x20], %f16
ldd [%o1 + 0x28], %f18
ldd [%o1 + 0x30], %f20
- subcc %o2, 1, %o2 ! done yet?
+ subcc %o2, 1, %o2 ! done yet?
ldd [%o1 + 0x38], %f22
add %o1, 0x40, %o1
prefetch [%o1 + 63], 20
diff --git a/crypto/sha/asm/sha1-sparcv9a.pl b/crypto/sha/asm/sha1-sparcv9a.pl
index f9ed5630e8..8dfde463cb 100644
--- a/crypto/sha/asm/sha1-sparcv9a.pl
+++ b/crypto/sha/asm/sha1-sparcv9a.pl
@@ -519,7 +519,7 @@ $code.=<<___;
mov $Cctx,$C
mov $Dctx,$D
mov $Ectx,$E
- alignaddr %g0,$tmp0,%g0
+ alignaddr %g0,$tmp0,%g0
dec 1,$len
ba .Loop
mov $nXfer,$Xfer
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
index 97baae37cd..66054ceeae 100755
--- a/crypto/sha/asm/sha1-x86_64.pl
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -262,7 +262,7 @@ sha1_block_data_order:
jz .Lialu
___
$code.=<<___ if ($shaext);
- test \$`1<<29`,%r10d # check SHA bit
+ test \$`1<<29`,%r10d # check SHA bit
jnz _shaext_shortcut
___
$code.=<<___ if ($avx>1);
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index 6af1d84beb..8e7f4eecc3 100644
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -47,7 +47,7 @@
#
# Performance in clock cycles per processed byte (less is better):
#
-# gcc icc x86 asm(*) SIMD x86_64 asm(**)
+# gcc icc x86 asm(*) SIMD x86_64 asm(**)
# Pentium 46 57 40/38 - -
# PIII 36 33 27/24 - -
# P4 41 38 28 - 17.3
@@ -276,7 +276,7 @@ my $suffix=shift;
&mov ($Coff,"ecx");
&mov ($Doff,"edi");
&mov (&DWP(0,"esp"),"ebx"); # magic
- &mov ($E,&DWP(16,"esi"));
+ &mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("edi",&DWP(28,"esi"));
@@ -385,7 +385,7 @@ my @AH=($A,$K256);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"ebx");
- &mov ($E,&DWP(16,"esi"));
+ &mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
diff --git a/crypto/sha/asm/sha256-mb-x86_64.pl b/crypto/sha/asm/sha256-mb-x86_64.pl
index fbcd29f2e8..b8a77c7fce 100644
--- a/crypto/sha/asm/sha256-mb-x86_64.pl
+++ b/crypto/sha/asm/sha256-mb-x86_64.pl
@@ -36,7 +36,7 @@
# (iii) "this" is for n=8, when we gather twice as much data, result
# for n=4 is 20.3+4.44=24.7;
# (iv) presented improvement coefficients are asymptotic limits and
-# in real-life application are somewhat lower, e.g. for 2KB
+# in real-life application are somewhat lower, e.g. for 2KB
# fragments they range from 75% to 130% (on Haswell);
$flavour = shift;
diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl
index 0887e06148..94cc0114f8 100644
--- a/crypto/sha/asm/sha512-586.pl
+++ b/crypto/sha/asm/sha512-586.pl
@@ -383,7 +383,7 @@ if ($sse2) {
&set_label("16_79_sse2",16);
for ($j=0;$j<2;$j++) { # 2x unroll
- #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15
+ #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15
&movq ("mm5",&QWP(8*(9+16-14),"esp"));
&movq ("mm1","mm7");
&psrlq ("mm7",1);
diff --git a/crypto/sha/asm/sha512-armv8.pl b/crypto/sha/asm/sha512-armv8.pl
index c1aaf778f4..620aa39440 100644
--- a/crypto/sha/asm/sha512-armv8.pl
+++ b/crypto/sha/asm/sha512-armv8.pl
@@ -26,7 +26,7 @@
# Denver 2.01 10.5 (+26%) 6.70 (+8%)
# X-Gene 20.0 (+100%) 12.8 (+300%(***))
# Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-#
+#
# (*) Software SHA256 results are of lesser relevance, presented
# mostly for informational purposes.
# (**) The result is a trade-off: it's possible to improve it by
diff --git a/crypto/sha/asm/sha512-parisc.pl b/crypto/sha/asm/sha512-parisc.pl
index fcb6157902..d28a5af835 100755
--- a/crypto/sha/asm/sha512-parisc.pl
+++ b/crypto/sha/asm/sha512-parisc.pl
@@ -368,7 +368,7 @@ L\$parisc1
___
@V=( $Ahi, $Alo, $Bhi, $Blo, $Chi, $Clo, $Dhi, $Dlo,
- $Ehi, $Elo, $Fhi, $Flo, $Ghi, $Glo, $Hhi, $Hlo) =
+ $Ehi, $Elo, $Fhi, $Flo, $Ghi, $Glo, $Hhi, $Hlo) =
( "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8",
"%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16");
$a0 ="%r17";
@@ -419,7 +419,7 @@ $code.=<<___;
add $t0,$hlo,$hlo
shd $ahi,$alo,$Sigma0[0],$t0
addc $t1,$hhi,$hhi ; h += Sigma1(e)
- shd $alo,$ahi,$Sigma0[0],$t1
+ shd $alo,$ahi,$Sigma0[0],$t1
add $a0,$hlo,$hlo
shd $ahi,$alo,$Sigma0[1],$t2
addc $a1,$hhi,$hhi ; h += Ch(e,f,g)
diff --git a/crypto/sha/asm/sha512-s390x.pl b/crypto/sha/asm/sha512-s390x.pl
index 582d393cef..92d7a7725a 100644
--- a/crypto/sha/asm/sha512-s390x.pl
+++ b/crypto/sha/asm/sha512-s390x.pl
@@ -311,7 +311,7 @@ $code.=<<___;
cl${g} $inp,`$frame+4*$SIZE_T`($sp)
jne .Lloop
- lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp)
+ lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp)
br %r14
.size $Func,.-$Func
.string "SHA${label} block transform for s390x, CRYPTOGAMS by <appro\@openssl.org>"
diff --git a/crypto/sha/asm/sha512-sparcv9.pl b/crypto/sha/asm/sha512-sparcv9.pl
index 4a1ce5fe3e..098c2a118a 100644
--- a/crypto/sha/asm/sha512-sparcv9.pl
+++ b/crypto/sha/asm/sha512-sparcv9.pl
@@ -102,7 +102,7 @@ if ($output =~ /512/) {
$locals=0; # X[16] is register resident
@X=("%o0","%o1","%o2","%o3","%o4","%o5","%g1","%o7");
-
+
$A="%l0";
$B="%l1";
$C="%l2";
@@ -254,7 +254,7 @@ $code.=<<___;
$SLL $a,`$SZ*8-@Sigma0[1]`,$tmp1
xor $tmp0,$h,$h
$SRL $a,@Sigma0[2],$tmp0
- xor $tmp1,$h,$h
+ xor $tmp1,$h,$h
$SLL $a,`$SZ*8-@Sigma0[0]`,$tmp1
xor $tmp0,$h,$h
xor $tmp1,$h,$h ! Sigma0(a)
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
index 63a62656f6..01bbb7775f 100755
--- a/crypto/sha/asm/sha512-x86_64.pl
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -1782,7 +1782,7 @@ if ($avx>1) {{
######################################################################
# AVX2+BMI code path
#
-my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
+my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
my $PUSH8=8*2*$SZ;
use integer;