author     Andy Polyakov <appro@openssl.org>   2017-06-26 17:29:24 +0200
committer  Andy Polyakov <appro@openssl.org>   2017-06-29 21:15:51 +0200
commit     a1613840ddae01334999f7a92265eac0d4f50da4 (patch)
tree       18bb5a3c503dbbf91693b6e3e820b70cb1dd0a33 /crypto/sha
parent     a078d9dfa92d34458529de19818faa94a75ae908 (diff)
sha/asm/keccak1600-x86_64.pl: optimize by re-ordering instructions.
Reviewed-by: Richard Levitte <levitte@openssl.org>
Diffstat (limited to 'crypto/sha')
-rwxr-xr-x  crypto/sha/asm/keccak1600-x86_64.pl  178
1 file changed, 95 insertions, 83 deletions
diff --git a/crypto/sha/asm/keccak1600-x86_64.pl b/crypto/sha/asm/keccak1600-x86_64.pl
index 09160544a0..8eb2367bd0 100755
--- a/crypto/sha/asm/keccak1600-x86_64.pl
+++ b/crypto/sha/asm/keccak1600-x86_64.pl
@@ -22,22 +22,33 @@
# instead of actually unrolling the loop pair-wise I simply flip
# pointers to T[][] and A[][] at the end of round. Since number of
# rounds is even, last round writes to A[][] and everything works out.
+# How does it compare to the assembly module in the Keccak Code Package?
+# KCP is faster on a couple of processors, VIA Nano and Goldmont, by 4-6%;
+# otherwise this module is either as fast or faster by up to 15%...
#
########################################################################
# Numbers are cycles per processed byte out of large message.
#
-# r=1088
+# r=1088(*)
#
-# P4 45.8
-# Core 2 14.2
-# Sandy Bridge 13.0
-# Haswell 9.8
+# P4 25.8
+# Core 2 13.0
+# Westmere 13.7
+# Sandy Bridge 12.9(**)
+# Haswell 9.7
# Skylake 9.4
-# Silvermont 22.4
-# Goldmont 18.0
-# VIA Nano 19.1
-# Sledgehammer 13.8
-# Bulldozer 16.7
+# Silvermont 22.8
+# Goldmont 16.4
+# VIA Nano 18.0
+# Sledgehammer 13.3
+# Bulldozer 16.5
+#
+# (*) Corresponds to SHA3-256. Improvement over compiler-generated
+# code varies a lot; the most common coefficient is 15% in comparison
+# to gcc-5.x, 50% for gcc-4.x, 90% for gcc-3.x.
+# (**) Sandy Bridge has a broken rotate instruction. Performance can be
+# improved by 14% by replacing rotates with a double-precision shift
+# using the same register as source and destination.
$flavour = shift;
$output = shift;
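
To make the pointer-flipping remark in the comment above concrete, here is a minimal plain-Perl sketch of the scheme (permute_sketch and $round_body are illustrative names only and do not appear in the module): the round body always reads one state and writes the other, the two references are swapped after every round, and because 24 is even the last round writes back into the caller's state.

sub permute_sketch {
    my ($A, $T, $round_body) = @_;      # $A - caller's state, $T - scratch twin,
                                        # $round_body - coderef: reads arg 0, writes arg 1
    my ($src, $dst) = ($A, $T);
    for (1 .. 24) {
        $round_body->($src, $dst);      # one Keccak round, src -> dst
        ($src, $dst) = ($dst, $src);    # flip pointers instead of unrolling pair-wise
    }
    # 24 is an even round count, so the final round wrote into $A and
    # no copy-back from the scratch state is needed.
}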
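Footnote (**) can likewise be illustrated with a two-line sketch (the register and the one-bit rotate amount are arbitrary examples): both emitted instructions rotate %rax left by one bit, and the second, a double-precision shift with the same register as source and destination, is the form that sidesteps the Sandy Bridge rotate problem. The commented-out substitution at the end of this patch performs exactly this rewrite.

$code .= "\trol\t\$1,%rax\n";           # ordinary rotate-left by 1
$code .= "\tshld\t\$1,%rax,%rax\n";     # same effect: shift %rax left by 1 and
                                        # fill from %rax itself, i.e. a rotate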
@@ -82,79 +93,78 @@ __KeccakF1600:
.align 32
.Loop:
- xor $A[0][0](%rdi),@C[0]
- xor $A[0][1](%rdi),@C[1]
+ mov $A[0][0](%rdi),@D[0]
+ mov $A[1][1](%rdi),@D[1]
+ mov $A[2][2](%rdi),@D[2]
+ mov $A[3][3](%rdi),@D[3]
+
xor $A[0][2](%rdi),@C[2]
xor $A[0][3](%rdi),@C[3]
+ xor @D[0], @C[0]
+ xor $A[0][1](%rdi),@C[1]
+ xor $A[1][2](%rdi),@C[2]
+ xor $A[1][0](%rdi),@C[0]
+ mov @C[4],@D[4]
xor $A[0][4](%rdi),@C[4]
- xor $A[1][0](%rdi),@C[0]
- xor $A[1][1](%rdi),@C[1]
- xor $A[1][2](%rdi),@C[2]
- xor $A[1][3](%rdi),@C[3]
- xor $A[1][4](%rdi),@C[4]
-
+ xor @D[2], @C[2]
xor $A[2][0](%rdi),@C[0]
- xor $A[2][1](%rdi),@C[1]
- xor $A[2][2](%rdi),@C[2]
- xor $A[2][3](%rdi),@C[3]
- xor $A[2][4](%rdi),@C[4]
+ xor $A[1][3](%rdi),@C[3]
+ xor @D[1], @C[1]
+ xor $A[1][4](%rdi),@C[4]
- xor $A[3][0](%rdi),@C[0]
- xor $A[3][1](%rdi),@C[1]
xor $A[3][2](%rdi),@C[2]
- xor $A[3][3](%rdi),@C[3]
- xor $A[3][4](%rdi),@C[4]
+ xor $A[3][0](%rdi),@C[0]
+ xor $A[2][3](%rdi),@C[3]
+ xor $A[2][1](%rdi),@C[1]
+ xor $A[2][4](%rdi),@C[4]
mov @C[2],@T[0]
rol \$1,@C[2]
- mov $A[0][0](%rdi),@D[0]
xor @C[0],@C[2] # D[1] = ROL64(C[2], 1) ^ C[0]
+ xor @D[3], @C[3]
rol \$1,@C[0]
- mov $A[1][1](%rdi),@D[1]
xor @C[3],@C[0] # D[4] = ROL64(C[0], 1) ^ C[3]
+ xor $A[3][1](%rdi),@C[1]
rol \$1,@C[3]
- mov $A[2][2](%rdi),@D[2]
xor @C[1],@C[3] # D[2] = ROL64(C[3], 1) ^ C[1]
+ xor $A[3][4](%rdi),@C[4]
rol \$1,@C[1]
- mov $A[3][3](%rdi),@D[3]
xor @C[4],@C[1] # D[0] = ROL64(C[1], 1) ^ C[4]
rol \$1,@C[4]
- mov $A[4][4](%rdi),@D[4]
xor @T[0],@C[4] # D[3] = ROL64(C[4], 1) ^ C[2]
___
my @E = @D;
@D = (@C[1],@C[2],@C[3],@C[4],@C[0]);
@C = @E;
$code.=<<___;
- xor @D[0],@C[0]
xor @D[1],@C[1]
xor @D[2],@C[2]
+ rol \$$rhotates[1][1],@C[1]
xor @D[3],@C[3]
xor @D[4],@C[4]
-
- rol \$$rhotates[1][1],@C[1]
rol \$$rhotates[2][2],@C[2]
+ xor @D[0],@C[0]
+ mov @C[1],@T[0]
rol \$$rhotates[3][3],@C[3]
+ or @C[2],@C[1]
+ xor @C[0],@C[1] # C[0] ^ ( C[1] | C[2])
rol \$$rhotates[4][4],@C[4]
- mov @C[1],@T[0]
- or @C[2],@C[1]
- xor @C[0],@C[1] # C[0] ^ ( C[1] | C[2])
- xor ($iotas),@C[1]
- lea 8($iotas),$iotas
- mov @C[1],$A[0][0](%rsi) # R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i]
+ xor ($iotas),@C[1]
+ lea 8($iotas),$iotas
mov @C[4],@T[1]
and @C[3],@C[4]
+ mov @C[1],$A[0][0](%rsi) # R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i]
xor @C[2],@C[4] # C[2] ^ ( C[4] & C[3])
+ not @C[2]
mov @C[4],$A[0][2](%rsi) # R[0][2] = C[2] ^ ( C[4] & C[3])
- not @C[2]
or @C[3],@C[2]
xor @T[0],@C[2] # C[1] ^ (~C[2] | C[3])
mov @C[2],$A[0][1](%rsi) # R[0][1] = C[1] ^ (~C[2] | C[3])
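
As a readability aid, the D[] values that the interleaved xor/rol sequence above produces (see its in-line comments, e.g. D[1] = ROL64(C[2], 1) ^ C[0]) follow the usual theta pattern D[x] = ROL64(C[x+1], 1) ^ C[x-1] with indices taken mod 5. A minimal plain-Perl model of just that step; ROL64 and theta_D are illustrative names that are not part of the module.

sub ROL64 {
    my ($v, $n) = @_;                   # 64-bit rotate-left, masked to 64 bits
    return (($v << $n) | ($v >> (64 - $n))) & 0xFFFF_FFFF_FFFF_FFFF;
}

sub theta_D {
    my @C = @_;                         # the five 64-bit column parities
    return map { ROL64($C[($_ + 1) % 5], 1) ^ $C[($_ + 4) % 5] } 0 .. 4;
}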
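The three row-0 stores above likewise just implement the formulas given in their comments, transcribed here into plain Perl (chi_row0 and $MASK are illustrative names; the remaining two lanes of the row are handled by code this hunk does not show). Note that the AND/OR/NOT mix varies from lane to lane; the in-line comments above are the authoritative formulas.

my $MASK = 0xFFFF_FFFF_FFFF_FFFF;       # keep Perl integers to 64 bits

sub chi_row0 {
    my ($iota, @C) = @_;                # $iota - round constant, @C - rotated lanes
    my @R;
    $R[0] = ($C[0] ^ ($C[1] | $C[2]) ^ $iota)        & $MASK;   # R[0][0]
    $R[1] = ($C[1] ^ ((~$C[2] & $MASK) | $C[3]))     & $MASK;   # R[0][1]
    $R[2] = ($C[2] ^ ($C[4] & $C[3]))                & $MASK;   # R[0][2]
    return @R;
}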
@@ -169,34 +179,33 @@ $code.=<<___;
mov $A[0][3](%rdi),@C[0]
+ mov $A[4][2](%rdi),@C[4]
+ mov $A[3][1](%rdi),@C[3]
mov $A[1][4](%rdi),@C[1]
mov $A[2][0](%rdi),@C[2]
- mov $A[3][1](%rdi),@C[3]
- mov $A[4][2](%rdi),@C[4]
xor @D[3],@C[0]
- xor @D[4],@C[1]
- xor @D[0],@C[2]
- xor @D[1],@C[3]
xor @D[2],@C[4]
-
rol \$$rhotates[0][3],@C[0]
+ xor @D[1],@C[3]
+ xor @D[4],@C[1]
+ rol \$$rhotates[4][2],@C[4]
+ rol \$$rhotates[3][1],@C[3]
+ xor @D[0],@C[2]
rol \$$rhotates[1][4],@C[1]
+ mov @C[0],@T[0]
+ or @C[4],@C[0]
rol \$$rhotates[2][0],@C[2]
- rol \$$rhotates[3][1],@C[3]
- rol \$$rhotates[4][2],@C[4]
- mov @C[0],@T[0]
- or @C[4],@C[0]
xor @C[3],@C[0] # C[3] ^ (C[0] | C[4])
mov @C[0],$A[1][3](%rsi) # R[1][3] = C[3] ^ (C[0] | C[4])
mov @C[1],@T[1]
and @T[0],@C[1]
xor @C[4],@C[1] # C[4] ^ (C[1] & C[0])
+ not @C[4]
mov @C[1],$A[1][4](%rsi) # R[1][4] = C[4] ^ (C[1] & C[0])
- not @C[4]
or @C[3],@C[4]
xor @C[2],@C[4] # C[2] ^ (~C[4] | C[3])
mov @C[4],$A[1][2](%rsi) # R[1][2] = C[2] ^ (~C[4] | C[3])
@@ -210,31 +219,30 @@ $code.=<<___;
mov @T[1],$A[1][0](%rsi) # R[1][0] = C[0] ^ (C[1] | C[2])
- mov $A[0][1](%rdi),@C[0]
- mov $A[1][2](%rdi),@C[1]
mov $A[2][3](%rdi),@C[2]
mov $A[3][4](%rdi),@C[3]
+ mov $A[1][2](%rdi),@C[1]
mov $A[4][0](%rdi),@C[4]
+ mov $A[0][1](%rdi),@C[0]
- xor @D[1],@C[0]
- xor @D[2],@C[1]
xor @D[3],@C[2]
xor @D[4],@C[3]
- xor @D[0],@C[4]
-
- rol \$$rhotates[0][1],@C[0]
- rol \$$rhotates[1][2],@C[1]
rol \$$rhotates[2][3],@C[2]
+ xor @D[2],@C[1]
rol \$$rhotates[3][4],@C[3]
+ xor @D[0],@C[4]
+ rol \$$rhotates[1][2],@C[1]
+ xor @D[1],@C[0]
rol \$$rhotates[4][0],@C[4]
+ mov @C[2],@T[0]
+ and @C[3],@C[2]
+ rol \$$rhotates[0][1],@C[0]
- mov @C[2],@T[0]
- and @C[3],@C[2]
+ not @C[3]
xor @C[1],@C[2] # C[1] ^ ( C[2] & C[3])
mov @C[2],$A[2][1](%rsi) # R[2][1] = C[1] ^ ( C[2] & C[3])
mov @C[4],@T[1]
- not @C[3]
and @C[3],@C[4]
xor @T[0],@C[4] # C[2] ^ ( C[4] & ~C[3])
mov @C[4],$A[2][2](%rsi) # R[2][2] = C[2] ^ ( C[4] & ~C[3])
@@ -252,31 +260,30 @@ $code.=<<___;
mov @C[0],$A[2][3](%rsi) # R[2][3] = ~C[3] ^ ( C[0] | C[4])
- mov $A[0][4](%rdi),@C[0]
- mov $A[1][0](%rdi),@C[1]
mov $A[2][1](%rdi),@C[2]
mov $A[3][2](%rdi),@C[3]
+ mov $A[1][0](%rdi),@C[1]
mov $A[4][3](%rdi),@C[4]
+ mov $A[0][4](%rdi),@C[0]
- xor @D[4],@C[0]
- xor @D[0],@C[1]
xor @D[1],@C[2]
xor @D[2],@C[3]
- xor @D[3],@C[4]
-
- rol \$$rhotates[0][4],@C[0]
- rol \$$rhotates[1][0],@C[1]
rol \$$rhotates[2][1],@C[2]
+ xor @D[0],@C[1]
rol \$$rhotates[3][2],@C[3]
+ xor @D[3],@C[4]
+ rol \$$rhotates[1][0],@C[1]
+ xor @D[4],@C[0]
rol \$$rhotates[4][3],@C[4]
+ mov @C[2],@T[0]
+ or @C[3],@C[2]
+ rol \$$rhotates[0][4],@C[0]
- mov @C[2],@T[0]
- or @C[3],@C[2]
+ not @C[3]
xor @C[1],@C[2] # C[1] ^ ( C[2] | C[3])
mov @C[2],$A[3][1](%rsi) # R[3][1] = C[1] ^ ( C[2] | C[3])
mov @C[4],@T[1]
- not @C[3]
or @C[3],@C[4]
xor @T[0],@C[4] # C[2] ^ ( C[4] | ~C[3])
mov @C[4],$A[3][2](%rsi) # R[3][2] = C[2] ^ ( C[4] | ~C[3])
@@ -296,26 +303,25 @@ $code.=<<___;
xor $A[0][2](%rdi),@D[2]
xor $A[1][3](%rdi),@D[3]
+ rol \$$rhotates[0][2],@D[2]
+ xor $A[4][1](%rdi),@D[1]
+ rol \$$rhotates[1][3],@D[3]
xor $A[2][4](%rdi),@D[4]
+ rol \$$rhotates[4][1],@D[1]
xor $A[3][0](%rdi),@D[0]
- xor $A[4][1](%rdi),@D[1]
xchg %rsi,%rdi
-
- rol \$$rhotates[0][2],@D[2]
- rol \$$rhotates[1][3],@D[3]
rol \$$rhotates[2][4],@D[4]
rol \$$rhotates[3][0],@D[0]
- rol \$$rhotates[4][1],@D[1]
___
@C = (@D[2],@D[3],@D[4],@D[0],@D[1]);
$code.=<<___;
mov @C[0],@T[0]
and @C[1],@C[0]
+ not @C[1]
xor @C[4],@C[0] # C[4] ^ ( C[0] & C[1])
mov @C[0],$A[4][4](%rdi) # R[4][4] = C[4] ^ ( C[0] & C[1])
mov @C[2],@T[1]
- not @C[1]
and @C[1],@C[2]
xor @T[0],@C[2] # C[0] ^ ( C[2] & ~C[1])
mov @C[2],$A[4][0](%rdi) # R[4][0] = C[0] ^ ( C[2] & ~C[1])
@@ -432,7 +438,7 @@ SHA3_absorb:
lea 8($A_flat),$A_flat
sub \$8,$len
mov %rax,-8($A_flat)
- dec $bsz
+ sub \$1,$bsz
jnz .Lblock_absorb
mov $inp,200-100(%rsi) # save inp
@@ -497,7 +503,7 @@ SHA3_squeeze:
sub \$8,$len # len -= 8
jz .Ldone_squeeze
- dec %rcx # bsz--
+ sub \$1,%rcx # bsz--
jnz .Loop_squeeze
call KeccakF1600
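
The two loop-counter changes above swap dec for sub \$1. The commit message does not spell out why, but a plausible reading (an assumption on my part, not stated in the patch): dec leaves CF untouched and can therefore create a partial-flags dependency on some cores, while sub \$1 rewrites the whole flags register; both forms set ZF for the jnz that follows. A two-line sketch of what the generator emits in each case (the register is just an example):

$code .= "\tdec\t%rcx\n";               # old form: decrement, CF left unchanged
$code .= "\tsub\t\$1,%rcx\n";           # new form: same decrement, full flags update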
@@ -552,6 +558,12 @@ iotas:
.asciz "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
-print $code;
+foreach (split("\n",$code)) {
+ # Below replacement results in 11.3 on Sandy Bridge, 9.4 on
+ # Haswell, but it hurts other processors by up to 2-3-4x...
+ #s/rol\s+(\$[0-9]+),(%[a-z][a-z0-9]+)/shld\t$1,$2,$2/;
+
+ print $_, "\n";
+}
close STDOUT;