author    Andy Polyakov <appro@openssl.org>  2014-06-12 21:45:41 +0200
committer Andy Polyakov <appro@openssl.org>  2014-06-12 21:45:41 +0200
commit    977f32e85241cba8be53e44dade32231e8a91718 (patch)
tree      ae76df809dd4f5b5696ad6abae14ac3dc5c922fd /crypto/sha/asm/sha256-586.pl
parent    d84ba7ea23b386f3fe56c4fe7a7aa8ece2e0c356 (diff)
Facilitate back-porting of AESNI and SHA modules.
Fix SEH and stack handling in Win64 build.
Diffstat (limited to 'crypto/sha/asm/sha256-586.pl')
-rw-r--r--  crypto/sha/asm/sha256-586.pl  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index 09648a8207..ee094a9214 100644
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -82,6 +82,8 @@ if ($xmm && !$avx && $ARGV[0] eq "win32" &&
$avx = ($1>=10) + ($1>=11);
}
+$shaext=$xmm; ### set to zero if compiling for 1.0.1
+
$unroll_after = 64*4; # If pre-evicted from L1P cache first spin of
# fully unrolled loop was measured to run about
# 3-4x slower. If slowdown coefficient is N and
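The new $shaext knob defaults to the existing $xmm probe, so nothing changes
for current builds; a back-port to the 1.0.1 branch only has to zero it and
the SHA Extensions code is never emitted at all. A minimal runnable sketch of
the idiom, with hypothetical print statements standing in for perlasm code
emission:

    #!/usr/bin/env perl
    # Miniature of the $shaext emission gate: code guarded by the flag
    # is simply not generated when the flag is zero.
    my $xmm    = 1;      # SSE-capable target, as probed earlier in the script
    my $shaext = $xmm;   ### set to zero if compiling for 1.0.1

    if ($shaext) {
        print "emit SHA-NI (SHA Extensions) code path\n";
    } else {
        print "skip SHA-NI; emit only the SSSE3/AVX/baseline paths\n";
    }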
@@ -205,8 +207,8 @@ sub BODY_00_15() {
&jz ($unroll_after?&label("no_xmm"):&label("loop"));
&and ("ecx",1<<30); # mask "Intel CPU" bit
&and ("ebx",1<<28|1<<9); # mask AVX and SSSE3 bits
- &test ("edx",1<<29) if ($xmm); # check for SHA
- &jnz (&label("shaext")) if ($xmm);
+ &test ("edx",1<<29) if ($shaext); # check for SHA
+ &jnz (&label("shaext")) if ($shaext);
&or ("ecx","ebx");
&and ("ecx",1<<28|1<<30);
&cmp ("ecx",1<<28|1<<30);
@@ -505,7 +507,7 @@ my @AH=($A,$K256);
&function_end_A();
}
if (!$i386 && $xmm) {{{
-{
+if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
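In this last hunk, the bare "{" that unconditionally opened the SHA Extensions
section becomes "if ($shaext) {", sitting inside the existing
"if (!$i386 && $xmm) {{{" guard (in Perl the triple brace is just the if-block
plus two nested bare scopes, apparently used here to group the SIMD variants).
A structural sketch of the resulting nesting, with comments standing in for
the real implementation bodies and the variable values assumed:

    # Structural sketch only; values as the script might set them.
    my ($i386, $xmm, $shaext) = (0, 1, 1);
    if (!$i386 && $xmm) {{{     # if-block plus two nested scopes
        if ($shaext) {
            # SHA Extensions (SHA-NI) update function, emitted only when enabled
        }
        # SSSE3/AVX implementations follow within the same outer scopes
    }}}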