-rwxr-xr-x  Configure                   2
-rw-r--r--  Makefile.org                5
-rw-r--r--  crypto/bn/Makefile.ssl      2
-rw-r--r--  crypto/bn/bn_div.c         18
-rw-r--r--  crypto/bn/bn_lcl.h         17
-rw-r--r--  crypto/des/des_locl.h      10
-rw-r--r--  crypto/md32_common.h       29
-rw-r--r--  crypto/md4/md4_dgst.c      36
-rw-r--r--  crypto/md5/md5_dgst.c      36
-rw-r--r--  crypto/rc5/rc5_locl.h      17
-rw-r--r--  crypto/ripemd/rmd_dgst.c   12
-rw-r--r--  crypto/sha/sha_locl.h      12
12 files changed, 111 insertions, 85 deletions
diff --git a/Configure b/Configure
index db752c2e21..8e5751f3dc 100755
--- a/Configure
+++ b/Configure
@@ -391,7 +391,7 @@ my %table=(
"linux-s390", "gcc:-DB_ENDIAN -DTERMIO -DNO_ASM -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:BN_LLONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-s390x", "gcc:-DB_ENDIAN -DTERMIO -DNO_ASM -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-ia64", "gcc:-DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR:asm/ia64.o:::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
-"linux-x86_64", "gcc:-DL_ENDIAN -DNO_ASM ::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
+"linux-x86_64", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o:::::::::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-sparc", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -mv8 -Wall -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX DES_UNROLL::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-m68", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -Wall -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX DES_UNROLL::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-x86", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -m486 -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
diff --git a/Makefile.org b/Makefile.org
index 961bf76c6e..025b9122c9 100644
--- a/Makefile.org
+++ b/Makefile.org
@@ -274,10 +274,7 @@ do_gnu-shared:
libs="$$libs -l$$i"; \
done
-DETECT_GNU_LD=${CC} -v 2>&1 | grep '^gcc' >/dev/null 2>&1 && \
- my_ld=`${CC} -print-prog-name=ld 2>&1` && \
- [ -n "$$my_ld" ] && \
- $$my_ld -v 2>&1 | grep 'GNU ld' >/dev/null 2>&1
+DETECT_GNU_LD=(${CC} -Wl,-V /dev/null 2>&1 | grep '^GNU ld' )>/dev/null
# For Darwin AKA Mac OS/X (dyld)
do_darwin-shared:
diff --git a/crypto/bn/Makefile.ssl b/crypto/bn/Makefile.ssl
index b912bc4882..1d37892013 100644
--- a/crypto/bn/Makefile.ssl
+++ b/crypto/bn/Makefile.ssl
@@ -136,6 +136,8 @@ asm/ia64-cpp.o: asm/ia64.S
$(CC) $(ASFLAGS) -c -o asm/ia64-cpp.o /tmp/ia64.$$$$.s; \
rm -f /tmp/ia64.$$$$.s
+asm/x86_64-gcc.o: asm/x86_64-gcc.c
+
files:
$(PERL) $(TOP)/util/files.pl Makefile.ssl >> $(TOP)/MINFO
diff --git a/crypto/bn/bn_div.c b/crypto/bn/bn_div.c
index f9a095e3b3..ea099e0c2b 100644
--- a/crypto/bn/bn_div.c
+++ b/crypto/bn/bn_div.c
@@ -150,6 +150,20 @@ int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d,
q; \
})
# define REMAINDER_IS_ALREADY_CALCULATED
+# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
+ /*
+ * Same story here, but it's 128-bit by 64-bit division. Wow!
+ * <appro@fy.chalmers.se>
+ */
+# define bn_div_words(n0,n1,d0) \
+ ({ asm volatile ( \
+ "divq %4" \
+ : "=a"(q), "=d"(rem) \
+ : "a"(n1), "d"(n0), "g"(d0) \
+ : "cc"); \
+ q; \
+ })
+# define REMAINDER_IS_ALREADY_CALCULATED
# endif /* __<cpu> */
# endif /* __GNUC__ */
#endif /* OPENSSL_NO_ASM */
@@ -296,7 +310,9 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor,
rem=(n1-q*d0)&BN_MASK2;
#endif
-#ifdef BN_UMULT_HIGH
+#if defined(BN_UMULT_LOHI)
+ BN_UMULT_LOHI(t2l,t2h,d1,q);
+#elif defined(BN_UMULT_HIGH)
t2l = d1 * q;
t2h = BN_UMULT_HIGH(d1,q);
#else
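
The x86_64 branch above feeds the full 128-bit dividend to a single divq: the dividend goes in RDX:RAX, the quotient comes back in RAX and the remainder in RDX, which is what REMAINDER_IS_ALREADY_CALCULATED advertises to the caller. Below is a minimal standalone sketch of the same idiom, assuming an x86_64 gcc build; the function name div_128_by_64 and the test values are illustrative and not part of the patch.

/* div128.c -- standalone sketch of the 128-by-64-bit division idiom
 * behind the new x86_64 bn_div_words(); names and test values here are
 * illustrative only.  Build on x86_64: gcc -O2 div128.c -o div128
 */
#include <stdio.h>

typedef unsigned long u64;		/* 64 bits on LP64 x86_64 */

/* Divide the 128-bit value hi:lo by d.  divq takes the dividend in
 * RDX:RAX and leaves the quotient in RAX, the remainder in RDX.  The
 * caller must guarantee hi < d, otherwise the quotient overflows and
 * the CPU raises a divide error.  The patch uses a "g" constraint for
 * the divisor; "rm" is used here so that gcc cannot turn a constant
 * divisor into an immediate operand, which divq does not accept.
 */
static u64 div_128_by_64(u64 hi, u64 lo, u64 d, u64 *rem)
	{
	u64 q, r;
	asm volatile ("divq %4"
		: "=a"(q), "=d"(r)
		: "a"(lo), "d"(hi), "rm"(d)
		: "cc");
	*rem = r;
	return q;
	}

int main(void)
	{
	unsigned __int128 n = ((unsigned __int128)1 << 80) + 12345;
	u64 d = 999983, q, r;

	q = div_128_by_64((u64)(n >> 64), (u64)n, d, &r);
	printf("asm : q=%lu r=%lu\n", q, r);
	printf("i128: q=%lu r=%lu\n", (u64)(n / d), (u64)(n % d));
	return 0;
	}
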
diff --git a/crypto/bn/bn_lcl.h b/crypto/bn/bn_lcl.h
index 8a4dba375a..7cf29ba94d 100644
--- a/crypto/bn/bn_lcl.h
+++ b/crypto/bn/bn_lcl.h
@@ -230,6 +230,21 @@ struct bignum_ctx
: "r"(a), "r"(b)); \
ret; })
# endif /* compiler */
+# elif defined(__x86_64) && defined(SIXTY_FOUR_BIT_LONG)
+# if defined(__GNUC__)
+# define BN_UMULT_HIGH(a,b) ({ \
+ register BN_ULONG ret,discard; \
+ asm ("mulq %3" \
+ : "=a"(discard),"=d"(ret) \
+ : "a"(a), "g"(b) \
+ : "cc"); \
+ ret; })
+# define BN_UMULT_LOHI(low,high,a,b) \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(a),"g"(b) \
+ : "cc");
+# endif
# endif /* cpu */
#endif /* OPENSSL_NO_ASM */
@@ -337,7 +352,7 @@ struct bignum_ctx
#define LBITS(a) ((a)&BN_MASK2l)
#define HBITS(a) (((a)>>BN_BITS4)&BN_MASK2l)
-#define L2HBITS(a) ((BN_ULONG)((a)&BN_MASK2l)<<BN_BITS4)
+#define L2HBITS(a) (((a)<<BN_BITS4)&BN_MASK2)
#define LLBITS(a) ((a)&BN_MASKl)
#define LHBITS(a) (((a)>>BN_BITS2)&BN_MASKl)
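
BN_UMULT_LOHI is the multiplicative counterpart: one mulq yields the whole 128-bit product, low half in RAX and high half in RDX, so BN_div() no longer has to call BN_UMULT_HIGH and then multiply again for the low word. A minimal standalone sketch follows, checked against gcc's unsigned __int128; the name umult_lohi and the operands are illustrative only.

/* mul128.c -- standalone sketch of the mulq idiom behind the new
 * BN_UMULT_HIGH/BN_UMULT_LOHI macros for x86_64.
 */
#include <stdio.h>

typedef unsigned long u64;

/* One mulq produces the full 128-bit product: low half in RAX, high
 * half in RDX, exactly the pair BN_UMULT_LOHI hands back to BN_div().
 * "rm" is used for the second operand (the patch uses "g") so that a
 * constant cannot become an immediate, which mulq rejects.
 */
static void umult_lohi(u64 *low, u64 *high, u64 a, u64 b)
	{
	asm ("mulq %3"
		: "=a"(*low), "=d"(*high)
		: "a"(a), "rm"(b)
		: "cc");
	}

int main(void)
	{
	u64 a = 0xfedcba9876543210UL, b = 0x0123456789abcdefUL, lo, hi;
	unsigned __int128 p = (unsigned __int128)a * b;

	umult_lohi(&lo, &hi, a, b);
	printf("asm : %016lx %016lx\n", hi, lo);
	printf("i128: %016lx %016lx\n", (u64)(p >> 64), (u64)p);
	return 0;
	}
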
diff --git a/crypto/des/des_locl.h b/crypto/des/des_locl.h
index 70e833be3f..855a462d28 100644
--- a/crypto/des/des_locl.h
+++ b/crypto/des/des_locl.h
@@ -162,6 +162,16 @@
#if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
#define ROTATE(a,n) (_lrotr(a,n))
+#elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+# define ROTATE(a,n) ({ register unsigned int ret; \
+ asm ("rorl %1,%0" \
+ : "=r"(ret) \
+ : "I"(n),"0"(a) \
+ : "cc"); \
+ ret; \
+ })
+# endif
#else
#define ROTATE(a,n) (((a)>>(n))+((a)<<(32-(n))))
#endif
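
The DES ROTATE() counts are all compile-time constants, which is what the "I" constraint (an immediate in the 0..31 range) relies on; gcc then emits a single rorl instead of the shift-and-add fallback kept in the #else branch. Here is a small standalone comparison of the two forms, assuming an x86/x86_64 gcc build; the macro names ROTATE_ASM and ROTATE_C are illustrative.

/* des_rot.c -- standalone sketch of the constant-count rorl ROTATE()
 * added for x86/x86_64, next to the portable form it bypasses.
 */
#include <stdio.h>

/* "I" constrains n to a compile-time constant in 0..31, which every
 * ROTATE() in the DES round functions satisfies; gcc emits one rorl.
 */
#define ROTATE_ASM(a,n) ({ register unsigned int ret;	\
			asm ("rorl %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			ret;				\
			})

/* the generic fallback from the #else branch */
#define ROTATE_C(a,n) (((a)>>(n))+((a)<<(32-(n))))

int main(void)
	{
	unsigned int x = 0x80000001U;

	printf("asm: %08x  C: %08x\n", ROTATE_ASM(x, 4), ROTATE_C(x, 4));
	return 0;
	}
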
diff --git a/crypto/md32_common.h b/crypto/md32_common.h
index 275b93618b..573850b122 100644
--- a/crypto/md32_common.h
+++ b/crypto/md32_common.h
@@ -198,7 +198,7 @@
*
* <appro@fy.chalmers.se>
*/
-# if defined(__i386) || defined(__i386__)
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
asm ( \
"roll %1,%0" \
@@ -224,7 +224,7 @@
*/
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
-# if (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
+# if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
# define BE_FETCH32(a) ({ register unsigned int l=(a);\
asm ( \
"bswapl %0" \
@@ -610,3 +610,28 @@ int HASH_FINAL (unsigned char *md, HASH_CTX *c)
*/
return 1;
}
+
+#ifndef MD32_REG_T
+#define MD32_REG_T long
+/*
+ * This comment was originally written for MD5, which is why it
+ * discusses A-D. But it basically applies to all 32-bit digests,
+ * which is why it was moved to this common header file.
+ *
+ * In case you wonder why A-D are declared as long and not
+ * as MD5_LONG. Doing so results in slight performance
+ * boost on LP64 architectures. The catch is we don't
+ * really care if 32 MSBs of a 64-bit register get polluted
+ * with eventual overflows as we *save* only 32 LSBs in
+ * *either* case. Now declaring 'em long excuses the compiler
+ * from keeping 32 MSBs zeroed resulting in 13% performance
+ * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
+ * Well, to be honest it should say that this *prevents*
+ * performance degradation.
+ * <appro@fy.chalmers.se>
+ * Apparently there are LP64 compilers that generate better
+ * code if A-D are declared int; GCC-x86_64 is the most
+ * notable example.
+ * <appro@fy.chalmers.se>
+ */
+#endif
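
The point of the MD32_REG_T indirection is that the working variables only ever have their low 32 bits stored back into the hash context, so they may live in whatever register width suits the target: long by default, int where that wins (the new linux-x86_64 line passes -DMD32_REG_T=int). The standalone illustration below shows why the high bits never matter; its accumulation loop is made up purely for the example.

/* md32_reg.c -- build with and without -DMD32_REG_T=int; the saved
 * 32-bit value is identical either way, which is the whole point.
 */
#include <stdio.h>

#ifndef MD32_REG_T
#define MD32_REG_T long			/* the header's default */
#endif

int main(void)
	{
	register unsigned MD32_REG_T A = 0xffffffffU;	/* 64-bit if long */
	unsigned int saved;
	int i;

	/* let A run past 32 bits (harmlessly, if it is a 64-bit type) */
	for (i = 0; i < 4; i++)
		A += 0x9e3779b9U;

	/* truncation on store discards the high half either way, so the
	 * compiler never has to keep those bits zeroed */
	saved = (unsigned int)A;
	printf("sizeof(A)=%u  saved=%08x\n",
		(unsigned int)sizeof(A), saved);
	return 0;
	}
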
diff --git a/crypto/md4/md4_dgst.c b/crypto/md4/md4_dgst.c
index 6446f5f5e7..7afb7185b6 100644
--- a/crypto/md4/md4_dgst.c
+++ b/crypto/md4/md4_dgst.c
@@ -86,21 +86,7 @@ int MD4_Init(MD4_CTX *c)
void md4_block_host_order (MD4_CTX *c, const void *data, int num)
{
const MD4_LONG *X=data;
- register unsigned long A,B,C,D;
- /*
- * In case you wonder why A-D are declared as long and not
- * as MD4_LONG. Doing so results in slight performance
- * boost on LP64 architectures. The catch is we don't
- * really care if 32 MSBs of a 64-bit register get polluted
- * with eventual overflows as we *save* only 32 LSBs in
- * *either* case. Now declaring 'em long excuses the compiler
- * from keeping 32 MSBs zeroed resulting in 13% performance
- * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
- * Well, to be honest it should say that this *prevents*
- * performance degradation.
- *
- * <appro@fy.chalmers.se>
- */
+ register unsigned MD32_REG_T A,B,C,D;
A=c->A;
B=c->B;
@@ -176,25 +162,11 @@ void md4_block_host_order (MD4_CTX *c, const void *data, int num)
void md4_block_data_order (MD4_CTX *c, const void *data_, int num)
{
const unsigned char *data=data_;
- register unsigned long A,B,C,D,l;
- /*
- * In case you wonder why A-D are declared as long and not
- * as MD4_LONG. Doing so results in slight performance
- * boost on LP64 architectures. The catch is we don't
- * really care if 32 MSBs of a 64-bit register get polluted
- * with eventual overflows as we *save* only 32 LSBs in
- * *either* case. Now declaring 'em long excuses the compiler
- * from keeping 32 MSBs zeroed resulting in 13% performance
- * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
- * Well, to be honest it should say that this *prevents*
- * performance degradation.
- *
- * <appro@fy.chalmers.se>
- */
+ register unsigned MD32_REG_T A,B,C,D,l;
#ifndef MD32_XARRAY
/* See comment in crypto/sha/sha_locl.h for details. */
- unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
- XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
# define X(i) XX##i
#else
MD4_LONG XX[MD4_LBLOCK];
diff --git a/crypto/md5/md5_dgst.c b/crypto/md5/md5_dgst.c
index c38a3f021e..9c7abc3697 100644
--- a/crypto/md5/md5_dgst.c
+++ b/crypto/md5/md5_dgst.c
@@ -86,21 +86,7 @@ int MD5_Init(MD5_CTX *c)
void md5_block_host_order (MD5_CTX *c, const void *data, int num)
{
const MD5_LONG *X=data;
- register unsigned long A,B,C,D;
- /*
- * In case you wonder why A-D are declared as long and not
- * as MD5_LONG. Doing so results in slight performance
- * boost on LP64 architectures. The catch is we don't
- * really care if 32 MSBs of a 64-bit register get polluted
- * with eventual overflows as we *save* only 32 LSBs in
- * *either* case. Now declaring 'em long excuses the compiler
- * from keeping 32 MSBs zeroed resulting in 13% performance
- * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
- * Well, to be honest it should say that this *prevents*
- * performance degradation.
- *
- * <appro@fy.chalmers.se>
- */
+ register unsigned MD32_REG_T A,B,C,D;
A=c->A;
B=c->B;
@@ -193,25 +179,11 @@ void md5_block_host_order (MD5_CTX *c, const void *data, int num)
void md5_block_data_order (MD5_CTX *c, const void *data_, int num)
{
const unsigned char *data=data_;
- register unsigned long A,B,C,D,l;
- /*
- * In case you wonder why A-D are declared as long and not
- * as MD5_LONG. Doing so results in slight performance
- * boost on LP64 architectures. The catch is we don't
- * really care if 32 MSBs of a 64-bit register get polluted
- * with eventual overflows as we *save* only 32 LSBs in
- * *either* case. Now declaring 'em long excuses the compiler
- * from keeping 32 MSBs zeroed resulting in 13% performance
- * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
- * Well, to be honest it should say that this *prevents*
- * performance degradation.
- *
- * <appro@fy.chalmers.se>
- */
+ register unsigned MD32_REG_T A,B,C,D,l;
#ifndef MD32_XARRAY
/* See comment in crypto/sha/sha_locl.h for details. */
- unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
- XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
# define X(i) XX##i
#else
MD5_LONG XX[MD5_LBLOCK];
diff --git a/crypto/rc5/rc5_locl.h b/crypto/rc5/rc5_locl.h
index d3871c6555..c700dfb1a5 100644
--- a/crypto/rc5/rc5_locl.h
+++ b/crypto/rc5/rc5_locl.h
@@ -149,6 +149,23 @@
#if defined(OPENSSL_SYS_WIN32) && defined(_MSC_VER)
#define ROTATE_l32(a,n) _lrotl(a,n)
#define ROTATE_r32(a,n) _lrotr(a,n)
+#elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+# define ROTATE_l32(a,n) ({ register unsigned int ret; \
+ asm ("roll %%cl,%0" \
+ : "=r"(ret) \
+ : "c"(n),"0"(a) \
+ : "cc"); \
+ ret; \
+ })
+# define ROTATE_r32(a,n) ({ register unsigned int ret; \
+ asm ("rorl %%cl,%0" \
+ : "=r"(ret) \
+ : "c"(n),"0"(a) \
+ : "cc"); \
+ ret; \
+ })
+# endif
#else
#define ROTATE_l32(a,n) (((a)<<(n&0x1f))|(((a)&0xffffffff)>>(32-(n&0x1f))))
#define ROTATE_r32(a,n) (((a)<<(32-(n&0x1f)))|(((a)&0xffffffff)>>(n&0x1f)))
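
Unlike DES, RC5 rotates by data-dependent amounts, so the count cannot be an immediate; the "c" constraint pins it in CL, the only register the variable-count roll/rorl forms accept. A standalone sketch comparing the asm form with the masked portable fallback follows; the macro names used here are illustrative.

/* rc5_rot.c -- standalone sketch of the variable-count rotates added
 * for RC5 on x86/x86_64.
 */
#include <stdio.h>

#define ROTATE_L32_ASM(a,n) ({ register unsigned int ret;	\
			asm ("roll %%cl,%0"			\
				: "=r"(ret)			\
				: "c"(n), "0"(a)		\
				: "cc");			\
			ret;					\
			})

/* the portable fallback, with the count masked to 0..31 */
#define ROTATE_L32_C(a,n) \
	(((a)<<((n)&0x1f))|(((a)&0xffffffff)>>(32-((n)&0x1f))))

int main(void)
	{
	unsigned int x = 0xdeadbeef;
	int n;

	for (n = 1; n < 32; n += 13)
		printf("n=%2d  asm=%08x  C=%08x\n",
			n, ROTATE_L32_ASM(x, n), ROTATE_L32_C(x, n));
	return 0;
	}
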
diff --git a/crypto/ripemd/rmd_dgst.c b/crypto/ripemd/rmd_dgst.c
index a3170f7c8a..f351f00eea 100644
--- a/crypto/ripemd/rmd_dgst.c
+++ b/crypto/ripemd/rmd_dgst.c
@@ -90,8 +90,8 @@ int RIPEMD160_Init(RIPEMD160_CTX *c)
void ripemd160_block_host_order (RIPEMD160_CTX *ctx, const void *p, int num)
{
const RIPEMD160_LONG *XX=p;
- register unsigned long A,B,C,D,E;
- register unsigned long a,b,c,d,e;
+ register unsigned MD32_REG_T A,B,C,D,E;
+ register unsigned MD32_REG_T a,b,c,d,e;
for (;num--;XX+=HASH_LBLOCK)
{
@@ -290,12 +290,12 @@ void ripemd160_block_host_order (RIPEMD160_CTX *ctx, const void *p, int num)
void ripemd160_block_data_order (RIPEMD160_CTX *ctx, const void *p, int num)
{
const unsigned char *data=p;
- register unsigned long A,B,C,D,E;
- unsigned long a,b,c,d,e,l;
+ register unsigned MD32_REG_T A,B,C,D,E;
+ unsigned MD32_REG_T a,b,c,d,e,l;
#ifndef MD32_XARRAY
/* See comment in crypto/sha/sha_locl.h for details. */
- unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
- XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
# define X(i) XX##i
#else
RIPEMD160_LONG XX[16];
diff --git a/crypto/sha/sha_locl.h b/crypto/sha/sha_locl.h
index 471dfb9f8f..2dd63a62a6 100644
--- a/crypto/sha/sha_locl.h
+++ b/crypto/sha/sha_locl.h
@@ -224,10 +224,10 @@ int HASH_INIT (SHA_CTX *c)
void HASH_BLOCK_HOST_ORDER (SHA_CTX *c, const void *d, int num)
{
const SHA_LONG *W=d;
- register unsigned long A,B,C,D,E,T;
+ register unsigned MD32_REG_T A,B,C,D,E,T;
#ifndef MD32_XARRAY
- unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
- XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
#else
SHA_LONG XX[16];
#endif
@@ -349,10 +349,10 @@ void HASH_BLOCK_HOST_ORDER (SHA_CTX *c, const void *d, int num)
void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, int num)
{
const unsigned char *data=p;
- register unsigned long A,B,C,D,E,T,l;
+ register unsigned MD32_REG_T A,B,C,D,E,T,l;
#ifndef MD32_XARRAY
- unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
- XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
#else
SHA_LONG XX[16];
#endif