From c69ed6ea396b184fd56327b475a441081fcade8e Mon Sep 17 00:00:00 2001
From: Andy Polyakov <appro@openssl.org>
Date: Wed, 11 Oct 2006 11:55:11 +0000
Subject: Re-implement md32_common.h [make it simpler!] and eliminate code
 rendered redundant as result.
---
 crypto/md5/asm/md5-586.pl    |   2 +-
 crypto/md5/asm/md5-x86_64.pl |   8 ++--
 crypto/md5/md5_dgst.c        | 110 +------------------------------------------
 crypto/md5/md5_locl.h        |  51 +-------------------
 4 files changed, 7 insertions(+), 164 deletions(-)

(limited to 'crypto/md5')

diff --git a/crypto/md5/asm/md5-586.pl b/crypto/md5/asm/md5-586.pl
index fa3fa3bed5..76ac235f7d 100644
--- a/crypto/md5/asm/md5-586.pl
+++ b/crypto/md5/asm/md5-586.pl
@@ -29,7 +29,7 @@ $X="esi";
 	 0, 7,14, 5,12, 3,10, 1, 8,15, 6,13, 4,11, 2, 9,	# R3
 	 );
 
-&md5_block("md5_block_asm_host_order");
+&md5_block("md5_block_asm_data_order");
 &asm_finish();
 
 sub Np
diff --git a/crypto/md5/asm/md5-x86_64.pl b/crypto/md5/asm/md5-x86_64.pl
index 75b4085a7b..4330eddb17 100755
--- a/crypto/md5/asm/md5-x86_64.pl
+++ b/crypto/md5/asm/md5-x86_64.pl
@@ -115,9 +115,9 @@ $code .= <<EOF;
[...]
diff --git a/crypto/md5/md5_dgst.c b/crypto/md5/md5_dgst.c
[...]
--- a/crypto/md5/md5_dgst.c
+++ b/crypto/md5/md5_dgst.c
@@ ... @@
 int MD5_Init(MD5_CTX *c)
 	{
+	memset (c,0,sizeof(*c));
 	c->A=INIT_DATA_A;
 	c->B=INIT_DATA_B;
 	c->C=INIT_DATA_C;
 	c->D=INIT_DATA_D;
-	c->Nl=0;
-	c->Nh=0;
-	c->num=0;
 	return 1;
 	}
 
-#ifndef md5_block_host_order
-void md5_block_host_order (MD5_CTX *c, const void *data, size_t num)
-	{
-	const MD5_LONG *X=data;
-	register unsigned MD32_REG_T A,B,C,D;
-
-	A=c->A;
-	B=c->B;
-	C=c->C;
-	D=c->D;
-
-	for (;num--;X+=HASH_LBLOCK)
-		{
-	/* Round 0 */
-	R0(A,B,C,D,X[ 0], 7,0xd76aa478L);
-	R0(D,A,B,C,X[ 1],12,0xe8c7b756L);
-	R0(C,D,A,B,X[ 2],17,0x242070dbL);
-	R0(B,C,D,A,X[ 3],22,0xc1bdceeeL);
-	R0(A,B,C,D,X[ 4], 7,0xf57c0fafL);
-	R0(D,A,B,C,X[ 5],12,0x4787c62aL);
-	R0(C,D,A,B,X[ 6],17,0xa8304613L);
-	R0(B,C,D,A,X[ 7],22,0xfd469501L);
-	R0(A,B,C,D,X[ 8], 7,0x698098d8L);
-	R0(D,A,B,C,X[ 9],12,0x8b44f7afL);
-	R0(C,D,A,B,X[10],17,0xffff5bb1L);
-	R0(B,C,D,A,X[11],22,0x895cd7beL);
-	R0(A,B,C,D,X[12], 7,0x6b901122L);
-	R0(D,A,B,C,X[13],12,0xfd987193L);
-	R0(C,D,A,B,X[14],17,0xa679438eL);
-	R0(B,C,D,A,X[15],22,0x49b40821L);
-	/* Round 1 */
-	R1(A,B,C,D,X[ 1], 5,0xf61e2562L);
-	R1(D,A,B,C,X[ 6], 9,0xc040b340L);
-	R1(C,D,A,B,X[11],14,0x265e5a51L);
-	R1(B,C,D,A,X[ 0],20,0xe9b6c7aaL);
-	R1(A,B,C,D,X[ 5], 5,0xd62f105dL);
-	R1(D,A,B,C,X[10], 9,0x02441453L);
-	R1(C,D,A,B,X[15],14,0xd8a1e681L);
-	R1(B,C,D,A,X[ 4],20,0xe7d3fbc8L);
-	R1(A,B,C,D,X[ 9], 5,0x21e1cde6L);
-	R1(D,A,B,C,X[14], 9,0xc33707d6L);
-	R1(C,D,A,B,X[ 3],14,0xf4d50d87L);
-	R1(B,C,D,A,X[ 8],20,0x455a14edL);
-	R1(A,B,C,D,X[13], 5,0xa9e3e905L);
-	R1(D,A,B,C,X[ 2], 9,0xfcefa3f8L);
-	R1(C,D,A,B,X[ 7],14,0x676f02d9L);
-	R1(B,C,D,A,X[12],20,0x8d2a4c8aL);
-	/* Round 2 */
-	R2(A,B,C,D,X[ 5], 4,0xfffa3942L);
-	R2(D,A,B,C,X[ 8],11,0x8771f681L);
-	R2(C,D,A,B,X[11],16,0x6d9d6122L);
-	R2(B,C,D,A,X[14],23,0xfde5380cL);
-	R2(A,B,C,D,X[ 1], 4,0xa4beea44L);
-	R2(D,A,B,C,X[ 4],11,0x4bdecfa9L);
-	R2(C,D,A,B,X[ 7],16,0xf6bb4b60L);
-	R2(B,C,D,A,X[10],23,0xbebfbc70L);
-	R2(A,B,C,D,X[13], 4,0x289b7ec6L);
-	R2(D,A,B,C,X[ 0],11,0xeaa127faL);
-	R2(C,D,A,B,X[ 3],16,0xd4ef3085L);
-	R2(B,C,D,A,X[ 6],23,0x04881d05L);
-	R2(A,B,C,D,X[ 9], 4,0xd9d4d039L);
-	R2(D,A,B,C,X[12],11,0xe6db99e5L);
-	R2(C,D,A,B,X[15],16,0x1fa27cf8L);
-	R2(B,C,D,A,X[ 2],23,0xc4ac5665L);
-	/* Round 3 */
-	R3(A,B,C,D,X[ 0], 6,0xf4292244L);
-	R3(D,A,B,C,X[ 7],10,0x432aff97L);
-	R3(C,D,A,B,X[14],15,0xab9423a7L);
-	R3(B,C,D,A,X[ 5],21,0xfc93a039L);
-	R3(A,B,C,D,X[12], 6,0x655b59c3L);
-	R3(D,A,B,C,X[ 3],10,0x8f0ccc92L);
-	R3(C,D,A,B,X[10],15,0xffeff47dL);
-	R3(B,C,D,A,X[ 1],21,0x85845dd1L);
-	R3(A,B,C,D,X[ 8], 6,0x6fa87e4fL);
-	R3(D,A,B,C,X[15],10,0xfe2ce6e0L);
-	R3(C,D,A,B,X[ 6],15,0xa3014314L);
-	R3(B,C,D,A,X[13],21,0x4e0811a1L);
-	R3(A,B,C,D,X[ 4], 6,0xf7537e82L);
-	R3(D,A,B,C,X[11],10,0xbd3af235L);
-	R3(C,D,A,B,X[ 2],15,0x2ad7d2bbL);
-	R3(B,C,D,A,X[ 9],21,0xeb86d391L);
-
-	A = c->A += A;
-	B = c->B += B;
-	C = c->C += C;
-	D = c->D += D;
-		}
-	}
-#endif
-
 #ifndef md5_block_data_order
 #ifdef X
 #undef X
@@ -274,19 +182,3 @@ void md5_block_data_order (MD5_CTX *c, const void *data_, size_t num)
 	}
 	}
 #endif
-
-#ifdef undef
-int printit(unsigned long *l)
-	{
-	int i,ii;
-
-	for (i=0; i<2; i++)
-		{
-		for (ii=0; ii<8; ii++)
-			{
-			fprintf(stderr,"%08lx ",l[i*8+ii]);
-			}
-		fprintf(stderr,"\n");
-		}
-	}
-#endif
diff --git a/crypto/md5/md5_locl.h b/crypto/md5/md5_locl.h
index d1375dd9f4..968d577995 100644
--- a/crypto/md5/md5_locl.h
+++ b/crypto/md5/md5_locl.h
@@ -68,54 +68,19 @@
 #ifdef MD5_ASM
 # if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
      defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
-#  define md5_block_host_order md5_block_asm_host_order
-# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
-   void md5_block_asm_data_order_aligned (MD5_CTX *c, const MD5_LONG *p,size_t num);
-#  define HASH_BLOCK_DATA_ORDER_ALIGNED md5_block_asm_data_order_aligned
+#  define md5_block_data_order md5_block_asm_data_order
 # elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
-#  define md5_block_host_order md5_block_asm_host_order
 #  define md5_block_data_order md5_block_asm_data_order
 # endif
 #endif
 
-void md5_block_host_order (MD5_CTX *c, const void *p,size_t num);
 void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
 
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
-    defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
-# if !defined(B_ENDIAN)
-/*
- * *_block_host_order is expected to handle aligned data while
- * *_block_data_order - unaligned. As algorithm and host (x86)
- * are in this case of the same "endianness" these two are
- * otherwise indistinguishable. But normally you don't want to
- * call the same function because unaligned access in places
- * where alignment is expected is usually a "Bad Thing". Indeed,
- * on RISCs you get punished with BUS ERROR signal or *severe*
- * performance degradation. Intel CPUs are in turn perfectly
- * capable of loading unaligned data without such drastic side
- * effect. Yes, they say it's slower than aligned load, but no
- * exception is generated and therefore performance degradation
- * is *incomparable* with RISCs. What we should weight here is
- * costs of unaligned access against costs of aligning data.
- * According to my measurements allowing unaligned access results
- * in ~9% performance improvement on Pentium II operating at
- * 266MHz. I won't be surprised if the difference will be higher
- * on faster systems:-)
- *
- *				<appro@fy.chalmers.se>
- */
-# define md5_block_data_order md5_block_host_order
-# endif
-#endif
-
 #define DATA_ORDER_IS_LITTLE_ENDIAN
 
 #define HASH_LONG               MD5_LONG
-#define HASH_LONG_LOG2          MD5_LONG_LOG2
 #define HASH_CTX                MD5_CTX
 #define HASH_CBLOCK             MD5_CBLOCK
-#define HASH_LBLOCK             MD5_LBLOCK
 #define HASH_UPDATE             MD5_Update
 #define HASH_TRANSFORM          MD5_Transform
 #define HASH_FINAL              MD5_Final
@@ -126,21 +91,7 @@ void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
 	ll=(c)->C; HOST_l2c(ll,(s));	\
 	ll=(c)->D; HOST_l2c(ll,(s));	\
 	} while (0)
-#define HASH_BLOCK_HOST_ORDER   md5_block_host_order
-#if !defined(L_ENDIAN) || defined(md5_block_data_order)
 #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
-/*
- * Little-endians (Intel and Alpha) feel better without this.
- * It looks like memcpy does better job than generic
- * md5_block_data_order on copying-n-aligning input data.
- * But frankly speaking I didn't expect such result on Alpha.
- * On the other hand I've got this with egcs-1.0.2 and if
- * program is compiled with another (better?) compiler it
- * might turn out other way around.
- *
- *				<appro@fy.chalmers.se>
- */
-#endif
 
 #include "md32_common.h"
-- 
cgit v1.2.3
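
For context on the code removed from md5_dgst.c: md5_block_host_order consumed its input as an aligned array of words already in host byte order, while md5_block_data_order consumes raw bytes. On little-endian x86 the two are the same function, which is why the old md5_locl.h aliased one to the other; the re-implemented md32_common.h keeps only the byte-order- and alignment-neutral data-order entry point, and the asm modules are renamed to match. Below is a minimal C sketch of what "data order" means. It is an illustration, not OpenSSL code: names prefixed example_ are hypothetical, u32 is assumed to be 32 bits, and only the first of MD5's 64 steps is spelled out (the rest follow the R0-R3 lines deleted above).

#include <stddef.h>

typedef unsigned int u32;            /* assumed 32 bits wide, like MD5_LONG */

/* The heart of *_block_data_order: assemble a 32-bit word from four
 * bytes, least significant first.  Byte-wise loads work at any
 * alignment and give the same result on any host endianness. */
static u32 example_le32_load(const unsigned char *p)
{
    return (u32)p[0] | ((u32)p[1] << 8) | ((u32)p[2] << 16) | ((u32)p[3] << 24);
}

#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n))))
#define F(b,c,d)    ((((c) ^ (d)) & (b)) ^ (d))   /* MD5 round-0 boolean */

/* One round-0 step in the style of the deleted R0 macro:
 * a = b + ROTATE(a + F(b,c,d) + X[k] + t, s) */
#define R0(a,b,c,d,x,s,t) do {                  \
        (a) += (x) + (t) + F((b),(c),(d));      \
        (a)  = ROTATE((a),(s));                 \
        (a) += (b); } while (0)

/* Skeleton of a data-order block function. */
static void example_block_data_order(u32 state[4], const unsigned char *p,
                                     size_t num)
{
    u32 X[16], A, B, C, D;
    int i;

    for (; num--; p += 64) {
        for (i = 0; i < 16; i++)                 /* endian-neutral input */
            X[i] = example_le32_load(p + 4 * i);
        A = state[0]; B = state[1]; C = state[2]; D = state[3];
        R0(A, B, C, D, X[0], 7, 0xd76aa478U);    /* first of 64 steps */
        /* ...the other 63 steps, as in the deleted unrolled code... */
        state[0] += A; state[1] += B; state[2] += C; state[3] += D;
    }
}

On a little-endian machine a compiler can reduce example_le32_load to a plain (possibly unaligned) load, which is the observation the deleted md5_locl.h comment makes about x86.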
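On the md5_locl.h side: the file is a compile-time template configuration. It defines DATA_ORDER_IS_LITTLE_ENDIAN plus the HASH_* macros and then includes md32_common.h, which expands into MD5_Update/MD5_Transform/MD5_Final; after this commit the only block-function hook left is HASH_BLOCK_DATA_ORDER. The following is a simplified model of that idiom with hypothetical EXAMPLE_* names, not the actual contents of md32_common.h.

#include <stddef.h>
#include <string.h>

typedef struct {
    unsigned int A, B, C, D;    /* chaining variables */
    unsigned int Nl, Nh;        /* 64-bit message bit count, low/high word */
    unsigned char data[64];     /* buffered partial block */
    unsigned int num;           /* bytes currently buffered in data[] */
} EXAMPLE_CTX;

/* What md5_locl.h does before including the common header: */
#define HASH_CTX              EXAMPLE_CTX
#define HASH_CBLOCK           64
#define HASH_BLOCK_DATA_ORDER example_block_data_order

void example_block_data_order(HASH_CTX *c, const void *p, size_t num);

/* What the common header then generates (greatly abridged): block
 * buffering and length counting are written once, purely in terms
 * of the macros above. */
int EXAMPLE_Update(HASH_CTX *c, const void *data_, size_t len)
{
    const unsigned char *data = data_;
    unsigned int l;

    if (len == 0)
        return 1;

    l = c->Nl + (((unsigned int)len) << 3);  /* update 64-bit bit count */
    if (l < c->Nl)
        c->Nh++;                             /* carry out of the low word */
    c->Nh += (unsigned int)(len >> 29);
    c->Nl = l;

    if (c->num != 0) {                       /* top up a partial block */
        size_t n = HASH_CBLOCK - c->num;
        if (len < n) {
            memcpy(c->data + c->num, data, len);
            c->num += (unsigned int)len;
            return 1;
        }
        memcpy(c->data + c->num, data, n);
        HASH_BLOCK_DATA_ORDER(c, c->data, 1);
        data += n;
        len -= n;
        c->num = 0;
    }
    if (len >= HASH_CBLOCK) {                /* bulk of the input */
        HASH_BLOCK_DATA_ORDER(c, data, len / HASH_CBLOCK);
        data += len - (len % HASH_CBLOCK);
        len %= HASH_CBLOCK;
    }
    if (len != 0) {                          /* stash the tail */
        memcpy(c->data, data, len);
        c->num = (unsigned int)len;
    }
    return 1;
}

MD5_Final and MD5_Transform fall out of the same expansion, which is why the commit can delete per-digest boilerplate wholesale and why zeroing the whole context in MD5_Init is enough to reinitialize the generated machinery.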
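Finally, HASH_MAKE_STRING, which the patch keeps intact, is the output half of the byte-order story: it feeds A through D to md32_common.h's HOST_l2c, which stores each word least-significant byte first, as RFC 1321 requires. A sketch of the equivalent, again with hypothetical example_ names; the real HOST_l2c is a macro and may carry platform-specific fast paths.

/* Store one 32-bit word as four little-endian bytes and advance
 * the output pointer -- the effect of HOST_l2c in md32_common.h. */
static unsigned char *example_l2c(unsigned int l, unsigned char *s)
{
    *s++ = (unsigned char)( l        & 0xff);
    *s++ = (unsigned char)((l >>  8) & 0xff);
    *s++ = (unsigned char)((l >> 16) & 0xff);
    *s++ = (unsigned char)((l >> 24) & 0xff);
    return s;
}

/* What HASH_MAKE_STRING amounts to for MD5: the 16-byte digest is
 * the four chaining words A..D, each emitted LSB first. */
static void example_make_string(const unsigned int state[4], unsigned char md[16])
{
    unsigned char *p = md;
    int i;

    for (i = 0; i < 4; i++)
        p = example_l2c(state[i], p);
}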