author     Andy Polyakov <appro@openssl.org>    2013-05-19 21:55:30 +0200
committer  Ben Laurie <ben@links.org>           2013-09-16 14:14:56 +0100
commit     3b4be0018b576675b3690c471519296edf7401d6 (patch)
tree       f49a1f888991915ebbb6514f3f80db95ca612baf /crypto/modes
parent     125c2ed8a3d25d56768f92075547d7fdc8db12be (diff)
gcm128.c: update from master (add AVX and VIS3 support).
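
Note on the new x86_64 dispatch in CRYPTO_gcm128_init() (see the hunk at @@ -715,9 +733,15 @@ below): OPENSSL_ia32cap_P[1] caches CPUID.1:ECX, where bit 22 is MOVBE and bit 28 is AVX, so the mask 0x41 after a 22-bit shift selects exactly those two features. A minimal standalone sketch of that test follows; it is illustration only, not OpenSSL code, and the cap[] values in main() are made up.

/* Illustration only: the cap[] values below are hypothetical. */
#include <stdio.h>

static int has_avx_movbe(const unsigned int cap[2])
{
    /* bit 22 = MOVBE, bit 28 = AVX; 0x41 = (1<<0)|(1<<6) after the 22-bit shift */
    return ((cap[1] >> 22) & 0x41) == 0x41;
}

int main(void)
{
    unsigned int cap[2] = { 1u << 24, (1u << 22) | (1u << 28) };
    printf("GHASH path: %s\n", has_avx_movbe(cap) ? "gcm_*_avx" : "gcm_*_clmul");
    return 0;
}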
Diffstat (limited to 'crypto/modes')
-rw-r--r--  crypto/modes/gcm128.c      144
-rw-r--r--  crypto/modes/modes_lcl.h     4
2 files changed, 121 insertions, 27 deletions
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c
index 0e6ff8b0a1..93fcf11866 100644
--- a/crypto/modes/gcm128.c
+++ b/crypto/modes/gcm128.c
@@ -655,6 +655,16 @@ void gcm_init_clmul(u128 Htable[16],const u64 Xi[2]);
void gcm_gmult_clmul(u64 Xi[2],const u128 Htable[16]);
void gcm_ghash_clmul(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+# define gcm_init_avx gcm_init_clmul
+# define gcm_gmult_avx gcm_gmult_clmul
+# define gcm_ghash_avx gcm_ghash_clmul
+#else
+void gcm_init_avx(u128 Htable[16],const u64 Xi[2]);
+void gcm_gmult_avx(u64 Xi[2],const u128 Htable[16]);
+void gcm_ghash_avx(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
+#endif
+
# if defined(__i386) || defined(__i386__) || defined(_M_IX86)
# define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(u64 Xi[2],const u128 Htable[16]);
@@ -671,6 +681,14 @@ void gcm_ghash_4bit_x86(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len
void gcm_gmult_neon(u64 Xi[2],const u128 Htable[16]);
void gcm_ghash_neon(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
# endif
+# elif defined(__sparc__) || defined(__sparc)
+# include "sparc_arch.h"
+# define GHASH_ASM_SPARC
+# define GCM_FUNCREF_4BIT
+extern unsigned int OPENSSL_sparcv9cap_P[];
+void gcm_init_vis3(u128 Htable[16],const u64 Xi[2]);
+void gcm_gmult_vis3(u64 Xi[2],const u128 Htable[16]);
+void gcm_ghash_vis3(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
# endif
#endif
@@ -715,9 +733,15 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
if (OPENSSL_ia32cap_P[0]&(1<<24) && /* check FXSR bit */
OPENSSL_ia32cap_P[1]&(1<<1) ) { /* check PCLMULQDQ bit */
- gcm_init_clmul(ctx->Htable,ctx->H.u);
- ctx->gmult = gcm_gmult_clmul;
- ctx->ghash = gcm_ghash_clmul;
+ if (((OPENSSL_ia32cap_P[1]>>22)&0x41)==0x41) { /* AVX+MOVBE */
+ gcm_init_avx(ctx->Htable,ctx->H.u);
+ ctx->gmult = gcm_gmult_avx;
+ ctx->ghash = gcm_ghash_avx;
+ } else {
+ gcm_init_clmul(ctx->Htable,ctx->H.u);
+ ctx->gmult = gcm_gmult_clmul;
+ ctx->ghash = gcm_ghash_clmul;
+ }
return;
}
# endif
@@ -747,6 +771,16 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx,void *key,block128_f block)
ctx->gmult = gcm_gmult_4bit;
ctx->ghash = gcm_ghash_4bit;
}
+# elif defined(GHASH_ASM_SPARC)
+ if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
+ gcm_init_vis3(ctx->Htable,ctx->H.u);
+ ctx->gmult = gcm_gmult_vis3;
+ ctx->ghash = gcm_ghash_vis3;
+ } else {
+ gcm_init_4bit(ctx->Htable,ctx->H.u);
+ ctx->gmult = gcm_gmult_4bit;
+ ctx->ghash = gcm_ghash_4bit;
+ }
# else
gcm_init_4bit(ctx->Htable,ctx->H.u);
# endif
@@ -941,15 +975,17 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
size_t j=GHASH_CHUNK;
while (j) {
+ size_t *out_t=(size_t *)out;
+ const size_t *in_t=(const size_t *)in;
+
(*block)(ctx->Yi.c,ctx->EKi.c,key);
++ctr;
if (is_endian.little)
PUTU32(ctx->Yi.c+12,ctr);
else
ctx->Yi.d[3] = ctr;
- for (i=0; i<16; i+=sizeof(size_t))
- *(size_t *)(out+i) =
- *(size_t *)(in+i)^*(size_t *)(ctx->EKi.c+i);
+ for (i=0; i<16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
out += 16;
in += 16;
j -= 16;
@@ -961,15 +997,17 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
size_t j=i;
while (len>=16) {
+ size_t *out_t=(size_t *)out;
+ const size_t *in_t=(const size_t *)in;
+
(*block)(ctx->Yi.c,ctx->EKi.c,key);
++ctr;
if (is_endian.little)
PUTU32(ctx->Yi.c+12,ctr);
else
ctx->Yi.d[3] = ctr;
- for (i=0; i<16; i+=sizeof(size_t))
- *(size_t *)(out+i) =
- *(size_t *)(in+i)^*(size_t *)(ctx->EKi.c+i);
+ for (i=0; i<16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i] ^ ctx->EKi.t[i];
out += 16;
in += 16;
len -= 16;
@@ -978,16 +1016,18 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
}
#else
while (len>=16) {
+ size_t *out_t=(size_t *)out;
+ const size_t *in_t=(const size_t *)in;
+
(*block)(ctx->Yi.c,ctx->EKi.c,key);
++ctr;
if (is_endian.little)
PUTU32(ctx->Yi.c+12,ctr);
else
ctx->Yi.d[3] = ctr;
- for (i=0; i<16; i+=sizeof(size_t))
- *(size_t *)(ctx->Xi.c+i) ^=
- *(size_t *)(out+i) =
- *(size_t *)(in+i)^*(size_t *)(ctx->EKi.c+i);
+ for (i=0; i<16/sizeof(size_t); ++i)
+ ctx->Xi.t[i] ^=
+ out_t[i] = in_t[i]^ctx->EKi.t[i];
GCM_MUL(ctx,Xi);
out += 16;
in += 16;
@@ -1091,15 +1131,17 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
GHASH(ctx,in,GHASH_CHUNK);
while (j) {
+ size_t *out_t=(size_t *)out;
+ const size_t *in_t=(const size_t *)in;
+
(*block)(ctx->Yi.c,ctx->EKi.c,key);
++ctr;
if (is_endian.little)
PUTU32(ctx->Yi.c+12,ctr);
else
ctx->Yi.d[3] = ctr;
- for (i=0; i<16; i+=sizeof(size_t))
- *(size_t *)(out+i) =
- *(size_t *)(in+i)^*(size_t *)(ctx->EKi.c+i);
+ for (i=0; i<16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i]^ctx->EKi.t[i];
out += 16;
in += 16;
j -= 16;
@@ -1109,15 +1151,17 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
if ((i = (len&(size_t)-16))) {
GHASH(ctx,in,i);
while (len>=16) {
+ size_t *out_t=(size_t *)out;
+ const size_t *in_t=(const size_t *)in;
+
(*block)(ctx->Yi.c,ctx->EKi.c,key);
++ctr;
if (is_endian.little)
PUTU32(ctx->Yi.c+12,ctr);
else
ctx->Yi.d[3] = ctr;
- for (i=0; i<16; i+=sizeof(size_t))
- *(size_t *)(out+i) =
- *(size_t *)(in+i)^*(size_t *)(ctx->EKi.c+i);
+ for (i=0; i<16/sizeof(size_t); ++i)
+ out_t[i] = in_t[i]^ctx->EKi.t[i];
out += 16;
in += 16;
len -= 16;
@@ -1125,16 +1169,19 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
}
#else
while (len>=16) {
+ size_t *out_t=(size_t *)out;
+ const size_t *in_t=(const size_t *)in;
+
(*block)(ctx->Yi.c,ctx->EKi.c,key);
++ctr;
if (is_endian.little)
PUTU32(ctx->Yi.c+12,ctr);
else
ctx->Yi.d[3] = ctr;
- for (i=0; i<16; i+=sizeof(size_t)) {
- size_t c = *(size_t *)(in+i);
- *(size_t *)(out+i) = c^*(size_t *)(ctx->EKi.c+i);
- *(size_t *)(ctx->Xi.c+i) ^= c;
+ for (i=0; i<16/sizeof(size_t); ++i) {
+ size_t c = in_t[i];
+ out_t[i] = c^ctx->EKi.t[i];
+ ctx->Xi.t[i] ^= c;
}
GCM_MUL(ctx,Xi);
out += 16;
@@ -1669,6 +1716,46 @@ static const u8 IV18[]={0x93,0x13,0x22,0x5d,0xf8,0x84,0x06,0xe5,0x55,0x90,0x9c,0
0xa2,0x41,0x89,0x97,0x20,0x0e,0xf8,0x2e,0x44,0xae,0x7e,0x3f},
T18[]= {0xa4,0x4a,0x82,0x66,0xee,0x1c,0x8e,0xb0,0xc8,0xb5,0xd4,0xcf,0x5a,0xe9,0xf1,0x9a};
+/* Test Case 19 */
+#define K19 K1
+#define P19 P1
+#define IV19 IV1
+#define C19 C1
+static const u8 A19[]= {0xd9,0x31,0x32,0x25,0xf8,0x84,0x06,0xe5,0xa5,0x59,0x09,0xc5,0xaf,0xf5,0x26,0x9a,
+ 0x86,0xa7,0xa9,0x53,0x15,0x34,0xf7,0xda,0x2e,0x4c,0x30,0x3d,0x8a,0x31,0x8a,0x72,
+ 0x1c,0x3c,0x0c,0x95,0x95,0x68,0x09,0x53,0x2f,0xcf,0x0e,0x24,0x49,0xa6,0xb5,0x25,
+ 0xb1,0x6a,0xed,0xf5,0xaa,0x0d,0xe6,0x57,0xba,0x63,0x7b,0x39,0x1a,0xaf,0xd2,0x55,
+ 0x52,0x2d,0xc1,0xf0,0x99,0x56,0x7d,0x07,0xf4,0x7f,0x37,0xa3,0x2a,0x84,0x42,0x7d,
+ 0x64,0x3a,0x8c,0xdc,0xbf,0xe5,0xc0,0xc9,0x75,0x98,0xa2,0xbd,0x25,0x55,0xd1,0xaa,
+ 0x8c,0xb0,0x8e,0x48,0x59,0x0d,0xbb,0x3d,0xa7,0xb0,0x8b,0x10,0x56,0x82,0x88,0x38,
+ 0xc5,0xf6,0x1e,0x63,0x93,0xba,0x7a,0x0a,0xbc,0xc9,0xf6,0x62,0x89,0x80,0x15,0xad},
+ T19[]= {0x5f,0xea,0x79,0x3a,0x2d,0x6f,0x97,0x4d,0x37,0xe6,0x8e,0x0c,0xb8,0xff,0x94,0x92};
+
+/* Test Case 20 */
+#define K20 K1
+#define A20 A1
+static const u8 IV20[64]={0xff,0xff,0xff,0xff}, /* this results in 0xff in counter LSB */
+ P20[288],
+ C20[]= {0x56,0xb3,0x37,0x3c,0xa9,0xef,0x6e,0x4a,0x2b,0x64,0xfe,0x1e,0x9a,0x17,0xb6,0x14,
+ 0x25,0xf1,0x0d,0x47,0xa7,0x5a,0x5f,0xce,0x13,0xef,0xc6,0xbc,0x78,0x4a,0xf2,0x4f,
+ 0x41,0x41,0xbd,0xd4,0x8c,0xf7,0xc7,0x70,0x88,0x7a,0xfd,0x57,0x3c,0xca,0x54,0x18,
+ 0xa9,0xae,0xff,0xcd,0x7c,0x5c,0xed,0xdf,0xc6,0xa7,0x83,0x97,0xb9,0xa8,0x5b,0x49,
+ 0x9d,0xa5,0x58,0x25,0x72,0x67,0xca,0xab,0x2a,0xd0,0xb2,0x3c,0xa4,0x76,0xa5,0x3c,
+ 0xb1,0x7f,0xb4,0x1c,0x4b,0x8b,0x47,0x5c,0xb4,0xf3,0xf7,0x16,0x50,0x94,0xc2,0x29,
+ 0xc9,0xe8,0xc4,0xdc,0x0a,0x2a,0x5f,0xf1,0x90,0x3e,0x50,0x15,0x11,0x22,0x13,0x76,
+ 0xa1,0xcd,0xb8,0x36,0x4c,0x50,0x61,0xa2,0x0c,0xae,0x74,0xbc,0x4a,0xcd,0x76,0xce,
+ 0xb0,0xab,0xc9,0xfd,0x32,0x17,0xef,0x9f,0x8c,0x90,0xbe,0x40,0x2d,0xdf,0x6d,0x86,
+ 0x97,0xf4,0xf8,0x80,0xdf,0xf1,0x5b,0xfb,0x7a,0x6b,0x28,0x24,0x1e,0xc8,0xfe,0x18,
+ 0x3c,0x2d,0x59,0xe3,0xf9,0xdf,0xff,0x65,0x3c,0x71,0x26,0xf0,0xac,0xb9,0xe6,0x42,
+ 0x11,0xf4,0x2b,0xae,0x12,0xaf,0x46,0x2b,0x10,0x70,0xbe,0xf1,0xab,0x5e,0x36,0x06,
+ 0x87,0x2c,0xa1,0x0d,0xee,0x15,0xb3,0x24,0x9b,0x1a,0x1b,0x95,0x8f,0x23,0x13,0x4c,
+ 0x4b,0xcc,0xb7,0xd0,0x32,0x00,0xbc,0xe4,0x20,0xa2,0xf8,0xeb,0x66,0xdc,0xf3,0x64,
+ 0x4d,0x14,0x23,0xc1,0xb5,0x69,0x90,0x03,0xc1,0x3e,0xce,0xf4,0xbf,0x38,0xa3,0xb6,
+ 0x0e,0xed,0xc3,0x40,0x33,0xba,0xc1,0x90,0x27,0x83,0xdc,0x6d,0x89,0xe2,0xe7,0x74,
+ 0x18,0x8a,0x43,0x9c,0x7e,0xbc,0xc0,0x67,0x2d,0xbd,0xa4,0xdd,0xcf,0xb2,0x79,0x46,
+ 0x13,0xb0,0xbe,0x41,0x31,0x5e,0xf7,0x78,0x70,0x8a,0x70,0xee,0x7d,0x75,0x16,0x5c},
+ T20[]= {0x8b,0x30,0x7f,0x6b,0x33,0x28,0x6d,0x0a,0xb0,0x26,0xa9,0xed,0x3f,0xe1,0xe8,0x5f};
+
#define TEST_CASE(n) do { \
u8 out[sizeof(P##n)]; \
AES_set_encrypt_key(K##n,sizeof(K##n)*8,&key); \
@@ -1713,6 +1800,8 @@ int main()
TEST_CASE(16);
TEST_CASE(17);
TEST_CASE(18);
+ TEST_CASE(19);
+ TEST_CASE(20);
#ifdef OPENSSL_CPUID_OBJ
{
@@ -1743,11 +1832,16 @@ int main()
ctr_t/(double)sizeof(buf),
(gcm_t-ctr_t)/(double)sizeof(buf));
#ifdef GHASH
- GHASH(&ctx,buf.c,sizeof(buf));
+ {
+ void (*gcm_ghash_p)(u64 Xi[2],const u128 Htable[16],
+ const u8 *inp,size_t len) = ctx.ghash;
+
+ GHASH((&ctx),buf.c,sizeof(buf));
start = OPENSSL_rdtsc();
- for (i=0;i<100;++i) GHASH(&ctx,buf.c,sizeof(buf));
+ for (i=0;i<100;++i) GHASH((&ctx),buf.c,sizeof(buf));
gcm_t = OPENSSL_rdtsc() - start;
printf("%.2f\n",gcm_t/(double)sizeof(buf)/(double)i);
+ }
#endif
}
#endif
diff --git a/crypto/modes/modes_lcl.h b/crypto/modes/modes_lcl.h
index 934e8c8515..9d83e12844 100644
--- a/crypto/modes/modes_lcl.h
+++ b/crypto/modes/modes_lcl.h
@@ -98,8 +98,8 @@ typedef struct { u64 hi,lo; } u128;
struct gcm128_context {
/* Following 6 names follow names in GCM specification */
- union { u64 u[2]; u32 d[4]; u8 c[16]; } Yi,EKi,EK0,len,
- Xi,H;
+ union { u64 u[2]; u32 d[4]; u8 c[16]; size_t t[16/sizeof(size_t)]; }
+ Yi,EKi,EK0,len,Xi,H;
/* Relative position of Xi, H and pre-computed Htable is used
* in some assembler modules, i.e. don't change the order! */
#if TABLE_BITS==8
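
The modes_lcl.h hunk above adds a size_t view, t[16/sizeof(size_t)], to the 16-byte union so the CTR XOR loops in gcm128.c can work on whole machine words (in the patch itself the in/out buffers are cast to size_t pointers as in_t/out_t, while ctx->EKi and ctx->Xi use the new t[] member). A standalone sketch of that pattern, illustration only and not OpenSSL code:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Same idea as the union in gcm128_context: byte view plus word view. */
typedef union {
    unsigned char c[16];
    size_t t[16 / sizeof(size_t)];
} block16;

/* out = in XOR keystream, processed 16/sizeof(size_t) words at a time */
static void xor_block(block16 *out, const block16 *in, const block16 *ks)
{
    size_t i;
    for (i = 0; i < 16 / sizeof(size_t); ++i)
        out->t[i] = in->t[i] ^ ks->t[i];
}

int main(void)
{
    block16 in, ks, out;
    memset(in.c, 0xAA, sizeof(in.c));
    memset(ks.c, 0x55, sizeof(ks.c));
    xor_block(&out, &in, &ks);
    printf("first output byte: 0x%02x\n", out.c[0]); /* 0xAA ^ 0x55 = 0xFF */
    return 0;
}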