diff options
author:    Todd Short <tshort@akamai.com>  (2022-07-20 10:54:24 -0400)
committer: Tomas Mraz <tomas@openssl.org>  (2022-11-23 18:21:42 +0100)
commit:    6843c1e4a711668c8ebc6201cf8ca1ec18d00a04 (patch)
tree:      d462777cd58de81392fa704dfe6fab3655bff51c
parent:    a66a11623102622c43c26a846a891b20a653fcec (diff)
Use separate function to get GCM functions
Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Paul Dale <pauli@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/18835)
(cherry picked from commit 92c9086e5c2b63606cd28a7f13f09b9ff35a0de3)
-rw-r--r--  crypto/modes/gcm128.c         | 194
-rw-r--r--  include/crypto/aes_platform.h |   6
-rw-r--r--  include/crypto/modes.h        |  13
3 files changed, 110 insertions, 103 deletions
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c index a156639527..a22ec91299 100644 --- a/crypto/modes/gcm128.c +++ b/crypto/modes/gcm128.c @@ -84,7 +84,7 @@ typedef size_t size_t_aX; * Value of 1 is not appropriate for performance reasons. */ -static void gcm_init_4bit(u128 Htable[16], u64 H[2]) +static void gcm_init_4bit(u128 Htable[16], const u64 H[2]) { u128 V; # if defined(OPENSSL_SMALL_FOOTPRINT) @@ -418,120 +418,126 @@ void gcm_gmult_clmul_rv64i_zbb_zbc(u64 Xi[2], const u128 Htable[16]); # endif #endif -void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block) +static void gcm_get_funcs(struct gcm_funcs_st *ctx) { - DECLARE_IS_ENDIAN; - - memset(ctx, 0, sizeof(*ctx)); - ctx->block = block; - ctx->key = key; - - (*block) (ctx->H.c, ctx->H.c, key); - - if (IS_LITTLE_ENDIAN) { - /* H is stored in host byte order */ -#ifdef BSWAP8 - ctx->H.u[0] = BSWAP8(ctx->H.u[0]); - ctx->H.u[1] = BSWAP8(ctx->H.u[1]); + /* set defaults -- overridden below as needed */ + ctx->ginit = gcm_init_4bit; +#if !defined(GHASH_ASM) || defined(INCLUDE_C_GMULT_4BIT) + ctx->gmult = gcm_gmult_4bit; #else - u8 *p = ctx->H.c; - u64 hi, lo; - hi = (u64)GETU32(p) << 32 | GETU32(p + 4); - lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12); - ctx->H.u[0] = hi; - ctx->H.u[1] = lo; + ctx->gmult = NULL; #endif - } - -#if defined(GHASH) -# define CTX__GHASH(f) (ctx->ghash = (f)) +#if !defined(GHASH_ASM) && !defined(OPENSSL_SMALL_FOOTPRINT) + ctx->ghash = gcm_ghash_4bit; #else -# define CTX__GHASH(f) (ctx->ghash = NULL) + ctx->ghash = NULL; #endif -#if defined(GHASH_ASM_X86_OR_64) -# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2) + +#if defined(GHASH_ASM_X86_OR_64) +# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2) + /* x86_64 */ if (OPENSSL_ia32cap_P[1] & (1 << 1)) { /* check PCLMULQDQ bit */ if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */ - gcm_init_avx(ctx->Htable, ctx->H.u); + ctx->ginit = gcm_init_avx; ctx->gmult = gcm_gmult_avx; - 
CTX__GHASH(gcm_ghash_avx); + ctx->ghash = gcm_ghash_avx; } else { - gcm_init_clmul(ctx->Htable, ctx->H.u); + ctx->ginit = gcm_init_clmul; ctx->gmult = gcm_gmult_clmul; - CTX__GHASH(gcm_ghash_clmul); + ctx->ghash = gcm_ghash_clmul; } return; } # endif - gcm_init_4bit(ctx->Htable, ctx->H.u); -# if defined(GHASH_ASM_X86) /* x86 only */ -# if defined(OPENSSL_IA32_SSE2) +# if defined(GHASH_ASM_X86) + /* x86 only */ +# if defined(OPENSSL_IA32_SSE2) if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */ + ctx->gmult = gcm_gmult_4bit_mmx; + ctx->ghash = gcm_ghash_4bit_mmx; + return; + } # else if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */ -# endif ctx->gmult = gcm_gmult_4bit_mmx; - CTX__GHASH(gcm_ghash_4bit_mmx); - } else { - ctx->gmult = gcm_gmult_4bit_x86; - CTX__GHASH(gcm_ghash_4bit_x86); + ctx->ghash = gcm_ghash_4bit_mmx; + return; } -# else - ctx->gmult = gcm_gmult_4bit; - CTX__GHASH(gcm_ghash_4bit); +# endif + ctx->gmult = gcm_gmult_4bit_x86; + ctx->ghash = gcm_ghash_4bit_x86; + return; # endif -#elif defined(GHASH_ASM_ARM) +#elif defined(GHASH_ASM_ARM) + /* ARM */ # ifdef PMULL_CAPABLE if (PMULL_CAPABLE) { - gcm_init_v8(ctx->Htable, ctx->H.u); + ctx->ginit = (gcm_init_fn)gcm_init_v8; ctx->gmult = gcm_gmult_v8; - CTX__GHASH(gcm_ghash_v8); - } else -# endif -# ifdef NEON_CAPABLE + ctx->ghash = gcm_ghash_v8; + } +# elif defined(NEON_CAPABLE) if (NEON_CAPABLE) { - gcm_init_neon(ctx->Htable, ctx->H.u); + ctx->ginit = gcm_init_neon; ctx->gmult = gcm_gmult_neon; - CTX__GHASH(gcm_ghash_neon); - } else -# endif - { - gcm_init_4bit(ctx->Htable, ctx->H.u); - ctx->gmult = gcm_gmult_4bit; - CTX__GHASH(gcm_ghash_4bit); + ctx->ghash = gcm_ghash_neon; } -#elif defined(GHASH_ASM_SPARC) +# endif + return; +#elif defined(GHASH_ASM_SPARC) + /* SPARC */ if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) { - gcm_init_vis3(ctx->Htable, ctx->H.u); + ctx->ginit = gcm_init_vis3; ctx->gmult = gcm_gmult_vis3; - CTX__GHASH(gcm_ghash_vis3); - } else { - gcm_init_4bit(ctx->Htable, 
ctx->H.u); - ctx->gmult = gcm_gmult_4bit; - CTX__GHASH(gcm_ghash_4bit); + ctx->ghash = gcm_ghash_vis3; } -#elif defined(GHASH_ASM_PPC) + return; +#elif defined(GHASH_ASM_PPC) + /* PowerPC */ if (OPENSSL_ppccap_P & PPC_CRYPTO207) { - gcm_init_p8(ctx->Htable, ctx->H.u); + ctx->ginit = gcm_init_p8; ctx->gmult = gcm_gmult_p8; - CTX__GHASH(gcm_ghash_p8); - } else { - gcm_init_4bit(ctx->Htable, ctx->H.u); - ctx->gmult = gcm_gmult_4bit; - CTX__GHASH(gcm_ghash_4bit); + ctx->ghash = gcm_ghash_p8; } -#elif defined(GHASH_ASM_RISCV) && __riscv_xlen == 64 + return; +#elif defined(GHASH_ASM_RISCV) && __riscv_xlen == 64 + /* RISCV */ + ctx->ghash = NULL; if (RISCV_HAS_ZBB() && RISCV_HAS_ZBC()) { - gcm_init_clmul_rv64i_zbb_zbc(ctx->Htable, ctx->H.u); + ctx->ginit = gcm_init_clmul_rv64i_zbb_zbc; ctx->gmult = gcm_gmult_clmul_rv64i_zbb_zbc; - } else { - gcm_init_4bit(ctx->Htable, ctx->H.u); - ctx->gmult = gcm_gmult_4bit; } + return; +#endif +} + +void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block) +{ + DECLARE_IS_ENDIAN; + + memset(ctx, 0, sizeof(*ctx)); + ctx->block = block; + ctx->key = key; + + (*block) (ctx->H.c, ctx->H.c, key); + + if (IS_LITTLE_ENDIAN) { + /* H is stored in host byte order */ +#ifdef BSWAP8 + ctx->H.u[0] = BSWAP8(ctx->H.u[0]); + ctx->H.u[1] = BSWAP8(ctx->H.u[1]); #else - gcm_init_4bit(ctx->Htable, ctx->H.u); + u8 *p = ctx->H.c; + u64 hi, lo; + hi = (u64)GETU32(p) << 32 | GETU32(p + 4); + lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12); + ctx->H.u[0] = hi; + ctx->H.u[1] = lo; #endif -#undef CTX__GHASH + } + + gcm_get_funcs(&ctx->funcs); + ctx->funcs.ginit(ctx->Htable, ctx->H.u); } void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, @@ -540,7 +546,7 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, DECLARE_IS_ENDIAN; unsigned int ctr; #ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; #endif ctx->len.u[0] = 0; 
/* AAD length */ @@ -631,10 +637,9 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, unsigned int n; u64 alen = ctx->len.u[0]; #ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; # ifdef GHASH - void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16], - const u8 *inp, size_t len) = ctx->ghash; + gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash; # endif #endif @@ -696,10 +701,9 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, block128_f block = ctx->block; void *key = ctx->key; #ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; # if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) - void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16], - const u8 *inp, size_t len) = ctx->ghash; + gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash; # endif #endif @@ -928,10 +932,9 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, block128_f block = ctx->block; void *key = ctx->key; #ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; # if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) - void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16], - const u8 *inp, size_t len) = ctx->ghash; + gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash; # endif #endif @@ -1170,10 +1173,9 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, u64 mlen = ctx->len.u[1]; void *key = ctx->key; # ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; # ifdef GHASH - void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16], - const u8 *inp, size_t len) = ctx->ghash; + gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash; # endif # endif @@ -1331,10 +1333,9 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, u64 mlen = ctx->len.u[1]; void *key = ctx->key; 
# ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; # ifdef GHASH - void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16], - const u8 *inp, size_t len) = ctx->ghash; + gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash; # endif # endif @@ -1493,10 +1494,9 @@ int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag, u64 alen = ctx->len.u[0] << 3; u64 clen = ctx->len.u[1] << 3; #ifdef GCM_FUNCREF_4BIT - void (*gcm_gmult_p) (u64 Xi[2], const u128 Htable[16]) = ctx->gmult; + gcm_gmult_fn gcm_gmult_p = ctx->funcs.gmult; # if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT) - void (*gcm_ghash_p) (u64 Xi[2], const u128 Htable[16], - const u8 *inp, size_t len) = ctx->ghash; + gcm_ghash_fn gcm_ghash_p = ctx->funcs.ghash; # endif #endif diff --git a/include/crypto/aes_platform.h b/include/crypto/aes_platform.h index e10c076d87..3e26463f7d 100644 --- a/include/crypto/aes_platform.h +++ b/include/crypto/aes_platform.h @@ -92,7 +92,7 @@ size_t ppc_aes_gcm_decrypt_wrap(const unsigned char *in, unsigned char *out, # define AES_gcm_encrypt ppc_aes_gcm_encrypt_wrap # define AES_gcm_decrypt ppc_aes_gcm_decrypt_wrap # define AES_GCM_ASM(gctx) ((gctx)->ctr==aes_p8_ctr32_encrypt_blocks && \ - (gctx)->gcm.ghash==gcm_ghash_p8) + (gctx)->gcm.funcs.ghash==gcm_ghash_p8) void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len); # endif /* PPC */ @@ -124,7 +124,7 @@ void gcm_ghash_p8(u64 Xi[2],const u128 Htable[16],const u8 *inp, size_t len); # define AES_gcm_encrypt armv8_aes_gcm_encrypt # define AES_gcm_decrypt armv8_aes_gcm_decrypt # define AES_GCM_ASM(gctx) ((gctx)->ctr==aes_v8_ctr32_encrypt_blocks && \ - (gctx)->gcm.ghash==gcm_ghash_v8) + (gctx)->gcm.funcs.ghash==gcm_ghash_v8) size_t aes_gcm_enc_128_kernel(const uint8_t * plaintext, uint64_t plaintext_length, uint8_t * ciphertext, uint64_t *Xi, unsigned char ivec[16], const void *key); size_t 
aes_gcm_enc_192_kernel(const uint8_t * plaintext, uint64_t plaintext_length, uint8_t * ciphertext, @@ -258,7 +258,7 @@ void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *in, size_t len); # define AES_gcm_encrypt aesni_gcm_encrypt # define AES_gcm_decrypt aesni_gcm_decrypt # define AES_GCM_ASM(ctx) (ctx->ctr == aesni_ctr32_encrypt_blocks && \ - ctx->gcm.ghash == gcm_ghash_avx) + ctx->gcm.funcs.ghash == gcm_ghash_avx) # endif diff --git a/include/crypto/modes.h b/include/crypto/modes.h index b1179c6357..d5c5c193c7 100644 --- a/include/crypto/modes.h +++ b/include/crypto/modes.h @@ -107,6 +107,15 @@ _asm mov eax, val _asm bswap eax} u64 hi, lo; } u128; +typedef void (*gcm_init_fn)(u128 Htable[16], const u64 H[2]); +typedef void (*gcm_ghash_fn)(u64 Xi[2], const u128 Htable[16], const u8 *inp, size_t len); +typedef void (*gcm_gmult_fn)(u64 Xi[2], const u128 Htable[16]); +struct gcm_funcs_st { + gcm_init_fn ginit; + gcm_ghash_fn ghash; + gcm_gmult_fn gmult; +}; + struct gcm128_context { /* Following 6 names follow names in GCM specification */ union { @@ -120,9 +129,7 @@ struct gcm128_context { * used in some assembler modules, i.e. don't change the order! */ u128 Htable[16]; - void (*gmult) (u64 Xi[2], const u128 Htable[16]); - void (*ghash) (u64 Xi[2], const u128 Htable[16], const u8 *inp, - size_t len); + struct gcm_funcs_st funcs; unsigned int mres, ares; block128_f block; void *key; |