author    Shane Lontis <shane.lontis@oracle.com>    2019-07-16 09:46:14 +1000
committer Shane Lontis <shane.lontis@oracle.com>    2019-07-16 09:46:14 +1000
commit    459b15d451194ee90834ea58bfb8c91479e9ef9b (patch)
tree      89aee6711deb4b8523865674562cee52522fbfe7 /crypto/include
parent    0d03acea7aa45e94903fb12186ed6cc324eb1b03 (diff)
Add Common shared code needed to move aes ciphers to providers
Custom aes ciphers will be placed into multiple new files (instead of the monolithic setup used in the e_aes.c legacy code), so it makes sense to have a header for the platform-specific code that needs to be shared between files. modes_lcl.h has also moved to modes_int.h to allow sharing with the provider source. Code that will be common to AEAD ciphers has also been added. These will be used by separate PRs for GCM, CCM & OCB.

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Richard Levitte <levitte@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/9301)
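To illustrate where this is heading (a sketch only, not code from this commit), a provider-side AES-CBC backend could include the new shared header and dispatch to whichever platform routine the CPU supports; cipher_hw_aes_cbc() is a hypothetical helper, and the AES_KEY is assumed to have been expanded with the set_encrypt_key routine that matches the chosen backend:

    #include <openssl/aes.h>
    #include "internal/aes_platform.h"   /* the header added by this commit */

    /* Hypothetical provider-side helper: pick the best available CBC routine. */
    static void cipher_hw_aes_cbc(const unsigned char *in, unsigned char *out,
                                  size_t len, const AES_KEY *ks,
                                  unsigned char *ivec, int enc)
    {
    #ifdef HWAES_CAPABLE
        if (HWAES_CAPABLE) {              /* e.g. ARMv8, POWER8 or SPARC AES units */
            HWAES_cbc_encrypt(in, out, len, ks, ivec, enc);
            return;
        }
    #endif
    #ifdef VPAES_CAPABLE
        if (VPAES_CAPABLE) {              /* vector-permutation AES (SSSE3/NEON/AltiVec) */
            vpaes_cbc_encrypt(in, out, len, ks, ivec, enc);
            return;
        }
    #endif
        AES_cbc_encrypt(in, out, len, ks, ivec, enc);   /* portable fallback */
    }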
Diffstat (limited to 'crypto/include')
-rw-r--r--   crypto/include/internal/aes_platform.h   391
-rw-r--r--   crypto/include/internal/modes_int.h       229
-rw-r--r--   crypto/include/internal/siv_int.h          34
3 files changed, 632 insertions, 22 deletions
diff --git a/crypto/include/internal/aes_platform.h b/crypto/include/internal/aes_platform.h
new file mode 100644
index 0000000000..115264e118
--- /dev/null
+++ b/crypto/include/internal/aes_platform.h
@@ -0,0 +1,391 @@
+/*
+ * Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef HEADER_INTERNAL_AES_PLATFORM_H
+# define HEADER_INTERNAL_AES_PLATFORM_H
+
+# ifdef VPAES_ASM
+int vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
+ AES_KEY *key);
+int vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
+ AES_KEY *key);
+void vpaes_encrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void vpaes_decrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void vpaes_cbc_encrypt(const unsigned char *in,
+ unsigned char *out,
+ size_t length,
+ const AES_KEY *key, unsigned char *ivec, int enc);
+# endif /* VPAES_ASM */
+
+# ifdef BSAES_ASM
+void bsaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
+ size_t length, const AES_KEY *key,
+ unsigned char ivec[16], int enc);
+void bsaes_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ const unsigned char ivec[16]);
+void bsaes_xts_encrypt(const unsigned char *inp, unsigned char *out,
+ size_t len, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char iv[16]);
+void bsaes_xts_decrypt(const unsigned char *inp, unsigned char *out,
+ size_t len, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char iv[16]);
+# endif /* BSAES_ASM */
+
+# ifdef AES_CTR_ASM
+void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key,
+ const unsigned char ivec[AES_BLOCK_SIZE]);
+# endif /* AES_CTR_ASM */
+
+# ifdef AES_XTS_ASM
+void AES_xts_encrypt(const unsigned char *inp, unsigned char *out, size_t len,
+ const AES_KEY *key1, const AES_KEY *key2,
+ const unsigned char iv[16]);
+void AES_xts_decrypt(const unsigned char *inp, unsigned char *out, size_t len,
+ const AES_KEY *key1, const AES_KEY *key2,
+ const unsigned char iv[16]);
+# endif /* AES_XTS_ASM */
+
+# if defined(OPENSSL_CPUID_OBJ)
+# if (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
+# include "ppc_arch.h"
+# ifdef VPAES_ASM
+# define VPAES_CAPABLE (OPENSSL_ppccap_P & PPC_ALTIVEC)
+# endif
+# define HWAES_CAPABLE (OPENSSL_ppccap_P & PPC_CRYPTO207)
+# define HWAES_set_encrypt_key aes_p8_set_encrypt_key
+# define HWAES_set_decrypt_key aes_p8_set_decrypt_key
+# define HWAES_encrypt aes_p8_encrypt
+# define HWAES_decrypt aes_p8_decrypt
+# define HWAES_cbc_encrypt aes_p8_cbc_encrypt
+# define HWAES_ctr32_encrypt_blocks aes_p8_ctr32_encrypt_blocks
+# define HWAES_xts_encrypt aes_p8_xts_encrypt
+# define HWAES_xts_decrypt aes_p8_xts_decrypt
+# endif /* PPC */
+
+# if (defined(__arm__) || defined(__arm) || defined(__aarch64__))
+# include "arm_arch.h"
+# if __ARM_MAX_ARCH__>=7
+# if defined(BSAES_ASM)
+# define BSAES_CAPABLE (OPENSSL_armcap_P & ARMV7_NEON)
+# endif
+# if defined(VPAES_ASM)
+# define VPAES_CAPABLE (OPENSSL_armcap_P & ARMV7_NEON)
+# endif
+# define HWAES_CAPABLE (OPENSSL_armcap_P & ARMV8_AES)
+# define HWAES_set_encrypt_key aes_v8_set_encrypt_key
+# define HWAES_set_decrypt_key aes_v8_set_decrypt_key
+# define HWAES_encrypt aes_v8_encrypt
+# define HWAES_decrypt aes_v8_decrypt
+# define HWAES_cbc_encrypt aes_v8_cbc_encrypt
+# define HWAES_ctr32_encrypt_blocks aes_v8_ctr32_encrypt_blocks
+# endif
+# endif
+# endif /* OPENSSL_CPUID_OBJ */
+
+# if defined(AES_ASM) && !defined(I386_ONLY) && ( \
+ ((defined(__i386) || defined(__i386__) || \
+ defined(_M_IX86)) && defined(OPENSSL_IA32_SSE2))|| \
+ defined(__x86_64) || defined(__x86_64__) || \
+ defined(_M_AMD64) || defined(_M_X64) )
+
+/* AES-NI section */
+extern unsigned int OPENSSL_ia32cap_P[];
+
+# define AESNI_CAPABLE (OPENSSL_ia32cap_P[1]&(1<<(57-32)))
+# ifdef VPAES_ASM
+# define VPAES_CAPABLE (OPENSSL_ia32cap_P[1]&(1<<(41-32)))
+# endif
+# ifdef BSAES_ASM
+# define BSAES_CAPABLE (OPENSSL_ia32cap_P[1]&(1<<(41-32)))
+# endif
+
+int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
+ AES_KEY *key);
+int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
+ AES_KEY *key);
+
+void aesni_encrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void aesni_decrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+
+void aesni_ecb_encrypt(const unsigned char *in,
+ unsigned char *out,
+ size_t length, const AES_KEY *key, int enc);
+void aesni_cbc_encrypt(const unsigned char *in,
+ unsigned char *out,
+ size_t length,
+ const AES_KEY *key, unsigned char *ivec, int enc);
+# ifndef OPENSSL_NO_OCB
+void aesni_ocb_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const void *key,
+ size_t start_block_num,
+ unsigned char offset_i[16],
+ const unsigned char L_[][16],
+ unsigned char checksum[16]);
+void aesni_ocb_decrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const void *key,
+ size_t start_block_num,
+ unsigned char offset_i[16],
+ const unsigned char L_[][16],
+ unsigned char checksum[16]);
+# endif /* OPENSSL_NO_OCB */
+
+void aesni_ctr32_encrypt_blocks(const unsigned char *in,
+ unsigned char *out,
+ size_t blocks,
+ const void *key, const unsigned char *ivec);
+
+void aesni_xts_encrypt(const unsigned char *in,
+ unsigned char *out,
+ size_t length,
+ const AES_KEY *key1, const AES_KEY *key2,
+ const unsigned char iv[16]);
+
+void aesni_xts_decrypt(const unsigned char *in,
+ unsigned char *out,
+ size_t length,
+ const AES_KEY *key1, const AES_KEY *key2,
+ const unsigned char iv[16]);
+
+void aesni_ccm64_encrypt_blocks(const unsigned char *in,
+ unsigned char *out,
+ size_t blocks,
+ const void *key,
+ const unsigned char ivec[16],
+ unsigned char cmac[16]);
+
+void aesni_ccm64_decrypt_blocks(const unsigned char *in,
+ unsigned char *out,
+ size_t blocks,
+ const void *key,
+ const unsigned char ivec[16],
+ unsigned char cmac[16]);
+
+# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
+size_t aesni_gcm_encrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi);
+size_t aesni_gcm_decrypt(const unsigned char *in, unsigned char *out, size_t len,
+ const void *key, unsigned char ivec[16], u64 *Xi);
+void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *in, size_t len);
+
+# define AES_GCM_ASM(ctx) (ctx->ctr == aesni_ctr32_encrypt_blocks && \
+ ctx->gcm.ghash == gcm_ghash_avx)
+# endif
+
+
+# elif defined(AES_ASM) && (defined(__sparc) || defined(__sparc__))
+
+/* Fujitsu SPARC64 X support */
+extern unsigned int OPENSSL_sparcv9cap_P[];
+# include "sparc_arch.h"
+# define SPARC_AES_CAPABLE (OPENSSL_sparcv9cap_P[1] & CFR_AES)
+# define HWAES_CAPABLE (OPENSSL_sparcv9cap_P[0] & SPARCV9_FJAESX)
+# define HWAES_set_encrypt_key aes_fx_set_encrypt_key
+# define HWAES_set_decrypt_key aes_fx_set_decrypt_key
+# define HWAES_encrypt aes_fx_encrypt
+# define HWAES_decrypt aes_fx_decrypt
+# define HWAES_cbc_encrypt aes_fx_cbc_encrypt
+# define HWAES_ctr32_encrypt_blocks aes_fx_ctr32_encrypt_blocks
+
+void aes_t4_set_encrypt_key(const unsigned char *key, int bits, AES_KEY *ks);
+void aes_t4_set_decrypt_key(const unsigned char *key, int bits, AES_KEY *ks);
+void aes_t4_encrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void aes_t4_decrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+/*
+ * Key-length specific subroutines were chosen for following reason.
+ * Each SPARC T4 core can execute up to 8 threads which share core's
+ * resources. Loading as much key material to registers allows to
+ * minimize references to shared memory interface, as well as amount
+ * of instructions in inner loops [much needed on T4]. But then having
+ * non-key-length specific routines would require conditional branches
+ * either in inner loops or on subroutines' entries. Former is hardly
+ * acceptable, while latter means code size increase to size occupied
+ * by multiple key-length specific subroutines, so why fight?
+ */
+void aes128_t4_cbc_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ unsigned char *ivec);
+void aes128_t4_cbc_decrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ unsigned char *ivec);
+void aes192_t4_cbc_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ unsigned char *ivec);
+void aes192_t4_cbc_decrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ unsigned char *ivec);
+void aes256_t4_cbc_encrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ unsigned char *ivec);
+void aes256_t4_cbc_decrypt(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ unsigned char *ivec);
+void aes128_t4_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key,
+ unsigned char *ivec);
+void aes192_t4_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key,
+ unsigned char *ivec);
+void aes256_t4_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key,
+ unsigned char *ivec);
+void aes128_t4_xts_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char *ivec);
+void aes128_t4_xts_decrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char *ivec);
+void aes256_t4_xts_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char *ivec);
+void aes256_t4_xts_decrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char *ivec);
+
+# elif defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
+/* IBM S390X support */
+# include "s390x_arch.h"
+
+
+/* Convert key size to function code: [16,24,32] -> [18,19,20]. */
+# define S390X_AES_FC(keylen) (S390X_AES_128 + ((((keylen) << 3) - 128) >> 6))
+
+/* Most modes of operation need km for partial block processing. */
+# define S390X_aes_128_CAPABLE (OPENSSL_s390xcap_P.km[0] & \
+ S390X_CAPBIT(S390X_AES_128))
+# define S390X_aes_192_CAPABLE (OPENSSL_s390xcap_P.km[0] & \
+ S390X_CAPBIT(S390X_AES_192))
+# define S390X_aes_256_CAPABLE (OPENSSL_s390xcap_P.km[0] & \
+ S390X_CAPBIT(S390X_AES_256))
+
+# define S390X_aes_128_cbc_CAPABLE 1 /* checked by callee */
+# define S390X_aes_192_cbc_CAPABLE 1
+# define S390X_aes_256_cbc_CAPABLE 1
+
+# define S390X_aes_128_ecb_CAPABLE S390X_aes_128_CAPABLE
+# define S390X_aes_192_ecb_CAPABLE S390X_aes_192_CAPABLE
+# define S390X_aes_256_ecb_CAPABLE S390X_aes_256_CAPABLE
+
+# define S390X_aes_128_ofb_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmo[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_ofb_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmo[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_ofb_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmo[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+
+# define S390X_aes_128_cfb_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_cfb_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_cfb_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+# define S390X_aes_128_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_128))
+# define S390X_aes_192_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_192))
+# define S390X_aes_256_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_256))
+# define S390X_aes_128_cfb1_CAPABLE 0
+# define S390X_aes_192_cfb1_CAPABLE 0
+# define S390X_aes_256_cfb1_CAPABLE 0
+
+# define S390X_aes_128_ctr_CAPABLE 1 /* checked by callee */
+# define S390X_aes_192_ctr_CAPABLE 1
+# define S390X_aes_256_ctr_CAPABLE 1
+
+# define S390X_aes_128_xts_CAPABLE 1 /* checked by callee */
+# define S390X_aes_256_xts_CAPABLE 1
+
+# define S390X_aes_128_ccm_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmac[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_ccm_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmac[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_ccm_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmac[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+# define S390X_CCM_AAD_FLAG 0x40
+
+# ifndef OPENSSL_NO_OCB
+# define S390X_aes_128_ocb_CAPABLE 0
+# define S390X_aes_192_ocb_CAPABLE 0
+# define S390X_aes_256_ocb_CAPABLE 0
+# endif /* OPENSSL_NO_OCB */
+
+# ifndef OPENSSL_NO_SIV
+# define S390X_aes_128_siv_CAPABLE 0
+# define S390X_aes_192_siv_CAPABLE 0
+# define S390X_aes_256_siv_CAPABLE 0
+# endif /* OPENSSL_NO_SIV */
+
+/* Convert key size to function code: [16,24,32] -> [18,19,20]. */
+# define S390X_AES_FC(keylen) (S390X_AES_128 + ((((keylen) << 3) - 128) >> 6))
+# endif
+
+# if defined(HWAES_CAPABLE)
+int HWAES_set_encrypt_key(const unsigned char *userKey, const int bits,
+ AES_KEY *key);
+int HWAES_set_decrypt_key(const unsigned char *userKey, const int bits,
+ AES_KEY *key);
+void HWAES_encrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void HWAES_decrypt(const unsigned char *in, unsigned char *out,
+ const AES_KEY *key);
+void HWAES_cbc_encrypt(const unsigned char *in, unsigned char *out,
+ size_t length, const AES_KEY *key,
+ unsigned char *ivec, const int enc);
+void HWAES_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
+ size_t len, const AES_KEY *key,
+ const unsigned char ivec[16]);
+void HWAES_xts_encrypt(const unsigned char *inp, unsigned char *out,
+ size_t len, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char iv[16]);
+void HWAES_xts_decrypt(const unsigned char *inp, unsigned char *out,
+ size_t len, const AES_KEY *key1,
+ const AES_KEY *key2, const unsigned char iv[16]);
+# ifndef OPENSSL_NO_OCB
+# ifdef HWAES_ocb_encrypt
+void HWAES_ocb_encrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const void *key,
+ size_t start_block_num,
+ unsigned char offset_i[16],
+ const unsigned char L_[][16],
+ unsigned char checksum[16]);
+# else
+# define HWAES_ocb_encrypt ((ocb128_f)NULL)
+# endif
+# ifdef HWAES_ocb_decrypt
+void HWAES_ocb_decrypt(const unsigned char *in, unsigned char *out,
+ size_t blocks, const void *key,
+ size_t start_block_num,
+ unsigned char offset_i[16],
+ const unsigned char L_[][16],
+ unsigned char checksum[16]);
+# else
+# define HWAES_ocb_decrypt ((ocb128_f)NULL)
+# endif
+# endif /* OPENSSL_NO_OCB */
+
+# endif /* HWAES_CAPABLE */
+
+#endif /* HEADER_INTERNAL_AES_PLATFORM_H */
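The S390X_AES_FC macro in the header above reduces the AES key length in bytes to the corresponding CPACF function code. Worked through for the three supported key lengths (taking S390X_AES_128 = 18 as the base code, per the [16,24,32] -> [18,19,20] comment):

    S390X_AES_FC(16) = S390X_AES_128 + (((16 << 3) - 128) >> 6) = 18 + 0 = 18
    S390X_AES_FC(24) = S390X_AES_128 + (((24 << 3) - 128) >> 6) = 18 + 1 = 19
    S390X_AES_FC(32) = S390X_AES_128 + (((32 << 3) - 128) >> 6) = 18 + 2 = 20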
diff --git a/crypto/include/internal/modes_int.h b/crypto/include/internal/modes_int.h
index 8a8ef6e15f..5230f08966 100644
--- a/crypto/include/internal/modes_int.h
+++ b/crypto/include/internal/modes_int.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2010-2019 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -7,28 +7,213 @@
* https://www.openssl.org/source/license.html
*/
+/* TODO(3.0) Move this header into provider when dependencies are removed */
+#include <openssl/modes.h>
+
+#if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__)
+typedef __int64 i64;
+typedef unsigned __int64 u64;
+# define U64(C) C##UI64
+#elif defined(__arch64__)
+typedef long i64;
+typedef unsigned long u64;
+# define U64(C) C##UL
+#else
+typedef long long i64;
+typedef unsigned long long u64;
+# define U64(C) C##ULL
+#endif
+
+typedef unsigned int u32;
+typedef unsigned char u8;
+
+#define STRICT_ALIGNMENT 1
+#ifndef PEDANTIC
+# if defined(__i386) || defined(__i386__) || \
+ defined(__x86_64) || defined(__x86_64__) || \
+ defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
+ defined(__aarch64__) || \
+ defined(__s390__) || defined(__s390x__)
+# undef STRICT_ALIGNMENT
+# endif
+#endif
+
+#if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__GNUC__) && __GNUC__>=2
+# if defined(__x86_64) || defined(__x86_64__)
+# define BSWAP8(x) ({ u64 ret_=(x); \
+ asm ("bswapq %0" \
+ : "+r"(ret_)); ret_; })
+# define BSWAP4(x) ({ u32 ret_=(x); \
+ asm ("bswapl %0" \
+ : "+r"(ret_)); ret_; })
+# elif (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
+# define BSWAP8(x) ({ u32 lo_=(u64)(x)>>32,hi_=(x); \
+ asm ("bswapl %0; bswapl %1" \
+ : "+r"(hi_),"+r"(lo_)); \
+ (u64)hi_<<32|lo_; })
+# define BSWAP4(x) ({ u32 ret_=(x); \
+ asm ("bswapl %0" \
+ : "+r"(ret_)); ret_; })
+# elif defined(__aarch64__)
+# define BSWAP8(x) ({ u64 ret_; \
+ asm ("rev %0,%1" \
+ : "=r"(ret_) : "r"(x)); ret_; })
+# define BSWAP4(x) ({ u32 ret_; \
+ asm ("rev %w0,%w1" \
+ : "=r"(ret_) : "r"(x)); ret_; })
+# elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
+# define BSWAP8(x) ({ u32 lo_=(u64)(x)>>32,hi_=(x); \
+ asm ("rev %0,%0; rev %1,%1" \
+ : "+r"(hi_),"+r"(lo_)); \
+ (u64)hi_<<32|lo_; })
+# define BSWAP4(x) ({ u32 ret_; \
+ asm ("rev %0,%1" \
+ : "=r"(ret_) : "r"((u32)(x))); \
+ ret_; })
+# endif
+# elif defined(_MSC_VER)
+# if _MSC_VER>=1300
+# include <stdlib.h>
+# pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
+# define BSWAP8(x) _byteswap_uint64((u64)(x))
+# define BSWAP4(x) _byteswap_ulong((u32)(x))
+# elif defined(_M_IX86)
+__inline u32 _bswap4(u32 val)
+{
+_asm mov eax, val _asm bswap eax}
+# define BSWAP4(x) _bswap4(x)
+# endif
+# endif
+#endif
+#if defined(BSWAP4) && !defined(STRICT_ALIGNMENT)
+# define GETU32(p) BSWAP4(*(const u32 *)(p))
+# define PUTU32(p,v) *(u32 *)(p) = BSWAP4(v)
+#else
+# define GETU32(p) ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
+# define PUTU32(p,v) ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
+#endif
+/*- GCM definitions */ typedef struct {
+ u64 hi, lo;
+} u128;
+
+#ifdef TABLE_BITS
+# undef TABLE_BITS
+#endif
+/*
+ * Even though permitted values for TABLE_BITS are 8, 4 and 1, it should
+ * never be set to 8 [or 1]. For further information see gcm128.c.
+ */
+#define TABLE_BITS 4
+
+struct gcm128_context {
+ /* Following 6 names follow names in GCM specification */
+ union {
+ u64 u[2];
+ u32 d[4];
+ u8 c[16];
+ size_t t[16 / sizeof(size_t)];
+ } Yi, EKi, EK0, len, Xi, H;
+ /*
+ * Relative position of Xi, H and pre-computed Htable is used in some
+ * assembler modules, i.e. don't change the order!
+ */
+#if TABLE_BITS==8
+ u128 Htable[256];
+#else
+ u128 Htable[16];
+ void (*gmult) (u64 Xi[2], const u128 Htable[16]);
+ void (*ghash) (u64 Xi[2], const u128 Htable[16], const u8 *inp,
+ size_t len);
+#endif
+ unsigned int mres, ares;
+ block128_f block;
+ void *key;
+#if !defined(OPENSSL_SMALL_FOOTPRINT)
+ unsigned char Xn[48];
+#endif
+};
+
+/*
+ * The maximum permitted number of cipher blocks per data unit in XTS mode.
+ * Reference IEEE Std 1619-2018.
+ */
+#define XTS_MAX_BLOCKS_PER_DATA_UNIT (1<<20)
+
+struct xts128_context {
+ void *key1, *key2;
+ block128_f block1, block2;
+};
+
+struct ccm128_context {
+ union {
+ u64 u[2];
+ u8 c[16];
+ } nonce, cmac;
+ u64 blocks;
+ block128_f block;
+ void *key;
+};
+
+#ifndef OPENSSL_NO_OCB
+
+typedef union {
+ u64 a[2];
+ unsigned char c[16];
+} OCB_BLOCK;
+# define ocb_block16_xor(in1,in2,out) \
+ ( (out)->a[0]=(in1)->a[0]^(in2)->a[0], \
+ (out)->a[1]=(in1)->a[1]^(in2)->a[1] )
+# if STRICT_ALIGNMENT
+# define ocb_block16_xor_misaligned(in1,in2,out) \
+ ocb_block_xor((in1)->c,(in2)->c,16,(out)->c)
+# else
+# define ocb_block16_xor_misaligned ocb_block16_xor
+# endif
+
+struct ocb128_context {
+ /* Need both encrypt and decrypt key schedules for decryption */
+ block128_f encrypt;
+ block128_f decrypt;
+ void *keyenc;
+ void *keydec;
+ ocb128_f stream; /* direction dependent */
+ /* Key dependent variables. Can be reused if key remains the same */
+ size_t l_index;
+ size_t max_l_index;
+ OCB_BLOCK l_star;
+ OCB_BLOCK l_dollar;
+ OCB_BLOCK *l;
+ /* Must be reset for each session */
+ struct {
+ u64 blocks_hashed;
+ u64 blocks_processed;
+ OCB_BLOCK offset_aad;
+ OCB_BLOCK sum;
+ OCB_BLOCK offset;
+ OCB_BLOCK checksum;
+ } sess;
+};
+#endif /* OPENSSL_NO_OCB */
+
#ifndef OPENSSL_NO_SIV
-typedef struct siv128_context SIV128_CONTEXT;
-
-SIV128_CONTEXT *CRYPTO_siv128_new(const unsigned char *key, int klen,
- EVP_CIPHER* cbc, EVP_CIPHER* ctr);
-int CRYPTO_siv128_init(SIV128_CONTEXT *ctx, const unsigned char *key, int klen,
- const EVP_CIPHER* cbc, const EVP_CIPHER* ctr);
-int CRYPTO_siv128_copy_ctx(SIV128_CONTEXT *dest, SIV128_CONTEXT *src);
-int CRYPTO_siv128_aad(SIV128_CONTEXT *ctx, const unsigned char *aad,
- size_t len);
-int CRYPTO_siv128_encrypt(SIV128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len);
-int CRYPTO_siv128_decrypt(SIV128_CONTEXT *ctx,
- const unsigned char *in, unsigned char *out,
- size_t len);
-int CRYPTO_siv128_finish(SIV128_CONTEXT *ctx);
-int CRYPTO_siv128_set_tag(SIV128_CONTEXT *ctx, const unsigned char *tag,
- size_t len);
-int CRYPTO_siv128_get_tag(SIV128_CONTEXT *ctx, unsigned char *tag, size_t len);
-int CRYPTO_siv128_cleanup(SIV128_CONTEXT *ctx);
-int CRYPTO_siv128_speed(SIV128_CONTEXT *ctx, int arg);
+#define SIV_LEN 16
+
+typedef union siv_block_u {
+ uint64_t word[SIV_LEN/sizeof(uint64_t)];
+ unsigned char byte[SIV_LEN];
+} SIV_BLOCK;
+
+struct siv128_context {
+ /* d stores intermediate results of S2V; it corresponds to D from the
+ pseudocode in section 2.4 of RFC 5297. */
+ SIV_BLOCK d;
+ SIV_BLOCK tag;
+ EVP_CIPHER_CTX *cipher_ctx;
+ EVP_MAC_CTX *mac_ctx_init;
+ int final_ret;
+ int crypto_ok;
+};
#endif /* OPENSSL_NO_SIV */
diff --git a/crypto/include/internal/siv_int.h b/crypto/include/internal/siv_int.h
new file mode 100644
index 0000000000..8a8ef6e15f
--- /dev/null
+++ b/crypto/include/internal/siv_int.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OPENSSL_NO_SIV
+
+typedef struct siv128_context SIV128_CONTEXT;
+
+SIV128_CONTEXT *CRYPTO_siv128_new(const unsigned char *key, int klen,
+ EVP_CIPHER* cbc, EVP_CIPHER* ctr);
+int CRYPTO_siv128_init(SIV128_CONTEXT *ctx, const unsigned char *key, int klen,
+ const EVP_CIPHER* cbc, const EVP_CIPHER* ctr);
+int CRYPTO_siv128_copy_ctx(SIV128_CONTEXT *dest, SIV128_CONTEXT *src);
+int CRYPTO_siv128_aad(SIV128_CONTEXT *ctx, const unsigned char *aad,
+ size_t len);
+int CRYPTO_siv128_encrypt(SIV128_CONTEXT *ctx,
+ const unsigned char *in, unsigned char *out,
+ size_t len);
+int CRYPTO_siv128_decrypt(SIV128_CONTEXT *ctx,
+ const unsigned char *in, unsigned char *out,
+ size_t len);
+int CRYPTO_siv128_finish(SIV128_CONTEXT *ctx);
+int CRYPTO_siv128_set_tag(SIV128_CONTEXT *ctx, const unsigned char *tag,
+ size_t len);
+int CRYPTO_siv128_get_tag(SIV128_CONTEXT *ctx, unsigned char *tag, size_t len);
+int CRYPTO_siv128_cleanup(SIV128_CONTEXT *ctx);
+int CRYPTO_siv128_speed(SIV128_CONTEXT *ctx, int arg);
+
+#endif /* OPENSSL_NO_SIV */
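As a rough orientation (again, not code from this commit, which only relocates the declarations), these internal helpers could drive a one-shot SIV encryption along the following lines; the 32-byte key length, the 1-on-success return convention, the use of EVP_aes_128_cbc()/EVP_aes_128_ctr(), and freeing the context with OPENSSL_free() after CRYPTO_siv128_cleanup() are all assumptions made for illustration:

    #include <openssl/evp.h>
    #include <openssl/crypto.h>
    #include "internal/siv_int.h"        /* header added by this commit */

    /* Hypothetical one-shot AES-128-SIV encryption of pt[0..len) with one AAD item. */
    static int siv_encrypt_example(const unsigned char key[32],
                                   const unsigned char *aad, size_t aad_len,
                                   const unsigned char *pt, unsigned char *ct,
                                   size_t len, unsigned char tag[16])
    {
        SIV128_CONTEXT *ctx;
        int ok = 0;

        ctx = CRYPTO_siv128_new(key, 32, (EVP_CIPHER *)EVP_aes_128_cbc(),
                                (EVP_CIPHER *)EVP_aes_128_ctr());
        if (ctx == NULL)
            return 0;
        if (CRYPTO_siv128_aad(ctx, aad, aad_len)           /* S2V over the AAD       */
                && CRYPTO_siv128_encrypt(ctx, pt, ct, len) /* CTR encrypt, derive tag */
                && CRYPTO_siv128_get_tag(ctx, tag, 16))    /* export the SIV tag      */
            ok = 1;
        CRYPTO_siv128_cleanup(ctx);                        /* release internal EVP state  */
        OPENSSL_free(ctx);                                 /* assumption: ctx was malloc'd */
        return ok;
    }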