author     Matt Caswell <matt@openssl.org>    2022-07-27 14:20:23 +0100
committer  Matt Caswell <matt@openssl.org>    2022-08-18 16:38:14 +0100
commit     1704961cf085a64b0e104bd0c9cb81188f061698 (patch)
tree       b4db35fcf3b17f45e7fe745e6e1b951a8e68911d /ssl
parent     7f7b0be8e3d452ecf5154203c5669f72683fde3f (diff)
Formatting cleanups
Some minor formatting cleanups and other minor tweaks.

Reviewed-by: Hugo Landau <hlandau@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/18132)
Diffstat (limited to 'ssl')
-rw-r--r--  ssl/record/methods/dtls_meth.c        |  42
-rw-r--r--  ssl/record/methods/ktls_meth.c        |  50
-rw-r--r--  ssl/record/methods/recmethod_local.h  |  12
-rw-r--r--  ssl/record/methods/ssl3_cbc.c         |  38
-rw-r--r--  ssl/record/methods/ssl3_meth.c        |  19
-rw-r--r--  ssl/record/methods/tls13_meth.c       |   9
-rw-r--r--  ssl/record/methods/tls1_meth.c        |  64
-rw-r--r--  ssl/record/methods/tls_common.c       |  61
-rw-r--r--  ssl/record/recordmethod.h             |  24
9 files changed, 154 insertions, 165 deletions
diff --git a/ssl/record/methods/dtls_meth.c b/ssl/record/methods/dtls_meth.c
index eebea6289d..7ac49a0a9a 100644
--- a/ssl/record/methods/dtls_meth.c
+++ b/ssl/record/methods/dtls_meth.c
@@ -93,9 +93,9 @@ static DTLS_BITMAP *dtls_get_bitmap(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rr,
* have already processed all of the unprocessed records from the last
* epoch
*/
- else if (rr->epoch == (unsigned long)(rl->epoch + 1) &&
- rl->unprocessed_rcds.epoch != rl->epoch &&
- (rr->type == SSL3_RT_HANDSHAKE || rr->type == SSL3_RT_ALERT)) {
+ else if (rr->epoch == (unsigned long)(rl->epoch + 1)
+ && rl->unprocessed_rcds.epoch != rl->epoch
+ && (rr->type == SSL3_RT_HANDSHAKE || rr->type == SSL3_RT_ALERT)) {
*is_next_epoch = 1;
return &rl->next_bitmap;
}
@@ -122,7 +122,7 @@ static int dtls_process_record(OSSL_RECORD_LAYER *rl, DTLS_BITMAP *bitmap)
rr = &rl->rrec[0];
/*
- * At this point, rl->packet_length == SSL3_RT_HEADER_LNGTH + rr->length,
+ * At this point, rl->packet_length == DTLS1_RT_HEADER_LENGTH + rr->length,
* and we have that many bytes in rl->packet
*/
rr->input = &(rl->packet[DTLS1_RT_HEADER_LENGTH]);
@@ -155,14 +155,14 @@ static int dtls_process_record(OSSL_RECORD_LAYER *rl, DTLS_BITMAP *bitmap)
if (tmpmd != NULL) {
imac_size = EVP_MD_get_size(tmpmd);
if (!ossl_assert(imac_size >= 0 && imac_size <= EVP_MAX_MD_SIZE)) {
- RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
- return 0;
+ RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
+ return 0;
}
mac_size = (size_t)imac_size;
}
}
- if (rl->use_etm && rl->md_ctx) {
+ if (rl->use_etm && rl->md_ctx != NULL) {
unsigned char *mac;
if (rr->orig_len < mac_size) {
@@ -221,7 +221,7 @@ static int dtls_process_record(OSSL_RECORD_LAYER *rl, DTLS_BITMAP *bitmap)
&& (EVP_MD_CTX_get0_md(rl->md_ctx) != NULL)) {
/* rl->md_ctx != NULL => mac_size != -1 */
- i = rl->funcs->mac(rl, rr, md, 0 /* not send */ );
+ i = rl->funcs->mac(rl, rr, md, 0 /* not send */);
if (i == 0 || macbuf.mac == NULL
|| CRYPTO_memcmp(md, macbuf.mac, mac_size) != 0)
enc_err = 0;
@@ -258,7 +258,6 @@ static int dtls_process_record(OSSL_RECORD_LAYER *rl, DTLS_BITMAP *bitmap)
goto end;
}
-
rr->off = 0;
/*-
* So at this point the following is true
@@ -418,7 +417,7 @@ int dtls_get_more_records(OSSL_RECORD_LAYER *rl)
SSL3_BUFFER_get_len(&rl->rbuf), 0, 1, &n);
/* read timeout is handled by dtls1_read_bytes */
if (rret < OSSL_RECORD_RETURN_SUCCESS) {
- /* SSLfatal() already called if appropriate */
+ /* RLAYERfatal() already called if appropriate */
return rret; /* error or non-blocking */
}
@@ -462,10 +461,9 @@ int dtls_get_more_records(OSSL_RECORD_LAYER *rl)
}
}
-
if (ssl_major !=
(rl->version == DTLS_ANY_VERSION ? DTLS1_VERSION_MAJOR
- : rl->version >> 8)) {
+ : rl->version >> 8)) {
/* wrong version, silently discard record */
rr->length = 0;
rl->packet_length = 0;
@@ -479,7 +477,6 @@ int dtls_get_more_records(OSSL_RECORD_LAYER *rl)
goto again;
}
-
/*
* If received packet overflows maximum possible fragment length then
* silently discard it
@@ -497,8 +494,7 @@ int dtls_get_more_records(OSSL_RECORD_LAYER *rl)
/* rl->rstate == SSL_ST_READ_BODY, get and decode the data */
- if (rr->length >
- rl->packet_length - DTLS1_RT_HEADER_LENGTH) {
+ if (rr->length > rl->packet_length - DTLS1_RT_HEADER_LENGTH) {
/* now rl->packet_length == DTLS1_RT_HEADER_LENGTH */
more = rr->length;
rret = rl->funcs->read_n(rl, more, more, 1, 1, &n);
@@ -553,10 +549,9 @@ int dtls_get_more_records(OSSL_RECORD_LAYER *rl)
*/
if (is_next_epoch) {
if (rl->in_init) {
- if (dtls_rlayer_buffer_record(rl,
- &(rl->unprocessed_rcds),
- rr->seq_num) < 0) {
- /* SSLfatal() already called */
+ if (dtls_rlayer_buffer_record(rl, &(rl->unprocessed_rcds),
+ rr->seq_num) < 0) {
+ /* RLAYERfatal() already called */
return OSSL_RECORD_RETURN_FATAL;
}
}
@@ -577,7 +572,6 @@ int dtls_get_more_records(OSSL_RECORD_LAYER *rl)
rl->num_recs = 1;
return OSSL_RECORD_RETURN_SUCCESS;
-
}
static int dtls_free(OSSL_RECORD_LAYER *rl)
@@ -641,7 +635,6 @@ dtls_new_record_layer(OSSL_LIB_CTX *libctx, const char *propq, int vers,
{
int ret;
-
ret = tls_int_new_record_layer(libctx, propq, vers, role, direction, level,
key, keylen, iv, ivlen, mackey, mackeylen,
ciph, taglen, mactype, md, comp, prev,
@@ -653,7 +646,8 @@ dtls_new_record_layer(OSSL_LIB_CTX *libctx, const char *propq, int vers,
(*retrl)->unprocessed_rcds.q = pqueue_new();
(*retrl)->processed_rcds.q = pqueue_new();
- if ((*retrl)->unprocessed_rcds.q == NULL || (*retrl)->processed_rcds.q == NULL) {
+ if ((*retrl)->unprocessed_rcds.q == NULL
+ || (*retrl)->processed_rcds.q == NULL) {
dtls_free(*retrl);
*retrl = NULL;
RLAYERfatal(*retrl, SSL_AD_INTERNAL_ERROR, ERR_R_MALLOC_FAILURE);
@@ -684,8 +678,8 @@ dtls_new_record_layer(OSSL_LIB_CTX *libctx, const char *propq, int vers,
}
ret = (*retrl)->funcs->set_crypto_state(*retrl, level, key, keylen, iv,
- ivlen, mackey, mackeylen, ciph,
- taglen, mactype, md, comp);
+ ivlen, mackey, mackeylen, ciph,
+ taglen, mactype, md, comp);
err:
if (ret != OSSL_RECORD_RETURN_SUCCESS) {
diff --git a/ssl/record/methods/ktls_meth.c b/ssl/record/methods/ktls_meth.c
index 340356ca5e..d0db365c5b 100644
--- a/ssl/record/methods/ktls_meth.c
+++ b/ssl/record/methods/ktls_meth.c
@@ -38,12 +38,12 @@ int ktls_check_supported_cipher(const SSL_CONNECTION *s, const EVP_CIPHER *c,
}
if (EVP_CIPHER_is_a(c, "AES-128-GCM")
- || EVP_CIPHER_is_a(c, "AES-256-GCM")
+ || EVP_CIPHER_is_a(c, "AES-256-GCM")
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
- || EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")
+ || EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")
# endif
)
- return 1;
+ return 1;
if (!EVP_CIPHER_is_a(c, "AES-128-CBC")
&& !EVP_CIPHER_is_a(c, "AES-256-CBC"))
@@ -83,12 +83,12 @@ static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
}
if (EVP_CIPHER_is_a(c, "AES-128-GCM")
- || EVP_CIPHER_is_a(c, "AES-256-GCM")
+ || EVP_CIPHER_is_a(c, "AES-256-GCM")
# ifdef OPENSSL_KTLS_CHACHA20_POLY1305
- || EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")
+ || EVP_CIPHER_is_a(c, "CHACHA20-POLY1305")
# endif
)
- return 1;
+ return 1;
if (!EVP_CIPHER_is_a(c, "AES-128-CBC")
&& !EVP_CIPHER_is_a(c, "AES-256-CBC"))
@@ -97,8 +97,10 @@ static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
if (rl->use_etm)
return 0;
- if (md == NULL
- || EVP_MD_is_a(md, "SHA1")
+ if (md == NULL)
+ return 0;
+
+ if (EVP_MD_is_a(md, "SHA1")
|| EVP_MD_is_a(md, "SHA2-256")
|| EVP_MD_is_a(md, "SHA2-384"))
return 1;
@@ -127,6 +129,8 @@ int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c
} else
# endif
if (EVP_CIPHER_is_a(c, "AES-128-CBC") || EVP_CIPHER_is_a(c, "AES-256-CBC")) {
+ if (md == NULL)
+ return 0;
if (EVP_MD_is_a(md, "SHA1"))
crypto_info->auth_algorithm = CRYPTO_SHA1_HMAC;
else if (EVP_MD_is_a(md, "SHA2-256")) {
@@ -176,7 +180,8 @@ int ktls_check_supported_cipher(const SSL_CONNECTION *s, const EVP_CIPHER *c,
return 0;
}
- /* check that cipher is AES_GCM_128, AES_GCM_256, AES_CCM_128
+ /*
+ * Check that cipher is AES_GCM_128, AES_GCM_256, AES_CCM_128
* or Chacha20-Poly1305
*/
# ifdef OPENSSL_KTLS_AES_CCM_128
@@ -217,7 +222,8 @@ static int ktls_int_check_supported_cipher(OSSL_RECORD_LAYER *rl,
return 0;
}
- /* check that cipher is AES_GCM_128, AES_GCM_256, AES_CCM_128
+ /*
+ * Check that cipher is AES_GCM_128, AES_GCM_256, AES_CCM_128
* or Chacha20-Poly1305
*/
# ifdef OPENSSL_KTLS_AES_CCM_128
@@ -286,12 +292,13 @@ int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c
}
memset(crypto_info, 0, sizeof(*crypto_info));
- switch (EVP_CIPHER_get_nid(c))
- {
+ switch (EVP_CIPHER_get_nid(c)) {
# ifdef OPENSSL_KTLS_AES_GCM_128
case NID_aes_128_gcm:
- if (!ossl_assert(TLS_CIPHER_AES_GCM_128_SALT_SIZE == EVP_GCM_TLS_FIXED_IV_LEN)
- || !ossl_assert(TLS_CIPHER_AES_GCM_128_IV_SIZE == EVP_GCM_TLS_EXPLICIT_IV_LEN))
+ if (!ossl_assert(TLS_CIPHER_AES_GCM_128_SALT_SIZE
+ == EVP_GCM_TLS_FIXED_IV_LEN)
+ || !ossl_assert(TLS_CIPHER_AES_GCM_128_IV_SIZE
+ == EVP_GCM_TLS_EXPLICIT_IV_LEN))
return 0;
crypto_info->gcm128.info.cipher_type = TLS_CIPHER_AES_GCM_128;
crypto_info->gcm128.info.version = version;
@@ -305,8 +312,10 @@ int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c
# endif
# ifdef OPENSSL_KTLS_AES_GCM_256
case NID_aes_256_gcm:
- if (!ossl_assert(TLS_CIPHER_AES_GCM_256_SALT_SIZE == EVP_GCM_TLS_FIXED_IV_LEN)
- || !ossl_assert(TLS_CIPHER_AES_GCM_256_IV_SIZE == EVP_GCM_TLS_EXPLICIT_IV_LEN))
+ if (!ossl_assert(TLS_CIPHER_AES_GCM_256_SALT_SIZE
+ == EVP_GCM_TLS_FIXED_IV_LEN)
+ || !ossl_assert(TLS_CIPHER_AES_GCM_256_IV_SIZE
+ == EVP_GCM_TLS_EXPLICIT_IV_LEN))
return 0;
crypto_info->gcm256.info.cipher_type = TLS_CIPHER_AES_GCM_256;
crypto_info->gcm256.info.version = version;
@@ -321,8 +330,10 @@ int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c
# endif
# ifdef OPENSSL_KTLS_AES_CCM_128
case NID_aes_128_ccm:
- if (!ossl_assert(TLS_CIPHER_AES_CCM_128_SALT_SIZE == EVP_CCM_TLS_FIXED_IV_LEN)
- || !ossl_assert(TLS_CIPHER_AES_CCM_128_IV_SIZE == EVP_CCM_TLS_EXPLICIT_IV_LEN))
+ if (!ossl_assert(TLS_CIPHER_AES_CCM_128_SALT_SIZE
+ == EVP_CCM_TLS_FIXED_IV_LEN)
+ || !ossl_assert(TLS_CIPHER_AES_CCM_128_IV_SIZE
+ == EVP_CCM_TLS_EXPLICIT_IV_LEN))
return 0;
crypto_info->ccm128.info.cipher_type = TLS_CIPHER_AES_CCM_128;
crypto_info->ccm128.info.version = version;
@@ -338,7 +349,8 @@ int ktls_configure_crypto(OSSL_LIB_CTX *libctx, int version, const EVP_CIPHER *c
case NID_chacha20_poly1305:
if (!ossl_assert(ivlen == TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE))
return 0;
- crypto_info->chacha20poly1305.info.cipher_type = TLS_CIPHER_CHACHA20_POLY1305;
+ crypto_info->chacha20poly1305.info.cipher_type
+ = TLS_CIPHER_CHACHA20_POLY1305;
crypto_info->chacha20poly1305.info.version = version;
crypto_info->tls_crypto_info_len = sizeof(crypto_info->chacha20poly1305);
memcpy(crypto_info->chacha20poly1305.iv, iv, ivlen);
diff --git a/ssl/record/methods/recmethod_local.h b/ssl/record/methods/recmethod_local.h
index 95ba642b91..e314147491 100644
--- a/ssl/record/methods/recmethod_local.h
+++ b/ssl/record/methods/recmethod_local.h
@@ -217,15 +217,15 @@ extern struct record_functions_st dtls_any_funcs;
void ossl_rlayer_fatal(OSSL_RECORD_LAYER *rl, int al, int reason,
const char *fmt, ...);
-# define RLAYERfatal(rl, al, r) RLAYERfatal_data((rl), (al), (r), NULL)
-# define RLAYERfatal_data \
+#define RLAYERfatal(rl, al, r) RLAYERfatal_data((rl), (al), (r), NULL)
+#define RLAYERfatal_data \
(ERR_new(), \
ERR_set_debug(OPENSSL_FILE, OPENSSL_LINE, OPENSSL_FUNC), \
ossl_rlayer_fatal)
-# define RLAYER_USE_EXPLICIT_IV(rl) ((rl)->version == TLS1_1_VERSION \
- || (rl)->version == TLS1_2_VERSION \
- || (rl)->isdtls)
+#define RLAYER_USE_EXPLICIT_IV(rl) ((rl)->version == TLS1_1_VERSION \
+ || (rl)->version == TLS1_2_VERSION \
+ || (rl)->isdtls)
int ossl_set_tls_provider_parameters(OSSL_RECORD_LAYER *rl,
EVP_CIPHER_CTX *ctx,
@@ -281,7 +281,7 @@ int tls_retry_write_records(OSSL_RECORD_LAYER *rl, size_t allowance,
size_t *sent);
int tls_get_alert_code(OSSL_RECORD_LAYER *rl);
int tls_set1_bio(OSSL_RECORD_LAYER *rl, BIO *bio);
-int tls_read_record(OSSL_RECORD_LAYER *rl, void **rechandle, int *rversion,
+int tls_read_record(OSSL_RECORD_LAYER *rl, void **rechandle, int *rversion,
int *type, unsigned char **data, size_t *datalen,
uint16_t *epoch, unsigned char *seq_num);
int tls_release_record(OSSL_RECORD_LAYER *rl, void *rechandle);
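The RLAYERfatal() macros adjusted in the recmethod_local.h hunk above work through a comma expression: ERR_new() and ERR_set_debug() record the error location first, and the expression then evaluates to ossl_rlayer_fatal, which is called with the caller's arguments plus the trailing NULL format string. Roughly, a call site such as the one in dtls_new_record_layer() preprocesses to the following (an illustrative expansion only):

    /* RLAYERfatal(*retrl, SSL_AD_INTERNAL_ERROR, ERR_R_MALLOC_FAILURE); */
    (ERR_new(),
     ERR_set_debug(OPENSSL_FILE, OPENSSL_LINE, OPENSSL_FUNC),
     ossl_rlayer_fatal)((*retrl), SSL_AD_INTERNAL_ERROR, ERR_R_MALLOC_FAILURE, NULL);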
diff --git a/ssl/record/methods/ssl3_cbc.c b/ssl/record/methods/ssl3_cbc.c
index 9b402f80dd..25f3d9e1c6 100644
--- a/ssl/record/methods/ssl3_cbc.c
+++ b/ssl/record/methods/ssl3_cbc.c
@@ -16,7 +16,6 @@
* moved out of libssl.
*/
-
/*
* MD5 and SHA-1 low level APIs are deprecated for public use, but still ok for
* internal use.
@@ -53,10 +52,10 @@
* little-endian order. The value of p is advanced by four.
*/
# define u32toLE(n, p) \
- (*((p)++)=(unsigned char)(n), \
- *((p)++)=(unsigned char)(n>>8), \
- *((p)++)=(unsigned char)(n>>16), \
- *((p)++)=(unsigned char)(n>>24))
+ (*((p)++) = (unsigned char)(n ), \
+ *((p)++) = (unsigned char)(n >> 8), \
+ *((p)++) = (unsigned char)(n >> 16), \
+ *((p)++) = (unsigned char)(n >> 24))
/*
* These functions serialize the state of a hash and thus perform the
@@ -66,6 +65,7 @@
static void tls1_md5_final_raw(void *ctx, unsigned char *md_out)
{
MD5_CTX *md5 = ctx;
+
u32toLE(md5->A, md_out);
u32toLE(md5->B, md_out);
u32toLE(md5->C, md_out);
@@ -76,6 +76,7 @@ static void tls1_md5_final_raw(void *ctx, unsigned char *md_out)
static void tls1_sha1_final_raw(void *ctx, unsigned char *md_out)
{
SHA_CTX *sha1 = ctx;
+
l2n(sha1->h0, md_out);
l2n(sha1->h1, md_out);
l2n(sha1->h2, md_out);
@@ -88,9 +89,8 @@ static void tls1_sha256_final_raw(void *ctx, unsigned char *md_out)
SHA256_CTX *sha256 = ctx;
unsigned i;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++)
l2n(sha256->h[i], md_out);
- }
}
static void tls1_sha512_final_raw(void *ctx, unsigned char *md_out)
@@ -98,9 +98,8 @@ static void tls1_sha512_final_raw(void *ctx, unsigned char *md_out)
SHA512_CTX *sha512 = ctx;
unsigned i;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++)
l2n8(sha512->h[i], md_out);
- }
}
#undef LARGEST_DIGEST_CTX
@@ -196,14 +195,14 @@ int ssl3_cbc_digest_record(const EVP_MD *md,
md_transform =
(void (*)(void *ctx, const unsigned char *block))SHA256_Transform;
md_size = 224 / 8;
- } else if (EVP_MD_is_a(md, "SHA2-256")) {
+ } else if (EVP_MD_is_a(md, "SHA2-256")) {
if (SHA256_Init((SHA256_CTX *)md_state.c) <= 0)
return 0;
md_final_raw = tls1_sha256_final_raw;
md_transform =
(void (*)(void *ctx, const unsigned char *block))SHA256_Transform;
md_size = 32;
- } else if (EVP_MD_is_a(md, "SHA2-384")) {
+ } else if (EVP_MD_is_a(md, "SHA2-384")) {
if (SHA384_Init((SHA512_CTX *)md_state.c) <= 0)
return 0;
md_final_raw = tls1_sha512_final_raw;
@@ -238,10 +237,11 @@ int ssl3_cbc_digest_record(const EVP_MD *md,
header_length = 13;
if (is_sslv3) {
- header_length = mac_secret_length + sslv3_pad_length + 8 /* sequence
- * number */ +
- 1 /* record type */ +
- 2 /* record length */ ;
+ header_length = mac_secret_length
+ + sslv3_pad_length
+ + 8 /* sequence number */
+ + 1 /* record type */
+ + 2; /* record length */
}
/*
@@ -259,7 +259,9 @@ int ssl3_cbc_digest_record(const EVP_MD *md,
* short and there obviously cannot be this many blocks then
* variance_blocks can be reduced.
*/
- variance_blocks = is_sslv3 ? 2 : ( ((255 + 1 + md_size + md_block_size - 1) / md_block_size) + 1);
+ variance_blocks = is_sslv3 ? 2
+ : (((255 + 1 + md_size + md_block_size - 1)
+ / md_block_size) + 1);
/*
* From now on we're dealing with the MAC, which conceptually has 13
* bytes of `header' before the start of the data (TLS) or 71/75 bytes
@@ -401,8 +403,10 @@ int ssl3_cbc_digest_record(const EVP_MD *md,
unsigned char block[MAX_HASH_BLOCK_SIZE];
unsigned char is_block_a = constant_time_eq_8_s(i, index_a);
unsigned char is_block_b = constant_time_eq_8_s(i, index_b);
+
for (j = 0; j < md_block_size; j++) {
unsigned char b = 0, is_past_c, is_past_cp1;
+
if (k < header_length)
b = header[k];
else if (k < data_plus_mac_plus_padding_size + header_length)
@@ -453,7 +457,7 @@ int ssl3_cbc_digest_record(const EVP_MD *md,
if (md_ctx == NULL)
goto err;
- if (EVP_DigestInit_ex(md_ctx, md, NULL /* engine */ ) <= 0)
+ if (EVP_DigestInit_ex(md_ctx, md, NULL /* engine */) <= 0)
goto err;
if (is_sslv3) {
/* We repurpose |hmac_pad| to contain the SSLv3 pad2 block. */
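For a sense of scale, evaluating the non-SSLv3 branch of the variance_blocks expression reformatted above with typical SHA-256 parameters (md_size = 32, md_block_size = 64, assumed here purely for illustration) gives six blocks; a minimal standalone check:

    #include <stdio.h>

    int main(void)
    {
        size_t md_size = 32, md_block_size = 64;    /* assumed SHA-256 values */
        size_t variance_blocks = ((255 + 1 + md_size + md_block_size - 1)
                                  / md_block_size) + 1;    /* (351 / 64) + 1 */

        printf("variance_blocks = %zu\n", variance_blocks);    /* prints 6 */
        return 0;
    }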
diff --git a/ssl/record/methods/ssl3_meth.c b/ssl/record/methods/ssl3_meth.c
index 21aed43532..254e8cf147 100644
--- a/ssl/record/methods/ssl3_meth.c
+++ b/ssl/record/methods/ssl3_meth.c
@@ -58,6 +58,7 @@ static int ssl3_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
if (EVP_CIPHER_get0_provider(ciph) != NULL
&& !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md)) {
+ /* ERR_raise already called */
return OSSL_RECORD_RETURN_FATAL;
}
@@ -111,17 +112,17 @@ static int ssl3_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *inrecs, size_t n_recs
if ((bs != 1) && sending && !provided) {
/*
- * We only do this for legacy ciphers. Provided ciphers add the
- * padding on the provider side.
- */
+ * We only do this for legacy ciphers. Provided ciphers add the
+ * padding on the provider side.
+ */
i = bs - (l % bs);
/* we need to add 'i-1' padding bytes */
l += i;
/*
- * the last of these zero bytes will be overwritten with the
- * padding length.
- */
+ * the last of these zero bytes will be overwritten with the
+ * padding length.
+ */
memset(&rec->input[rec->length], 0, i);
rec->length += i;
rec->input[l - 1] = (unsigned char)(i - 1);
@@ -139,7 +140,7 @@ static int ssl3_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *inrecs, size_t n_recs
int outlen;
if (!EVP_CipherUpdate(ds, rec->data, &outlen, rec->input,
- (unsigned int)l))
+ (unsigned int)l))
return 0;
rec->length = outlen;
@@ -151,8 +152,8 @@ static int ssl3_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *inrecs, size_t n_recs
mac->alloced = 0;
*p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
- (void **)&mac->mac,
- macsize);
+ (void **)&mac->mac,
+ macsize);
*p = OSSL_PARAM_construct_end();
if (!EVP_CIPHER_CTX_get_params(ds, params)) {
diff --git a/ssl/record/methods/tls13_meth.c b/ssl/record/methods/tls13_meth.c
index aea432c63a..2a24067d14 100644
--- a/ssl/record/methods/tls13_meth.c
+++ b/ssl/record/methods/tls13_meth.c
@@ -43,9 +43,11 @@ static int tls13_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
mode = EVP_CIPHER_get_mode(ciph);
if (EVP_DecryptInit_ex(ciph_ctx, ciph, NULL, NULL, NULL) <= 0
- || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, ivlen, NULL) <= 0
+ || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, ivlen,
+ NULL) <= 0
|| (mode == EVP_CIPH_CCM_MODE
- && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG, taglen, NULL) <= 0)
+ && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG, taglen,
+ NULL) <= 0)
|| EVP_DecryptInit_ex(ciph_ctx, NULL, NULL, key, NULL) <= 0) {
ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
return OSSL_RECORD_RETURN_FATAL;
@@ -216,8 +218,7 @@ static int tls13_post_process_record(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec)
}
/* Strip trailing padding */
- for (end = rec->length - 1; end > 0 && rec->data[end] == 0;
- end--)
+ for (end = rec->length - 1; end > 0 && rec->data[end] == 0; end--)
continue;
rec->length = end;
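The padding-strip loop condensed above walks backwards over the zero padding that TLS 1.3 places after the inner content type byte; a minimal standalone sketch of the same scan, using a hypothetical decrypted payload:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical inner plaintext: data bytes, content type 0x17, zero padding */
        unsigned char data[] = { 'h', 'i', 0x17, 0x00, 0x00, 0x00 };
        size_t end;

        for (end = sizeof(data) - 1; end > 0 && data[end] == 0; end--)
            continue;

        /* end now indexes the last non-zero byte: 0x17 at index 2 */
        printf("end = %zu, data[end] = 0x%02x\n", end, (unsigned int)data[end]);
        return 0;
    }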
diff --git a/ssl/record/methods/tls1_meth.c b/ssl/record/methods/tls1_meth.c
index ae162e69a4..db3cad4ee3 100644
--- a/ssl/record/methods/tls1_meth.c
+++ b/ssl/record/methods/tls1_meth.c
@@ -56,7 +56,7 @@ static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
* If we have an AEAD Cipher, then there is no separate MAC, so we can skip
* setting up the MAC key.
*/
- if (!(EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER)) {
+ if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
if (mactype == EVP_PKEY_HMAC) {
mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
rl->propq, mackey,
@@ -123,9 +123,9 @@ static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
#define MAX_PADDING 256
/*-
- * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls SSLfatal on internal
- * error, but not otherwise. It is the responsibility of the caller to report
- * a bad_record_mac - if appropriate (DTLS just drops the record).
+ * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
+ * internal error, but not otherwise. It is the responsibility of the caller to
+ * report a bad_record_mac - if appropriate (DTLS just drops the record).
*
* Returns:
* 0: if the record is publicly invalid, or an internal error, or AEAD
@@ -150,13 +150,14 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
int n = EVP_MD_CTX_get_size(rl->md_ctx);
+
if (!ossl_assert(n >= 0)) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
}
}
ds = rl->enc_ctx;
- if (!ossl_assert(rl->enc_ctx)) {
+ if (!ossl_assert(rl->enc_ctx != NULL)) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
}
@@ -178,7 +179,7 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
} else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
- ivlen, 0) <= 0) {
+ ivlen, 0) <= 0) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
}
@@ -196,7 +197,7 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
if (n_recs > 1) {
if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
- & EVP_CIPH_FLAG_PIPELINE) == 0) {
+ & EVP_CIPH_FLAG_PIPELINE) == 0) {
/*
* We shouldn't have been called with pipeline data if the
* cipher doesn't support pipelining
@@ -209,7 +210,7 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
reclen[ctr] = recs[ctr].length;
if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
- & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
+ & EVP_CIPH_FLAG_AEAD_CIPHER) != 0) {
unsigned char *seq;
seq = rl->sequence;
@@ -235,7 +236,7 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
- EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
+ EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
if (pad <= 0) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
@@ -245,7 +246,6 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
reclen[ctr] += pad;
recs[ctr].length += pad;
}
-
} else if ((bs != 1) && sending && !provided) {
/*
* We only do this for legacy ciphers. Provided ciphers add the
@@ -278,22 +278,22 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
unsigned char *data[SSL_MAX_PIPELINES];
/* Set the output buffers */
- for (ctr = 0; ctr < n_recs; ctr++) {
+ for (ctr = 0; ctr < n_recs; ctr++)
data[ctr] = recs[ctr].data;
- }
+
if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
(int)n_recs, data) <= 0) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
return 0;
}
/* Set the input buffers */
- for (ctr = 0; ctr < n_recs; ctr++) {
+ for (ctr = 0; ctr < n_recs; ctr++)
data[ctr] = recs[ctr].input;
- }
+
if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
(int)n_recs, data) <= 0
|| EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
- (int)n_recs, reclen) <= 0) {
+ (int)n_recs, reclen) <= 0) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
return 0;
}
@@ -322,13 +322,13 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
int outlen;
/* Provided cipher - we do not support pipelining on this path */
- if (n_recs > 1) {
+ if (n_recs > 1) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
}
if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
- (unsigned int)reclen[0]))
+ (unsigned int)reclen[0]))
return 0;
recs[0].length = outlen;
@@ -339,11 +339,11 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
*/
if (!sending) {
if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
- recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
- recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
} else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
- recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
- recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
+ recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
+ recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
} else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
recs[0].data += bs;
recs[0].input += bs;
@@ -358,8 +358,8 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
macs[0].alloced = 0;
*p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
- (void **)&macs[0].mac,
- macsize);
+ (void **)&macs[0].mac,
+ macsize);
*p = OSSL_PARAM_construct_end();
if (!EVP_CIPHER_CTX_get_params(ds, params)) {
@@ -374,9 +374,9 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
/* Legacy cipher */
tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
- (unsigned int)reclen[0]);
+ (unsigned int)reclen[0]);
if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
- & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
+ & EVP_CIPH_FLAG_CUSTOM_CIPHER) != 0
? (tmpr < 0)
: (tmpr == 0)) {
/* AEAD can fail to verify MAC */
@@ -412,7 +412,7 @@ static int tls1_cipher(OSSL_RECORD_LAYER *rl, SSL3_RECORD *recs, size_t n_recs,
recs[ctr].data,
(macs != NULL) ? &macs[ctr].mac : NULL,
(macs != NULL) ? &macs[ctr].alloced
- : NULL,
+ : NULL,
bs,
pad ? (size_t)pad : macsize,
(EVP_CIPHER_get_flags(enc)
@@ -456,9 +456,8 @@ static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
if (!rl->isdtls
&& rl->tlstree
- && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0) {
+ && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
goto end;
- }
if (rl->isdtls) {
unsigned char dtlsseq[8], *p = dtlsseq;
@@ -467,8 +466,9 @@ static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
memcpy(p, &seq[2], 6);
memcpy(header, dtlsseq, 8);
- } else
+ } else {
memcpy(header, seq, 8);
+ }
header[8] = rec->type;
header[9] = (unsigned char)(rl->version >> 8);
@@ -486,16 +486,14 @@ static int tls1_mac(OSSL_RECORD_LAYER *rl, SSL3_RECORD *rec, unsigned char *md,
*p++ = OSSL_PARAM_construct_end();
if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
- tls_hmac_params)) {
+ tls_hmac_params))
goto end;
- }
}
if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
|| EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
- || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0) {
+ || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
goto end;
- }
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "seq:\n");
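The header[] assembly near the end of the tls1_mac() changes above follows the standard 13-byte TLS MAC input layout: an 8-byte sequence number (epoch plus sequence for DTLS), the record type, the 2-byte protocol version, and the 2-byte record length, matching the AAD bytes built in tls1_cipher(). A small sketch of that layout with example values assumed for illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char seq[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };   /* example sequence number */
        unsigned char header[13];
        int version = 0x0303;                                /* TLS 1.2 */
        size_t length = 32;                                  /* example record length */

        memcpy(header, seq, 8);
        header[8] = 23;                                      /* application_data */
        header[9] = (unsigned char)(version >> 8);
        header[10] = (unsigned char)(version & 0xff);
        header[11] = (unsigned char)(length >> 8);
        header[12] = (unsigned char)(length & 0xff);

        printf("type=%d version=%d,%d length=%d\n",
               header[8], header[9], header[10],
               (header[11] << 8) | header[12]);
        return 0;
    }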
diff --git a/ssl/record/methods/tls_common.c b/ssl/record/methods/tls_common.c
index a537db6ffd..2c6c73a2a0 100644
--- a/ssl/record/methods/tls_common.c
+++ b/ssl/record/methods/tls_common.c
@@ -106,7 +106,7 @@ int tls_setup_read_buffer(OSSL_RECORD_LAYER *rl)
else
headerlen = SSL3_RT_HEADER_LENGTH;
-#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
+#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
align = (-SSL3_RT_HEADER_LENGTH) & (SSL3_ALIGN_PAYLOAD - 1);
#endif
@@ -140,7 +140,7 @@ static int tls_release_read_buffer(OSSL_RECORD_LAYER *rl)
SSL3_BUFFER *b;
b = &rl->rbuf;
- if (rl->options & SSL_OP_CLEANSE_PLAINTEXT)
+ if ((rl->options & SSL_OP_CLEANSE_PLAINTEXT) != 0)
OPENSSL_cleanse(b->buf, b->len);
OPENSSL_free(b->buf);
b->buf = NULL;
@@ -262,8 +262,8 @@ int tls_default_read_n(OSSL_RECORD_LAYER *rl, size_t n, size_t max, int extend,
BIO *bio = rl->prev != NULL ? rl->prev : rl->bio;
/*
- * Now we have len+left bytes at the front of s->s3.rbuf.buf and
- * need to read in more until we have len+n (up to len+max if