summary refs log tree commit diff stats
path: root/ssl/t1_enc.c
diff options
context:
space:
mode:
authorBoris Pismenny <borisp@mellanox.com>2019-02-21 16:39:36 +0200
committerMatt Caswell <matt@openssl.org>2019-04-01 11:54:48 +0100
commitc35e921ffa58a84be7f68a37e5799ebefecf7326 (patch)
tree9f4f3ce0fd4821ee19588f284fab3f26bc4b164a /ssl/t1_enc.c
parente401ef801e410d4d0d6dba62cc599cde786024b5 (diff)
ssl: Linux TLS Rx Offload
This patch adds support for the Linux TLS Rx socket option. It completes the previous patch for TLS Tx offload. If the socket option is successful, then the receive data-path of the TCP socket is implemented by the kernel. We choose to set this option at the earliest - just after CCS is complete. Change-Id: I59741e04d89dddca7fb138e88fffcc1259b30132 Signed-off-by: Boris Pismenny <borisp@mellanox.com> Reviewed-by: Bernd Edlinger <bernd.edlinger@hotmail.de> Reviewed-by: Matt Caswell <matt@openssl.org> (Merged from https://github.com/openssl/openssl/pull/7848)
Diffstat (limited to 'ssl/t1_enc.c')
-rw-r--r--ssl/t1_enc.c87
1 files changed, 75 insertions, 12 deletions
diff --git a/ssl/t1_enc.c b/ssl/t1_enc.c
index fe4ba9386d..5925e6a1b8 100644
--- a/ssl/t1_enc.c
+++ b/ssl/t1_enc.c
@@ -83,6 +83,39 @@ static int tls1_generate_key_block(SSL *s, unsigned char *km, size_t num)
return ret;
}
+#ifndef OPENSSL_NO_KTLS
+ /*
+ * Count the number of records that were not processed yet from record boundary.
+ *
+ * This function assumes that there are only fully formed records read in the
+ * record layer. If read_ahead is enabled, then this might be false and this
+ * function will fail.
+ */
+static int count_unprocessed_records(SSL *s)
+{
+ SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
+ PACKET pkt, subpkt;
+ int count = 0;
+
+ if (!PACKET_buf_init(&pkt, rbuf->buf + rbuf->offset, rbuf->left))
+ return -1;
+
+ while (PACKET_remaining(&pkt) > 0) {
+ /* Skip record type and version */
+ if (!PACKET_forward(&pkt, 3))
+ return -1;
+
+ /* Read until next record */
+ if (PACKET_get_length_prefixed_2(&pkt, &subpkt))
+ return -1;
+
+ count += 1;
+ }
+
+ return count;
+}
+#endif
+
int tls1_change_cipher_state(SSL *s, int which)
{
unsigned char *p, *mac_secret;
@@ -101,8 +134,10 @@ int tls1_change_cipher_state(SSL *s, int which)
int reuse_dd = 0;
#ifndef OPENSSL_NO_KTLS
struct tls12_crypto_info_aes_gcm_128 crypto_info;
- BIO *wbio;
+ BIO *bio;
unsigned char geniv[12];
+ int count_unprocessed;
+ int bit;
#endif
c = s->s3->tmp.new_sym_enc;
@@ -326,8 +361,8 @@ int tls1_change_cipher_state(SSL *s, int which)
if (s->compress)
goto skip_ktls;
- if ((which & SSL3_CC_READ) ||
- ((which & SSL3_CC_WRITE) && (s->mode & SSL_MODE_NO_KTLS_TX)))
+ if (((which & SSL3_CC_READ) && (s->mode & SSL_MODE_NO_KTLS_RX))
+ || ((which & SSL3_CC_WRITE) && (s->mode & SSL_MODE_NO_KTLS_TX)))
goto skip_ktls;
/* ktls supports only the maximum fragment size */
@@ -344,19 +379,26 @@ int tls1_change_cipher_state(SSL *s, int which)
if (s->version != TLS1_2_VERSION)
goto skip_ktls;
- wbio = s->wbio;
- if (!ossl_assert(wbio != NULL)) {
+ if (which & SSL3_CC_WRITE)
+ bio = s->wbio;
+ else
+ bio = s->rbio;
+
+ if (!ossl_assert(bio != NULL)) {
SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS1_CHANGE_CIPHER_STATE,
ERR_R_INTERNAL_ERROR);
goto err;
}
/* All future data will get encrypted by ktls. Flush the BIO or skip ktls */
- if (BIO_flush(wbio) <= 0)
- goto skip_ktls;
+ if (which & SSL3_CC_WRITE) {
+ if (BIO_flush(bio) <= 0)
+ goto skip_ktls;
+ }
/* ktls doesn't support renegotiation */
- if (BIO_get_ktls_send(s->wbio)) {
+ if ((BIO_get_ktls_send(s->wbio) && (which & SSL3_CC_WRITE)) ||
+ (BIO_get_ktls_recv(s->rbio) && (which & SSL3_CC_READ))) {
SSLfatal(s, SSL_AD_NO_RENEGOTIATION, SSL_F_TLS1_CHANGE_CIPHER_STATE,
ERR_R_INTERNAL_ERROR);
goto err;
@@ -373,12 +415,33 @@ int tls1_change_cipher_state(SSL *s, int which)
TLS_CIPHER_AES_GCM_128_IV_SIZE);
memcpy(crypto_info.salt, geniv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
memcpy(crypto_info.key, key, EVP_CIPHER_key_length(c));
- memcpy(crypto_info.rec_seq, &s->rlayer.write_sequence,
- TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+ if (which & SSL3_CC_WRITE)
+ memcpy(crypto_info.rec_seq, &s->rlayer.write_sequence,
+ TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+ else
+ memcpy(crypto_info.rec_seq, &s->rlayer.read_sequence,
+ TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+
+ if (which & SSL3_CC_READ) {
+ count_unprocessed = count_unprocessed_records(s);
+ if (count_unprocessed < 0)
+ goto skip_ktls;
+
+ /* increment the crypto_info record sequence */
+ while (count_unprocessed) {
+ for (bit = 7; bit >= 0; bit--) { /* increment */
+ ++crypto_info.rec_seq[bit];
+ if (crypto_info.rec_seq[bit] != 0)
+ break;
+ }
+ count_unprocessed--;
+ }
+ }
/* ktls works with user provided buffers directly */
- if (BIO_set_ktls(wbio, &crypto_info, which & SSL3_CC_WRITE)) {
- ssl3_release_write_buffer(s);
+ if (BIO_set_ktls(bio, &crypto_info, which & SSL3_CC_WRITE)) {
+ if (which & SSL3_CC_WRITE)
+ ssl3_release_write_buffer(s);
SSL_set_options(s, SSL_OP_NO_RENEGOTIATION);
}