summaryrefslogtreecommitdiffstats
path: root/ssl/quic
diff options
context:
space:
mode:
authorHugo Landau <hlandau@openssl.org>2022-09-06 13:23:29 +0100
committerTomas Mraz <tomas@openssl.org>2022-10-05 16:15:06 +0200
commit830225901365b7076eaa5afc580529394e2a137f (patch)
treeb6fbf6bc0a40f48ad3eb65afb1fd0da642407745 /ssl/quic
parent928f15e71b0bccabb10cbdcbb9b2d4e85eeb5906 (diff)
QUIC Send Stream Management
Reviewed-by: Matt Caswell <matt@openssl.org> Reviewed-by: Tomas Mraz <tomas@openssl.org> (Merged from https://github.com/openssl/openssl/pull/19159)
Diffstat (limited to 'ssl/quic')
-rw-r--r--ssl/quic/build.info6
-rw-r--r--ssl/quic/quic_ackm.c406
-rw-r--r--ssl/quic/quic_demux.c9
-rw-r--r--ssl/quic/quic_stream.c537
-rw-r--r--ssl/quic/uint_set.c382
5 files changed, 954 insertions, 386 deletions
diff --git a/ssl/quic/build.info b/ssl/quic/build.info
index 56c445e19c..017100407d 100644
--- a/ssl/quic/build.info
+++ b/ssl/quic/build.info
@@ -1,3 +1,7 @@
$LIBSSL=../../libssl
-SOURCE[$LIBSSL]=quic_method.c quic_impl.c quic_wire.c quic_ackm.c quic_statm.c cc_dummy.c quic_demux.c quic_record_rx.c quic_record_rx_wrap.c quic_record_tx.c quic_record_util.c quic_record_shared.c quic_wire_pkt.c quic_rx_depack.c quic_fc.c
+SOURCE[$LIBSSL]=quic_method.c quic_impl.c quic_wire.c quic_ackm.c quic_statm.c
+SOURCE[$LIBSSL]=cc_dummy.c quic_demux.c quic_record_rx.c
+SOURCE[$LIBSSL]=quic_record_tx.c quic_record_util.c quic_record_shared.c quic_wire_pkt.c
+SOURCE[$LIBSSL]=quic_record_rx_wrap.c quic_rx_depack.c
+SOURCE[$LIBSSL]=quic_fc.c uint_set.c quic_stream.c
diff --git a/ssl/quic/quic_ackm.c b/ssl/quic/quic_ackm.c
index b279cc184a..a73fb982f7 100644
--- a/ssl/quic/quic_ackm.c
+++ b/ssl/quic/quic_ackm.c
@@ -8,6 +8,7 @@
*/
#include "internal/quic_ackm.h"
+#include "internal/uint_set.h"
#include "internal/common.h"
#include <assert.h>
@@ -373,45 +374,14 @@ tx_pkt_history_remove(struct tx_pkt_history_st *h, uint64_t pkt_num)
* given PN until that PN becomes provably ACKed and we finally remove it from
* our set (by bumping the watermark) as no longer being our concern.
*
- * The data structure supports the following operations:
+ * The data structure used is the UINT_SET structure defined in uint_set.h,
+ * which is used as a PN set. We use the following operations of the structure:
*
- * Insert Range: Adds an inclusive range of packet numbers [start, end]
- * to the set. Equivalent to Insert for each number
- * in the range. (Used when we receive a new PN.)
+ * Insert Range: Used when we receive a new PN.
*
- * Remove Range: Removes an inclusive range of packet numbers [start, end]
- * from the set. Not all of the range need already be in
- * the set, but any part of the range in the set is removed.
- * (Used when bumping the watermark.)
+ * Remove Range: Used when bumping the watermark.
*
- * Query: Is a PN in the data structure?
- *
- * The data structure can be iterated.
- *
- * For greater efficiency in tracking large numbers of contiguous PNs, we track
- * PN ranges rather than individual PNs. The data structure manages a list of PN
- * ranges [[start, end]...]. Internally this is implemented as a doubly linked
- * sorted list of range structures, which are automatically split and merged as
- * necessary.
- *
- * This data structure requires O(n) traversal of the list for insertion,
- * removal and query when we are not adding/removing ranges which are near the
- * beginning or end of the set of ranges. It is expected that the number of PN
- * ranges needed at any given time will generally be small and that most
- * operations will be close to the beginning or end of the range.
- *
- * Invariant: The data structure is always sorted in ascending order by PN.
- *
- * Invariant: No two adjacent ranges ever 'border' one another (have no
- * numerical gap between them) as the data structure always ensures
- * such ranges are merged.
- *
- * Invariant: No two ranges ever overlap.
- *
- * Invariant: No range [a, b] ever has a > b.
- *
- * Invariant: Since ranges are represented using inclusive bounds, no range
- * item inside the data structure can represent a span of zero PNs.
+ * Query: Used to determine if a PN is in the set.
*
* **Possible duplicates.** A PN is considered a possible duplicate when either:
*
@@ -435,344 +405,8 @@ tx_pkt_history_remove(struct tx_pkt_history_st *h, uint64_t pkt_num)
* used to update the state of the RX side of the ACK manager by bumping the
* watermark accordingly.
*/
-struct pn_set_item_st {
- struct pn_set_item_st *prev, *next;
- OSSL_QUIC_ACK_RANGE range;
-};
-
-struct pn_set_st {
- struct pn_set_item_st *head, *tail;
-
- /* Number of ranges (not PNs) in the set. */
- size_t num_ranges;
-};
-
-static void pn_set_init(struct pn_set_st *s)
-{
- s->head = s->tail = NULL;
- s->num_ranges = 0;
-}
-
-static void pn_set_destroy(struct pn_set_st *s)
-{
- struct pn_set_item_st *x, *xnext;
-
- for (x = s->head; x != NULL; x = xnext) {
- xnext = x->next;
- OPENSSL_free(x);
- }
-}
-
-/* Possible merge of x, x->prev */
-static void pn_set_merge_adjacent(struct pn_set_st *s, struct pn_set_item_st *x)
-{
- struct pn_set_item_st *xprev = x->prev;
-
- if (xprev == NULL)
- return;
-
- if (x->range.start - 1 != xprev->range.end)
- return;
-
- x->range.start = xprev->range.start;
- x->prev = xprev->prev;
- if (x->prev != NULL)
- x->prev->next = x;
-
- if (s->head == xprev)
- s->head = x;
-
- OPENSSL_free(xprev);
- --s->num_ranges;
-}
-
-/* Returns 1 if there exists a PN x which falls within both ranges a and b. */
-static int pn_range_overlaps(const OSSL_QUIC_ACK_RANGE *a,
- const OSSL_QUIC_ACK_RANGE *b)
-{
- return ossl_quic_pn_min(a->end, b->end)
- >= ossl_quic_pn_max(a->start, b->start);
-}
-
-/*
- * Insert a range into a PN set. Returns 0 on allocation failure, in which case
- * the PN set is in a valid but undefined state. Otherwise, returns 1. Ranges
- * can overlap existing ranges without limitation. If a range is a subset of
- * an existing range in the set, this is a no-op and returns 1.
- */
-static int pn_set_insert(struct pn_set_st *s, const OSSL_QUIC_ACK_RANGE *range)
-{
- struct pn_set_item_st *x, *z, *xnext, *f, *fnext;
- QUIC_PN start = range->start, end = range->end;
-
- if (!ossl_assert(start <= end))
- return 0;
-
- if (s->head == NULL) {
- /* Nothing in the set yet, so just add this range. */
- x = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
- if (x == NULL)
- return 0;
-
- x->range.start = start;
- x->range.end = end;
- s->head = s->tail = x;
- ++s->num_ranges;
- return 1;
- }
-
- if (start > s->tail->range.end) {
- /*
- * Range is after the latest range in the set, so append.
- *
- * Note: The case where the range is before the earliest range in the
- * set is handled as a degenerate case of the final case below. See
- * optimization note (*) below.
- */
- if (s->tail->range.end + 1 == start) {
- s->tail->range.end = end;
- return 1;
- }
-
- x = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
- if (x == NULL)
- return 0;
-
- x->range.start = start;
- x->range.end = end;
- x->prev = s->tail;
- if (s->tail != NULL)
- s->tail->next = x;
- s->tail = x;
- ++s->num_ranges;
- return 1;
- }
-
- if (start <= s->head->range.start && end >= s->tail->range.end) {
- /*
- * New range dwarfs all ranges in our set.
- *
- * Free everything except the first range in the set, which we scavenge
- * and reuse.
- */
- for (x = s->head->next; x != NULL; x = xnext) {
- xnext = x->next;
- OPENSSL_free(x);
- }
-
- s->head->range.start = start;
- s->head->range.end = end;
- s->head->next = s->head->prev = NULL;
- s->tail = s->head;
- s->num_ranges = 1;
- return 1;
- }
-
- /*
- * Walk backwards since we will most often be inserting at the end. As an
- * optimization, test the head node first and skip iterating over the
- * entire list if we are inserting at the start. The assumption is that
- * insertion at the start and end of the space will be the most common
- * operations. (*)
- */
- z = end < s->head->range.start ? s->head : s->tail;
-
- for (; z != NULL; z = z->prev) {
- /* An existing range dwarfs our new range (optimisation). */
- if (z->range.start <= start && z->range.end >= end)
- return 1;
-
- if (pn_range_overlaps(&z->range, range)) {
- /*
- * Our new range overlaps an existing range, or possibly several
- * existing ranges.
- */
- struct pn_set_item_st *ovend = z;
- OSSL_QUIC_ACK_RANGE t;
- size_t n = 0;
-
- t.end = ossl_quic_pn_max(end, z->range.end);
-
- /* Get earliest overlapping range. */
- for (; z->prev != NULL && pn_range_overlaps(&z->prev->range, range);
- z = z->prev);
-
- t.start = ossl_quic_pn_min(start, z->range.start);
-
- /* Replace sequence of nodes z..ovend with ovend only. */
- ovend->range = t;
- ovend->prev = z->prev;
- if (z->prev != NULL)
- z->prev->next = ovend;
- if (s->head == z)
- s->head = ovend;
-
- /* Free now unused nodes. */
- for (f = z; f != ovend; f = fnext, ++n) {
- fnext = f->next;
- OPENSSL_free(f);
- }
-
- s->num_ranges -= n;
- break;
- } else if (end < z->range.start
- && (z->prev == NULL || start > z->prev->range.end)) {
- if (z->range.start == end + 1) {
- /* We can extend the following range backwards. */
- z->range.start = start;
-
- /*
- * If this closes a gap we now need to merge
- * consecutive nodes.
- */
- pn_set_merge_adjacent(s, z);
- } else if (z->prev != NULL && z->prev->range.end + 1 == start) {
- /* We can extend the preceding range forwards. */
- z->prev->range.end = end;
-
- /*
- * If this closes a gap we now need to merge
- * consecutive nodes.
- */
- pn_set_merge_adjacent(s, z);
- } else {
- /*
- * The new interval is between intervals without overlapping or
- * touching them, so insert between, preserving sort.
- */
- x = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
- if (x == NULL)
- return 0;
-
- x->range.start = start;
- x->range.end = end;
-
- x->next = z;
- x->prev = z->prev;
- if (x->prev != NULL)
- x->prev->next = x;
- z->prev = x;
- if (s->head == z)
- s->head = x;
-
- ++s->num_ranges;
- }
- break;
- }
- }
-
- return 1;
-}
-
-/*
- * Remove a range from the set. Returns 0 on allocation failure, in which case
- * the PN set is unchanged. Otherwise, returns 1. Ranges which are not already
- * in the set can be removed without issue. If a passed range is not in the PN
- * set at all, this is a no-op and returns 1.
- */
-static int pn_set_remove(struct pn_set_st *s, const OSSL_QUIC_ACK_RANGE *range)
-{
- struct pn_set_item_st *z, *zprev, *y;
- QUIC_PN start = range->start, end = range->end;
-
- if (!ossl_assert(start <= end))
- return 0;
-
- /* Walk backwards since we will most often be removing at the end. */
- for (z = s->tail; z != NULL; z = zprev) {
- zprev = z->prev;
-
- if (start > z->range.end)
- /* No overlapping ranges can exist beyond this point, so stop. */
- break;
-
- if (start <= z->range.start && end >= z->range.end) {
- /*
- * The range being removed dwarfs this range, so it should be
- * removed.
- */
- if (z->next != NULL)
- z->next->prev = z->prev;
- if (z->prev != NULL)
- z->prev->next = z->next;
- if (s->head == z)
- s->head = z->next;
- if (s->tail == z)
- s->tail = z->prev;
-
- OPENSSL_free(z);
- --s->num_ranges;
- } else if (start <= z->range.start) {
- /*
- * The range being removed includes start of this range, but does
- * not cover the entire range (as this would be caught by the case
- * above). Shorten the range.
- */
- assert(end < z->range.end);
- z->range.start = end + 1;
- } else if (end >= z->range.end) {
- /*
- * The range being removed includes the end of this range, but does
- * not cover the entire range (as this would be caught by the case
- * above). Shorten the range. We can also stop iterating.
- */
- assert(start > z->range.start);
- assert(start > 0);
- z->range.end = start - 1;
- break;
- } else if (start > z->range.start && end < z->range.end) {
- /*
- * The range being removed falls entirely in this range, so cut it
- * into two. Cases where a zero-length range would be created are
- * handled by the above cases.
- */
- y = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
- if (y == NULL)
- return 0;
-
- y->range.end = z->range.end;
- y->range.start = end + 1;
- y->next = z->next;
- y->prev = z;
- if (y->next != NULL)
- y->next->prev = y;
-
- z->range.end = start - 1;
- z->next = y;
-
- if (s->tail == z)
- s->tail = y;
-
- ++s->num_ranges;
- break;
- } else {
- /* Assert no partial overlap; all cases should be covered above. */
- assert(!pn_range_overlaps(&z->range, range));
- }
- }
-
- return 1;
-}
-
-/* Returns 1 iff the given PN is in the PN set. */
-static int pn_set_query(const struct pn_set_st *s, QUIC_PN pn)
-{
- struct pn_set_item_st *x;
-
- if (s->head == NULL)
- return 0;
-
- for (x = s->tail; x != NULL; x = x->prev)
- if (x->range.start <= pn && x->range.end >= pn)
- return 1;
- else if (x->range.end < pn)
- return 0;
-
- return 0;
-}
-
struct rx_pkt_history_st {
- struct pn_set_st set;
+ UINT_SET set;
/*
* Invariant: PNs below this are not in the set.
@@ -786,13 +420,13 @@ static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
static void rx_pkt_history_init(struct rx_pkt_history_st *h)
{
- pn_set_init(&h->set);
+ ossl_uint_set_init(&h->set);
h->watermark = 0;
}
static void rx_pkt_history_destroy(struct rx_pkt_history_st *h)
{
- pn_set_destroy(&h->set);
+ ossl_uint_set_destroy(&h->set);
}
/*
@@ -806,12 +440,12 @@ static void rx_pkt_history_trim_range_count(struct rx_pkt_history_st *h)
QUIC_PN highest = QUIC_PN_INVALID;
while (h->set.num_ranges > MAX_RX_ACK_RANGES) {
- OSSL_QUIC_ACK_RANGE r = h->set.head->range;
+ UINT_RANGE r = h->set.head->range;
highest = (highest == QUIC_PN_INVALID)
? r.end : ossl_quic_pn_max(highest, r.end);
- pn_set_remove(&h->set, &r);
+ ossl_uint_set_remove(&h->set, &r);
}
/*
@@ -825,7 +459,7 @@ static void rx_pkt_history_trim_range_count(struct rx_pkt_history_st *h)
static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
QUIC_PN pn)
{
- OSSL_QUIC_ACK_RANGE r;
+ UINT_RANGE r;
r.start = pn;
r.end = pn;
@@ -833,7 +467,7 @@ static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
if (pn < h->watermark)
return 1; /* consider this a success case */
- if (pn_set_insert(&h->set, &r) != 1)
+ if (ossl_uint_set_insert(&h->set, &r) != 1)
return 0;
rx_pkt_history_trim_range_count(h);
@@ -843,7 +477,7 @@ static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
QUIC_PN watermark)
{
- OSSL_QUIC_ACK_RANGE r;
+ UINT_RANGE r;
if (watermark <= h->watermark)
return 1;
@@ -851,7 +485,7 @@ static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
/* Remove existing PNs below the watermark. */
r.start = 0;
r.end = watermark - 1;
- if (pn_set_remove(&h->set, &r) != 1)
+ if (ossl_uint_set_remove(&h->set, &r) != 1)
return 0;
h->watermark = watermark;
@@ -1936,7 +1570,7 @@ static void ackm_fill_rx_ack_ranges(OSSL_ACKM *ackm, int pkt_space,
OSSL_QUIC_FRAME_ACK *ack)
{
struct rx_pkt_history_st *h = get_rx_history(ackm, pkt_space);
- struct pn_set_item_st *x;
+ UINT_SET_ITEM *x;
size_t i = 0;
/*
@@ -1945,8 +1579,10 @@ static void ackm_fill_rx_ack_ranges(OSSL_ACKM *ackm, int pkt_space,
*/
for (x = h->set.tail;
x != NULL && i < OSSL_NELEM(ackm->ack_ranges);
- x = x->prev, ++i)
- ackm->ack_ranges[pkt_space][i] = x->range;
+ x = x->prev, ++i) {
+ ackm->ack_ranges[pkt_space][i].start = x->range.start;
+ ackm->ack_ranges[pkt_space][i].end = x->range.end;
+ }
ack->ack_ranges = ackm->ack_ranges[pkt_space];
ack->num_ack_ranges = i;
@@ -1995,7 +1631,7 @@ int ossl_ackm_is_rx_pn_processable(OSSL_ACKM *ackm, QUIC_PN pn, int pkt_space)
{
struct rx_pkt_history_st *h = get_rx_history(ackm, pkt_space);
- return pn >= h->watermark && pn_set_query(&h->set, pn) == 0;
+ return pn >= h->watermark && ossl_uint_set_query(&h->set, pn) == 0;
}
void ossl_ackm_set_loss_detection_deadline_callback(OSSL_ACKM *ackm,
diff --git a/ssl/quic/quic_demux.c b/ssl/quic/quic_demux.c
index 67a61c691a..efb0997331 100644
--- a/ssl/quic/quic_demux.c
+++ b/ssl/quic/quic_demux.c
@@ -1,3 +1,12 @@
+/*
+ * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
#include "internal/quic_demux.h"
#include "internal/quic_wire_pkt.h"
#include "internal/common.h"
diff --git a/ssl/quic/quic_stream.c b/ssl/quic/quic_stream.c
new file mode 100644
index 0000000000..7e7d5417a4
--- /dev/null
+++ b/ssl/quic/quic_stream.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_stream.h"
+#include "internal/uint_set.h"
+#include "internal/common.h"
+
+/*
+ * ==================================================================
+ * Byte-wise ring buffer which supports pushing and popping blocks of multiple
+ * bytes at a time. The logical offset of each byte for the purposes of a QUIC
+ * stream is tracked. Bytes can be popped from the ring buffer in two stages;
+ * first they are popped, and then they are culled. Bytes which have been popped
+ * but not yet culled will not be overwritten, and can be restored.
+ */
+struct ring_buf {
+ void *start;
+ size_t alloc; /* size of buffer allocation in bytes */
+
+ /*
+ * Logical offset of the head (where we append to). This is the current size
+ * of the QUIC stream. This increases monotonically.
+ */
+ uint64_t head_offset;
+
+ /*
+ * Logical offset of the cull tail. Data is no longer needed and is
+ * deallocated as the cull tail advances, which occurs as data is
+ * acknowledged. This increases monotonically.
+ */
+ uint64_t ctail_offset;
+};
+
+static int ring_buf_init(struct ring_buf *r)
+{
+ r->start = NULL;
+ r->alloc = 0;
+ r->head_offset = r->ctail_offset = 0;
+ return 1;
+}
+
+static void ring_buf_destroy(struct ring_buf *r)
+{
+ OPENSSL_free(r->start);
+ r->start = NULL;
+ r->alloc = 0;
+}
+
+static size_t ring_buf_used(struct ring_buf *r)
+{
+ return r->head_offset - r->ctail_offset;
+}
+
+static size_t ring_buf_avail(struct ring_buf *r)
+{
+ return r->alloc - ring_buf_used(r);
+}
+
+static size_t ring_buf_push(struct ring_buf *r,
+ const unsigned char *buf, size_t buf_len)
+{
+ size_t pushed = 0, avail, idx, l, i;
+ unsigned char *start = r->start;
+
+ for (i = 0;; ++i) {
+ avail = ring_buf_avail(r);
+ if (buf_len > avail)
+ buf_len = avail;
+
+ if (buf_len == 0)
+ break;
+
+ assert(i < 2);
+
+ idx = r->head_offset % r->alloc;
+ l = r->alloc - idx;
+ if (buf_len < l)
+ l = buf_len;
+
+ memcpy(start + idx, buf, l);
+ r->head_offset += l;
+ buf += l;
+ buf_len -= l;
+ pushed += l;
+ }
+
+ return pushed;
+}
+
+/*
+ * Retrieves data out of the read side of the ring buffer starting at the given
+ * logical offset. *buf is set to point to a contiguous span of bytes and
+ * *buf_len is set to the number of contiguous bytes. After this function
+ * returns, there may or may not be more bytes available at the logical offset
+ * of (logical_offset + *buf_len) by calling this function again. If the logical
+ * offset is out of the range retained by the ring buffer, returns 0, else
+ * returns 1. A logical offset at the end of the range retained by the ring
+ * buffer is not considered an error and is returned with a *buf_len of 0.
+ *
+ * The ring buffer state is not changed.
+ */
+static int ring_buf_get_buf_at(const struct ring_buf *r,
+ uint64_t logical_offset,
+ const unsigned char **buf, size_t *buf_len)
+{
+ const unsigned char *start = r->start;
+ size_t idx, l;
+
+ if (logical_offset > r->head_offset || logical_offset < r->ctail_offset)
+ return 0;
+
+ if (r->alloc == 0) {
+ *buf = NULL;
+ *buf_len = 0;
+ return 1;
+ }
+
+ idx = logical_offset % r->alloc;
+ l = r->head_offset - logical_offset;
+ if (l > r->alloc - idx)
+ l = r->alloc - idx;
+
+ *buf = start + idx;
+ *buf_len = l;
+ return 1;
+}
+
+static void ring_buf_cpop_range(struct ring_buf *r,
+ uint64_t start, uint64_t end)
+{
+ assert(end >= start);
+
+ if (start > r->ctail_offset)
+ return;
+
+ r->ctail_offset = end + 1;
+}
+
+static int ring_buf_resize(struct ring_buf *r, size_t num_bytes)
+{
+ struct ring_buf rnew = {0};
+ const unsigned char *src = NULL;
+ size_t src_len = 0, copied = 0;
+
+ if (num_bytes == r->alloc)
+ return 1;
+
+ if (num_bytes < ring_buf_used(r))
+ return 0;
+
+ rnew.start = OPENSSL_malloc(num_bytes);
+ if (rnew.start == NULL)
+ return 0;
+
+ rnew.alloc = num_bytes;
+ rnew.head_offset = r->head_offset - ring_buf_used(r);
+ rnew.ctail_offset = rnew.head_offset;
+
+ for (;;) {
+ if (!ring_buf_get_buf_at(r, r->ctail_offset + copied, &src, &src_len)) {
+ OPENSSL_free(rnew.start);
+ return 0;
+ }
+
+ if (src_len == 0)
+ break;
+
+ if (ring_buf_push(&rnew, src, src_len) != src_len) {
+ OPENSSL_free(rnew.start);
+ return 0;
+ }
+
+ copied += src_len;
+ }
+
+ assert(rnew.head_offset == r->head_offset);
+ rnew.ctail_offset = r->ctail_offset;
+
+ OPENSSL_free(r->start);
+ memcpy(r, &rnew, sizeof(*r));
+ return 1;
+}
+
+/*
+ * ==================================================================
+ * QUIC Send Stream
+ */
+struct quic_sstream_st {
+ struct ring_buf ring_buf;
+
+ /*
+ * Any logical byte in the stream is in one of these states:
+ *
+ * - NEW: The byte has not yet been transmitted, or has been lost and is
+ * in need of retransmission.
+ *
+ * - IN_FLIGHT: The byte has been transmitted but is awaiting
+ * acknowledgement. We continue to store the data in case we return
+ * to the NEW state.
+ *
+ * - ACKED: The byte has been acknowledged and we can cease storing it.
+ * We do not necessarily cull it immediately, so there may be a delay
+ * between reaching the ACKED state and the buffer space actually being
+ * recycled.
+ *
+ * A logical byte in the stream is
+ *
+ * - in the NEW state if it is in new_set;
+ * - is in the ACKED state if it is in acked_set
+ * (and may or may not have been culled);
+ * - is in the IN_FLIGHT state otherwise.
+ *
+ * Invariant: No logical byte is ever in both new_set and acked_set.
+ */
+ UINT_SET new_set, acked_set;
+
+ /*
+ * The current size of the stream is ring_buf.head_offset. If
+ * have_final_size is true, this is also the final size of the stream.
+ */
+ unsigned int have_final_size : 1;
+ unsigned int sent_final_size : 1;
+ unsigned int acked_final_size : 1;
+};
+
+static void qss_cull(QUIC_SSTREAM *qss);
+
+QUIC_SSTREAM *ossl_quic_sstream_new(size_t init_buf_size)
+{
+ QUIC_SSTREAM *qss;
+
+ qss = OPENSSL_zalloc(sizeof(QUIC_SSTREAM));
+ if (qss == NULL)
+ return NULL;
+
+ ring_buf_init(&qss->ring_buf);
+ if (!ring_buf_resize(&qss->ring_buf, init_buf_size)) {
+ ring_buf_destroy(&qss->ring_buf);
+ OPENSSL_free(qss);
+ return NULL;
+ }
+
+ ossl_uint_set_init(&qss->new_set);
+ ossl_uint_set_init(&qss->acked_set);
+ return qss;
+}
+
+void ossl_quic_sstream_free(QUIC_SSTREAM *qss)
+{
+ if (qss == NULL)
+ return;
+
+ ossl_uint_set_destroy(&qss->new_set);
+ ossl_uint_set_destroy(&qss->acked_set);
+ ring_buf_destroy(&qss->ring_buf);
+ OPENSSL_free(qss);
+}
+
+int ossl_quic_sstream_get_stream_frame(QUIC_SSTREAM *qss,
+ size_t skip,
+ OSSL_QUIC_FRAME_STREAM *hdr,
+ OSSL_QTX_IOVEC *iov,
+ size_t *num_iov)
+{
+ size_t num_iov_ = 0, src_len = 0, total_len = 0, i;
+ uint64_t max_len;
+ const unsigned char *src = NULL;
+ UINT_SET_ITEM *range = qss->new_set.head;
+
+ if (*num_iov < 2)
+ return 0;
+
+ for (i = 0; i < skip && range != NULL; ++i)
+ range = range->next;
+
+ if (range == NULL) {
+ /* No new bytes to send, but we might have a FIN */
+ if (!qss->have_final_size || qss->sent_final_size)
+ return 0;
+
+ hdr->offset = qss->ring_buf.head_offset;
+ hdr->len = 0;
+ hdr->is_fin = 1;
+ *num_iov = 0;
+ return 1;
+ }
+
+ /*
+ * We can only send a contiguous range of logical bytes in a single
+ * stream frame, so limit ourselves to the range of the first set entry.
+ *
+ * Set entries never have 'adjacent' entries so we don't have to worry
+ * about them here.
+ */
+ max_len = range->range.end - range->range.start + 1;
+
+ for (i = 0;; ++i) {
+ if (total_len >= max_len)
+ break;
+
+ if (!ring_buf_get_buf_at(&qss->ring_buf,
+ range->range.start + total_len,
+ &src, &src_len))
+ return 0;
+
+ if (src_len == 0)
+ break;
+
+ assert(i < 2);
+
+ if (total_len + src_len > max_len)
+ src_len = max_len - total_len;
+
+ iov[num_iov_].buf = src;
+ iov[num_iov_].buf_len = src_len;
+
+ total_len += src_len;
+ ++num_iov_;
+ }
+
+ hdr->offset = range->range.start;
+ hdr->len = total_len;
+ hdr->is_fin = qss->have_final_size
+ && hdr->offset + hdr->len == qss->ring_buf.head_offset;
+
+ *num_iov = num_iov_;
+ return 1;
+}
+
+int ossl_quic_sstream_mark_transmitted(QUIC_SSTREAM *qss,
+ uint64_t start,
+ uint64_t end)
+{
+ UINT_RANGE r;
+
+ r.start = start;
+ r.end = end;
+
+ if (!ossl_uint_set_remove(&qss->new_set, &r))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_sstream_mark_transmitted_fin(QUIC_SSTREAM *qss,
+ uint64_t final_size)
+{
+ /*
+ * We do not really need final_size since we already know the size of the
+ * stream, but this serves as a sanity check.
+ */
+ if (!qss->have_final_size || final_size != qss->ring_buf.head_offset)
+ return 0;
+
+ qss->sent_final_size = 1;
+ return 1;
+}
+
+int ossl_quic_sstream_mark_lost(QUIC_SSTREAM *qss,
+ uint64_t start,
+ uint64_t end)
+{
+ UINT_RANGE r;
+ r.start = start;
+ r.end = end;
+
+ /*
+ * We lost a range of stream data bytes, so reinsert them into the new set,
+ * so that they are returned once more by ossl_quic_sstream_get_stream_frame.
+ */
+ if (!ossl_uint_set_insert(&qss->new_set, &r))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_sstream_mark_lost_fin(QUIC_SSTREAM *qss)
+{
+ if (qss->acked_final_size)
+ /* Does not make sense to lose a FIN after it has been ACKed */
+ return 0;
+
+ /* FIN was lost, so we need to transmit it again. */
+ qss->sent_final_size = 0;
+ return 1;
+}
+
+int ossl_quic_sstream_mark_acked(QUIC_SSTREAM *qss,
+ uint64_t start,
+ uint64_t end)
+{
+ UINT_RANGE r;
+ r.start = start;
+ r.end = end;
+
+ if (!ossl_uint_set_insert(&qss->acked_set, &r))
+ return 0;
+
+ qss_cull(qss);
+ return 1;
+}
+
+int ossl_quic_sstream_mark_acked_fin(QUIC_SSTREAM *qss)
+{
+ if (!qss->have_final_size)
+ /* Cannot ack final size before we have a final size */
+ return 0;
+
+ qss->acked_final_size = 1;
+ return 1;
+}
+
+void ossl_quic_sstream_fin(QUIC_SSTREAM *qss)
+{
+ if (qss->have_final_size)
+ return;
+
+ qss->have_final_size = 1;
+}
+
+int ossl_quic_sstream_append(QUIC_SSTREAM *qss,
+ const unsigned char *buf,
+ size_t buf_len,
+ size_t *consumed)
+{
+ size_t l, consumed_ = 0;
+ UINT_RANGE r;
+ struct ring_buf old_ring_buf = qss->ring_buf;
+
+ if (qss->have_final_size) {
+ *consumed = 0;
+ return 0;
+ }
+
+ /*
+ * Note: It is assumed that ossl_quic_sstream_append will be called during a
+ * call to e.g. SSL_write and this function is therefore designed to support
+ * such semantics. In particular, the buffer pointed to by buf is only
+ * assumed to be valid for the duration of this call, therefore we must copy
+ * the data here. We will later copy-and-encrypt the data during packet
+ * encryption, so this is a two-copy design. Supporting a one-copy design in
+ * the future will require applications to use a different kind of API.
+ * Supporting such changes in future will require corresponding enhancements
+ * to this code.
+ */
+ while (buf_len > 0) {
+ l = ring_buf_push(&qss->ring_buf, buf, buf_len);
+ if (l == 0)
+ break;
+
+ buf += l;
+ buf_len -= l;
+ consumed_ += l;
+ }
+
+ if (consumed_ > 0) {
+ r.