author	Ciara Loftus <ciara.loftus@intel.com>	2020-07-08 07:28:33 +0000
committer	Alexei Starovoitov <ast@kernel.org>	2020-07-13 15:32:56 -0700
commit	8aa5a33578e9685d06020bd10d1637557423e945 (patch)
tree	848b7defe21085b2fbd44cc2acc42a2a769c01d4	/net/xdp/xsk.c
parent	24a38b7c0c2c17f7b86c29f82beb7f2779704ca3 (diff)
xsk: Add new statistics
It can be useful for the user to know the reason behind a dropped packet. Introduce new counters which track drops on the receive path caused by:
1. rx ring being full
2. fill ring being empty
Also, on the tx path introduce a counter which tracks the number of times we attempt to pull from the tx ring when it is empty.

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200708072835.4427-2-ciara.loftus@intel.com
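For reference, a minimal userspace sketch of how a consumer reads these counters back. It assumes 'fd' is a bound AF_XDP socket and that <linux/if_xdp.h> carries the extended struct xdp_statistics fields (rx_ring_full, rx_fill_ring_empty_descs, tx_ring_empty_descs) added by the companion uapi patch in this series; dump_xsk_stats is an illustrative name, not part of this patch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283	/* not exported by older libc headers */
#endif

/* Dump the XDP_STATISTICS counters for an AF_XDP socket.
 * 'fd' is assumed to come from socket(AF_XDP, SOCK_RAW, 0) and to be
 * bound; error handling is kept minimal for brevity. */
static int dump_xsk_stats(int fd)
{
	struct xdp_statistics stats = {0};
	socklen_t optlen = sizeof(stats);

	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
		return -1;

	printf("rx_dropped:               %llu\n",
	       (unsigned long long)stats.rx_dropped);
	printf("rx_invalid_descs:         %llu\n",
	       (unsigned long long)stats.rx_invalid_descs);
	printf("tx_invalid_descs:         %llu\n",
	       (unsigned long long)stats.tx_invalid_descs);

	/* The kernel writes back the size it actually filled in, so the
	 * new counters are only valid when the full struct came back. */
	if (optlen >= sizeof(struct xdp_statistics)) {
		printf("rx_ring_full:             %llu\n",
		       (unsigned long long)stats.rx_ring_full);
		printf("rx_fill_ring_empty_descs: %llu\n",
		       (unsigned long long)stats.rx_fill_ring_empty_descs);
		printf("tx_ring_empty_descs:      %llu\n",
		       (unsigned long long)stats.tx_ring_empty_descs);
	}
	return 0;
}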
Diffstat (limited to 'net/xdp/xsk.c')
-rw-r--r--	net/xdp/xsk.c	36
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3700266229f6..26e3bba8c204 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 	addr = xp_get_handle(xskb);
 	err = xskq_prod_reserve_desc(xs->rx, addr, len);
 	if (err) {
-		xs->rx_dropped++;
+		xs->rx_queue_full++;
 		return err;
 	}
 
@@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
-		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
+		if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+			xs->tx->queue_empty_descs++;
 			continue;
+		}
 
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
@@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk)
 		sent_frame = true;
 	}
 
+	xs->tx->queue_empty_descs++;
+
 out:
 	if (sent_frame)
 		sk->sk_write_space(sk);
@@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
 	ring->desc = offsetof(struct xdp_umem_ring, desc);
 }
 
+struct xdp_statistics_v1 {
+	__u64 rx_dropped;
+	__u64 rx_invalid_descs;
+	__u64 tx_invalid_descs;
+};
+
 static int xsk_getsockopt(struct socket *sock, int level, int optname,
 			  char __user *optval, int __user *optlen)
 {
@@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
 	case XDP_STATISTICS:
 	{
 		struct xdp_statistics stats;
+		bool extra_stats = true;
+		size_t stats_size;
 
-		if (len < sizeof(stats))
+		if (len < sizeof(struct xdp_statistics_v1)) {
 			return -EINVAL;
+		} else if (len < sizeof(stats)) {
+			extra_stats = false;
+			stats_size = sizeof(struct xdp_statistics_v1);
+		} else {
+			stats_size = sizeof(stats);
+		}
 
 		mutex_lock(&xs->mutex);
 		stats.rx_dropped = xs->rx_dropped;
+		if (extra_stats) {
+			stats.rx_ring_full = xs->rx_queue_full;
+			stats.rx_fill_ring_empty_descs =
+				xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+		} else {
+			stats.rx_dropped += xs->rx_queue_full;
+		}
 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
 		mutex_unlock(&xs->mutex);
 
-		if (copy_to_user(optval, &stats, sizeof(stats)))
+		if (copy_to_user(optval, &stats, stats_size))
 			return -EFAULT;
-		if (put_user(sizeof(stats), optlen))
+		if (put_user(stats_size, optlen))
 			return -EFAULT;
 
 		return 0;
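The length-based check in the hunk above is what preserves backward compatibility: a binary built against the original three-field struct passes the smaller optlen, gets only the v1 fields back, and still sees every drop because rx_queue_full is folded into rx_dropped before the copy. A sketch of that legacy caller, with the struct layout mirroring xdp_statistics_v1 above (xdp_statistics_legacy and dump_legacy_stats are illustrative names):

#include <sys/socket.h>
#include <linux/types.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Layout an old binary would have compiled in, mirroring the
 * kernel's compatibility definition of xdp_statistics_v1. */
struct xdp_statistics_legacy {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int dump_legacy_stats(int fd, struct xdp_statistics_legacy *out)
{
	socklen_t optlen = sizeof(*out);

	/* In the kernel, len < sizeof(struct xdp_statistics), so only
	 * the v1 fields are copied out and rx_queue_full is added into
	 * rx_dropped before the copy_to_user(). */
	if (getsockopt(fd, SOL_XDP, XDP_STATISTICS, out, &optlen))
		return -1;

	/* put_user(stats_size, optlen) reports the v1 size back. */
	return optlen == sizeof(*out) ? 0 : -1;
}

Anything shorter than the v1 struct still fails with -EINVAL, so a caller never receives a silently truncated reply.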