author	Xuan Zhuo <xuanzhuo@linux.alibaba.com>	2020-12-01 21:56:58 +0800
committer	Daniel Borkmann <daniel@iogearbox.net>	2020-12-03 01:14:38 +0100
commit	3413f04141aa440c71da187755e8e22f5093ce83 (patch)
tree	05f9cc0aa60f5e1bace31ef5778090229b28e8dc
parent	f5da54187e33dce9bea63170667dbb0ca8d98194 (diff)
xsk: Change the tx writeable condition
Modify the tx writeable condition from "the tx queue is not full" to "the
number of outstanding tx entries is no more than half the total number of
entries". Because the tx queue is not-full for only a very short time, the
old condition generated a large number of EPOLLOUT events and caused a
large number of process wakeups.

Fixes: 35fcde7f8deb ("xsk: support for Tx")
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/508fef55188d4e1160747ead64c6dcda36735880.1606555939.git.xuanzhuo@linux.alibaba.com
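To see why the old level-triggered condition thrashes, consider a typical
userspace transmit loop that blocks in poll() until the socket is writeable.
The sketch below is illustrative only and not part of this patch;
fill_tx_ring() is a hypothetical application helper standing in for
descriptor enqueue plus a sendto() kick.

/* Illustrative userspace sketch (not part of this patch): a tx loop
 * that blocks in poll() until the AF_XDP socket reports EPOLLOUT.
 * Before this patch, EPOLLOUT fired whenever a single tx slot was
 * free, waking the process once per completion; after it, EPOLLOUT
 * is reported only once at least half the ring is available, so each
 * wakeup can batch many descriptors.
 */
#include <poll.h>

void fill_tx_ring(int xsk_fd);	/* hypothetical: enqueue descs + kick */

void tx_loop(int xsk_fd)
{
	struct pollfd pfd = {
		.fd = xsk_fd,
		.events = POLLOUT,
	};

	while (poll(&pfd, 1, -1) >= 0) {
		if (pfd.revents & POLLOUT)
			fill_tx_ring(xsk_fd);
	}
}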
-rw-r--r--	net/xdp/xsk.c	16
-rw-r--r--	net/xdp/xsk_queue.h	6
2 files changed, 19 insertions, 3 deletions
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9bbfd8adbb73..62504471fd20 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
 	return 0;
 }
 
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+		return false;
+
+	return true;
+}
+
 static bool xsk_is_bound(struct xdp_sock *xs)
 {
 	if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 		__xskq_cons_release(xs->tx);
-		xs->sk.sk_write_space(&xs->sk);
+		if (xsk_tx_writeable(xs))
+			xs->sk.sk_write_space(&xs->sk);
 	}
 	rcu_read_unlock();
 }
@@ -436,7 +445,8 @@ static int xsk_generic_xmit(struct sock *sk)
 
 out:
 	if (sent_frame)
-		sk->sk_write_space(sk);
+		if (xsk_tx_writeable(xs))
+			sk->sk_write_space(sk);
 
 	mutex_unlock(&xs->mutex);
 	return err;
@@ -493,7 +503,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (xs->tx && !xskq_cons_is_full(xs->tx))
+	if (xs->tx && xsk_tx_writeable(xs))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 
 	return mask;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cdb9cf3cd136..9e71b9f27679 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -264,6 +264,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
 		q->nentries;
 }
 
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+	/* No barriers needed since data is not accessed */
+	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
 /* Functions for producers */
 
 static inline bool xskq_prod_is_full(struct xsk_queue *q)
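For reference, xskq_cons_present_entries() relies on the producer and
consumer indices being free-running u32 counters, so plain unsigned
subtraction yields the number of outstanding entries even after the
producer index wraps past UINT32_MAX. A minimal standalone check of that
arithmetic (an illustration of the counter scheme, not kernel code):

/* Standalone sketch of the free-running-counter arithmetic behind
 * xskq_cons_present_entries(): unsigned subtraction is wraparound-safe.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t present_entries(uint32_t producer, uint32_t consumer)
{
	return producer - consumer;	/* distance modulo 2^32 */
}

int main(void)
{
	/* Normal case: producer is 3 entries ahead of consumer. */
	assert(present_entries(10, 7) == 3);

	/* Producer has wrapped past UINT32_MAX; still 4 outstanding. */
	assert(present_entries(2, UINT32_MAX - 1) == 4);

	return 0;
}

With that in hand, the new writeable test is simply
present_entries <= nentries / 2: the socket reports EPOLLOUT only while at
least half of the tx ring is free.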