author	Magnus Karlsson <magnus.karlsson@intel.com>	2020-08-28 10:26:25 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>	2020-08-31 21:15:04 +0200
commit	b5aea28dca13456c1a08b9b2ef8a8b92598ac426 (patch)
tree	b4f2c35abc89d52d72e73339f301c56d00361635 /net/xdp/xsk_buff_pool.c
parent	9647c57b11e563f5b33a49ef72b347753917c21c (diff)
xsk: Add shared umem support between queue ids
Add support for sharing a umem between queue ids on the same device. This mode is invoked with the XDP_SHARED_UMEM bind flag. Previously, sharing was only supported within the same queue id and device, and the sockets shared one set of fill and completion rings. Note that when sharing a umem between queue ids, each socket needs its own fill ring and completion ring, created and tied to the socket before the bind with the XDP_SHARED_UMEM flag. This is required so that the single-producer single-consumer semantics of the rings can be upheld.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-12-git-send-email-magnus.karlsson@intel.com
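For illustration, a minimal userspace sketch of the new bind mode using the raw AF_XDP socket API. This is not part of the patch: umem_fd (the fd of the socket that owns the umem), ifindex, queue_id, and ring_size are placeholder parameters, and error handling is elided.

/* Sketch: bind a second AF_XDP socket to a new queue id while
 * sharing an existing umem. umem_fd is assumed to be the fd of
 * the socket that registered the umem; ring_size is an arbitrary
 * power-of-two descriptor count. Error handling elided.
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>
#include <string.h>

int bind_shared_umem(int umem_fd, int ifindex, __u32 queue_id)
{
	struct sockaddr_xdp sxdp;
	__u32 ring_size = 2048;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);

	/* The socket also needs an RX and/or TX ring to be bindable. */
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));

	/* Per the commit message: a socket sharing the umem on a new
	 * queue id must bring its own fill and completion rings, tied
	 * to the socket before bind(), so that single-producer
	 * single-consumer semantics hold per ring.
	 */
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_size, sizeof(ring_size));

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_SHARED_UMEM;
	sxdp.sxdp_shared_umem_fd = umem_fd;

	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
		return -1;
	return fd;
}

Without the two XDP_UMEM_*_RING setsockopt() calls, this bind now fails with -EINVAL in xp_assign_dev_shared() (the !pool->fq || !pool->cq check in the diff below). The xsk_socket__create_shared() helper added to libbpf alongside this kernel series performs the equivalent steps.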
Diffstat (limited to 'net/xdp/xsk_buff_pool.c')
-rw-r--r--	net/xdp/xsk_buff_pool.c	26
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 547eb41f7837..795d7c81c0ca 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -123,8 +123,8 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
}
}
-int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
-		  u16 queue_id, u16 flags)
+static int __xp_assign_dev(struct xsk_buff_pool *pool,
+			   struct net_device *netdev, u16 queue_id, u16 flags)
{
bool force_zc, force_copy;
struct netdev_bpf bpf;
@@ -193,6 +193,28 @@ err_unreg_pool:
return err;
}
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+		  u16 queue_id, u16 flags)
+{
+	return __xp_assign_dev(pool, dev, queue_id, flags);
+}
+
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+			 struct net_device *dev, u16 queue_id)
+{
+	u16 flags;
+
+	/* One fill and completion ring required for each queue id. */
+	if (!pool->fq || !pool->cq)
+		return -EINVAL;
+
+	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
+	if (pool->uses_need_wakeup)
+		flags |= XDP_USE_NEED_WAKEUP;
+
+	return __xp_assign_dev(pool, dev, queue_id, flags);
+}
+
+
 void xp_clear_dev(struct xsk_buff_pool *pool)
 {
 	if (!pool->netdev)