author    Magnus Karlsson <magnus.karlsson@intel.com>    2020-08-28 10:26:23 +0200
committer Daniel Borkmann <daniel@iogearbox.net>         2020-08-31 21:15:04 +0200
commit    8ef4e27eb3f03edfbfbe5657b8061f2a47757037
tree      657f2adf5f8665231c2b98d02e59aaf4c14551b3 /include/net/xsk_buff_pool.h
parent    921b68692abb4fd02237b6875b2056bc59435116
xsk: Rearrange internal structs for better performance
Rearrange the xdp_sock, xdp_umem and xsk_buff_pool structures so that
they get smaller and align better to the cache lines. In the previous
commits of this patch set, these structs have been reordered with the
focus on functionality and simplicity, not performance. This patch
improves throughput performance by around 3%.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-10-git-send-email-magnus.karlsson@intel.com
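To illustrate the layout trick outside the kernel tree, here is a minimal
userspace C sketch, not the real header: struct pool_layout, the CACHELINE
value of 64, and the plain aligned attribute are stand-ins for the kernel's
____cacheline_aligned_in_smp (which, on SMP builds, expands to an aligned
attribute of SMP_CACHE_BYTES). Marking the first data-path member this way
pushes the hot members onto their own cache line, away from rarely touched
control-path state.

/* Minimal userspace sketch, not kernel code. Assumes a 64-byte cache line. */
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed SMP cache-line size */

struct pool_layout {
	/* cold control-path members, grouped first */
	void *dev;
	void *netdev;
	unsigned int heads_cnt;
	unsigned short queue_id;

	/* hot data-path members start on a fresh cache line */
	void *fq __attribute__((aligned(CACHELINE)));
	void *cq;
};

int main(void)
{
	/* the 22 bytes of control-path data above are padded out to 64,
	 * so fq no longer shares a cache line with them */
	printf("fq offset: %zu\n", offsetof(struct pool_layout, fq));
	return 0;
}

On a built kernel, one way to confirm the resulting member offsets and
padding of the real struct is pahole -C xsk_buff_pool vmlinux.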
Diffstat (limited to 'include/net/xsk_buff_pool.h')
 include/net/xsk_buff_pool.h | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 356d0ac74eba..38d03a64c9ea 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -39,9 +39,22 @@ struct xsk_dma_map {
 };
 
 struct xsk_buff_pool {
-	struct xsk_queue *fq;
-	struct xsk_queue *cq;
+	/* Members only used in the control path first. */
+	struct device *dev;
+	struct net_device *netdev;
+	struct list_head xsk_tx_list;
+	/* Protects modifications to the xsk_tx_list */
+	spinlock_t xsk_tx_list_lock;
+	refcount_t users;
+	struct xdp_umem *umem;
+	struct work_struct work;
 	struct list_head free_list;
+	u32 heads_cnt;
+	u16 queue_id;
+
+	/* Data path members as close to free_heads at the end as possible. */
+	struct xsk_queue *fq ____cacheline_aligned_in_smp;
+	struct xsk_queue *cq;
 	/* For performance reasons, each buff pool has its own array of dma_pages
 	 * even when they are identical.
 	 */
@@ -51,25 +64,15 @@ struct xsk_buff_pool {
 	u64 addrs_cnt;
 	u32 free_list_cnt;
 	u32 dma_pages_cnt;
-	u32 heads_cnt;
 	u32 free_heads_cnt;
 	u32 headroom;
 	u32 chunk_size;
 	u32 frame_len;
-	u16 queue_id;
 	u8 cached_need_wakeup;
 	bool uses_need_wakeup;
 	bool dma_need_sync;
 	bool unaligned;
-	struct xdp_umem *umem;
 	void *addrs;
-	struct device *dev;
-	struct net_device *netdev;
-	struct list_head xsk_tx_list;
-	/* Protects modifications to the xsk_tx_list */
-	spinlock_t xsk_tx_list_lock;
-	refcount_t users;
-	struct work_struct work;
 	struct xdp_buff_xsk *free_heads[];
 };
 
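A hypothetical follow-up, not part of this patch: since the gain depends on
fq actually starting a new cache line, a compile-time assert can lock the
invariant in. struct pool_check and the 64-byte CACHELINE below are
illustrative stand-ins, not kernel code.

/* Standalone sketch: compile-time guard for the layout decision above. */
#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */

#define CACHELINE 64	/* assumed cache-line size, as above */

struct pool_check {
	void *dev;					/* control path */
	void *netdev;					/* control path */
	void *fq __attribute__((aligned(CACHELINE)));	/* data path */
	void *cq;					/* data path */
};

/* fails the build if a later edit pushes fq off a cache-line boundary */
static_assert(offsetof(struct pool_check, fq) % CACHELINE == 0,
	      "data-path members must start on a cache-line boundary");

int main(void) { return 0; }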