From 07bbecb3410617816a99e76a2df7576507a0c8ad Mon Sep 17 00:00:00 2001
From: Alex Belits <abelits@marvell.com>
Date: Thu, 25 Jun 2020 18:34:43 -0400
Subject: net: Restrict receive packets queuing to housekeeping CPUs

With the existing implementation of store_rps_map(), packets are queued
in the receive path on the backlog queues of other CPUs irrespective of
whether they are isolated or not. This could add a latency overhead to
any RT workload that is running on the same CPU.

Ensure that store_rps_map() only uses available housekeeping CPUs for
storing the rps_map.

Signed-off-by: Alex Belits <abelits@marvell.com>
Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200625223443.2684-4-nitesh@redhat.com
---
 net/core/net-sysfs.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e353b822bb15..677868fea316 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -11,6 +11,7 @@
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/isolation.h>
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
@@ -741,7 +742,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 {
 	struct rps_map *old_map, *map;
 	cpumask_var_t mask;
-	int err, cpu, i;
+	int err, cpu, i, hk_flags;
 	static DEFINE_MUTEX(rps_map_mutex);
 
 	if (!capable(CAP_NET_ADMIN))
@@ -756,6 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 		return err;
 	}
 
+	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+	if (cpumask_empty(mask)) {
+		free_cpumask_var(mask);
+		return -EINVAL;
+	}
+
 	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)),
			    L1_CACHE_BYTES), GFP_KERNEL);
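
A quick sketch of the resulting behaviour from userspace (illustrative
only; the device name eth0 and the CPU layout are assumptions, not part
of the commit): on an 8-CPU system booted with isolcpus=2-7, CPUs 0-1
are the housekeeping CPUs, so a mask written to rps_cpus is now
intersected with them before the rps_map is stored:

    # Request CPUs 0-3 (mask 0xf): CPUs 2-3 are isolated, so only
    # housekeeping CPUs 0-1 end up in the stored rps_map.
    echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus

    # Request only isolated CPUs 2-3 (mask 0xc): the intersection with
    # the housekeeping mask is empty, so the write fails with -EINVAL.
    echo c > /sys/class/net/eth0/queues/rx-0/rps_cpus
    # write error: Invalid argument

Note the filtering is silent in the first case: the write succeeds, but
the isolated CPUs are simply dropped from the map; an error is returned
only when no housekeeping CPU at all remains in the requested mask.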