net: Restrict receive packets queuing to housekeeping CPUs
author Alex Belits <abelits@marvell.com>
Thu, 25 Jun 2020 22:34:43 +0000 (18:34 -0400)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 8 Jul 2020 09:39:02 +0000 (11:39 +0200)
With the existing implementation of store_rps_map(), packets are queued
in the receive path on the backlog queues of other CPUs irrespective of
whether those CPUs are isolated or not. This can add latency overhead to
any RT workload that is running on such an isolated CPU.

Ensure that store_rps_map() only uses available housekeeping CPUs for
storing the rps_map.
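
For illustration, a minimal user-space sketch of the resulting behaviour
(not part of the patch; the device name eth0, queue rx-0, and a boot
layout where CPUs 2-3 are isolated via isolcpus= are assumptions): a CPU
mask covering only isolated CPUs is now rejected with -EINVAL, while a
mask that still includes at least one housekeeping CPU is silently
reduced to its housekeeping subset before the rps_map is built.

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical device/queue; adjust to the system under test. */
		const char *path = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/*
		 * Mask 0xc selects CPUs 2-3. If both are isolated, the
		 * intersection with the housekeeping mask is empty and the
		 * write fails with EINVAL instead of letting packets be
		 * queued on the isolated CPUs' backlog queues.
		 */
		if (write(fd, "c", 1) < 0)
			printf("rps_cpus write rejected: %s\n", strerror(errno));
		else
			printf("rps_cpus mask accepted\n");

		close(fd);
		return 0;
	}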

Signed-off-by: Alex Belits <abelits@marvell.com>
Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200625223443.2684-4-nitesh@redhat.com
net/core/net-sysfs.c

index e353b822bb1574e177438023e3139dc01d1d93af..677868fea3167aab1f0c1088731db89df0f4cb70 100644
@@ -11,6 +11,7 @@
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/isolation.h>
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
@@ -741,7 +742,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 {
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
-       int err, cpu, i;
+       int err, cpu, i, hk_flags;
        static DEFINE_MUTEX(rps_map_mutex);
 
        if (!capable(CAP_NET_ADMIN))
@@ -756,6 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                return err;
        }
 
+       hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+       cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+       if (cpumask_empty(mask)) {
+               free_cpumask_var(mask);
+               return -EINVAL;
+       }
+
        map = kzalloc(max_t(unsigned int,
                            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
                      GFP_KERNEL);
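
A note on the error handling above: a mask that still intersects the
housekeeping set is quietly narrowed to that intersection, so existing
configurations keep working with the isolated CPUs dropped; only a mask
made up entirely of isolated CPUs fails the write with -EINVAL, which
keeps the misconfiguration visible to the writer instead of installing
an rps_map that targets no usable CPU.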