sched/isolation: Prefer housekeeping CPU in local node
authorWanpeng Li <wanpengli@tencent.com>
Fri, 28 Jun 2019 08:51:41 +0000 (16:51 +0800)
committerIngo Molnar <mingo@kernel.org>
Thu, 25 Jul 2019 13:51:55 +0000 (15:51 +0200)
In a real production setup there will be housekeeping CPUs in each
node, so prefer to do housekeeping work from a CPU on the local node,
and fall back to the global online cpumask if no housekeeping CPU can
be found on the local node.
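A comment-annotated view of the resulting selection order (condensed
from the kernel/sched/isolation.c hunk below; the numbered comments are
editorial, the code mirrors the patch):

    int housekeeping_any_cpu(enum hk_flags flags)
    {
            int cpu;

            if (static_branch_unlikely(&housekeeping_overridden)) {
                    if (housekeeping_flags & flags) {
                            /* 1) try a housekeeping CPU on the local NUMA node */
                            cpu = sched_numa_find_closest(housekeeping_mask,
                                                          smp_processor_id());
                            if (cpu < nr_cpu_ids)
                                    return cpu;

                            /* 2) otherwise any online housekeeping CPU */
                            return cpumask_any_and(housekeeping_mask,
                                                   cpu_online_mask);
                    }
            }
            /* 3) no housekeeping override configured: current CPU is fine */
            return smp_processor_id();
    }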

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/1561711901-4755-2-git-send-email-wanpengli@tencent.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/isolation.c
kernel/sched/sched.h
kernel/sched/topology.c

index ccb28085b11418f539766b96d32901bf05e0a7f6..9fcb2a695a41289fe490c64a0656300a74fefb99 100644 (file)
@@ -22,9 +22,17 @@ EXPORT_SYMBOL_GPL(housekeeping_enabled);
 
 int housekeeping_any_cpu(enum hk_flags flags)
 {
-       if (static_branch_unlikely(&housekeeping_overridden))
-               if (housekeeping_flags & flags)
+       int cpu;
+
+       if (static_branch_unlikely(&housekeeping_overridden)) {
+               if (housekeeping_flags & flags) {
+                       cpu = sched_numa_find_closest(housekeeping_mask, smp_processor_id());
+                       if (cpu < nr_cpu_ids)
+                               return cpu;
+
                        return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+               }
+       }
        return smp_processor_id();
 }
 EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
index aaca0e7437765f0a8ecd824bee1a71ab1a75dd2a..16126efd14eda8a2182347615a305a24cd3e2551 100644 (file)
@@ -1262,16 +1262,18 @@ enum numa_topology_type {
 extern enum numa_topology_type sched_numa_topology_type;
 extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
-#endif
-
-#ifdef CONFIG_NUMA
 extern void sched_init_numa(void);
 extern void sched_domains_numa_masks_set(unsigned int cpu);
 extern void sched_domains_numa_masks_clear(unsigned int cpu);
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
 #else
 static inline void sched_init_numa(void) { }
 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+       return nr_cpu_ids;
+}
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
index f751ce0b783e57eff53f4d5feb73be1859d05cd6..4eea2c9bc732f0ca7c2b9ddb04a005308ad9f7a9 100644 (file)
@@ -1724,6 +1724,26 @@ void sched_domains_numa_masks_clear(unsigned int cpu)
        }
 }
 
+/*
+ * sched_numa_find_closest() - given the NUMA topology, find the cpu
+ *                             closest to @cpu from @cpus.
+ * @cpus: cpumask to find a cpu from
+ * @cpu: cpu to be close to
+ *
+ * returns: cpu, or nr_cpu_ids when nothing found.
+ */
+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+       int i, j = cpu_to_node(cpu);
+
+       for (i = 0; i < sched_domains_numa_levels; i++) {
+               cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
+               if (cpu < nr_cpu_ids)
+                       return cpu;
+       }
+       return nr_cpu_ids;
+}
+
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
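
Since the !CONFIG_NUMA stub of sched_numa_find_closest() simply returns
nr_cpu_ids, callers can follow the same pattern as housekeeping_any_cpu()
without any #ifdef: try the NUMA-aware lookup first and keep the existing
fallback. A minimal sketch of that pattern (hypothetical caller, not part
of this patch):

    /* Prefer a CPU from @mask on the local node, else any online CPU in @mask. */
    static int pick_cpu_near_me(const struct cpumask *mask)
    {
            int cpu = sched_numa_find_closest(mask, smp_processor_id());

            if (cpu < nr_cpu_ids)
                    return cpu;

            return cpumask_any_and(mask, cpu_online_mask);
    }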