sched_ext: idle: Introduce SCX_OPS_BUILTIN_IDLE_PER_NODE
author		Andrea Righi <arighi@nvidia.com>
		Fri, 14 Feb 2025 19:40:05 +0000 (20:40 +0100)
committer	Tejun Heo <tj@kernel.org>
		Sun, 16 Feb 2025 16:52:20 +0000 (06:52 -1000)
Add the new scheduler flag SCX_OPS_BUILTIN_IDLE_PER_NODE, which allows
BPF schedulers to choose between using a single global flat idle cpumask
and multiple per-node idle cpumasks.

This only introduces the flag and the mechanism to enable/disable this
feature without affecting any scheduling behavior.
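
For illustration only (not part of this patch; "pernode_ops" and
"pernode" are placeholder names, and all other callbacks are omitted),
a BPF scheduler would typically opt in by setting the flag in its
struct_ops definition:

  SEC(".struct_ops.link")
  struct sched_ext_ops pernode_ops = {
          /* Request per-node idle cpumasks instead of the global one. */
          .flags  = SCX_OPS_BUILTIN_IDLE_PER_NODE,
          .name   = "pernode",
  };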

Cc: Yury Norov [NVIDIA] <yury.norov@gmail.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Reviewed-by: Yury Norov [NVIDIA] <yury.norov@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/sched/ext.c
kernel/sched/ext_idle.c
kernel/sched/ext_idle.h
tools/sched_ext/include/scx/compat.h

index 7c17e05ed15b1410827146b17ba51fb0406ad215..330a359d793011701ce8fc66dda599af36296a02 100644 (file)
@@ -154,6 +154,12 @@ enum scx_ops_flags {
         */
        SCX_OPS_ALLOW_QUEUED_WAKEUP     = 1LLU << 5,
 
+       /*
+        * If set, enable per-node idle cpumasks. If clear, use a single global
+        * flat idle cpumask.
+        */
+       SCX_OPS_BUILTIN_IDLE_PER_NODE   = 1LLU << 6,
+
        /*
         * CPU cgroup support flags
         */
@@ -165,6 +171,7 @@ enum scx_ops_flags {
                                  SCX_OPS_ENQ_MIGRATION_DISABLED |
                                  SCX_OPS_ALLOW_QUEUED_WAKEUP |
                                  SCX_OPS_SWITCH_PARTIAL |
+                                 SCX_OPS_BUILTIN_IDLE_PER_NODE |
                                  SCX_OPS_HAS_CGROUP_WEIGHT,
 };
 
@@ -3427,7 +3434,7 @@ static void handle_hotplug(struct rq *rq, bool online)
        atomic_long_inc(&scx_hotplug_seq);
 
        if (scx_enabled())
-               scx_idle_update_selcpu_topology();
+               scx_idle_update_selcpu_topology(&scx_ops);
 
        if (online && SCX_HAS_OP(cpu_online))
                SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
@@ -5228,6 +5235,16 @@ static int validate_ops(const struct sched_ext_ops *ops)
                return -EINVAL;
        }
 
+       /*
+        * SCX_OPS_BUILTIN_IDLE_PER_NODE requires built-in CPU idle
+        * selection policy to be enabled.
+        */
+       if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
+           (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
+               scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
+               return -EINVAL;
+       }
+
        return 0;
 }
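
To make the rule above concrete (hypothetical fragments, my_update_idle()
is a placeholder): implementing ops.update_idle() without
SCX_OPS_KEEP_BUILTIN_IDLE disables the built-in idle tracking, so
combining it with per-node idle cpumasks is rejected:

  /* Rejected by validate_ops(): built-in idle tracking is disabled. */
  struct sched_ext_ops bad_ops = {
          .update_idle    = (void *)my_update_idle,
          .flags          = SCX_OPS_BUILTIN_IDLE_PER_NODE,
  };

  /* Accepted: SCX_OPS_KEEP_BUILTIN_IDLE keeps the built-in tracking
   * active alongside the custom update_idle() callback.
   */
  struct sched_ext_ops good_ops = {
          .update_idle    = (void *)my_update_idle,
          .flags          = SCX_OPS_BUILTIN_IDLE_PER_NODE |
                            SCX_OPS_KEEP_BUILTIN_IDLE,
  };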
 
@@ -5352,7 +5369,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                        static_branch_enable_cpuslocked(&scx_has_op[i]);
 
        check_hotplug_seq(ops);
-       scx_idle_update_selcpu_topology();
+       scx_idle_update_selcpu_topology(ops);
 
        cpus_read_unlock();
 
index ed1804506585bdda9265d6b8f62dd255803b594c..0912f94b95cdcc7aa5ca15a89fbc9365fd92f1d4 100644 (file)
@@ -14,6 +14,9 @@
 /* Enable/disable built-in idle CPU selection policy */
 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 
+/* Enable/disable per-node idle cpumasks */
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_CPUMASK_OFFSTACK
 #define CL_ALIGNED_IF_ONSTACK
@@ -204,7 +207,7 @@ static bool llc_numa_mismatch(void)
  * CPU belongs to a single LLC domain, and that each LLC domain is entirely
  * contained within a single NUMA node.
  */
-void scx_idle_update_selcpu_topology(void)
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
 {
        bool enable_llc = false, enable_numa = false;
        unsigned int nr_cpus;
@@ -237,13 +240,19 @@ void scx_idle_update_selcpu_topology(void)
         * If all CPUs belong to the same NUMA node and the same LLC domain,
         * enabling both NUMA and LLC optimizations is unnecessary, as checking
         * for an idle CPU in the same domain twice is redundant.
+        *
+        * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
+        * optimization, as idle CPUs are naturally selected within specific
+        * NUMA nodes by querying the corresponding per-node cpumask.
         */
-       nr_cpus = numa_weight(cpu);
-       if (nr_cpus > 0) {
-               if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
-                       enable_numa = true;
-               pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
-                        cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+       if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
+               nr_cpus = numa_weight(cpu);
+               if (nr_cpus > 0) {
+                       if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+                               enable_numa = true;
+                       pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+                                cpumask_pr_args(numa_span(cpu)), nr_cpus);
+               }
        }
        rcu_read_unlock();
 
@@ -530,6 +539,11 @@ void scx_idle_enable(struct sched_ext_ops *ops)
        }
        static_branch_enable(&scx_builtin_idle_enabled);
 
+       if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
+               static_branch_enable(&scx_builtin_idle_per_node);
+       else
+               static_branch_disable(&scx_builtin_idle_per_node);
+
 #ifdef CONFIG_SMP
        /*
         * Consider all online cpus idle. Should converge to the actual state
@@ -543,6 +557,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
 void scx_idle_disable(void)
 {
        static_branch_disable(&scx_builtin_idle_enabled);
+       static_branch_disable(&scx_builtin_idle_per_node);
 }
 
 /********************************************************************************
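
As a hypothetical sketch (not part of this patch), code that later
consumes the two modes would typically test the new key with the usual
static-branch helpers, for example via a helper like the one below,
keeping the check nearly free when the flag is not set:

  static inline bool scx_idle_per_node(void)
  {
          return static_branch_unlikely(&scx_builtin_idle_per_node);
  }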
index bbac0fd9a5dddad496d779cb7409b50d6216d535..339b6ec9c4cb7ac095e4608bbd7a5739380f44a4 100644 (file)
 struct sched_ext_ops;
 
 #ifdef CONFIG_SMP
-void scx_idle_update_selcpu_topology(void);
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
 void scx_idle_init_masks(void);
 bool scx_idle_test_and_clear_cpu(int cpu);
 s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags);
 #else /* !CONFIG_SMP */
-static inline void scx_idle_update_selcpu_topology(void) {}
+static inline void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops) {}
 static inline void scx_idle_init_masks(void) {}
 static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
 static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
index b50280e2ba2bacbd4e4cd447fe2b064fba402fda..d63cf40be8eeedf680409927ce8752d3d3f7844f 100644 (file)
@@ -109,6 +109,9 @@ static inline bool __COMPAT_struct_has_field(const char *type, const char *field
 #define SCX_OPS_SWITCH_PARTIAL                                                 \
        __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")
 
+#define SCX_OPS_BUILTIN_IDLE_PER_NODE                                          \
+       __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_BUILTIN_IDLE_PER_NODE")
+
 static inline long scx_hotplug_seq(void)
 {
        int fd;
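
For illustration (hypothetical loader snippet; "skel" and "my_ops" are
placeholder names), user-space code can set the flag unconditionally:
__COMPAT_ENUM_OR_ZERO() evaluates to 0 on kernels that predate
SCX_OPS_BUILTIN_IDLE_PER_NODE, so the scheduler still loads there and
simply keeps using the global idle cpumask:

  skel->struct_ops.my_ops->flags |= SCX_OPS_BUILTIN_IDLE_PER_NODE;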