cpufreq/sched: Move cpufreq-specific EAS checks to cpufreq
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 6 May 2025 20:37:15 +0000 (22:37 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Wed, 7 May 2025 19:17:56 +0000 (21:17 +0200)
Doing cpufreq-specific EAS checks that require accessing policy
internals directly from sched_is_eas_possible() is a bit unfortunate,
so introduce cpufreq_ready_for_eas() in cpufreq, move those checks
into that new function and make sched_is_eas_possible() call it.

While at it, address a possible race between the EAS governor check
and governor change by doing the former under the policy rwsem.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Christian Loehle <christian.loehle@arm.com>
Tested-by: Christian Loehle <christian.loehle@arm.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://patch.msgid.link/2317800.iZASKD2KPV@rjwysocki.net
drivers/cpufreq/cpufreq.c
include/linux/cpufreq.h
kernel/sched/topology.c

index 21fa733a2fe8952272e0b0acd2c6389875b3020b..731ecfc178d87bcf4cfaf7e3e462f135b6d4a809 100644 (file)
@@ -3056,6 +3056,38 @@ static int __init cpufreq_core_init(void)
 
        return 0;
 }
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+       struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+       policy = cpufreq_cpu_get(cpu);
+       if (!policy) {
+               pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+               return false;
+       }
+
+       guard(cpufreq_policy_read)(policy);
+
+       return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+       unsigned int cpu;
+
+       /* Do not attempt EAS if schedutil is not being used. */
+       for_each_cpu(cpu, cpu_mask) {
+               if (!cpufreq_policy_is_good_for_eas(cpu)) {
+                       pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+                                cpumask_pr_args(cpu_mask));
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 module_param(off, int, 0444);
 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
 core_initcall(cpufreq_core_init);
index 1d2c6c6d895246951b020f7f0720856d38272514..95f3807c8c551dd9bb4d30e7d05e3f3ae36ec803 100644 (file)
@@ -1237,6 +1237,8 @@ void cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency);
 
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
 static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
 {
        dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
index 902d37f922b4029a13dac0bc8d029bdfadd93856..580c28fed5395ad104b3e99f37ecddaaaf979afe 100644 (file)
@@ -212,8 +212,6 @@ static bool sched_energy_update;
 static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
 {
        bool any_asym_capacity = false;
-       struct cpufreq_policy *policy;
-       bool policy_is_ready;
        int i;
 
        /* EAS is enabled for asymmetric CPU capacity topologies. */
@@ -248,25 +246,12 @@ static bool sched_is_eas_possible(const struct cpumask *cpu_mask)
                return false;
        }
 
-       /* Do not attempt EAS if schedutil is not being used. */
-       for_each_cpu(i, cpu_mask) {
-               policy = cpufreq_cpu_get(i);
-               if (!policy) {
-                       if (sched_debug()) {
-                               pr_info("rd %*pbl: Checking EAS, cpufreq policy not set for CPU: %d",
-                                       cpumask_pr_args(cpu_mask), i);
-                       }
-                       return false;
-               }
-               policy_is_ready = sugov_is_governor(policy);
-               cpufreq_cpu_put(policy);
-               if (!policy_is_ready) {
-                       if (sched_debug()) {
-                               pr_info("rd %*pbl: Checking EAS, schedutil is mandatory\n",
-                                       cpumask_pr_args(cpu_mask));
-                       }
-                       return false;
+       if (!cpufreq_ready_for_eas(cpu_mask)) {
+               if (sched_debug()) {
+                       pr_info("rd %*pbl: Checking EAS: cpufreq is not ready\n",
+                               cpumask_pr_args(cpu_mask));
                }
+               return false;
        }
 
        return true;