Merge branches 'pm-cpuidle', 'pm-sleep' and 'pm-powercap'
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index a9e36bbea4faa8017adc2478585833ec199bab0a..4b986c044741eda32d5e93f9457c7b606ccfacc0 100644
@@ -173,7 +173,6 @@ struct vid_data {
  *                     based on the MSR_IA32_MISC_ENABLE value and whether or
  *                     not the maximum reported turbo P-state is different from
  *                     the maximum reported non-turbo one.
- * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:      Minimum capacity limit in percent of the maximum turbo
  *                     P-state capacity.
  * @max_perf_pct:      Maximum capacity limit in percent of the maximum turbo
@@ -182,7 +181,6 @@ struct vid_data {
 struct global_params {
        bool no_turbo;
        bool turbo_disabled;
-       bool turbo_disabled_mf;
        int max_perf_pct;
        int min_perf_pct;
 };
@@ -213,7 +211,7 @@ struct global_params {
  * @epp_policy:                Last saved policy used to set EPP/EPB
  * @epp_default:       Power on default HWP energy performance
  *                     preference/bias
- * @epp_cached         Cached HWP energy-performance preference value
+ * @epp_cached:                Cached HWP energy-performance preference value
  * @hwp_req_cached:    Cached value of the last HWP Request MSR
  * @hwp_cap_cached:    Cached value of the last HWP Capabilities MSR
  * @last_io_update:    Last time when IO wake flag was set
@@ -594,12 +592,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
        cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
 }
 
-static inline void update_turbo_state(void)
+static bool turbo_is_disabled(void)
 {
        u64 misc_en;
 
        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
-       global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+
+       return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
 }
 
 static int min_perf_pct_min(void)
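
The bit that turbo_is_disabled() now returns is the same one userspace can observe
through the msr driver. A minimal illustrative sketch, not part of the driver,
assuming the "msr" module is loaded and root access; the constants mirror
MSR_IA32_MISC_ENABLE (0x1a0) and MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT (38)
from msr-index.h:

	/* Userspace analogue of the check turbo_is_disabled() performs. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define IA32_MISC_ENABLE	0x1a0	/* MSR_IA32_MISC_ENABLE */
	#define TURBO_DISABLE_BIT	38	/* turbo-disable bit */

	int main(void)
	{
		uint64_t misc_en;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		/* The msr driver uses the file offset as the MSR index. */
		if (fd < 0 || pread(fd, &misc_en, sizeof(misc_en),
				    IA32_MISC_ENABLE) != sizeof(misc_en)) {
			perror("msr read");
			return 1;
		}
		printf("turbo %s by firmware\n",
		       (misc_en & (1ULL << TURBO_DISABLE_BIT)) ?
				"disabled" : "available");
		close(fd);
		return 0;
	}
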
@@ -1154,12 +1153,15 @@ static void intel_pstate_update_policies(void)
 static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
                                           struct cpufreq_policy *policy)
 {
-       policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+       intel_pstate_get_hwp_cap(cpudata);
+
+       policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
                        cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
        refresh_frequency_limits(policy);
 }
 
-static void intel_pstate_update_max_freq(unsigned int cpu)
+static void intel_pstate_update_limits(unsigned int cpu)
 {
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 
@@ -1171,25 +1173,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
        cpufreq_cpu_release(policy);
 }
 
-static void intel_pstate_update_limits(unsigned int cpu)
+static void intel_pstate_update_limits_for_all(void)
 {
-       mutex_lock(&intel_pstate_driver_lock);
-
-       update_turbo_state();
-       /*
-        * If turbo has been turned on or off globally, policy limits for
-        * all CPUs need to be updated to reflect that.
-        */
-       if (global.turbo_disabled_mf != global.turbo_disabled) {
-               global.turbo_disabled_mf = global.turbo_disabled;
-               arch_set_max_freq_ratio(global.turbo_disabled);
-               for_each_possible_cpu(cpu)
-                       intel_pstate_update_max_freq(cpu);
-       } else {
-               cpufreq_update_policy(cpu);
-       }
+       int cpu;
 
-       mutex_unlock(&intel_pstate_driver_lock);
+       for_each_possible_cpu(cpu)
+               intel_pstate_update_limits(cpu);
 }
 
 /************************** sysfs begin ************************/
@@ -1287,11 +1276,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
                return -EAGAIN;
        }
 
-       update_turbo_state();
-       if (global.turbo_disabled)
-               ret = sprintf(buf, "%u\n", global.turbo_disabled);
-       else
-               ret = sprintf(buf, "%u\n", global.no_turbo);
+       ret = sprintf(buf, "%u\n", global.no_turbo);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1302,32 +1287,34 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
                              const char *buf, size_t count)
 {
        unsigned int input;
-       int ret;
+       bool no_turbo;
 
-       ret = sscanf(buf, "%u", &input);
-       if (ret != 1)
+       if (sscanf(buf, "%u", &input) != 1)
                return -EINVAL;
 
        mutex_lock(&intel_pstate_driver_lock);
 
        if (!intel_pstate_driver) {
-               mutex_unlock(&intel_pstate_driver_lock);
-               return -EAGAIN;
+               count = -EAGAIN;
+               goto unlock_driver;
        }
 
-       mutex_lock(&intel_pstate_limits_lock);
+       no_turbo = !!clamp_t(int, input, 0, 1);
+
+       if (no_turbo == global.no_turbo)
+               goto unlock_driver;
 
-       update_turbo_state();
        if (global.turbo_disabled) {
                pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
-               mutex_unlock(&intel_pstate_limits_lock);
-               mutex_unlock(&intel_pstate_driver_lock);
-               return -EPERM;
+               count = -EPERM;
+               goto unlock_driver;
        }
 
-       global.no_turbo = clamp_t(int, input, 0, 1);
+       WRITE_ONCE(global.no_turbo, no_turbo);
+
+       mutex_lock(&intel_pstate_limits_lock);
 
-       if (global.no_turbo) {
+       if (no_turbo) {
                struct cpudata *cpu = all_cpu_data[0];
                int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
 
@@ -1338,9 +1325,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
        mutex_unlock(&intel_pstate_limits_lock);
 
-       intel_pstate_update_policies();
-       arch_set_max_freq_ratio(global.no_turbo);
+       intel_pstate_update_limits_for_all();
+       arch_set_max_freq_ratio(no_turbo);
 
+unlock_driver:
        mutex_unlock(&intel_pstate_driver_lock);
 
        return count;
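
The store/read pairing above (WRITE_ONCE() in store_no_turbo(), READ_ONCE() at the
use sites) is the usual pattern for a flag that is written under a lock but read
locklessly in hot paths. A userspace sketch of the same idea, with C11 relaxed
atomics standing in for the kernel macros; the names store_flag() and max_perf()
are illustrative only and do not come from the driver:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool no_turbo;	/* stands in for global.no_turbo */

	static void store_flag(bool val)	/* writer, serialized by a mutex */
	{
		pthread_mutex_lock(&driver_lock);
		if (atomic_load_explicit(&no_turbo, memory_order_relaxed) != val)
			atomic_store_explicit(&no_turbo, val,
					      memory_order_relaxed); /* ~ WRITE_ONCE() */
		pthread_mutex_unlock(&driver_lock);
	}

	static int max_perf(int max, int turbo)	/* lockless reader, ~ READ_ONCE() */
	{
		return atomic_load_explicit(&no_turbo, memory_order_relaxed) ?
				max : turbo;
	}

	int main(void)
	{
		store_flag(true);
		printf("cap: %d\n", max_perf(2400, 3600));
		return 0;
	}

Build with "cc -pthread". Relaxed ordering is the closest C11 analogue here: the
readers only need a single, tear-free snapshot of the flag, not ordering against
other data.
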
@@ -1621,7 +1609,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
 
        if (policy) {
-               intel_pstate_get_hwp_cap(cpudata);
                __intel_pstate_update_max_freq(cpudata, policy);
 
                cpufreq_cpu_release(policy);
@@ -1774,7 +1761,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
        u32 vid;
 
        val = (u64)pstate << 8;
-       if (global.no_turbo && !global.turbo_disabled)
+       if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        vid_fp = cpudata->vid.min + mul_fp(
@@ -1939,7 +1926,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
        u64 val;
 
        val = (u64)pstate << 8;
-       if (global.no_turbo && !global.turbo_disabled)
+       if (READ_ONCE(global.no_turbo) && !global.turbo_disabled)
                val |= (u64)1 << 32;
 
        return val;
@@ -2012,14 +1999,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
        intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }
 
-static void intel_pstate_max_within_limits(struct cpudata *cpu)
-{
-       int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
-
-       update_turbo_state();
-       intel_pstate_set_pstate(cpu, pstate);
-}
-
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
        int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2245,7 +2224,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
 
        sample->busy_scaled = busy_frac * 100;
 
-       target = global.no_turbo || global.turbo_disabled ?
+       target = READ_ONCE(global.no_turbo) ?
                        cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
        target += target >> 2;
        target = mul_fp(target, busy_frac);
@@ -2289,8 +2268,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
        struct sample *sample;
        int target_pstate;
 
-       update_turbo_state();
-
        target_pstate = get_target_pstate(cpu);
        target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
        trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2420,6 +2397,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
+#ifdef CONFIG_ACPI
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
        X86_MATCH(BROADWELL_D,          core_funcs),
        X86_MATCH(BROADWELL_X,          core_funcs),
@@ -2428,6 +2406,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
        X86_MATCH(SAPPHIRERAPIDS_X,     core_funcs),
        {}
 };
+#endif
 
 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
        X86_MATCH(KABYLAKE,             core_funcs),
@@ -2509,7 +2488,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 
 static int intel_pstate_get_max_freq(struct cpudata *cpu)
 {
-       return global.turbo_disabled || global.no_turbo ?
+       return READ_ONCE(global.no_turbo) ?
                        cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 }
 
@@ -2594,12 +2573,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
 
        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+               int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
+
                /*
                 * NOHZ_FULL CPUs need this as the governor callback may not
                 * be invoked on them.
                 */
                intel_pstate_clear_update_util_hook(policy->cpu);
-               intel_pstate_max_within_limits(cpu);
+               intel_pstate_set_pstate(cpu, pstate);
        } else {
                intel_pstate_set_update_util_hook(policy->cpu);
        }
@@ -2642,10 +2623,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
 {
        int max_freq;
 
-       update_turbo_state();
        if (hwp_active) {
                intel_pstate_get_hwp_cap(cpu);
-               max_freq = global.no_turbo || global.turbo_disabled ?
+               max_freq = READ_ONCE(global.no_turbo) ?
                                cpu->pstate.max_freq : cpu->pstate.turbo_freq;
        } else {
                max_freq = intel_pstate_get_max_freq(cpu);
@@ -2739,9 +2719,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_freq;
-       update_turbo_state();
-       global.turbo_disabled_mf = global.turbo_disabled;
-       policy->cpuinfo.max_freq = global.turbo_disabled ?
+       policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
                        cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
        policy->min = policy->cpuinfo.min_freq;
@@ -2906,8 +2884,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
        struct cpufreq_freqs freqs;
        int target_pstate;
 
-       update_turbo_state();
-
        freqs.old = policy->cur;
        freqs.new = target_freq;
 
@@ -2929,8 +2905,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
        struct cpudata *cpu = all_cpu_data[policy->cpu];
        int target_pstate;
 
-       update_turbo_state();
-
        target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
 
        target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2948,9 +2922,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
        int old_pstate = cpu->pstate.current_pstate;
        int cap_pstate, min_pstate, max_pstate, target_pstate;
 
-       update_turbo_state();
-       cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
-                                            HWP_HIGHEST_PERF(hwp_cap);
+       cap_pstate = READ_ONCE(global.no_turbo) ?
+                                       HWP_GUARANTEED_PERF(hwp_cap) :
+                                       HWP_HIGHEST_PERF(hwp_cap);
 
        /* Optimization: Avoid unnecessary divisions. */
 
@@ -3136,6 +3110,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
 
        memset(&global, 0, sizeof(global));
        global.max_perf_pct = 100;
+       global.turbo_disabled = turbo_is_disabled();
+       global.no_turbo = global.turbo_disabled;
+
+       arch_set_max_freq_ratio(global.turbo_disabled);
 
        intel_pstate_driver = driver;
        ret = cpufreq_register_driver(intel_pstate_driver);