intel_pstate: Clean up get_target_pstate_use_performance()
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8a368d2ee25c52a5991d433816ddd437015577f6..b76a98dd9988b4f553ddbdd47b663f8720e5fc89 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -10,6 +10,8 @@
  * of the License.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
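Note: the pr_fmt() definition above is why the explicit "intel_pstate: " prefixes are dropped from the pr_* strings later in this patch; KBUILD_MODNAME expands to the module name, so for example:

    pr_info("HWP enabled\n");   /* logs "intel_pstate: HWP enabled" */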
 #include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #define ATOM_TURBO_RATIOS      0x66c
 #define ATOM_TURBO_VIDS                0x66d
 
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
+
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define EXT_BITS 6
+#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -64,12 +73,22 @@ static inline int ceiling_fp(int32_t x)
        return ret;
 }
 
+static inline u64 mul_ext_fp(u64 x, u64 y)
+{
+       return (x * y) >> EXT_FRAC_BITS;
+}
+
+static inline u64 div_ext_fp(u64 x, u64 y)
+{
+       return div64_u64(x << EXT_FRAC_BITS, y);
+}
+
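Note: with FRAC_BITS = 8 and EXT_BITS = 6, these helpers work in a fixed-point format with EXT_FRAC_BITS = 14 fractional bits. A minimal userspace sketch (not part of the patch, values hypothetical) of how div_ext_fp() and mul_ext_fp() later combine to turn an APERF/MPERF ratio into a percentage:

    #include <stdint.h>
    #include <stdio.h>

    #define EXT_FRAC_BITS 14                        /* FRAC_BITS 8 + EXT_BITS 6 */

    static uint64_t mul_ext_fp(uint64_t x, uint64_t y)
    {
            return (x * y) >> EXT_FRAC_BITS;
    }

    static uint64_t div_ext_fp(uint64_t x, uint64_t y)
    {
            return (x << EXT_FRAC_BITS) / y;        /* div64_u64() in the kernel */
    }

    int main(void)
    {
            /* APERF/MPERF = 3/4 -> 0.75 * 2^14 = 12288 in fixed point */
            uint64_t avg_perf = div_ext_fp(3, 4);

            /* An integer times a Q14 fraction yields an integer:
             * (100 * 12288) >> 14 = 75, i.e. 75% average performance,
             * which is what trace_pstate_sample() reports further down. */
            printf("%llu -> %llu%%\n",
                   (unsigned long long)avg_perf,
                   (unsigned long long)mul_ext_fp(100, avg_perf));
            return 0;
    }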
 /**
  * struct sample -     Store performance sample
- * @core_pct_busy:     Ratio of APERF/MPERF in percent, which is actual
+ * @core_avg_perf:     Ratio of APERF/MPERF, which is the actual average
 *                     performance during the last sample period
 * @busy_scaled:       Scaled busy value which is used to calculate next
- *                     P state. This can be different than core_pct_busy
+ *                     P state. This can be different from core_avg_perf
  *                     to account for cpu idle period
  * @aperf:             Difference of actual performance frequency clock count
  *                     read from APERF MSR between last and current sample
@@ -84,7 +103,7 @@ static inline int ceiling_fp(int32_t x)
  * data for choosing next P State.
  */
 struct sample {
-       int32_t core_pct_busy;
+       int32_t core_avg_perf;
        int32_t busy_scaled;
        u64 aperf;
        u64 mperf;
@@ -162,6 +181,7 @@ struct _pid {
  * struct cpudata -    Per CPU instance data storage
  * @cpu:               CPU number for this instance data
  * @update_util:       CPUFreq utility callback information
+ * @update_util_set:   CPUFreq utility callback is set
  * @pstate:            Stores P state limits for this CPU
  * @vid:               Stores VID limits for this CPU
  * @pid:               Stores PID parameters for this CPU
@@ -172,6 +192,8 @@ struct _pid {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  *                     current sample
  * @sample:            Storage for storing last Sample data
+ * @acpi_perf_data:    Stores ACPI perf information read from _PSS
+ * @valid_pss_table:   Set to true for valid ACPI _PSS entries found
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -179,6 +201,7 @@ struct cpudata {
        int cpu;
 
        struct update_util_data update_util;
+       bool   update_util_set;
 
        struct pstate_data pstate;
        struct vid_data vid;
@@ -190,6 +213,10 @@ struct cpudata {
        u64     prev_tsc;
        u64     prev_cummulative_iowait;
        struct sample sample;
+#ifdef CONFIG_ACPI
+       struct acpi_processor_performance acpi_perf_data;
+       bool valid_pss_table;
+#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -258,6 +285,9 @@ static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
 
+#ifdef CONFIG_ACPI
+static bool acpi_ppc;
+#endif
 
 /**
  * struct perf_limits - Store user and policy limits
@@ -331,6 +361,124 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
+#ifdef CONFIG_ACPI
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+       if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
+           acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
+               return true;
+
+       return acpi_ppc;
+}
+
+/*
+ * The max target P-state ratio is an 8-bit value in both the PLATFORM_INFO
+ * MSR and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
+ * max_pstate and max_turbo_pstate fields. The PERF_CTL MSR contains a 16-bit
+ * value for the P-state ratio, of which only the high 8 bits are used; for
+ * example, 0x1700 sets target ratio 0x17. The _PSS control value is stored
+ * in a format that can be written directly to PERF_CTL, whereas in the
+ * intel_pstate driver that shift happens during the write to PERF_CTL
+ * (e.g. in core_set_pstate() for cores). This function converts the _PSS
+ * control value to the intel_pstate driver format for comparison and
+ * assignment.
+ */
+static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
+{
+       return cpu->acpi_perf_data.states[index].control >> 8;
+}
+
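As a concrete illustration of the conversion (control value hypothetical):

    /* A _PSS entry with control value 0x1700 programs target ratio 0x17
     * (P-state 23) when written to PERF_CTL, so:
     *   convert_to_native_pstate_format(cpu, i) == 0x1700 >> 8 == 0x17 */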
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+       struct cpudata *cpu;
+       int turbo_pss_ctl;
+       int ret;
+       int i;
+
+       if (hwp_active)
+               return;
+
+       if (!intel_pstate_get_ppc_enable_status())
+               return;
+
+       cpu = all_cpu_data[policy->cpu];
+
+       ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+                                                 policy->cpu);
+       if (ret)
+               return;
+
+       /*
+        * Check that the control value in _PSS is for the PERF_CTL MSR, which
+        * should guarantee that the states it returns map directly to the
+        * states in our list.
+        */
+       if (cpu->acpi_perf_data.control_register.space_id !=
+                                               ACPI_ADR_SPACE_FIXED_HARDWARE)
+               goto err;
+
+       /*
+        * If there is only one entry in _PSS, ignore it and continue as usual
+        * without taking it into account.
+        */
+       if (cpu->acpi_perf_data.state_count < 2)
+               goto err;
+
+       pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
+       for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+               pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
+                        (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+                        (u32) cpu->acpi_perf_data.states[i].core_frequency,
+                        (u32) cpu->acpi_perf_data.states[i].power,
+                        (u32) cpu->acpi_perf_data.states[i].control);
+       }
+
+       /*
+        * The _PSS table doesn't cover the whole turbo frequency range; its
+        * first entry is just the max non-turbo frequency + 1 MHz, with a
+        * control value corresponding to the max turbo ratio.  When cpufreq
+        * set_policy is later called with that frequency as the maximum, the
+        * result is reduced performance, because this driver uses the real
+        * max turbo frequency as the maximum.  So rewrite the frequency of
+        * the first _PSS entry to the max turbo frequency derived from the
+        * turbo ratio, converted to MHz since _PSS frequencies are in MHz
+        * (cpuinfo.max_freq is in kHz).
+        */
+       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
+       if (turbo_pss_ctl > cpu->pstate.max_pstate)
+               cpu->acpi_perf_data.states[0].core_frequency =
+                                       policy->cpuinfo.max_freq / 1000;
+       cpu->valid_pss_table = true;
+       pr_info("_PPC limits will be enforced\n");
+
+       return;
+
+ err:
+       cpu->valid_pss_table = false;
+       acpi_processor_unregister_performance(policy->cpu);
+}
+
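To make the P0 fix-up above concrete (values hypothetical):

    /* Suppose max_pstate = 0x18 (2400 MHz at 100 MHz scaling) and turbo
     * ratio 0x1C.  ACPI then advertises _PSS P0 as 2401 MHz with control
     * 0x1C00.  Since turbo_pss_ctl (0x1C) > max_pstate (0x18), P0's
     * core_frequency is rewritten to cpuinfo.max_freq / 1000 = 2800 MHz,
     * the real max turbo frequency, so _PPC limits stay meaningful. */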
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+       struct cpudata *cpu;
+
+       cpu = all_cpu_data[policy->cpu];
+       if (!cpu->valid_pss_table)
+               return;
+
+       acpi_processor_unregister_performance(policy->cpu);
+}
+
+#else
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+}
+#endif
+
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral) {
        pid->setpoint = int_tofp(setpoint);
@@ -453,6 +601,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
        }
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+       if (hwp_active)
+               intel_pstate_hwp_set(policy->cpus);
+
+       return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
        get_online_cpus();
@@ -571,7 +727,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
        update_turbo_state();
        if (limits->turbo_disabled) {
-               pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
                return -EPERM;
        }
 
@@ -811,6 +967,11 @@ static int core_get_max_pstate(void)
                        if (err)
                                goto skip_tar;
 
+                       /* For level 1 and 2, bits[23:16] contain the ratio */
+                       if (tdp_ctrl)
+                               tdp_ratio >>= 16;
+
+                       tdp_ratio &= 0xff; /* ratios are only 8 bits long */
                        if (tdp_ratio - 1 == tar) {
                                max_pstate = tar;
                                pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -1004,15 +1165,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        intel_pstate_set_min_pstate(cpu);
 }
 
-static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
 {
        struct sample *sample = &cpu->sample;
-       int64_t core_pct;
-
-       core_pct = sample->aperf * int_tofp(100);
-       core_pct = div64_u64(core_pct, sample->mperf);
 
-       sample->core_pct_busy = (int32_t)core_pct;
+       sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
 }
 
 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
@@ -1055,8 +1212,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-       return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-               cpu->pstate.scaling, cpu->sample.mperf);
+       return mul_ext_fp(cpu->sample.core_avg_perf,
+                         cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+}
+
+static inline int32_t get_avg_pstate(struct cpudata *cpu)
+{
+       return mul_ext_fp(cpu->pstate.max_pstate_physical,
+                         cpu->sample.core_avg_perf);
 }
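Plugging hypothetical numbers into get_avg_frequency() (scaling is 100000 kHz per ratio step on most cores):

    /* core_avg_perf = 0.75 in Q14 (12288), max_pstate_physical = 24:
     *   mul_ext_fp(12288, 24 * 100000) = (12288 * 2400000) >> 14
     *                                  = 1800000 kHz = 1.8 GHz,
     * i.e. 75% of the 2.4 GHz physical maximum, as expected. */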
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1091,46 +1254,43 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
        cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
        cpu->sample.busy_scaled = cpu_load;
 
-       return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
+       return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
 }
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 {
-       int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+       int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;
 
-       intel_pstate_calc_busy(cpu);
-
        /*
-        * core_busy is the ratio of actual performance to max
-        * max_pstate is the max non turbo pstate available
-        * current_pstate was the pstate that was requested during
-        *      the last sample period.
-        *
-        * We normalize core_busy, which was our actual percent
-        * performance to what we requested during the last sample
-        * period. The result will be a percentage of busy at a
-        * specified pstate.
+        * perf_scaled is the average performance during the last sampling
+        * period scaled by the ratio of the maximum P-state to the P-state
+        * requested last time (in percent).  That measures the system's
+        * response to the previous P-state selection.
         */
-       core_busy = cpu->sample.core_pct_busy;
        max_pstate = cpu->pstate.max_pstate_physical;
        current_pstate = cpu->pstate.current_pstate;
-       core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+       perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
+                              div_fp(100 * max_pstate, current_pstate));
 
        /*
         * Since our utilization update callback will not run unless we are
         * in C0, check if the actual elapsed time is significantly greater (3x)
         * than our sample interval.  If it is, then we were idle for a long
-        * enough period of time to adjust our busyness.
+        * enough period of time to adjust our performance metric.
         */
        duration_ns = cpu->sample.time - cpu->last_sample_time;
        if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
                sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
-               core_busy = mul_fp(core_busy, sample_ratio);
+               perf_scaled = mul_fp(perf_scaled, sample_ratio);
+       } else {
+               sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+               if (sample_ratio < int_tofp(1))
+                       perf_scaled = 0;
        }
 
-       cpu->sample.busy_scaled = core_busy;
-       return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+       cpu->sample.busy_scaled = perf_scaled;
+       return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
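A worked example of the mixed-precision math above (numbers hypothetical): core_avg_perf carries 14 fractional bits while div_fp() produces 8, and (Q14 x Q8) >> 14 lands back in Q8, the format pid_calc() consumes. The new else branch additionally forces perf_scaled to zero when MPERF/TSC shows the CPU was in C0 for less than 1% of the sampling interval:

    /* core_avg_perf = 0.75 in Q14 (12288), max_pstate = 24,
     * current_pstate = 16:
     *   div_fp(100 * 24, 16)     = 150.0 in Q8  (38400)
     *   mul_ext_fp(12288, 38400) = (12288 * 38400) >> 14
     *                            = 28800 in Q8  = 112.5%
     * The CPU delivered 112.5% of what the previous request accounts for,
     * so the PID is fed a busy value well above its setpoint. */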
 
 static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
@@ -1160,7 +1320,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
        intel_pstate_update_pstate(cpu, target_pstate);
 
        sample = &cpu->sample;
-       trace_pstate_sample(fp_toint(sample->core_pct_busy),
+       trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
                fp_toint(sample->busy_scaled),
                from,
                cpu->pstate.current_pstate,
@@ -1179,8 +1339,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);
 
-               if (sample_taken && !hwp_active)
-                       intel_pstate_adjust_busy_pstate(cpu);
+               if (sample_taken) {
+                       intel_pstate_calc_avg_perf(cpu);
+                       if (!hwp_active)
+                               intel_pstate_adjust_busy_pstate(cpu);
+               }
        }
 }
 
@@ -1239,21 +1402,16 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
        intel_pstate_busy_pid_reset(cpu);
 
-       pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+       pr_debug("controlling: cpu %d\n", cpunum);
 
        return 0;
 }
 
 static unsigned int intel_pstate_get(unsigned int cpu_num)
 {
-       struct sample *sample;
-       struct cpudata *cpu;
+       struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       cpu = all_cpu_data[cpu_num];
-       if (!cpu)
-               return 0;
-       sample = &cpu->sample;
-       return get_avg_frequency(cpu);
+       return cpu ? get_avg_frequency(cpu) : 0;
 }
 
 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
@@ -1264,11 +1422,18 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
        cpu->sample.time = 0;
        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
                                     intel_pstate_update_util);
+       cpu->update_util_set = true;
 }
 
 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 {
+       struct cpudata *cpu_data = all_cpu_data[cpu];
+
+       if (!cpu_data->update_util_set)
+               return;
+
        cpufreq_remove_update_util_hook(cpu);
+       cpu_data->update_util_set = false;
        synchronize_sched();
 }
 
@@ -1288,20 +1453,31 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
+       struct cpudata *cpu;
+
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
        intel_pstate_clear_update_util_hook(policy->cpu);
 
+       cpu = all_cpu_data[0];
+       if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
+               if (policy->max < policy->cpuinfo.max_freq &&
+                   policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+                       pr_debug("policy->max > max non turbo frequency\n");
+                       policy->max = policy->cpuinfo.max_freq;
+               }
+       }
+
        if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
                limits = &performance_limits;
                if (policy->max >= policy->cpuinfo.max_freq) {
-                       pr_debug("intel_pstate: set performance\n");
+                       pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(limits);
                        goto out;
                }
        } else {
-               pr_debug("intel_pstate: set powersave\n");
+               pr_debug("set powersave\n");
                limits = &powersave_limits;
        }
 
@@ -1331,8 +1507,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
  out:
        intel_pstate_set_update_util_hook(policy->cpu);
 
-       if (hwp_active)
-               intel_pstate_hwp_set(policy->cpus);
+       intel_pstate_hwp_set_policy(policy);
 
        return 0;
 }
@@ -1353,7 +1528,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
        int cpu_num = policy->cpu;
        struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+       pr_debug("CPU %d exiting\n", cpu_num);
 
        intel_pstate_clear_update_util_hook(cpu_num);
 
@@ -1386,18 +1561,28 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->cpuinfo.max_freq =
                cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
 
        return 0;
 }
 
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+       intel_pstate_exit_perf_limits(policy);
+
+       return 0;
+}
+
 static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
+       .resume         = intel_pstate_hwp_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
+       .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
 };
@@ -1441,8 +1626,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 
 }
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
+#ifdef CONFIG_ACPI
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1598,7 +1782,7 @@ hwp_cpu_matched:
        if (intel_pstate_platform_pwr_mgmt_exists())
                return -ENODEV;
 
-       pr_info("Intel P-state driver initializing.\n");
+       pr_info("Intel P-state driver initializing\n");
 
        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
        if (!all_cpu_data)
@@ -1615,7 +1799,7 @@ hwp_cpu_matched:
        intel_pstate_sysfs_expose_params();
 
        if (hwp_active)
-               pr_info("intel_pstate: HWP enabled\n");
+               pr_info("HWP enabled\n");
 
        return rc;
 out:
@@ -1641,13 +1825,19 @@ static int __init intel_pstate_setup(char *str)
        if (!strcmp(str, "disable"))
                no_load = 1;
        if (!strcmp(str, "no_hwp")) {
-               pr_info("intel_pstate: HWP disabled\n");
+               pr_info("HWP disabled\n");
                no_hwp = 1;
        }
        if (!strcmp(str, "force"))
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
+
+#ifdef CONFIG_ACPI
+       if (!strcmp(str, "support_acpi_ppc"))
+               acpi_ppc = true;
+#endif
+
        return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);