cpufreq/intel_pstate: Use CPPC to get max performance
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 22 Nov 2016 20:24:00 +0000 (12:24 -0800)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 24 Nov 2016 19:44:20 +0000 (20:44 +0100)
Use the ACPI cppc_lib interface to get the CPPC performance limits and update
the per-CPU priorities for the ITMT scheduler. If the highest performance of
the CPUs differs, the ITMT feature is enabled.
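
For illustration only (not part of the patch below), a minimal sketch of that
flow: read each online CPU's CPPC highest_perf and feed it to the ITMT
scheduler interfaces. The helper name example_set_itmt_prios() is invented for
this sketch, which assumes CONFIG_ACPI_CPPC_LIB and ITMT support are enabled:

    #include <linux/cpu.h>
    #include <linux/kernel.h>
    #include <acpi/cppc_acpi.h>
    #include <asm/topology.h>

    static void example_set_itmt_prios(void)
    {
            struct cppc_perf_caps caps;
            u32 max_hp = 0, min_hp = U32_MAX;
            int cpu;

            get_online_cpus();
            for_each_online_cpu(cpu) {
                    if (cppc_get_perf_caps(cpu, &caps))
                            continue;

                    /* A higher CPPC highest_perf means a higher core priority. */
                    sched_set_itmt_core_prio(caps.highest_perf, cpu);

                    if (caps.highest_perf > max_hp)
                            max_hp = caps.highest_perf;
                    if (caps.highest_perf < min_hp)
                            min_hp = caps.highest_perf;
            }
            put_online_cpus();

            /*
             * Enable ITMT only if the CPUs report different highest
             * performance values.  The patch defers this call to a work
             * item because it runs from the CPU online path under the
             * CPU hotplug locks.
             */
            if (max_hp > min_hp)
                    sched_set_itmt_support();
    }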

Co-developed-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Cc: linux-pm@vger.kernel.org
Cc: peterz@infradead.org
Cc: jolsa@redhat.com
Cc: rjw@rjwysocki.net
Cc: linux-acpi@vger.kernel.org
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: bp@suse.de
Link: http://lkml.kernel.org/r/0998b98943bcdec7d1ddd4ff27358da555ea8e92.1479844244.git.tim.c.chen@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/intel_pstate.c

index adbd1de1cea55cbe6777d3a1c4dab546a6a68adf..c6d273b43ff9ac4d775744c7a5cca685b4f0059f 100644
@@ -6,6 +6,7 @@ config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
        select ACPI_PROCESSOR if ACPI
+       select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_ITMT
        help
           This driver provides a P state for Intel core processors.
          The driver implements an internal governor and will become
index 4737520ec8230a830d80e81c0dbc9dbaa96d0dc7..e8dc42fc091502064976c12ff48d46a03fd92e7d 100644
@@ -44,6 +44,7 @@
 
 #ifdef CONFIG_ACPI
 #include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
 #endif
 
 #define FRAC_BITS 8
@@ -379,14 +380,67 @@ static bool intel_pstate_get_ppc_enable_status(void)
        return acpi_ppc;
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
+{
+       sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
+
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+       struct cppc_perf_caps cppc_perf;
+       static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+       int ret;
+
+       ret = cppc_get_perf_caps(cpu, &cppc_perf);
+       if (ret)
+               return;
+
+       /*
+        * The priorities can be set regardless of whether or not
+        * sched_set_itmt_support(true) has been called and it is valid to
+        * update them at any time after it has been called.
+        */
+       sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
+
+       if (max_highest_perf <= min_highest_perf) {
+               if (cppc_perf.highest_perf > max_highest_perf)
+                       max_highest_perf = cppc_perf.highest_perf;
+
+               if (cppc_perf.highest_perf < min_highest_perf)
+                       min_highest_perf = cppc_perf.highest_perf;
+
+               if (max_highest_perf > min_highest_perf) {
+                       /*
+                        * This code can be run during CPU online under the
+                        * CPU hotplug locks, so sched_set_itmt_support()
+                        * cannot be called from here.  Queue up a work item
+                        * to invoke it.
+                        */
+                       schedule_work(&sched_itmt_work);
+               }
+       }
+}
+#else
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+}
+#endif
+
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
        int ret;
        int i;
 
-       if (hwp_active)
+       if (hwp_active) {
+               intel_pstate_set_itmt_prio(policy->cpu);
                return;
+       }
 
        if (!intel_pstate_get_ppc_enable_status())
                return;