acpi-cpufreq: Add compatibility for legacy AMD cpb sysfs knob
[linux-2.6-block.git] / drivers / cpufreq / acpi-cpufreq.c
index 56c6c6b4eb4d61043bfdd9b61a95e71ee34f424a..0d048f6a2b23a3bf9476a658cac846ccd321bd3c 100644 (file)
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "	/* log-message prefix for this driver */
+
 enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
+       SYSTEM_AMD_MSR_CAPABLE,
        SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE                (0xffff)
+/* Mask for the P-state field read from MSR_AMD_PERF_STATUS (low 3 bits) */
+#define AMD_MSR_RANGE          (0x7)
+
+/* Core Performance Boost disable bit in MSR_K7_HWCR (set = boost off) */
+#define MSR_K7_HWCR_CPB_DIS    (1ULL << 25)
 
 struct acpi_cpufreq_data {
        struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+/* boost_enabled: current global boost state; boost_supported: HW capability */
+static bool boost_enabled, boost_supported;
+/* Per-cpu scratch buffer for rdmsr_on_cpus()/wrmsr_on_cpus() batch access */
+static struct msr __percpu *msrs;
+
+/*
+ * boost_state - query whether hardware boost is currently enabled on @cpu.
+ *
+ * Both vendors expose boost as a *disable* bit in a control MSR (Intel:
+ * turbo-disable in MISC_ENABLE; AMD: CPB-disable in K7 HWCR), hence the
+ * inverted tests.  Returns false for any other vendor.
+ */
+static bool boost_state(unsigned int cpu)
+{
+       u32 lo, hi;
+       u64 msr;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+               msr = lo | ((u64)hi << 32);
+               return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+       case X86_VENDOR_AMD:
+               rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+               msr = lo | ((u64)hi << 32);
+               return !(msr & MSR_K7_HWCR_CPB_DIS);
+       }
+       return false;
+}
+
+/*
+ * boost_set_msrs - set the boost-disable bit on every CPU in @cpumask.
+ *
+ * Batch-reads the vendor-specific control MSR into the preallocated
+ * per-cpu 'msrs' buffer, flips the disable bit according to @enable,
+ * then batch-writes the values back.  No-op on unknown vendors.
+ * Callers must hold the CPU hotplug lock (see _store_boost()) or be a
+ * hotplug callback themselves.
+ */
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+       u32 cpu;
+       u32 msr_addr;
+       u64 msr_mask;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               msr_addr = MSR_IA32_MISC_ENABLE;
+               msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+               break;
+       case X86_VENDOR_AMD:
+               msr_addr = MSR_K7_HWCR;
+               msr_mask = MSR_K7_HWCR_CPB_DIS;
+               break;
+       default:
+               return;
+       }
+
+       rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+       for_each_cpu(cpu, cpumask) {
+               struct msr *reg = per_cpu_ptr(msrs, cpu);
+               /* the hardware bit means "disable boost", hence the inversion */
+               if (enable)
+                       reg->q &= ~msr_mask;
+               else
+                       reg->q |= msr_mask;
+       }
+
+       wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+/*
+ * _store_boost - common worker behind the global 'boost' and legacy 'cpb'
+ * sysfs store handlers.
+ *
+ * Accepts "0" or "1" only; returns -EINVAL for other input or when boost
+ * is not supported.  Returns @count on success, including the case where
+ * the requested state already matches the current one.
+ */
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val = 0;
+
+       if (!boost_supported)
+               return -EINVAL;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret || (val > 1))
+               return -EINVAL;
+
+       /* nothing to do if the state is unchanged */
+       if ((val && boost_enabled) || (!val && !boost_enabled))
+               return count;
+
+       /* hold the hotplug lock so no CPU appears/vanishes mid-update */
+       get_online_cpus();
+
+       boost_set_msrs(val, cpu_online_mask);
+
+       put_online_cpus();
+
+       boost_enabled = val;
+       pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+       return count;
+}
+
+/* sysfs store handler for the global 'boost' attribute. */
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+                                 const char *buf, size_t count)
+{
+       return _store_boost(buf, count);
+}
+
+/* sysfs show handler: prints the current global boost state (0 or 1). */
+static ssize_t show_global_boost(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", boost_enabled);
+}
+
+/* Global 'boost' knob; mode is downgraded to 0444 when unsupported
+ * (see acpi_cpufreq_boost_init()). */
+static struct global_attr global_boost = __ATTR(boost, 0644,
+                                               show_global_boost,
+                                               store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+/*
+ * Legacy per-policy 'cpb' knob kept for compatibility with the old AMD
+ * driver interface.  Despite being instantiated per CPU it toggles boost
+ * system-wide through _store_boost() — see the comment in
+ * acpi_cpufreq_init() where it is hooked up.
+ */
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+                        size_t count)
+{
+       return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+       return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
 
 static int check_est_cpu(unsigned int cpuid)
 {
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
        return cpu_has(cpu, X86_FEATURE_EST);
 }
 
+/* Return non-zero if @cpuid advertises AMD hardware P-state support
+ * (X86_FEATURE_HW_PSTATE CPUID flag). */
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+       struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+       return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
        struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
        int i;
        struct acpi_processor_performance *perf;
 
-       msr &= INTEL_MSR_RANGE;
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               msr &= AMD_MSR_RANGE;
+       else
+               msr &= INTEL_MSR_RANGE;
+
        perf = data->acpi_data;
 
        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
+       case SYSTEM_AMD_MSR_CAPABLE:
                return extract_msr(val, data);
        case SYSTEM_IO_CAPABLE:
                return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
 
        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
+       case SYSTEM_AMD_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
                lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
                wrmsr(cmd->addr.msr.reg, lo, hi);
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+               break;
        case SYSTEM_IO_CAPABLE:
                acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                                cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+               cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+               break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
+       case SYSTEM_AMD_MSR_CAPABLE:
+               cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+               cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+               cmd.val = (u32) perf->states[next_perf_state].control;
+               break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
        free_percpu(acpi_perf_data);
 }
 
+/*
+ * boost_notify - CPU hotplug callback keeping the per-cpu boost-disable
+ * MSR bit consistent across online/offline transitions.
+ */
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+                     void *hcpu)
+{
+       unsigned cpu = (long)hcpu;
+       const struct cpumask *cpumask;
+
+       cpumask = get_cpu_mask(cpu);
+
+       /*
+        * Clear the boost-disable bit on the CPU_DOWN path so that
+        * this cpu cannot block the remaining ones from boosting. On
+        * the CPU_UP path we simply keep the boost-disable flag in
+        * sync with the current global state.
+        */
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               boost_set_msrs(boost_enabled, cpumask);
+               break;
+
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               /* enable=1 clears the disable bit on the departing cpu */
+               boost_set_msrs(1, cpumask);
+               break;
+
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+       .notifier_call          = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                cpumask_copy(policy->cpus, cpu_core_mask(cpu));
        }
+
+       if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+               cpumask_clear(policy->cpus);
+               cpumask_set_cpu(cpu, policy->cpus);
+               cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
+               policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+               pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+       }
 #endif
 
        /* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                pr_debug("HARDWARE addr space\n");
-               if (!check_est_cpu(cpu)) {
-                       result = -ENODEV;
-                       goto err_unreg;
+               if (check_est_cpu(cpu)) {
+                       data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+                       break;
                }
-               data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-               break;
+               if (check_amd_hwpstate_cpu(cpu)) {
+                       data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+                       break;
+               }
+               result = -ENODEV;
+               goto err_unreg;
        default:
                pr_debug("Unknown addr space %d\n",
                        (u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,   /* this is a placeholder for cpb, do not remove */
        NULL,
 };
 
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
        .attr           = acpi_cpufreq_attr,
 };
 
+/*
+ * acpi_cpufreq_boost_init - detect boost capability (AMD CPB or Intel IDA),
+ * synchronize the boost MSRs across all online CPUs, hook into CPU hotplug
+ * and expose the global 'boost' sysfs file.
+ *
+ * NOTE(review): if msrs_alloc() fails we return early, so the sysfs file
+ * is not created at all in that case — presumably acceptable, but it also
+ * skips the 0444 downgrade; verify this is intended.
+ */
+static void __init acpi_cpufreq_boost_init(void)
+{
+       if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+               msrs = msrs_alloc();
+
+               if (!msrs)
+                       return;
+
+               boost_supported = true;
+               boost_enabled = boost_state(0);
+
+               get_online_cpus();
+
+               /* Force all MSRs to the same value */
+               boost_set_msrs(boost_enabled, cpu_online_mask);
+
+               register_cpu_notifier(&boost_nb);
+
+               put_online_cpus();
+       } else
+               global_boost.attr.mode = 0444;
+
+       /* We create the boost file in any case, though for systems without
+        * hardware support it will be read-only and hardwired to return 0.
+        */
+       if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+               pr_warn(PFX "could not register global boost sysfs file\n");
+       else
+               pr_debug("registered global boost sysfs file\n");
+}
+
+/*
+ * acpi_cpufreq_boost_exit - tear down boost support: remove the sysfs
+ * file, unregister the hotplug notifier and free the per-cpu MSR buffer.
+ * 'msrs' doubles as the "boost init succeeded" flag here.
+ */
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+       sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+       if (msrs) {
+               unregister_cpu_notifier(&boost_nb);
+
+               msrs_free(msrs);
+               msrs = NULL;
+       }
+}
+
 static int __init acpi_cpufreq_init(void)
 {
        int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
        if (ret)
                return ret;
 
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+       /* this is a sysfs file with a strange name and an even stranger
+        * semantic - per CPU instantiation, but system global effect.
+        * Lets enable it only on AMD CPUs for compatibility reasons and
+        * only if configured. This is considered legacy code, which
+        * will probably be removed at some point in the future.
+        */
+       if (check_amd_hwpstate_cpu(0)) {
+               struct freq_attr **iter;
+
+               pr_debug("adding sysfs entry for cpb\n");
+
+               for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+                       ;
+
+               /* make sure there is a terminator behind it */
+               if (iter[1] == NULL)
+                       *iter = &cpb;
+       }
+#endif
+
        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
        if (ret)
                free_acpi_perf_data();
+       else
+               acpi_cpufreq_boost_init();
 
        return ret;
 }
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
        pr_debug("acpi_cpufreq_exit\n");
 
+       acpi_cpufreq_boost_exit();
+
        cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
        free_acpi_perf_data();