drivers/cpufreq/acpi-cpufreq.c
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;

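/*
 * boost_state - report whether core boosting is currently enabled on @cpu.
 *
 * Reads the vendor specific MSR (MISC_ENABLE on Intel, K7_HWCR on AMD)
 * and returns true when the boost-disable bit is clear.
 */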
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

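/*
 * boost_set_msrs - set or clear the boost-disable bit on all CPUs in @cpumask.
 *
 * The MSRs are read once for the whole mask, the boost-disable bit is
 * cleared (@enable true) or set (@enable false) in the cached copies, and
 * the result is written back with wrmsr_on_cpus().
 */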
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

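/*
 * _store_boost - common handler for writes to the global "boost" file and
 * the legacy per-policy "cpb" file. Accepts "0" or "1" and applies the new
 * state to all online CPUs while holding the CPU hotplug lock.
 */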
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}

static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

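/*
 * extract_io - map a status value read from the ACPI performance status
 * port to a frequency in kHz by matching it against the per-state status
 * values; returns 0 if no state matches.
 */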
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

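/*
 * extract_msr - map a PERF_STATUS MSR value to a frequency in kHz. Only
 * the vendor specific low bits of the MSR identify the P-state; unmatched
 * values fall back to the first frequency table entry.
 */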
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;
	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

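/*
 * get_cur_val - read the current P-state status value (MSR or IO port) on
 * one of the CPUs in @mask, using the access method recorded for the first
 * CPU in the mask.
 */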
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

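/*
 * check_freqs - used in acpi_pstate_strict mode: poll the hardware for up
 * to 100 * 10us and return 1 as soon as the requested frequency is seen,
 * 0 if it never shows up.
 */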
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

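/*
 * acpi_cpufreq_target - switch the policy to the P-state matching
 * @target_freq: look up the frequency table entry, program the control
 * MSR or IO port on the affected CPUs, optionally verify the change
 * (acpi_pstate_strict) and send the PRE-/POSTCHANGE notifications.
 */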
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}

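/*
 * acpi_cpufreq_guess_freq - estimate the current frequency when it cannot
 * be read back (IO port case): pick the P-state whose frequency is closest
 * to cpu_khz, or assume P0 if cpu_khz is not available.
 */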
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}


static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

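/*
 * acpi_cpufreq_cpu_init - per-policy setup: register the ACPI performance
 * data for the CPU, work out the coordination type and access method
 * (IO port vs. Intel/AMD MSR), build the frequency table, and determine
 * the transition latency and initial frequency.
 */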
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

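/*
 * acpi_cpufreq_resume - after suspend/resume the hardware P-state is
 * unknown, so force a register write on the next ->target() call.
 */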
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};

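/*
 * acpi_cpufreq_boost_init - if the CPU advertises boost support (AMD CPB
 * or Intel IDA/turbo), allocate the per-CPU MSR cache, bring all online
 * CPUs to the same boost state, register the hotplug notifier and expose
 * the global "boost" sysfs file (read-only without hardware support).
 */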
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

MODULE_ALIAS("acpi");