/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
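
/*
 * A worked example of the two macros above (assuming the standard ACPI PM
 * timer rate, PM_TIMER_FREQUENCY == 3579545 Hz, i.e. 3.579545 MHz):
 *
 *	US_TO_PM_TIMER_TICKS(1000) == (1000 * 3579) / 1000 == 3579 ticks/ms
 *	PM_TIMER_TICK_NS           == 1000000000 / 3579545 ~= 279 ns/tick
 */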
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif

#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
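
/*
 * Example: with HZ == 250 the initializer above evaluates to
 * (1U << (250 / 25)) - 1 == (1U << 10) - 1 == 0x3FF, i.e. a 10-bit mask
 * covering 10 jiffies == 40ms of bus-master history, matching the 40ms
 * window shown for the other HZ values in the comment.
 */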
static int acpi_processor_set_power_policy(struct acpi_processor *pr);
#endif
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
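
/*
 * Worked example for the 24-bit wrap case above (assumed values): with
 * t1 == 0x00FFFFF0 and t2 == 0x00000010, i.e. the timer wrapped between
 * the two reads,
 *	((0x00FFFFFF - t1) + t2) & 0x00FFFFFF == 0x0F + 0x10 == 0x1F,
 * roughly the 0x20 ticks that actually passed; the mask keeps the result
 * within the 24-bit counter range instead of going negative.
 */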
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}
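
/*
 * A sketch of the race the smp_mb() above closes (the wakeup protocol as
 * understood here, not a quote from the scheduler): without the barrier,
 * the need_resched() load could be reordered before the TS_POLLING clear.
 * A remote CPU that sets TIF_NEED_RESCHED and then still sees TS_POLLING
 * set will skip the reschedule IPI, leaving this CPU halted with a
 * pending reschedule.
 */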
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old && old->type == ACPI_STATE_C3) {
		/* Disable bus master reload */
		if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/* Prepare to use new state. */
	if (new->type == ACPI_STATE_C3) {
		/* Enable bus master reload */
		if (old && old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
	}

	pr->power.state = new;
}
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}
#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
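
/*
 * Example: on a CPU without X86_FEATURE_CONSTANT_TSC, tsc_halts_in_c()
 * returns true for both ACPI_STATE_C2 and ACPI_STATE_C3, so entering
 * either state must mark the TSC unstable; for ACPI_STATE_C1 it returns
 * false, which is why the C1 path does no TSC handling.
 */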
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		local_irq_enable();
		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->
					      promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}
	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}

#endif /* !CONFIG_CPU_IDLE */
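
/*
 * Illustration of the chains built by acpi_processor_set_power_policy()
 * (assuming valid C1, C2 and C3 states): the demotion pass links
 * C3 -> C2 -> C1 with threshold.count == 1, and the promotion pass links
 * C1 -> C2 -> C3 with threshold.count == 10 for C1 and 4 for C2/C3, so a
 * single short sleep demotes one state while several consecutive long
 * sleeps are needed to promote one state deeper.
 */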
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;

	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
			} else {
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
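
/*
 * For reference, the _CST package shape parsed above (illustrative values,
 * not from any particular BIOS):
 *
 *	Package {
 *		3,					// count
 *		Package { Buffer { ... }, 1,  1, 1000 },	// C1
 *		Package { Buffer { ... }, 2, 50,  500 },	// C2
 *		Package { Buffer { ... }, 3, 90,  250 },	// C3
 *	}
 *
 * element[0] is the register buffer, element[1] the C-state type,
 * element[2] the worst-case latency in microseconds, and element[3] the
 * average power in milliwatts.
 */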
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;


	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif
#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
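
/*
 * Example of the transitions above: C3 -> C1 clears BM_RLD, C1 -> C3 sets
 * it, and consecutive entries into the same class (C3 -> C3, or C1 -> C2)
 * leave it untouched, so the relatively expensive acpi_set_register()
 * call happens only when the C3-ness of the target state changes.
 */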
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	local_irq_disable();
	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * 6;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
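
/*
 * Resulting table (illustrative, assuming valid C1-C3 and
 * CPUIDLE_DRIVER_STATE_START == 1): dev->states[1..3] are named "C1".."C3",
 * exit_latency comes from _CST/FADT, target_residency uses the 6x latency
 * heuristic above, and C3 carries CPUIDLE_FLAG_CHECK_BM so the governor
 * checks bus-master activity before entering it.
 */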
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle). Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}