x86-32: use non-lazy io bitmap context switching
[linux-2.6-block.git] arch/x86/kernel/process.c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/idle.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/ftrace.h>
#include <asm/system.h>
#include <asm/apic.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		WARN_ON((unsigned long)dst->thread.xstate & 15);
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}
	return 0;
}
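
/*
 * For orientation (editor's sketch, not part of this file): the generic
 * fork path in kernel/fork.c duplicates the task_struct and then gives
 * the architecture a chance to fix it up, roughly:
 *
 *	tsk = alloc_task_struct();
 *	...
 *	err = arch_dup_task_struct(tsk, orig);
 *	if (err)
 *		goto out;
 *
 * so a failed xstate allocation above simply fails the fork with -ENOMEM.
 */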

void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC, NULL);
}
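
/*
 * Editorial note: SLAB_PANIC means a failure to create this cache panics
 * at boot, so task_xstate_cachep can be assumed valid in the allocation
 * above. xstate_size and the thread_xstate alignment are presumed to be
 * set up by the FPU init code before the first task is duplicated.
 */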

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
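
/*
 * Illustrative usage (hypothetical caller, editor's sketch): a driver
 * that cannot tolerate HLT while legacy ISA DMA is in flight would
 * bracket the transfer like
 *
 *	disable_hlt();
 *	... program the DMA controller, wait for completion ...
 *	enable_hlt();
 *
 * The counter nests, so overlapping users are safe as long as the
 * calls are balanced.
 */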

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine.
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		struct power_trace it;

		trace_power_start(&it, POWER_CSTATE, 1);
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end(&it);
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all CPUs discard the old value of
 * pm_idle and start using the new one. Required when changing the pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. The
 * old pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
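
/*
 * Typical use (sketch with a hypothetical handler, not from this file):
 *
 *	pm_idle = my_new_idle;
 *	cpu_idle_wait();
 *
 * After cpu_idle_wait() returns, no CPU can still be inside the old
 * handler: the smp_mb() orders the pointer update, and the dummy
 * cross-call forces every CPU out of its current pm_idle invocation.
 */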

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we are woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
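/*
 * Hint encoding note (editorial, per the ACPI/MWAIT convention): EAX
 * bits 7:4 carry the target C-state minus one and EAX bits 3:0 the
 * sub-state, which is why the tracepoint below reports (ax>>4)+1 as
 * the C-state number.
 */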
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
	if (!need_resched()) {
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
	trace_power_end(&it);
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	struct power_trace it;

	if (!need_resched()) {
		trace_power_start(&it, POWER_CSTATE, 1);
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end(&it);
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, 0);
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end(&it);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and the current P-state of the core.
 * If all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0
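
/*
 * Editorial note: CPUID leaf 0x05 reports MWAIT details. ECX bit 0
 * (MWAIT_ECX_EXTENDED_INFO) flags the extended enumeration, and EDX
 * packs the number of MWAIT sub-states per C-state into 4-bit fields;
 * bits 7:4 (MWAIT_EDX_C1) are the C1 field tested below.
 */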

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (force_mwait)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * EDX enumerates the MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_AMD)
		return 0;

	if (c->x86 < 0x0F)
		return 0;

	/* Family 0x0f models < rev F do not have C1E */
	if (c->x86 == 0x0f && c->x86_model < 0x40)
		return 0;

	return 1;
}

static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
	cpu_clear(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop).
 */
static void c1e_idle(void)
{
	if (need_resched())
		return;

	if (!c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			c1e_detected = 1;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
		}
	}

	if (c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpu_isset(cpu, c1e_mask)) {
			cpu_set(cpu, c1e_mask);
			/*
			 * Force broadcast so ACPI cannot interfere. Needs
			 * to run with interrupts enabled as it uses
			 * smp_call_function.
			 */
			local_irq_enable();
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
			local_irq_disable();
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => all CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (check_c1e_idle(c)) {
		printk(KERN_INFO "using C1E aware idle routine\n");
		pm_idle = c1e_idle;
	} else
		pm_idle = default_idle;
}
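
/*
 * "idle=" boot options handled below (editorial summary):
 *
 *	idle=poll	busy-poll instead of halting (pm_idle = poll_idle)
 *	idle=mwait	force MWAIT even where mwait_usable() would say no
 *	idle=halt	force plain HLT; C2/C3 will not be entered
 *	idle=nomwait	disable MWAIT for the C2/C3 states
 *
 * Only "poll" and "mwait" set boot_option_idle_override; "halt" and
 * "nomwait" deliberately leave it alone so the CPU idle driver can
 * still be loaded.
 */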
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else if (!strcmp(str, "halt")) {
		/*
		 * When the idle=halt boot option is given, halt is
		 * forced for CPU idle; in that case the C2/C3 states
		 * won't be used again.
		 * To keep the CPU idle driver loadable, don't touch
		 * boot_option_idle_override.
		 */
		pm_idle = default_idle;
		idle_halt = 1;
		return 0;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the "idle=nomwait" boot option is given, mwait
		 * is disabled for the CPU C2/C3 states. In that case
		 * the boot_option_idle_override variable is also left
		 * untouched.
		 */
		idle_nomwait = 1;
		return 0;
	} else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);