/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}
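
/*
 * A minimal sketch of the same check in plain C, kept under #if 0
 * because it is illustrative only (all names below come straight from
 * this file): masking %esp with THREAD_SIZE - 1 yields the offset of
 * the stack pointer within the THREAD_SIZE-aligned thread stack. The
 * thread_info sits at the bottom of that region and the stack grows
 * down toward it, so an offset below sizeof(struct thread_info) +
 * STACK_WARN means less than STACK_WARN bytes of headroom remain.
 */
#if 0
static int check_stack_overflow_sketch(void)
{
        long sp = current_stack_pointer & (THREAD_SIZE - 1);

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}
#endif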

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE/sizeof(u32)];
};
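
/*
 * Added note: the union overlays a thread_info at the bottom of each
 * IRQ stack, mirroring the layout of ordinary process stacks. Since
 * current_thread_info() works by masking %esp with ~(THREAD_SIZE - 1),
 * code running on an IRQ stack finds this tinfo rather than the
 * interrupted task's.
 */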

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

/*
 * Run func() on the given stack: the xchgl installs the new stack
 * pointer while saving the old one in %ebx, the call is made through
 * %edi, and the saved pointer is restored afterwards. The "=b" output
 * keeps %ebx live across the call so the compiler cannot reuse it.
 */
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi \n"
                     "movl %%ebx,%%esp \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
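
/*
 * Typical usage, as done later in this file: point isp one past the
 * end of the irq_ctx area (the initial top of stack, since the stack
 * grows down) and hand it to call_on_stack(). Kept under #if 0 as an
 * illustrative sketch; "my_func" is a hypothetical placeholder, not a
 * function in this file.
 */
#if 0
static void call_on_stack_example(union irq_ctx *irqctx)
{
        /* initial top of stack: one past the end of the irq_ctx area */
        u32 *isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

        call_on_stack(my_func, isp); /* my_func: hypothetical void fn */
}
#endif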

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

        /*
         * Like call_on_stack(), but the handler takes arguments: the
         * kernel is built with -mregparm=3, so the two arguments of
         * desc->handle_irq(irq, desc) travel in %eax and %edx, which
         * is what the "0" (irq) and "1" (desc) constraints below load
         * before the call.
         */
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi \n"
                     "movl %%ebx,%%esp \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *) &hardirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
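
/*
 * Added note: hardirq_stack and softirq_stack are __page_aligned_bss
 * and, under CONFIG_4KSTACKS, THREAD_SIZE == PAGE_SIZE, so every
 * &hardirq_stack[cpu * THREAD_SIZE] chunk is naturally THREAD_SIZE
 * aligned. That alignment is what the %esp masking in
 * current_thread_info() and check_stack_overflow() relies on, e.g.:
 *
 *      cpu 0: hardirq_stack + 0x0000 ... + 0x0fff
 *      cpu 1: hardirq_stack + 0x1000 ... + 0x1fff
 */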

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
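
/*
 * Added note: this overrides the generic do_softirq() in
 * kernel/softirq.c (compiled out when the arch defines
 * __ARCH_HAS_DO_SOFTIRQ), so that with 4K stacks __do_softirq()
 * always runs on its own per-cpu softirq stack instead of whatever
 * is left of the task stack.
 */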

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}
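
/*
 * Added note: handle_irq() is invoked from the arch do_IRQ() entry
 * path once the vector has been translated to an irq number; a false
 * return tells the caller that no descriptor exists for this irq, so
 * it can report the unexpected vector.
 */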

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/genapic.h>

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                /* irq 2 is the cascade of the two i8259 PICs; leave it alone */
                if (irq == 2)
                        continue;

                affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient.  Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif