/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
30 | ||
0ebfff14 BH |
31 | #undef DEBUG |
32 | ||
1da177e4 LT |
33 | #include <linux/module.h> |
34 | #include <linux/threads.h> | |
35 | #include <linux/kernel_stat.h> | |
36 | #include <linux/signal.h> | |
37 | #include <linux/sched.h> | |
756e7104 | 38 | #include <linux/ptrace.h> |
1da177e4 LT |
39 | #include <linux/ioport.h> |
40 | #include <linux/interrupt.h> | |
41 | #include <linux/timex.h> | |
1da177e4 LT |
42 | #include <linux/init.h> |
43 | #include <linux/slab.h> | |
1da177e4 LT |
44 | #include <linux/delay.h> |
45 | #include <linux/irq.h> | |
756e7104 SR |
46 | #include <linux/seq_file.h> |
47 | #include <linux/cpumask.h> | |
1da177e4 LT |
48 | #include <linux/profile.h> |
49 | #include <linux/bitops.h> | |
0ebfff14 BH |
50 | #include <linux/list.h> |
51 | #include <linux/radix-tree.h> | |
52 | #include <linux/mutex.h> | |
53 | #include <linux/bootmem.h> | |
45934c47 | 54 | #include <linux/pci.h> |
1da177e4 LT |
55 | |
56 | #include <asm/uaccess.h> | |
57 | #include <asm/system.h> | |
58 | #include <asm/io.h> | |
59 | #include <asm/pgtable.h> | |
60 | #include <asm/irq.h> | |
61 | #include <asm/cache.h> | |
62 | #include <asm/prom.h> | |
63 | #include <asm/ptrace.h> | |
1da177e4 | 64 | #include <asm/machdep.h> |
0ebfff14 | 65 | #include <asm/udbg.h> |
d04c56f7 | 66 | #ifdef CONFIG_PPC64 |
1da177e4 | 67 | #include <asm/paca.h> |
d04c56f7 | 68 | #include <asm/firmware.h> |
0874dd40 | 69 | #include <asm/lv1call.h> |
756e7104 | 70 | #endif |

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
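
/*
 * The two helpers below read and write the hard_enabled / soft_enabled
 * bytes of the current cpu's paca through r13 in a single instruction,
 * so the access cannot be split around a preemption point (see the
 * comments in local_irq_restore() below).
 */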
static inline unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

void local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
                return;
        }

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
#endif /* CONFIG_PPC64 */
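
/*
 * show_interrupts() is the seq_file "show" routine behind /proc/interrupts:
 * one line per requested interrupt with per-cpu counts, the controller
 * (chip) name and the action names, plus the TAU/IPI/spurious summaries.
 */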
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        irq_desc_t *desc;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (desc->chip)
                        seq_printf(p, " %s ", desc->chip->typename);
                else
                        seq_puts(p, " None ");
                seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
                seq_printf(p, " %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for_each_online_cpu(j)
                                seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
                }
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
                /* should this be per processor send/receive? */
                seq_printf(p, "IPI (recv/sent): %10u/%u\n",
                           atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}
237 | ||
238 | #ifdef CONFIG_HOTPLUG_CPU | |
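/*
 * Used at cpu offline time: steer every interrupt whose affinity no longer
 * intersects "map" (the cpus staying online) onto cpus that remain up, then
 * let any already-latched interrupts drain before returning.
 */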
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
266 | ||
1da177e4 LT |
267 | void do_IRQ(struct pt_regs *regs) |
268 | { | |
7d12e780 | 269 | struct pt_regs *old_regs = set_irq_regs(regs); |
0ebfff14 | 270 | unsigned int irq; |
b709c083 SR |
271 | #ifdef CONFIG_IRQSTACKS |
272 | struct thread_info *curtp, *irqtp; | |
273 | #endif | |
1da177e4 | 274 | |
4b218e9b | 275 | irq_enter(); |
1da177e4 LT |
276 | |
277 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | |
278 | /* Debugging check for stack overflow: is there less than 2KB free? */ | |
279 | { | |
280 | long sp; | |
281 | ||
282 | sp = __get_SP() & (THREAD_SIZE-1); | |
283 | ||
284 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | |
285 | printk("do_IRQ: stack overflow: %ld\n", | |
286 | sp - sizeof(struct thread_info)); | |
287 | dump_stack(); | |
288 | } | |
289 | } | |
290 | #endif | |
291 | ||
756e7104 SR |
292 | /* |
293 | * Every platform is required to implement ppc_md.get_irq. | |
92d4dda3 | 294 | * This function will either return an irq number or NO_IRQ to |
756e7104 | 295 | * indicate there are no more pending. |
92d4dda3 JB |
296 | * The value NO_IRQ_IGNORE is for buggy hardware and means that this |
297 | * IRQ has already been handled. -- Tom | |
756e7104 | 298 | */ |
35a84c2f | 299 | irq = ppc_md.get_irq(); |
1da177e4 | 300 | |
0ebfff14 | 301 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { |
b709c083 SR |
302 | #ifdef CONFIG_IRQSTACKS |
303 | /* Switch to the irq stack to handle this */ | |
304 | curtp = current_thread_info(); | |
305 | irqtp = hardirq_ctx[smp_processor_id()]; | |
306 | if (curtp != irqtp) { | |
b9e5b4e6 BH |
307 | struct irq_desc *desc = irq_desc + irq; |
308 | void *handler = desc->handle_irq; | |
309 | if (handler == NULL) | |
310 | handler = &__do_IRQ; | |
b709c083 SR |
311 | irqtp->task = curtp->task; |
312 | irqtp->flags = 0; | |
7d12e780 | 313 | call_handle_irq(irq, desc, irqtp, handler); |
b709c083 SR |
314 | irqtp->task = NULL; |
315 | if (irqtp->flags) | |
316 | set_bits(irqtp->flags, &curtp->flags); | |
317 | } else | |
318 | #endif | |
7d12e780 | 319 | generic_handle_irq(irq); |
0ebfff14 | 320 | } else if (irq != NO_IRQ_IGNORE) |
e199500c SR |
321 | /* That's not SMP safe ... but who cares ? */ |
322 | ppc_spurious_interrupts++; | |
323 | ||
4b218e9b | 324 | irq_exit(); |
7d12e780 | 325 | set_irq_regs(old_regs); |
756e7104 | 326 | |
e199500c | 327 | #ifdef CONFIG_PPC_ISERIES |
b06a3183 SR |
328 | if (firmware_has_feature(FW_FEATURE_ISERIES) && |
329 | get_lppaca()->int_dword.fields.decr_int) { | |
3356bb9f DG |
330 | get_lppaca()->int_dword.fields.decr_int = 0; |
331 | /* Signal a fake decrementer interrupt */ | |
332 | timer_interrupt(regs); | |
e199500c SR |
333 | } |
334 | #endif | |
335 | } | |

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
        irq_ctx_init();
#endif
}

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = SOFTIRQ_OFFSET;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}
static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        call_do_softirq(irqtp);
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
399 | ||
1da177e4 | 400 | |
1da177e4 | 401 | /* |
0ebfff14 | 402 | * IRQ controller and virtual interrupts |
1da177e4 LT |
403 | */ |
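
/*
 * Under CONFIG_PPC_MERGE, every Linux interrupt number ("virq") is a slot in
 * irq_map[] recording which irq_host (interrupt controller) owns it and which
 * hardware interrupt number ("hwirq") it corresponds to on that controller.
 * Hosts provide a reverse map from hwirq to virq in one of several flavours:
 * legacy (virqs 1..NUM_ISA_INTERRUPTS-1 are fixed), linear (a simple array),
 * tree (a radix tree) or nomap (no reverse map at all).
 */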
404 | ||
0ebfff14 | 405 | #ifdef CONFIG_PPC_MERGE |
1da177e4 | 406 | |
0ebfff14 | 407 | static LIST_HEAD(irq_hosts); |
057b184a | 408 | static DEFINE_SPINLOCK(irq_big_lock); |
8ec8f2e8 BH |
409 | static DEFINE_PER_CPU(unsigned int, irq_radix_reader); |
410 | static unsigned int irq_radix_writer; | |
0ebfff14 BH |
411 | struct irq_map_entry irq_map[NR_IRQS]; |
412 | static unsigned int irq_virq_count = NR_IRQS; | |
413 | static struct irq_host *irq_default_host; | |
1da177e4 | 414 | |
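
/* Translate a virq back to the hardware irq number within its host */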
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

__init_refok struct irq_host *irq_alloc_host(struct device_node *of_node,
                                             unsigned int revmap_type,
                                             unsigned int revmap_arg,
                                             struct irq_host_ops *ops,
                                             irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        if (mem_init_done)
                host = kzalloc(size, GFP_KERNEL);
        else {
                host = alloc_bootmem(size);
                if (host)
                        memset(host, 0, size);
        }
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node;

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are early boot, we can't free the structure,
                         * too bad...
                         * this will be fixed once slab is made available early
                         * instead of the current cruft
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setups per revmap type */
        switch(revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* set us up as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        get_irq_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far
         * yet though...
         */
        spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}
558 | ||
8ec8f2e8 BH |
559 | /* radix tree not lockless safe ! we use a brlock-type mecanism |
560 | * for now, until we can use a lockless radix tree | |
561 | */ | |
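/*
 * Readers announce themselves in the per-cpu irq_radix_reader flag; a writer
 * takes irq_big_lock, sets irq_radix_writer and then spins until every cpu's
 * reader flag has dropped to zero.  A reader that observes a writer backs off
 * and briefly acquires irq_big_lock itself, so it neither starves the writer
 * nor sees the tree mid-update.
 */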
static void irq_radix_wrlock(unsigned long *flags)
{
        unsigned int cpu, ok;

        spin_lock_irqsave(&irq_big_lock, *flags);
        irq_radix_writer = 1;
        smp_mb();
        do {
                barrier();
                ok = 1;
                for_each_possible_cpu(cpu) {
                        if (per_cpu(irq_radix_reader, cpu)) {
                                ok = 0;
                                break;
                        }
                }
                if (!ok)
                        cpu_relax();
        } while(!ok);
}

static void irq_radix_wrunlock(unsigned long flags)
{
        smp_wmb();
        irq_radix_writer = 0;
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

static void irq_radix_rdlock(unsigned long *flags)
{
        local_irq_save(*flags);
        __get_cpu_var(irq_radix_reader) = 1;
        smp_mb();
        if (likely(irq_radix_writer == 0))
                return;
        __get_cpu_var(irq_radix_reader) = 0;
        smp_wmb();
        spin_lock(&irq_big_lock);
        __get_cpu_var(irq_radix_reader) = 1;
        spin_unlock(&irq_big_lock);
}

static void irq_radix_rdunlock(unsigned long flags)
{
        __get_cpu_var(irq_radix_reader) = 0;
        local_irq_restore(flags);
}
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                          irq_hw_number_t hwirq)
{
        /* Clear IRQ_NOREQUEST flag */
        get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                irq_free_virt(virq, 1);
                return -1;
        }

        return 0;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if a mapping already exists; if it does, call
         * host->ops->remap(), when available, to update the flags
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }
        pr_debug("irq: -> obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different from the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
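
/*
 * Resolve the index-th interrupt of a device node: of_irq_map_one() walks the
 * device tree interrupt specifier to its controller, and the result is turned
 * into a virq via irq_create_of_mapping().
 */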
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);

void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned long flags;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch(host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /* Check if radix tree allocated yet */
                if (host->revmap_data.tree.gfp_mask == 0)
                        break;
                irq_radix_wrlock(&flags);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                irq_radix_wrunlock(flags);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        get_irq_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while(i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        struct radix_tree_root *tree;
        struct irq_map_entry *ptr;
        unsigned int virq;
        unsigned long flags;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /* Check if the radix tree exists yet.  We test the value of
         * the gfp_mask for that.  Sneaky but it saves another int in the
         * structure.  If not, we fall back to slow mode.
         */
        tree = &host->revmap_data.tree;
        if (tree->gfp_mask == 0)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        irq_radix_rdlock(&flags);
        ptr = radix_tree_lookup(tree, hwirq);
        irq_radix_rdunlock(flags);

        /* Found it, return */
        if (ptr) {
                virq = ptr - irq_map;
                return virq;
        }

        /* If not there, try to insert it */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                irq_radix_wrlock(&flags);
                radix_tree_insert(tree, hwirq, &irq_map[virq]);
                irq_radix_wrunlock(flags);
        }
        return virq;
}
889 | ||
0ebfff14 BH |
890 | unsigned int irq_linear_revmap(struct irq_host *host, |
891 | irq_hw_number_t hwirq) | |
c6622f63 | 892 | { |
0ebfff14 | 893 | unsigned int *revmap; |
c6622f63 | 894 | |
0ebfff14 BH |
895 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); |
896 | ||
897 | /* Check revmap bounds */ | |
898 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | |
899 | return irq_find_mapping(host, hwirq); | |
900 | ||
901 | /* Check if revmap was allocated */ | |
902 | revmap = host->revmap_data.linear.revmap; | |
903 | if (unlikely(revmap == NULL)) | |
904 | return irq_find_mapping(host, hwirq); | |
905 | ||
906 | /* Fill up revmap with slow path if no mapping found */ | |
907 | if (unlikely(revmap[hwirq] == NO_IRQ)) | |
908 | revmap[hwirq] = irq_find_mapping(host, hwirq); | |
909 | ||
910 | return revmap[hwirq]; | |
c6622f63 PM |
911 | } |
912 | ||
0ebfff14 BH |
913 | unsigned int irq_alloc_virt(struct irq_host *host, |
914 | unsigned int count, | |
915 | unsigned int hint) | |
916 | { | |
917 | unsigned long flags; | |
918 | unsigned int i, j, found = NO_IRQ; | |
c6622f63 | 919 | |
0ebfff14 BH |
920 | if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) |
921 | return NO_IRQ; | |
922 | ||
923 | spin_lock_irqsave(&irq_big_lock, flags); | |
924 | ||
925 | /* Use hint for 1 interrupt if any */ | |
926 | if (count == 1 && hint >= NUM_ISA_INTERRUPTS && | |
927 | hint < irq_virq_count && irq_map[hint].host == NULL) { | |
928 | found = hint; | |
929 | goto hint_found; | |
930 | } | |
931 | ||
932 | /* Look for count consecutive numbers in the allocatable | |
933 | * (non-legacy) space | |
934 | */ | |
e1251465 ME |
935 | for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { |
936 | if (irq_map[i].host != NULL) | |
937 | j = 0; | |
938 | else | |
939 | j++; | |
940 | ||
941 | if (j == count) { | |
942 | found = i - count + 1; | |
943 | break; | |
944 | } | |
0ebfff14 BH |
945 | } |
946 | if (found == NO_IRQ) { | |
947 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
948 | return NO_IRQ; | |
949 | } | |
950 | hint_found: | |
951 | for (i = found; i < (found + count); i++) { | |
952 | irq_map[i].hwirq = host->inval_irq; | |
953 | smp_wmb(); | |
954 | irq_map[i].host = host; | |
955 | } | |
956 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
957 | return found; | |
958 | } | |
959 | ||
960 | void irq_free_virt(unsigned int virq, unsigned int count) | |
1da177e4 LT |
961 | { |
962 | unsigned long flags; | |
0ebfff14 | 963 | unsigned int i; |
1da177e4 | 964 | |
0ebfff14 BH |
965 | WARN_ON (virq < NUM_ISA_INTERRUPTS); |
966 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | |
1da177e4 | 967 | |
0ebfff14 BH |
968 | spin_lock_irqsave(&irq_big_lock, flags); |
969 | for (i = virq; i < (virq + count); i++) { | |
970 | struct irq_host *host; | |
1da177e4 | 971 | |
0ebfff14 BH |
972 | if (i < NUM_ISA_INTERRUPTS || |
973 | (virq + count) > irq_virq_count) | |
974 | continue; | |
1da177e4 | 975 | |
0ebfff14 BH |
976 | host = irq_map[i].host; |
977 | irq_map[i].hwirq = host->inval_irq; | |
978 | smp_wmb(); | |
979 | irq_map[i].host = NULL; | |
980 | } | |
981 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
1da177e4 | 982 | } |

void irq_early_init(void)
{
        unsigned int i;

        for (i = 0; i < NR_IRQS; i++)
                get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned long flags;

        irq_radix_wrlock(&flags);
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
        }
        irq_radix_wrunlock(flags);

        return 0;
}
arch_initcall(irq_late_init);

#endif /* CONFIG_PPC_MERGE */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */