sparseirq: fix desc->lock init
kernel/irq/handle.c (linux-2.6-block.git)
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ
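/*
 * Template descriptor: init_one_irq_desc() copies this into each
 * newly allocated irq_desc before fixing up the per-irq fields.
 */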
static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth	    = 1,
	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
	.affinity   = CPU_MASK_ALL
#endif
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	unsigned long bytes;
	char *ptr;
	int node;

	/* Compute how many bytes we need per irq and allocate them */
	bytes = nr * sizeof(unsigned int);

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);

	if (ptr)
		desc->kstat_irqs = (unsigned int *)ptr;
}

void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
{
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

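	/*
	 * The memcpy above copied irq_desc_init's lock as well, so
	 * re-initialize desc->lock to give this descriptor a lock in a
	 * clean, unlocked state (the "fix desc->lock init" of this commit).
	 */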
	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->cpu = cpu;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, cpu, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

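/*
 * With CONFIG_SPARSE_IRQ descriptors are allocated on demand;
 * irq_desc_ptrs[] maps irq numbers to their (possibly NULL) descriptors.
 */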
struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
		.affinity   = CPU_MASK_ALL
#endif
	}
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

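/*
 * Wire up the statically allocated legacy descriptors and clear the
 * remaining irq_desc_ptrs slots; arch code gets the final callback.
 */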
void __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy[i];

		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < NR_IRQS; i++)
		irq_desc_ptrs[i] = NULL;

	arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= NR_IRQS) {
		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
			irq, NR_IRQS);
		WARN_ON(1);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

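	/* GFP_ATOMIC: we allocate while holding sparse_irq_lock */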
	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
		irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
		.affinity   = CPU_MASK_ALL
#endif
	}
};

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

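	/*
	 * Handlers that did not ask to run with interrupts disabled
	 * (IRQF_DISABLED) get local interrupts re-enabled while the
	 * action chain runs.
	 */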
	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

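	/* Handlers flagged IRQF_SAMPLE_RANDOM feed the entropy pool */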
	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all in one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack) {
			desc->chip->ack(irq);
			/* get new one */
			desc = irq_remap_to_desc(irq, desc);
		}
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

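	/*
	 * Ordinary (non per-CPU) interrupts: desc->lock protects the
	 * status juggling and action handling below.
	 */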
	spin_lock(&desc->lock);
	if (desc->chip->ack) {
		desc->chip->ack(irq);
		desc = irq_remap_to_desc(irq, desc);
	}
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

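/*
 * Put every irq descriptor lock into the single irq_desc_lock_class
 * declared above, so lockdep treats them as one lock-class.
 */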
void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

#ifdef CONFIG_SPARSE_IRQ
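/*
 * With sparse irqs the descriptor may not have been allocated yet,
 * so report a count of zero for missing descriptors.
 */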
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);