/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>

#ifdef CONFIG_SMP
#undef  GxICR
#define GxICR(X) CROSS_GxICR(X, irq_affinity_online[X])

#undef  GxICR_u8
#define GxICR_u8(X) CROSS_GxICR_u8(X, irq_affinity_online[X])
#endif /* CONFIG_SMP */

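/* on SMP, GxICR() accesses above are implicitly redirected to the ICR of
 * whichever CPU is currently designated to receive the IRQ (as recorded in
 * irq_affinity_online[] below) */

/* per-CPU image of the EPSW interrupt-enable/priority bits, presumably
 * consumed by the local_irq_enable() fast path; do_IRQ() below adjusts it
 * so that re-enabling interrupts inside a handler preserves the priority
 * setting that was current at the time of the interrupt */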
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
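/* which CPU each IRQ is currently routed to, plus a bitmap of IRQs with a
 * re-routing request pending; the actual switch is performed lazily, by
 * the mask_ack and unmask operations below */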
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif	/* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
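/* an IRQ is acknowledged by writing GxICR_DETECT to clear the detection
 * latch in its ICR; the dummy read-back apparently serves to flush the
 * write to the ICR before interrupts are re-enabled (the same read-back
 * pattern recurs throughout this file) */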
static void mn10300_cpupic_ack(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

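/* with interrupts disabled, keep only the @mask bits of an IRQ's ICR and
 * OR in the @set bits, the read-back again flushing the write */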
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

static void mn10300_cpupic_mask(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
}

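/* on SMP, a pending affinity change (flagged in irq_affinity_request by
 * mn10300_cpupic_setaffinity() below) is applied here rather than at
 * set_affinity() time: the channel is masked on the old CPU, a new target
 * is picked from the affinity mask and the IRQ is re-routed through the
 * GxICR() redirection */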
static void mn10300_cpupic_mask_ack(unsigned int irq)
{
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
		GxICR(irq) = (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = GxICR(irq);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

static void mn10300_cpupic_unmask(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}

static void mn10300_cpupic_unmask_clear(unsigned int irq)
{
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		tmp = GxICR(irq);

		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SMP
static int
mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* the IPIs and the on-chip serial/timer IRQs are tied to a
	 * particular CPU and may not be retargeted */
	switch (irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case GDB_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */

#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */

#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name		= "cpu_l",
	.disable	= mn10300_cpupic_mask,
	.enable		= mn10300_cpupic_unmask_clear,
	.ack		= NULL,
	.mask		= mn10300_cpupic_mask,
	.mask_ack	= mn10300_cpupic_mask,
	.unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.set_affinity	= mn10300_cpupic_setaffinity,
#endif /* CONFIG_SMP */
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name		= "cpu_e",
	.disable	= mn10300_cpupic_mask,
	.enable		= mn10300_cpupic_unmask,
	.ack		= mn10300_cpupic_ack,
	.mask		= mn10300_cpupic_mask,
	.mask_ack	= mn10300_cpupic_mask_ack,
	.unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.set_affinity	= mn10300_cpupic_setaffinity,
#endif /* CONFIG_SMP */
};

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}

void mn10300_intc_set_level(unsigned int irq, unsigned int level)
{
	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
}

void mn10300_intc_clear(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
}

void mn10300_intc_set(unsigned int irq)
{
	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
}

void mn10300_intc_enable(unsigned int irq)
{
	mn10300_cpupic_unmask(irq);
}

void mn10300_intc_disable(unsigned int irq)
{
	mn10300_cpupic_mask(irq);
}

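/* minimal usage sketch for the helpers above (hypothetical driver set-up
 * code; TM8IRQ and the level number are purely illustrative):
 *
 *	mn10300_intc_set_level(TM8IRQ, 4);
 *	mn10300_intc_clear(TM8IRQ);
 *	mn10300_intc_enable(TM8IRQ);
 *
 * i.e. set the priority level, discard any latched request, then unmask
 */
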
/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - see Documentation/mn10300/features.txt
 */
void mn10300_set_lateack_irq_type(int irq)
{
	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_desc[irq].chip == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

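	/* check that we aren't dangerously close to the bottom of this
	 * thread's kernel stack (fewer than STACK_WARN bytes left) */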
	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

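		/* dispatch the handler with interrupts restored to the
		 * normal disabled level; IAGR holds the IRQ's group number
		 * premultiplied by 4 (seemingly the vector stride), hence
		 * the >> 2 below */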
		local_irq_restore(irq_disabled_epsw);

		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
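/* sample of the output produced (the values shown are illustrative only):
 *
 *            CPU0       CPU1
 *   9:        120          0          cpu_e.3  ttySM0
 * ERR:          0
 */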
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j, cpu;
	struct irqaction *action;
	unsigned long flags;

	switch (i) {
		/* display column title bar naming CPUs */
	case 0:
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
		break;

		/* display information rows, one per IRQ */
	case 1 ... NR_IRQS - 1:
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

		action = irq_desc[i].action;
		if (action) {
			seq_printf(p, "%3d: ", i);
			for_each_present_cpu(cpu)
				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
			seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
				   (GxICR(i) & GxICR_LEVEL) >>
				   GxICR_LEVEL_SHIFT);
			seq_printf(p, "  %s", action->name);

			for (action = action->next;
			     action;
			     action = action->next)
				seq_printf(p, ", %s", action->name);

			seq_putc(p, '\n');
		}

		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		break;

		/* polish off with NMI and error counters */
	case NR_IRQS:
#ifdef CONFIG_MN10300_WD_TIMER
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#endif

		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;
	}

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
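/*
 * migrate IRQs away from a CPU that is going offline
 * - for each IRQ currently routed to this CPU: mask it in this CPU's ICR,
 *   pick a new target from the IRQ's affinity mask, enable the channel in
 *   the new CPU's ICR and replay any request that was latched whilst the
 *   channel was masked
 */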
void migrate_irqs(void)
{
	irq_desc_t *desc;
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		desc = irq_desc + irq;

		if (desc->status & IRQ_PER_CPU)
			continue;

		if (cpu_isset(self, *irq_desc[irq].affinity) &&
		    !cpus_intersects(*irq_desc[irq].affinity, cpu_online_map)) {
			int cpu_id;
			cpu_id = first_cpu(cpu_online_map);
			cpu_set(cpu_id, *irq_desc[irq].affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		flags = arch_local_cli_save();
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = CROSS_GxICR(irq, self);
			CROSS_GxICR(irq, self) = x & GxICR_LEVEL;
			tmp = CROSS_GxICR(irq, self);

			new = any_online_cpu(*irq_desc[irq].affinity);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (CROSS_GxICR(irq, self) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */