/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
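
/* per-CPU EPSW value restored by local_irq_enable(); it carries the
 * interrupt-enable flag and the current interrupt-mask (priority) level,
 * which do_IRQ() adjusts so that re-enabling interrupts inside a handler
 * cannot drop the priority below that of the interrupt being serviced */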
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
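
/* SMP affinity bookkeeping: irq_affinity_online[] records the CPU that each
 * IRQ is currently routed to; irq_affinity_request[] is a bitmap of IRQs with
 * a rerouting request pending (see mn10300_cpupic_setaffinity() below) */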
#ifdef CONFIG_SMP
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif /* CONFIG_SMP */

atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
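
/* clear the detection latch of an IRQ channel; the byte-wide store touches
 * only the detect/request half of the ICR, and the register is read back to
 * flush the write to the interrupt controller */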
static void mn10300_cpupic_ack(unsigned int irq)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}
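
/* read-modify-write helper for an IRQ channel's ICR: keep only the bits in
 * @mask, OR in the bits in @set, and read the register back to flush the
 * change */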
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}

static void mn10300_cpupic_mask(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
}
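
/* mask the channel and clear its detection latch; on SMP, a deferred affinity
 * change flagged in irq_affinity_request is applied here by re-routing the
 * channel to a CPU in the new affinity mask */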
static void mn10300_cpupic_mask_ack(unsigned int irq)
{
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		irq_affinity_online[irq] =
			any_online_cpu(*irq_desc[irq].affinity);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

static void mn10300_cpupic_unmask(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
}

static void mn10300_cpupic_unmask_clear(unsigned int irq)
{
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		tmp = GxICR(irq);

		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
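
/* on SMP, affinity changes are deferred: the request is merely flagged in
 * irq_affinity_request and acted upon the next time the IRQ is masked/acked
 * or unmasked; IRQs that must not move (the IPIs and the on-chip serial port
 * IRQs) are refused */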
#ifdef CONFIG_SMP
static int
mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* check irq no */
	switch (irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */

#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */

#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se. It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name		= "cpu_l",
	.disable	= mn10300_cpupic_mask,
	.enable		= mn10300_cpupic_unmask_clear,
	.ack		= NULL,
	.mask		= mn10300_cpupic_mask,
	.mask_ack	= mn10300_cpupic_mask,
	.unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name		= "cpu_e",
	.disable	= mn10300_cpupic_mask,
	.enable		= mn10300_cpupic_unmask,
	.ack		= mn10300_cpupic_ack,
	.mask		= mn10300_cpupic_mask,
	.mask_ack	= mn10300_cpupic_mask_ack,
	.unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.set_affinity	= mn10300_cpupic_setaffinity,
#endif
};

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}
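
/* external interfaces onto the on-chip interrupt controller, wrapping the
 * ICR helpers above */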

void mn10300_intc_set_level(unsigned int irq, unsigned int level)
{
	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
}

void mn10300_intc_clear(unsigned int irq)
{
	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
}

void mn10300_intc_set(unsigned int irq)
{
	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
}

void mn10300_intc_enable(unsigned int irq)
{
	mn10300_cpupic_unmask(irq);
}

void mn10300_intc_disable(unsigned int irq)
{
	mn10300_cpupic_mask(irq);
}

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - see Documentation/mn10300/features.txt
 */
void mn10300_set_lateack_irq_type(int irq)
{
	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_desc[irq].chip == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	/* check for kernel stack overflow */
	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);
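
		/* the group number read from IAGR is a multiple of 4 (it
		 * serves as an offset into the interrupt vector table), so
		 * shift it down to get the Linux IRQ number */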
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}

/*
 * Display interrupt management information through /proc/interrupts
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j, cpu;
	struct irqaction *action;
	unsigned long flags;

	switch (i) {
		/* display column title bar naming CPUs */
	case 0:
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
		break;

		/* display information rows, one per active IRQ */
	case 1 ... NR_IRQS - 1:
		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

		action = irq_desc[i].action;
		if (action) {
			seq_printf(p, "%3d: ", i);
			for_each_present_cpu(cpu)
				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
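
			/* on-chip PIC channels also have their current
			 * interrupt priority level displayed after the chip
			 * name */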
			if (i < NR_CPU_IRQS)
				seq_printf(p, " %14s.%u",
					   irq_desc[i].chip->name,
					   (GxICR(i) & GxICR_LEVEL) >>
					   GxICR_LEVEL_SHIFT);
			else
				seq_printf(p, " %14s",
					   irq_desc[i].chip->name);

			seq_printf(p, "  %s", action->name);

			for (action = action->next;
			     action;
			     action = action->next)
				seq_printf(p, ", %s", action->name);

			seq_putc(p, '\n');
		}

		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		break;

		/* polish off with NMI and error counters */
	case NR_IRQS:
#ifdef CONFIG_MN10300_WD_TIMER
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#endif

		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;
	}

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	irq_desc_t *desc;
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		desc = irq_desc + irq;

		/* IRQ_PER_CPU is one flag among several in the status word,
		 * so test the bit rather than comparing the whole word */
		if (desc->status & IRQ_PER_CPU)
			continue;

		if (cpu_isset(self, irq_desc[irq].affinity) &&
		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
			int cpu_id;
			cpu_id = first_cpu(cpu_online_map);
			cpu_set(cpu_id, irq_desc[irq].affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;
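
			/* mask the channel on this (dying) CPU, re-route it
			 * to a surviving CPU with the detection latch
			 * cleared, then replay any interrupt request latched
			 * in the meantime */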
			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = any_online_cpu(irq_desc[irq].affinity);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST) {
				x |= GxICR_REQUEST | GxICR_DETECT;
				CROSS_GxICR(irq, new) = x;
				tmp = CROSS_GxICR(irq, new);
			}
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */