/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL<<(CPU_IRQ_MAX - irq))
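/* Worked example of the mapping above (assuming, as on parisc, that
 * CPU_IRQ_MAX == CPU_IRQ_BASE + (BITS_PER_LONG - 1)): EIEM_MASK(CPU_IRQ_BASE)
 * is the most significant bit of the word and EIEM_MASK(CPU_IRQ_MAX) is
 * 1UL<<0.  Lower IRQ numbers therefore map to higher bit positions, matching
 * the "big endian" EIRR/EIEM bit numbering described below and reversed
 * again in eirr_to_irq(). */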
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}
static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}
void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it: writing the bit back to the EIRR (CR 23)
	 * clears the pending state */
	mtctl(mask, 23);
}
void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
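/* Note on the ack/eoi pair above: the EIEM value actually written to the
 * hardware is always the AND of the global mask state (cpu_eiem) and this
 * CPU's local_ack_eiem.  ack() clears the local bit so the same interrupt
 * cannot re-trigger on this CPU while it is being serviced; eoi() restores
 * the bit once handling is complete. */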
#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(d->affinity, dest);

	return 0;
}
#endif
static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger	= NULL,
};
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)		(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, " Interrupt stack usage\n");
	seq_printf(p, "%*s: ", prec, "ISC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter);
	seq_puts(p, " Interrupt stack usage counter\n");
# endif
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, " Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, " Function call interrupts\n");
#endif
	seq_printf(p, "%*s: ", prec, "UAH");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
	seq_puts(p, " Unaligned access handler traps\n");
	seq_printf(p, "%*s: ", prec, "FPA");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
	seq_puts(p, " Floating point assist traps\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, " TLB shootdowns\n");
	return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, " CPU%d", j);
#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;

		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif
		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " %s", action->name);
		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];
			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist)
					avg += hist;
				else
					break;
				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}
			if (k)
				avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min, avg, max);
		}
#endif
		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
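/* A minimal usage sketch of the txn_*() interfaces defined below (the caller
 * and register programming are illustrative, not taken from this file):
 *
 *	int virq = txn_alloc_irq(8);		// virtual IRQ reachable with 8 ID bits
 *	unsigned long addr = txn_alloc_addr(virq);	// processor HPA to write to
 *	unsigned int data = txn_alloc_data(virq);	// EIRR bit number to deliver
 *	// program the device's interrupt transaction with (addr, data), then
 *	// hook up a handler, e.g. via cpu_claim_irq()/request_irq().
 */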
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}
int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}
/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
 * V-class (EPIC):          6 bits
 * N/L/A-class (iosapic):   8 bits
 * PCI 2.2 MSI:            16 bits
 * Some PCI devices:       32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
 * o PA 2.0 wide mode                   6-bits (per processor)
 * o IA64                               8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has. The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}
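/* Worked example of the bits_wide limit described above: with bits_wide == 5
 * (a legacy GSC/NIO device), only offsets 1..31 above CPU_IRQ_BASE are
 * acceptable, so any IRQ already claimed or outside that window is skipped
 * and -1 is returned once the range is exhausted. */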
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(d->affinity, cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* skip CPUs that are offline or have no transaction address */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}
unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}
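/* Map a pending EIRR value back to a Linux IRQ number.  EIRR "bit 0" is the
 * MSB of the register, so the lowest-numbered pending EIRR bit is found with
 * fls_long(); e.g. if only the MSB is set, fls_long() returns BITS_PER_LONG
 * and the result is TIMER_IRQ. */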
static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
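/* Defaults to 1: an imminent stack overflow detected below panics the kernel
 * instead of only logging.  On kernels that wire it up, this is the
 * panic_on_stackoverflow sysctl. */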
int sysctl_panic_on_stackoverflow = 1;
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN	(256*6)

	/* Our stack starts directly behind the thread_info struct. */
	unsigned long stack_start = (unsigned long) current_thread_info();
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow. We will only check the kernel stack. */
	if (regs->sr[7])
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
#ifdef CONFIG_IRQSTACKS
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock)
	};
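/* call_on_stack() (implemented in assembly in entry.S) switches the stack
 * pointer to the given IRQ stack, calls func(param1) there, and switches
 * back; execute_on_irq_stack() below only arbitrates who may do so. */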
static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	raw_spinlock_t *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock),
			 64); /* align for stack frame usage */

	/* We may be called recursive. If we are already using the irq stack,
	 * just continue to use it. Use spinlocks to serialize
	 * the irq stack usage.
	 */
	irq_stack_in_use = &union_ptr->lock;
	if (!raw_spin_trylock(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	__inc_irq_stat(irq_stack_counter);

	/* free up irq stack usage. */
	do_raw_spin_unlock(irq_stack_in_use);
}
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		execute_on_irq_stack(__do_softirq, 0);

	local_irq_restore(flags);
}
#endif /* CONFIG_IRQSTACKS */
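/* External-interrupt dispatch, summarised: read the pending bits from the
 * EIRR (CR 23), mask them with cpu_eiem (globally enabled sources) and this
 * CPU's local_ack_eiem (sources not currently being serviced here), convert
 * the most significant pending bit to an IRQ via eirr_to_irq(), then either
 * handle it (on the IRQ stack when CONFIG_IRQSTACKS is enabled) or, when the
 * interrupt is marked per-CPU but this CPU is not in its affinity mask,
 * forward it to the first CPU of that mask by writing the IRQ number to that
 * CPU's HPA. */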
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_PERCPU,
};
#endif
static void claim_cpu_irqs(void)
{
	int i;
	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	setup_irq(IPI_IRQ, &ipi_action);
#endif
}
void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}