x86: perf_counter cleanup
linux-2.6-block.git: arch/x86/kernel/irq.c
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>

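/*
 * Bad-IRQ bookkeeping; this count is reported as the ERR: row in
 * /proc/interrupts and folded into arch_irq_stat() below.
 */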
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
#endif
}

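/* Shorthand for a given CPU's per-CPU irq_cpustat_t (irq_stat). */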
#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing:
 */
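/*
 * Print the arch-specific summary rows (NMI, LOC, RES, ...) that follow
 * the numbered IRQ lines; show_interrupts() calls this once its
 * iterator reaches nr_irqs.
 */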
static int show_other_interrupts(struct seq_file *p)
{
        int j;

        seq_printf(p, "NMI: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "LOC: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");
        seq_printf(p, "CNT: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_printf(p, "  Performance counter interrupts\n");
#endif
#ifdef CONFIG_SMP
        seq_printf(p, "RES: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_printf(p, "  Rescheduling interrupts\n");
        seq_printf(p, "CAL: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_printf(p, "  Function call interrupts\n");
        seq_printf(p, "TLB: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "TRM: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
# ifdef CONFIG_X86_64
        seq_printf(p, "THR: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "SPU: ");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
#endif
        seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        return 0;
}

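/*
 * seq_file show method for /proc/interrupts: positions 0..nr_irqs-1
 * print one row per IRQ, and position nr_irqs prints the summary rows
 * above.  For illustration, a per-IRQ row on a two-CPU box resembles
 * (values made up):
 *
 *   0:     123456          0   IO-APIC-edge      timer
 */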
int show_interrupts(struct seq_file *p, void *v)
{
        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > nr_irqs)
                return 0;

        if (i == nr_irqs)
                return show_other_interrupts(p);

        /* print header */
        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        spin_lock_irqsave(&desc->lock, flags);
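        /*
         * OR the per-CPU counts together: a nonzero result means this
         * IRQ has fired at least once, so its row is printed even if
         * no handler is currently installed.
         */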
#ifndef CONFIG_SMP
        any_count = kstat_irqs(i);
#else
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
#endif
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
        seq_printf(p, "%10u ", kstat_irqs(i));
#else
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
        seq_printf(p, " %8s", desc->chip->name);
        seq_printf(p, "-%-8s", desc->name);

        if (action) {
                seq_printf(p, "  %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/*
 * /proc/stat helpers
 */
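/*
 * Per-CPU total of the counters above; the generic code folds this into
 * the "intr" total in /proc/stat.
 */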
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->apic_perf_irqs;
#endif
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
        sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_64
        sum += irq_stats(cpu)->irq_threshold_count;
# endif
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->irq_spurious_count;
#endif
        return sum;
}

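/*
 * Error counts not attributable to a single CPU; also folded into
 * /proc/stat's "intr" total.
 */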
u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
        sum += atomic_read(&irq_mis_count);
#endif
        return sum;
}


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
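/*
 * __irq_entry (from linux/ftrace.h) places do_IRQ in .irqentry.text so
 * the function-graph tracer can recognize IRQ entry points.
 */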
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* high bit used in ret_from_ code  */
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        exit_idle();
        irq_enter();

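        /*
         * Translate the hardware vector to a Linux IRQ number via the
         * per-CPU vector_irq table; unassigned entries hold -1, which
         * handle_irq() below rejects.
         */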
        irq = __get_cpu_var(vector_irq)[vector];

        if (!handle_irq(irq, regs)) {
#ifdef CONFIG_X86_64
                if (!disable_apic)
                        ack_APIC_irq();
#endif

                if (printk_ratelimit())
                        printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
                               __func__, smp_processor_id(), vector, irq);
        }

        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);