/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized with a single sentinel interrupt that the
 * kernel doesn't actually use; during kernel init, interrupts are
 * added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
static DEFINE_SPINLOCK(available_irqs_lock);
#endif

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
				 "stack overflow: %ld\n",
				 sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	if (__get_cpu_var(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
	mask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
	mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
	unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
		clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
	.name = "tile_irq_chip",
	.irq_enable = tile_irq_chip_enable,
	.irq_disable = tile_irq_chip_disable,
	.irq_ack = tile_irq_chip_ack,
	.irq_eoi = tile_irq_chip_eoi,
	.irq_mask = tile_irq_chip_mask,
	.irq_unmask = tile_irq_chip_unmask,
};

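/* Boot-time interrupt initialization; brings up the IPI subsystem. */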
void __init init_IRQ(void)
{
	ipi_init();
}

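/* Per-cpu interrupt setup, run as each tile is brought online. */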
void __cpuinit setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	arch_local_irq_unmask(INT_IPI_K);
#endif
}

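/*
 * Bind an irq to the tile irq_chip and choose its flow handler based
 * on the given tile_irq_type.  Interrupts registered with
 * TILE_IRQ_HW_CLEAR are flagged so that the ack() hook leaves the
 * clearing to hardware.
 */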
void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
	/*
	 * We use handle_level_irq() by default because the pending
	 * interrupt vector (whether modeled by the HV on TILE64 and
	 * TILEPro or implemented in hardware on TILE-Gx) has
	 * level-style semantics for each bit.  An interrupt fires
	 * whenever a bit is high, not just at edges.
	 */
	irq_flow_handler_t handle = handle_level_irq;
	if (tile_irq_type == TILE_IRQ_PERCPU)
		handle = handle_percpu_irq;
	irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

	/*
	 * Flag interrupts that are hardware-cleared so that ack()
	 * won't clear them.
	 */
	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
		irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);

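/* Report an interrupt that arrived at an unexpected vector. */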
void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * Generic, controller-independent functions:
 */

#if CHIP_HAS_IPI()
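/*
 * Allocate a free irq number from the available_irqs pool (or return
 * -ENOMEM if the pool is empty) and initialize its descriptor.
 */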
int create_irq(void)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&available_irqs_lock, flags);
	if (available_irqs == 0)
		result = -ENOMEM;
	else {
		result = __ffs(available_irqs);
		available_irqs &= ~(1UL << result);
		dynamic_irq_init(result);
	}
	spin_unlock_irqrestore(&available_irqs_lock, flags);

	return result;
}
EXPORT_SYMBOL(create_irq);

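/* Release an irq number previously obtained from create_irq(). */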
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&available_irqs_lock, flags);
	available_irqs |= (1UL << irq);
	dynamic_irq_cleanup(irq);
	spin_unlock_irqrestore(&available_irqs_lock, flags);
}
EXPORT_SYMBOL(destroy_irq);
#endif