Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
8e4bebe0 HZ |
2 | /* |
3 | * Hisilicon HiP04 INTC | |
4 | * | |
5 | * Copyright (C) 2002-2014 ARM Limited. | |
6 | * Copyright (c) 2013-2014 Hisilicon Ltd. | |
7 | * Copyright (c) 2013-2014 Linaro Ltd. | |
8 | * | |
8e4bebe0 HZ |
9 | * Interrupt architecture for the HIP04 INTC: |
10 | * | |
11 | * o There is one Interrupt Distributor, which receives interrupts | |
12 | * from system devices and sends them to the Interrupt Controllers. | |
13 | * | |
14 | * o There is one CPU Interface per CPU, which sends interrupts sent | |
15 | * by the Distributor, and interrupts generated locally, to the | |
16 | * associated CPU. The base address of the CPU interface is usually | |
17 | * aliased so that the same address points to different chips depending | |
18 | * on the CPU it is accessed from. | |
19 | * | |
20 | * Note that IRQs 0-31 are special - they are local to each CPU. | |
21 | * As such, the enable set/clear, pending set/clear and active bit | |
22 | * registers are banked per-cpu for these sources. | |
23 | */ | |
24 | ||
25 | #include <linux/init.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/err.h> | |
28 | #include <linux/module.h> | |
29 | #include <linux/list.h> | |
30 | #include <linux/smp.h> | |
31 | #include <linux/cpu.h> | |
32 | #include <linux/cpu_pm.h> | |
33 | #include <linux/cpumask.h> | |
34 | #include <linux/io.h> | |
35 | #include <linux/of.h> | |
36 | #include <linux/of_address.h> | |
37 | #include <linux/of_irq.h> | |
38 | #include <linux/irqdomain.h> | |
39 | #include <linux/interrupt.h> | |
40 | #include <linux/slab.h> | |
41a83e06 | 41 | #include <linux/irqchip.h> |
8e4bebe0 HZ |
42 | #include <linux/irqchip/arm-gic.h> |
43 | ||
44 | #include <asm/irq.h> | |
45 | #include <asm/exception.h> | |
46 | #include <asm/smp_plat.h> | |
47 | ||
48 | #include "irq-gic-common.h" | |
8e4bebe0 HZ |
49 | |
#define HIP04_MAX_IRQS		510

/* Per-controller state: register bases plus the irq domain built on them. */
struct hip04_irq_data {
	void __iomem *dist_base;	/* distributor register block */
	void __iomem *cpu_base;		/* (banked) CPU interface register block */
	struct irq_domain *domain;
	unsigned int nr_irqs;		/* number of hwirqs actually supported */
};

/* Serializes read-modify-write sequences on distributor registers. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_HIP04_CPU_IF	16
static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;

static struct hip04_irq_data hip04_data __read_mostly;
70 | ||
71 | static inline void __iomem *hip04_dist_base(struct irq_data *d) | |
72 | { | |
73 | struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d); | |
74 | return hip04_data->dist_base; | |
75 | } | |
76 | ||
77 | static inline void __iomem *hip04_cpu_base(struct irq_data *d) | |
78 | { | |
79 | struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d); | |
80 | return hip04_data->cpu_base; | |
81 | } | |
82 | ||
/* Hardware interrupt number backing @d. */
static inline unsigned int hip04_irq(struct irq_data *d)
{
	return d->hwirq;
}
87 | ||
88 | /* | |
89 | * Routines to acknowledge, disable and enable interrupts | |
90 | */ | |
91 | static void hip04_mask_irq(struct irq_data *d) | |
92 | { | |
93 | u32 mask = 1 << (hip04_irq(d) % 32); | |
94 | ||
95 | raw_spin_lock(&irq_controller_lock); | |
96 | writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR + | |
97 | (hip04_irq(d) / 32) * 4); | |
98 | raw_spin_unlock(&irq_controller_lock); | |
99 | } | |
100 | ||
101 | static void hip04_unmask_irq(struct irq_data *d) | |
102 | { | |
103 | u32 mask = 1 << (hip04_irq(d) % 32); | |
104 | ||
105 | raw_spin_lock(&irq_controller_lock); | |
106 | writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET + | |
107 | (hip04_irq(d) / 32) * 4); | |
108 | raw_spin_unlock(&irq_controller_lock); | |
109 | } | |
110 | ||
/* Signal end-of-interrupt for @d to this CPU's interface. */
static void hip04_eoi_irq(struct irq_data *d)
{
	writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
}
115 | ||
/*
 * Configure the trigger type of @d.
 *
 * SGIs (hwirq < 16) have fixed configuration and are rejected.
 * SPIs (hwirq >= 32) only accept level-high or rising-edge.
 * For PPIs (16..31) a failed configuration is reported but tolerated.
 */
static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = hip04_dist_base(d);
	unsigned int irq = hip04_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && irq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
		ret = 0;
	}

	raw_spin_unlock(&irq_controller_lock);

	return ret;
}
144 | ||
#ifdef CONFIG_SMP
/*
 * Route @d to one CPU chosen from @mask_val by rewriting its 16-bit
 * field in the distributor target register (two hwirqs per 32-bit word).
 */
static int hip04_irq_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	unsigned int irq = hip04_irq(d);
	unsigned int shift = (irq % 2) * 16;
	unsigned int cpu;
	void __iomem *reg;
	u32 val;

	/* honour online-mask unless the caller forces a target */
	cpu = force ? cpumask_first(mask_val)
		    : cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((irq * 2) & ~3);
	val = readl_relaxed(reg) & ~(0xffff << shift);
	val |= hip04_cpu_map[cpu] << shift;
	writel_relaxed(val, reg);
	raw_spin_unlock(&irq_controller_lock);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
#endif
175 | ||
/*
 * Top-level interrupt entry: acknowledge interrupts from the CPU
 * interface and dispatch until no valid ID remains.
 *
 * IDs 16..HIP04_MAX_IRQS go through the irq domain; IDs 0-15 are SGIs
 * (IPIs) and are EOI'd here directly.  Any other ID (presumably the
 * spurious indication) terminates the loop.
 */
static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	void __iomem *cpu_base = hip04_data.cpu_base;

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
			handle_domain_irq(hip04_data.domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			/* EOI the full IAR value, keeping the source-CPU bits */
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
199 | ||
/* irq_chip callbacks; fasteoi flow relies on .irq_eoi (see domain map). */
static struct irq_chip hip04_irq_chip = {
	.name			= "HIP04 INTC",
	.irq_mask		= hip04_mask_irq,
	.irq_unmask		= hip04_unmask_irq,
	.irq_eoi		= hip04_eoi_irq,
	.irq_set_type		= hip04_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= hip04_irq_set_affinity,
#endif
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
213 | ||
214 | static u16 hip04_get_cpumask(struct hip04_irq_data *intc) | |
215 | { | |
216 | void __iomem *base = intc->dist_base; | |
217 | u32 mask, i; | |
218 | ||
219 | for (i = mask = 0; i < 32; i += 2) { | |
220 | mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2); | |
221 | mask |= mask >> 16; | |
222 | if (mask) | |
223 | break; | |
224 | } | |
225 | ||
226 | if (!mask) | |
227 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); | |
228 | ||
229 | return mask; | |
230 | } | |
231 | ||
/*
 * One-time distributor setup: disable forwarding, target every SPI at
 * the boot CPU, apply the common GIC distributor configuration, then
 * re-enable forwarding.
 */
static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
{
	unsigned int i;
	u32 cpumask;
	unsigned int nr_irqs = intc->nr_irqs;
	void __iomem *base = intc->dist_base;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = hip04_get_cpumask(intc);
	cpumask |= cpumask << 16;	/* two 16-bit target fields per word */
	for (i = 32; i < nr_irqs; i += 2)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));

	gic_dist_config(base, nr_irqs, NULL);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
253 | ||
/*
 * Per-CPU interface bring-up, run on each CPU as it starts: record this
 * CPU's interface mask in hip04_cpu_map, configure the banked per-CPU
 * interrupts, then unmask and enable the CPU interface.
 */
static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
{
	void __iomem *dist_base = intc->dist_base;
	void __iomem *base = intc->cpu_base;
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_HIP04_CPU_IF);
	cpu_mask = hip04_get_cpumask(intc);
	hip04_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		if (i != cpu)
			hip04_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, 32, NULL);

	/* priority mask 0xf0: let through all but the lowest priorities */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
281 | ||
#ifdef CONFIG_SMP
/* Send SGI @irq (an IPI, 0-15) to the CPUs in @mask. */
static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= hip04_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
306 | ||
307 | static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq, | |
308 | irq_hw_number_t hw) | |
309 | { | |
310 | if (hw < 32) { | |
311 | irq_set_percpu_devid(irq); | |
312 | irq_set_chip_and_handler(irq, &hip04_irq_chip, | |
313 | handle_percpu_devid_irq); | |
d17cab44 | 314 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
8e4bebe0 HZ |
315 | } else { |
316 | irq_set_chip_and_handler(irq, &hip04_irq_chip, | |
317 | handle_fasteoi_irq); | |
d17cab44 | 318 | irq_set_probe(irq); |
79a0d4d8 | 319 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); |
8e4bebe0 HZ |
320 | } |
321 | irq_set_chip_data(irq, d->host_data); | |
322 | return 0; | |
323 | } | |
324 | ||
325 | static int hip04_irq_domain_xlate(struct irq_domain *d, | |
326 | struct device_node *controller, | |
327 | const u32 *intspec, unsigned int intsize, | |
328 | unsigned long *out_hwirq, | |
329 | unsigned int *out_type) | |
330 | { | |
331 | unsigned long ret = 0; | |
332 | ||
5d4c9bc7 | 333 | if (irq_domain_get_of_node(d) != controller) |
8e4bebe0 HZ |
334 | return -EINVAL; |
335 | if (intsize < 3) | |
336 | return -EINVAL; | |
337 | ||
338 | /* Get the interrupt number and add 16 to skip over SGIs */ | |
339 | *out_hwirq = intspec[1] + 16; | |
340 | ||
341 | /* For SPIs, we need to add 16 more to get the irq ID number */ | |
342 | if (!intspec[0]) | |
343 | *out_hwirq += 16; | |
344 | ||
345 | *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; | |
346 | ||
347 | return ret; | |
348 | } | |
349 | ||
/* CPU hotplug "starting" callback: initialize this CPU's interface. */
static int hip04_irq_starting_cpu(unsigned int cpu)
{
	hip04_irq_cpu_init(&hip04_data);
	return 0;
}
355 | ||
8e4bebe0 HZ |
356 | static const struct irq_domain_ops hip04_irq_domain_ops = { |
357 | .map = hip04_irq_domain_map, | |
358 | .xlate = hip04_irq_domain_xlate, | |
359 | }; | |
360 | ||
/*
 * Probe the "hisilicon,hip04-intc" DT node: map the distributor and
 * CPU-interface registers, allocate linux irqs for hwirqs 16..nr_irqs-1,
 * create a legacy irq domain and install the IPI/IRQ entry points.
 */
static int __init
hip04_of_init(struct device_node *node, struct device_node *parent)
{
	irq_hw_number_t hwirq_base = 16;	/* skip the 16 SGIs */
	int nr_irqs, irq_base, i;

	if (WARN_ON(!node))
		return -ENODEV;

	/*
	 * NOTE(review): a failed iomap is only warned about here, yet the
	 * pointers are dereferenced unconditionally below - confirm this
	 * is an intentional "fail loudly at boot" policy.
	 */
	hip04_data.dist_base = of_iomap(node, 0);
	WARN(!hip04_data.dist_base, "fail to map hip04 intc dist registers\n");

	hip04_data.cpu_base = of_iomap(node, 1);
	WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		hip04_cpu_map[i] = 0xffff;

	/*
	 * Find out how many interrupts are supported.
	 * The HIP04 INTC only supports up to 510 interrupt sources.
	 */
	nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
	nr_irqs = (nr_irqs + 1) * 32;
	if (nr_irqs > HIP04_MAX_IRQS)
		nr_irqs = HIP04_MAX_IRQS;
	hip04_data.nr_irqs = nr_irqs;

	nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */

	irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
	if (irq_base < 0) {
		pr_err("failed to allocate IRQ numbers\n");
		return -EINVAL;
	}

	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
						  hwirq_base,
						  &hip04_irq_domain_ops,
						  &hip04_data);

	/*
	 * NOTE(review): the irq descs allocated above are not freed on this
	 * error path - verify the leak is acceptable for a boot-time failure.
	 */
	if (WARN_ON(!hip04_data.domain))
		return -EINVAL;

#ifdef CONFIG_SMP
	set_smp_cross_call(hip04_raise_softirq);
#endif
	set_handle_irq(hip04_handle_irq);

	hip04_irq_dist_init(&hip04_data);
	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
			  hip04_irq_starting_cpu, NULL);
	return 0;
}
IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);