// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hisilicon HiP04 INTC
 *
 * Copyright (C) 2002-2014 ARM Limited.
 * Copyright (c) 2013-2014 Hisilicon Ltd.
 * Copyright (c) 2013-2014 Linaro Ltd.
 *
 * Interrupt architecture for the HIP04 INTC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
24 | ||
25 | #include <linux/init.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/err.h> | |
28 | #include <linux/module.h> | |
29 | #include <linux/list.h> | |
30 | #include <linux/smp.h> | |
31 | #include <linux/cpu.h> | |
32 | #include <linux/cpu_pm.h> | |
33 | #include <linux/cpumask.h> | |
34 | #include <linux/io.h> | |
35 | #include <linux/of.h> | |
36 | #include <linux/of_address.h> | |
37 | #include <linux/of_irq.h> | |
38 | #include <linux/irqdomain.h> | |
39 | #include <linux/interrupt.h> | |
40 | #include <linux/slab.h> | |
41a83e06 | 41 | #include <linux/irqchip.h> |
8e4bebe0 HZ |
42 | #include <linux/irqchip/arm-gic.h> |
43 | ||
44 | #include <asm/irq.h> | |
45 | #include <asm/exception.h> | |
46 | #include <asm/smp_plat.h> | |
47 | ||
48 | #include "irq-gic-common.h" | |
8e4bebe0 HZ |
49 | |
/* Hardware limit: the HIP04 INTC supports at most 510 interrupt sources. */
#define HIP04_MAX_IRQS		510

/*
 * Per-controller state for the single HIP04 INTC instance.
 */
struct hip04_irq_data {
	void __iomem *dist_base;	/* distributor register block */
	void __iomem *cpu_base;		/* CPU interface register block (banked per CPU) */
	struct irq_domain *domain;	/* hwirq <-> Linux irq translation domain */
	unsigned int nr_irqs;		/* number of irqs probed from GIC_DIST_CTR, capped at 510 */
};
58 | ||
/* Serializes read-modify-write sequences on distributor registers. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_HIP04_CPU_IF 16
static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;

/* The one and only controller instance on this SoC. */
static struct hip04_irq_data hip04_data __read_mostly;
70 | ||
71 | static inline void __iomem *hip04_dist_base(struct irq_data *d) | |
72 | { | |
73 | struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d); | |
74 | return hip04_data->dist_base; | |
75 | } | |
76 | ||
77 | static inline void __iomem *hip04_cpu_base(struct irq_data *d) | |
78 | { | |
79 | struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d); | |
80 | return hip04_data->cpu_base; | |
81 | } | |
82 | ||
83 | static inline unsigned int hip04_irq(struct irq_data *d) | |
84 | { | |
85 | return d->hwirq; | |
86 | } | |
87 | ||
88 | /* | |
89 | * Routines to acknowledge, disable and enable interrupts | |
90 | */ | |
91 | static void hip04_mask_irq(struct irq_data *d) | |
92 | { | |
93 | u32 mask = 1 << (hip04_irq(d) % 32); | |
94 | ||
95 | raw_spin_lock(&irq_controller_lock); | |
96 | writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR + | |
97 | (hip04_irq(d) / 32) * 4); | |
98 | raw_spin_unlock(&irq_controller_lock); | |
99 | } | |
100 | ||
101 | static void hip04_unmask_irq(struct irq_data *d) | |
102 | { | |
103 | u32 mask = 1 << (hip04_irq(d) % 32); | |
104 | ||
105 | raw_spin_lock(&irq_controller_lock); | |
106 | writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET + | |
107 | (hip04_irq(d) / 32) * 4); | |
108 | raw_spin_unlock(&irq_controller_lock); | |
109 | } | |
110 | ||
111 | static void hip04_eoi_irq(struct irq_data *d) | |
112 | { | |
113 | writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI); | |
114 | } | |
115 | ||
/*
 * Configure the trigger type of an interrupt.
 *
 * SGIs (hwirq < 16) are edge-triggered by hardware and cannot be
 * reconfigured; SPIs (hwirq >= 32) only support level-high or
 * rising-edge.  Returns 0 on success, -EINVAL for unsupported
 * combinations.
 */
static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = hip04_dist_base(d);
	unsigned int irq = hip04_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	/* Program the 2-bit config field in GIC_DIST_CONFIG under the lock. */
	ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && irq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
		ret = 0;
	}

	raw_spin_unlock(&irq_controller_lock);

	return ret;
}
144 | ||
#ifdef CONFIG_SMP
/*
 * Route an interrupt to a single CPU.
 *
 * HIP04 packs two 16-bit CPU target fields per 32-bit GIC_DIST_TARGET
 * word (hence the %2 / *2 arithmetic, versus four 8-bit fields on a
 * standard GIC).  The chosen CPU's interface mask from hip04_cpu_map
 * is written into the irq's field under the controller lock.
 */
static int hip04_irq_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	void __iomem *reg;
	unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
	u32 val, mask, bit;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	/* Word-aligned register holding this irq's 16-bit target field. */
	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
	mask = 0xffff << shift;
	bit = hip04_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
a2df12c5 MZ |
174 | |
/*
 * Send the SGI identified by d->hwirq to every CPU in @mask.
 *
 * Builds the physical target mask from hip04_cpu_map and writes a
 * single softint request (target mask in bits 8+, SGI number in the
 * low bits) to the distributor.
 */
static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= hip04_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
198 | ||
/*
 * Top-level interrupt entry point.
 *
 * Repeatedly acknowledges pending interrupts from the CPU interface
 * and dispatches them through the irq domain.  IDs above
 * HIP04_MAX_IRQS (e.g. the 1023 "no pending interrupt" value) end the
 * loop without being dispatched.
 */
static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	void __iomem *cpu_base = hip04_data.cpu_base;

	do {
		/* Reading INTACK acknowledges the highest-priority pending irq. */
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (irqnr <= HIP04_MAX_IRQS)
			handle_domain_irq(hip04_data.domain, irqnr, regs);
	} while (irqnr > HIP04_MAX_IRQS);
}
212 | ||
/* irq_chip callbacks wired to the routines above. */
static struct irq_chip hip04_irq_chip = {
	.name			= "HIP04 INTC",
	.irq_mask		= hip04_mask_irq,
	.irq_unmask		= hip04_unmask_irq,
	.irq_eoi		= hip04_eoi_irq,
	.irq_set_type		= hip04_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= hip04_irq_set_affinity,
	.ipi_send_mask		= hip04_ipi_send_mask,
#endif
	/* Mask while retriggering, no wakeup config, mask across suspend. */
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
227 | ||
228 | static u16 hip04_get_cpumask(struct hip04_irq_data *intc) | |
229 | { | |
230 | void __iomem *base = intc->dist_base; | |
231 | u32 mask, i; | |
232 | ||
233 | for (i = mask = 0; i < 32; i += 2) { | |
234 | mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2); | |
235 | mask |= mask >> 16; | |
236 | if (mask) | |
237 | break; | |
238 | } | |
239 | ||
240 | if (!mask) | |
241 | pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); | |
242 | ||
243 | return mask; | |
244 | } | |
245 | ||
/*
 * One-time distributor setup at boot: disable the distributor, route
 * every SPI to the booting CPU, apply the common GIC configuration,
 * then re-enable.
 */
static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
{
	unsigned int i;
	u32 cpumask;
	unsigned int nr_irqs = intc->nr_irqs;
	void __iomem *base = intc->dist_base;

	/* Disable forwarding while we reprogram the distributor. */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = hip04_get_cpumask(intc);
	cpumask |= cpumask << 16;	/* duplicate into both 16-bit fields */
	for (i = 32; i < nr_irqs; i += 2)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));

	gic_dist_config(base, nr_irqs, NULL);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
267 | ||
/*
 * Per-CPU interface bring-up, run on each CPU as it comes online:
 * record this CPU's interface mask, scrub it from the other map
 * slots, configure the banked irqs, and enable the interface.
 */
static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
{
	void __iomem *dist_base = intc->dist_base;
	void __iomem *base = intc->cpu_base;
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_HIP04_CPU_IF);
	cpu_mask = hip04_get_cpumask(intc);
	hip04_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		if (i != cpu)
			hip04_cpu_map[i] &= ~cpu_mask;

	/* Common per-CPU setup for the 32 banked irqs. */
	gic_cpu_config(dist_base, 32, NULL);

	/* Accept all priorities, then enable signalling to this CPU. */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
295 | ||
8e4bebe0 HZ |
/*
 * irqdomain .map callback: pick flow handler and chip per hwirq class.
 * SGIs (hw < 16) and PPIs (16 <= hw < 32) are per-cpu; everything else
 * is a normal fasteoi SPI routed to a single target CPU.
 */
static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 16) {
		/* SGI: per-cpu IPI flow. */
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_fasteoi_ipi);
	} else if (hw < 32) {
		/* PPI: per-cpu device interrupt. */
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		/* SPI: shared interrupt, single effective target. */
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_fasteoi_irq);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
316 | ||
/*
 * irqdomain .xlate callback: translate a devicetree interrupt
 * specifier into (hwirq, trigger type).
 *
 * A one-cell specifier below 16 names an SGI directly.  Otherwise the
 * standard three-cell GIC binding applies: cell 0 selects PPI (1) or
 * SPI (0), cell 1 is the interrupt number, cell 2 the trigger flags.
 */
static int hip04_irq_domain_xlate(struct irq_domain *d,
				  struct device_node *controller,
				  const u32 *intspec, unsigned int intsize,
				  unsigned long *out_hwirq,
				  unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;
	/* Single-cell SGI specifier. */
	if (intsize == 1 && intspec[0] < 16) {
		*out_hwirq = intspec[0];
		*out_type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
344 | ||
6c034d17 | 345 | static int hip04_irq_starting_cpu(unsigned int cpu) |
8e4bebe0 | 346 | { |
6c034d17 RC |
347 | hip04_irq_cpu_init(&hip04_data); |
348 | return 0; | |
8e4bebe0 HZ |
349 | } |
350 | ||
8e4bebe0 HZ |
/* Domain operations: legacy mapping plus DT specifier translation. */
static const struct irq_domain_ops hip04_irq_domain_ops = {
	.map = hip04_irq_domain_map,
	.xlate = hip04_irq_domain_xlate,
};
355 | ||
/*
 * Probe entry point, invoked via IRQCHIP_DECLARE for the
 * "hisilicon,hip04-intc" compatible.
 *
 * Maps the distributor and CPU interface registers, sizes the
 * controller, allocates Linux irq descriptors, registers the legacy
 * irq domain and the top-level handler, initializes the distributor
 * and arranges per-CPU interface init through cpuhp.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init
hip04_of_init(struct device_node *node, struct device_node *parent)
{
	int nr_irqs, irq_base, i;

	if (WARN_ON(!node))
		return -ENODEV;

	/* reg index 0: distributor, index 1: CPU interface. */
	hip04_data.dist_base = of_iomap(node, 0);
	WARN(!hip04_data.dist_base, "fail to map hip04 intc dist registers\n");

	hip04_data.cpu_base = of_iomap(node, 1);
	WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		hip04_cpu_map[i] = 0xffff;

	/*
	 * Find out how many interrupts are supported.
	 * The HIP04 INTC only supports up to 510 interrupt sources.
	 */
	nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
	nr_irqs = (nr_irqs + 1) * 32;
	if (nr_irqs > HIP04_MAX_IRQS)
		nr_irqs = HIP04_MAX_IRQS;
	hip04_data.nr_irqs = nr_irqs;

	irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id());
	if (irq_base < 0) {
		pr_err("failed to allocate IRQ numbers\n");
		return -EINVAL;
	}

	/* 1:1 legacy domain starting at hwirq 0. */
	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
						  0,
						  &hip04_irq_domain_ops,
						  &hip04_data);
	if (WARN_ON(!hip04_data.domain))
		return -EINVAL;

#ifdef CONFIG_SMP
	/* The first 16 descriptors back the 16 SGIs used as IPIs. */
	set_smp_ipi_range(irq_base, 16);
#endif
	set_handle_irq(hip04_handle_irq);

	hip04_irq_dist_init(&hip04_data);
	/* Boot CPU runs the callback immediately; others on bring-up. */
	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
			  hip04_irq_starting_cpu, NULL);
	return 0;
}
IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);