arm64: add ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap
drivers/irqchip/irq-gic-v3.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kstrtox.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	u64			flags;
	bool			has_rss;
	unsigned int		ppi_nr;
	struct partition_desc	**ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)					\
	({								\
		u32 __priority = (priority);				\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__priority = 0x80 | (__priority >> 1);		\
									\
		__priority;						\
	})

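/*
 * Worked example (illustrative, using the kernel's default
 * GICD_INT_DEF_PRI of 0xa0): GICD_INT_NMI_PRI is then 0xa0 & ~0x80 = 0x20,
 * and with gic_nonsecure_priorities enabled GICD_INT_RPR_PRI(0x20)
 * evaluates to 0x80 | (0x20 >> 1) = 0x90, which is the Distributor's
 * view of that priority as reported by ICC_RPR_EL1.
 */
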
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

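/*
 * With the default priorities above, a PMR of 0xf0 leaves everything
 * Linux programs unmasked: the GIC only signals an interrupt whose
 * priority is numerically lower (i.e. higher priority) than the current
 * ICC_PMR_EL1 value, and 0xa0 < 0xf0.
 */
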
enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

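/*
 * For reference, with EPPI_BASE_INTID = 1056 and ESPI_BASE_INTID = 4096
 * (from <linux/irqchip/arm-gic-v3.h>), the ranges decoded above are:
 * SGI 0-15, PPI 16-31, SPI 32-1019, EPPI 1056-1119, ESPI 4096-5119 and
 * LPI 8192 upwards. INTIDs 1020-1023 are the architectural "special"
 * values, filtered out later in gic_irqnr_is_special().
 */
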
static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & bit) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

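/*
 * The architecture only guarantees that RWP tracks a subset of register
 * writes (notably GICD_CTLR changes and clearing enables through
 * GICD_ICENABLERn / GICR_ICENABLER0), which is why only the mask and
 * distributor-control paths below bother polling it.
 */
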
#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

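/*
 * The loop above implements the GICR_WAKER handshake: after flipping
 * ProcessorSleep, poll ChildrenAsleep until it reads the opposite of
 * 'enable'. The XOR breaks out exactly when the redistributor reports
 * the requested state (awake for enable, asleep for disable).
 */
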
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}

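/*
 * Worked example for the accessors below: for SPI INTID 42,
 * convert_offset_index() leaves the offset unchanged and returns
 * index 42, so gic_peek_irq()/gic_poke_irq() on GICD_ISENABLER end up
 * touching bit 42 % 32 = 10 of the word at GICD_ISENABLER +
 * (42 / 32) * 4, i.e. bit 10 of the second enable register.
 */
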
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	writel_relaxed(mask, base + offset + (index / 32) * 4);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
	if (gic_irq_in_rdist(d))
		gic_redist_wait_for_rwp();
	else
		gic_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}
		reg = GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
	switch (__get_intid_range(hwirq)) {
	case PPI_RANGE:
		return hwirq - 16;
	case EPPI_RANGE:
		return hwirq - EPPI_BASE_INTID + 16;
	default:
		unreachable();
	}
}

static u32 gic_get_ppi_index(struct irq_data *d)
{
	return __gic_get_ppi_index(d->hwirq);
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of the LPI request;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of the LPI request;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
	isb();
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, NULL);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

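/*
 * Example: an MPIDR with Aff3.Aff2.Aff1.Aff0 = 1.2.3.4 packs into
 * 0x1_0002_0304, matching the GICD_IROUTER layout (Aff3 in bits
 * [39:32], Aff2 in [23:16], Aff1 in [15:8], Aff0 in [7:0]).
 */
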
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
		isb();
	}
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
	if (!gic_supports_nmi())
		return false;

	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
}

static bool gic_irqnr_is_special(u32 irqnr)
{
	return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime, and there is an IRQ pending. The IRQ
	 * would then be taken in NMI context, something that nobody
	 * wants to debug twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}

static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}

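/*
 * Rough walk-through of the probe above, assuming five implemented
 * priority bits: BIT(8 - 5) = 0x08 is the smallest non-zero implemented
 * priority. If EL3 owns Group0, the write is shifted to
 * 0x80 | (0x08 >> 1) = 0x84, which truncates to 0x80 in the implemented
 * bits [7:3]; the non-secure read-back then undoes the shift and
 * returns 0.
 */
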
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff */
	gic_dist_config(base, GIC_LINE_NR, NULL);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1, and wait for it to drain */
	writel_relaxed(val, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}

static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

	/* Boot-time cleanup */
	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
		u64 val;

		/* Deactivate any present vPE */
		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
		if (val & GICR_VPENDBASER_Valid)
			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
					      ptr + SZ_128K + GICR_VPENDBASER);

		/* Mark the VPE table as invalid */
		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
		val &= ~GICR_VPROPBASER_4_1_VALID;
		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
	}

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   !!(ctlr & GICR_CTLR_IR) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}

static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

/* Check whether it's single security state view */
static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = cpu_logical_map(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Configuration mismatch with the boot CPU: the system is
		 * likely to die, as interrupt masking will not work properly
		 * on all CPUs.
		 *
		 * The boot CPU calls this function before enabling NMI support,
		 * and as a result we'll never see this warning in the boot path
		 * for that CPU.
		 */
		if (static_branch_unlikely(&gic_nonsecure_priorities))
			WARN_ON(!group0 || gic_dist_security_disabled());
		else
			WARN_ON(group0 && !gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch(pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch(pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(cpu_logical_map(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)cpu_logical_map(i));
	}

	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
	return kstrtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

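/*
 * Illustration: if CPUs 0-3 sit in one cluster with Aff0 values 0-3 and
 * all of them are in the mask, a single call returns tlist = 0xf, so
 * gic_ipi_send_mask() below only needs one ICC_SGI1R_EL1 write for the
 * whole cluster.
 */
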
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;

	if (WARN_ON(d->hwirq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb(ishst);

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}

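/*
 * The single-parameter fwspec above is matched by the fast path at the
 * top of gic_irq_domain_translate(): param_count == 1 with a value
 * below 16 is an SGI, edge-rising by definition.
 */
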
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while(0)
#endif

static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

	if (static_branch_likely(&supports_deactivate_key))
		chip = &gic_eoimode1_chip;

	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;

	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}

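/*
 * Example of the DT translation above: an entry such as
 * interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH> (GIC_SPI being 0 in the
 * standard binding headers) yields hwirq 23 + 32 = 55 with a level-high
 * trigger.
 */
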
443acc4f
MZ
1546static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1547 unsigned int nr_irqs, void *arg)
1548{
1549 int i, ret;
1550 irq_hw_number_t hwirq;
1551 unsigned int type = IRQ_TYPE_NONE;
f833f57f 1552 struct irq_fwspec *fwspec = arg;
443acc4f 1553
f833f57f 1554 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
443acc4f
MZ
1555 if (ret)
1556 return ret;
1557
63c16c6e
SP
1558 for (i = 0; i < nr_irqs; i++) {
1559 ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1560 if (ret)
1561 return ret;
1562 }
443acc4f
MZ
1563
1564 return 0;
1565}
1566
1567static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1568 unsigned int nr_irqs)
1569{
1570 int i;
1571
1572 for (i = 0; i < nr_irqs; i++) {
1573 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1574 irq_set_handler(virq + i, NULL);
1575 irq_domain_reset_irq_data(d);
1576 }
1577}
1578
d753f849
JM
1579static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1580 irq_hw_number_t hwirq)
1581{
1582 enum gic_intid_range range;
1583
1584 if (!gic_data.ppi_descs)
1585 return false;
1586
1587 if (!is_of_node(fwspec->fwnode))
1588 return false;
1589
1590 if (fwspec->param_count < 4 || !fwspec->param[3])
1591 return false;
1592
1593 range = __get_intid_range(hwirq);
1594 if (range != PPI_RANGE && range != EPPI_RANGE)
1595 return false;
1596
1597 return true;
1598}
1599
e3825ba1
MZ
1600static int gic_irq_domain_select(struct irq_domain *d,
1601 struct irq_fwspec *fwspec,
1602 enum irq_domain_bus_token bus_token)
1603{
d753f849
JM
1604 unsigned int type, ret, ppi_idx;
1605 irq_hw_number_t hwirq;
1606
e3825ba1
MZ
1607 /* Not for us */
1608 if (fwspec->fwnode != d->fwnode)
1609 return 0;
1610
1611 /* If this is not DT, then we have a single domain */
1612 if (!is_of_node(fwspec->fwnode))
1613 return 1;
1614
d753f849
JM
1615 ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
1616 if (WARN_ON_ONCE(ret))
1617 return 0;
1618
1619 if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1620 return d == gic_data.domain;
1621
e3825ba1
MZ
1622 /*
1623 * If this is a PPI and we have a 4th (non-null) parameter,
1624 * then we need to match the partition domain.
1625 */
d753f849
JM
1626 ppi_idx = __gic_get_ppi_index(hwirq);
1627 return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
e3825ba1
MZ
1628}
1629
021f6537 1630static const struct irq_domain_ops gic_irq_domain_ops = {
f833f57f 1631 .translate = gic_irq_domain_translate,
443acc4f
MZ
1632 .alloc = gic_irq_domain_alloc,
1633 .free = gic_irq_domain_free,
e3825ba1
MZ
1634 .select = gic_irq_domain_select,
1635};
1636
1637static int partition_domain_translate(struct irq_domain *d,
1638 struct irq_fwspec *fwspec,
1639 unsigned long *hwirq,
1640 unsigned int *type)
1641{
d753f849 1642 unsigned long ppi_intid;
e3825ba1 1643 struct device_node *np;
d753f849 1644 unsigned int ppi_idx;
e3825ba1
MZ
1645 int ret;
1646
52085d3f
MZ
1647 if (!gic_data.ppi_descs)
1648 return -ENOMEM;
1649
e3825ba1
MZ
1650 np = of_find_node_by_phandle(fwspec->param[3]);
1651 if (WARN_ON(!np))
1652 return -EINVAL;
1653
d753f849
JM
1654 ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
1655 if (WARN_ON_ONCE(ret))
1656 return 0;
1657
1658 ppi_idx = __gic_get_ppi_index(ppi_intid);
1659 ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
e3825ba1
MZ
1660 of_node_to_fwnode(np));
1661 if (ret < 0)
1662 return ret;
1663
1664 *hwirq = ret;
1665 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1666
1667 return 0;
1668}
1669
1670static const struct irq_domain_ops partition_domain_ops = {
1671 .translate = partition_domain_translate,
1672 .select = gic_irq_domain_select,
021f6537
MZ
1673};
1674
9c8114c2
SK
1675static bool gic_enable_quirk_msm8996(void *data)
1676{
1677 struct gic_chip_data *d = data;
1678
1679 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1680
1681 return true;
1682}
1683
d01fd161
MZ
1684static bool gic_enable_quirk_cavium_38539(void *data)
1685{
1686 struct gic_chip_data *d = data;
1687
1688 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1689
1690 return true;
1691}
1692
7f2481b3
MZ
1693static bool gic_enable_quirk_hip06_07(void *data)
1694{
1695 struct gic_chip_data *d = data;
1696
1697 /*
1698 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1699 * not being an actual ARM implementation). The saving grace is
1700 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1701 * HIP07 doesn't even have a proper IIDR, and still pretends to
1702 * have ESPI. In both cases, put them right.
1703 */
1704 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1705 /* Zero both ESPI and the RES0 field next to it... */
1706 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1707 return true;
1708 }
1709
1710 return false;
1711}
1712
1713static const struct gic_quirk gic_quirks[] = {
1714 {
1715 .desc = "GICv3: Qualcomm MSM8996 broken firmware",
1716 .compatible = "qcom,msm8996-gic-v3",
1717 .init = gic_enable_quirk_msm8996,
1718 },
1719 {
1720 .desc = "GICv3: HIP06 erratum 161010803",
1721 .iidr = 0x0204043b,
1722 .mask = 0xffffffff,
1723 .init = gic_enable_quirk_hip06_07,
1724 },
1725 {
1726 .desc = "GICv3: HIP07 erratum 161010803",
1727 .iidr = 0x00000000,
1728 .mask = 0xffffffff,
1729 .init = gic_enable_quirk_hip06_07,
1730 },
d01fd161
MZ
1731 {
1732 /*
1733 * Reserved register accesses generate a Synchronous
1734 * External Abort. This erratum applies to:
1735 * - ThunderX: CN88xx
1736 * - OCTEON TX: CN83xx, CN81xx
1737 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1738 */
1739 .desc = "GICv3: Cavium erratum 38539",
1740 .iidr = 0xa000034c,
1741 .mask = 0xe8f00fff,
1742 .init = gic_enable_quirk_cavium_38539,
1743 },
7f2481b3
MZ
1744 {
1745 }
1746};
1747
static void gic_enable_nmi_support(void)
{
	int i;

	if (!gic_prio_masking_enabled())
		return;

	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
	if (!ppi_nmi_refs)
		return;

	for (i = 0; i < gic_data.ppi_nr; i++)
		refcount_set(&ppi_nmi_refs[i], 0);

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");

	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
	 * and whether Group 0 interrupts can be delivered to Linux in the
	 * non-secure world as FIQs (controlled by the SCR_EL3.FIQ bit). These
	 * affect the ICC_PMR_EL1 register and the priority that software
	 * assigns to interrupts:
	 *
	 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
	 * -----------------------------------------------------------
	 *      1       |      -      |  unchanged  |    unchanged
	 * -----------------------------------------------------------
	 *      0       |      1      |  non-secure |    non-secure
	 * -----------------------------------------------------------
	 *      0       |      0      |  unchanged  |    non-secure
	 *
	 * where non-secure means that the value is right-shifted by one and
	 * has the MSB set, to make it fit in the non-secure priority range.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified
	 * to be in the non-secure range, we use a different PMR value to mask
	 * IRQs and the rest of the values that we use remain unchanged.
	 */
	if (gic_has_group0() && !gic_dist_security_disabled())
		static_branch_enable(&gic_nonsecure_priorities);

	static_branch_enable(&supports_pseudo_nmis);

	if (static_branch_likely(&supports_deactivate_key))
		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
	else
		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}

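/*
 * Common init path for the DT and ACPI probes: record the distributor
 * and redistributor regions, apply IIDR-based quirks, create the IRQ
 * domain and bring up the boot CPU interface.
 */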
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	gic_data.rdists.has_rvpeid = true;
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;
	gic_data.rdists.has_vpend_valid_dirty = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);

	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_dist_init();
	gic_cpu_init();
	gic_smp_init();
	gic_cpu_pm_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain);
		its_cpu_init();
		its_lpi_memreserve_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	gic_enable_nmi_support();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}

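/* Check GICD_PIDR2: only GICv3 and GICv4 distributors are supported */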
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}

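/*
 * PPI partitions live in a "ppi-partitions" subnode of the GIC node,
 * each child listing the CPUs it covers in an "affinity" property.
 * An illustrative fragment (labels and node names are made up):
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu2>;
 *		};
 *	};
 */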
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
	if (!gic_data.ppi_descs)
		goto out_put_node;

	nr_parts = of_get_child_count(parts_node);

	if (!nr_parts)
		goto out_put_node;

	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %pOFn[%d] { ",
			child_part, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = of_cpu_node_to_id(cpu_node);
			if (WARN_ON(cpu < 0)) {
				of_node_put(cpu_node);
				continue;
			}

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
			of_node_put(cpu_node);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < gic_data.ppi_nr; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= GIC_IRQ_TYPE_PARTITION,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}

out_put_node:
	of_node_put(parts_node);
}

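/*
 * Tell KVM about the maintenance interrupt and, when one is described,
 * the GICV region, whose 'reg' index follows the GICD entry, the
 * redistributor regions, and the GICC/GICH entries.
 */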
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static void gic_request_region(resource_size_t base, resource_size_t size,
			       const char *name)
{
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}

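/*
 * Map a DT register region, reserving it first. Failures are returned
 * as ERR_PTR() values so that callers can use IS_ERR().
 */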
static void __iomem *gic_of_iomap(struct device_node *node, int idx,
				  const char *name, struct resource *res)
{
	void __iomem *base;
	int ret;

	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	gic_request_region(res->start, resource_size(res), name);
	base = of_iomap(node, idx);

	return base ?: IOMEM_ERR_PTR(-ENOMEM);
}

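/*
 * DT probe path. A minimal sketch of an arm,gic-v3 node (addresses,
 * sizes and labels are illustrative only), with the GICD region
 * followed by a single GICR region:
 *
 *	gic: interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		reg = <0x2f000000 0x10000>,	// GICD
 *		      <0x2f100000 0x200000>;	// GICR
 *	};
 *
 * "#redistributor-regions" and "redistributor-stride" are optional and
 * default to 1 and 0 respectively below.
 */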
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	struct resource res;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(dist_base)) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return PTR_ERR(dist_base);
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
		if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

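/*
 * ACPI/MADT can describe redistributors in one of two mutually
 * exclusive ways: dedicated GICR subtables (one per contiguous region),
 * or a per-CPU GICR base address in each GICC subtable. The helpers
 * below handle both layouts.
 */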
static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
			(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}
	gic_request_region(redist->base_address, redist->length, "GICR");

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* Skip GICC entries which are not marked ACPI_MADT_ENABLED: they are unusable */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;
	gic_request_region(gicc->gicr_base_address, size, "GICR");

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

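/* Map the redistributors, using whichever layout the MADT describes */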
static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has a valid gicr base address, then it
	 * means the GICR base is presented via GICC.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}

	/*
	 * It's perfectly valid for firmware to pass a disabled GICC entry;
	 * the driver should not treat it as an error. Skip the entry
	 * instead of failing the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor descriptions: GICR and GICC subtables have
	 * to be mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

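/*
 * Record the vGIC maintenance interrupt and GICV base from the first
 * usable GICC entry, then check that every other CPU agrees with them.
 */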
static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static bool first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
			 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

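/* True if at least one usable GICC entry carried virtualization info */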
static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}

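/*
 * ACPI probe path: map the distributor described by the MADT GICD
 * subtable, collect the redistributor regions counted earlier, then
 * hand over to gic_init_bases(). All GSIs are routed via a single
 * synthetic fwnode.
 */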
ffa7d616 2375static int __init
aba3c7ed 2376gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
ffa7d616
TN
2377{
2378 struct acpi_madt_generic_distributor *dist;
611f039f 2379 size_t size;
b70fb7af 2380 int i, err;
ffa7d616
TN
2381
2382 /* Get distributor base address */
2383 dist = (struct acpi_madt_generic_distributor *)header;
611f039f
JG
2384 acpi_data.dist_base = ioremap(dist->base_address,
2385 ACPI_GICV3_DIST_MEM_SIZE);
2386 if (!acpi_data.dist_base) {
ffa7d616
TN
2387 pr_err("Unable to map GICD registers\n");
2388 return -ENOMEM;
2389 }
4deb96e3 2390 gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
ffa7d616 2391
611f039f 2392 err = gic_validate_dist_version(acpi_data.dist_base);
ffa7d616 2393 if (err) {
71192a68 2394 pr_err("No distributor detected at @%p, giving up\n",
611f039f 2395 acpi_data.dist_base);
ffa7d616
TN
2396 goto out_dist_unmap;
2397 }
2398
611f039f
JG
2399 size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2400 acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2401 if (!acpi_data.redist_regs) {
ffa7d616
TN
2402 err = -ENOMEM;
2403 goto out_dist_unmap;
2404 }
2405
b70fb7af
TN
2406 err = gic_acpi_collect_gicr_base();
2407 if (err)
ffa7d616 2408 goto out_redist_unmap;
ffa7d616 2409
7327b16f
MZ
2410 gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2411 if (!gsi_domain_handle) {
ffa7d616
TN
2412 err = -ENOMEM;
2413 goto out_redist_unmap;
2414 }
2415
611f039f 2416 err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
7327b16f 2417 acpi_data.nr_redist_regions, 0, gsi_domain_handle);
ffa7d616
TN
2418 if (err)
2419 goto out_fwhandle_free;
2420
7327b16f 2421 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
d33a3c8c 2422
d01d3274 2423 if (static_branch_likely(&supports_deactivate_key))
d33a3c8c 2424 gic_acpi_setup_kvm_info();
1839e576 2425
ffa7d616
TN
2426 return 0;
2427
2428out_fwhandle_free:
7327b16f 2429 irq_domain_free_fwnode(gsi_domain_handle);
ffa7d616 2430out_redist_unmap:
611f039f
JG
2431 for (i = 0; i < acpi_data.nr_redist_regions; i++)
2432 if (acpi_data.redist_regs[i].redist_base)
2433 iounmap(acpi_data.redist_regs[i].redist_base);
2434 kfree(acpi_data.redist_regs);
ffa7d616 2435out_dist_unmap:
611f039f 2436 iounmap(acpi_data.dist_base);
ffa7d616
TN
2437 return err;
2438}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif