irqchip/gic: Refactor SMP configuration
linux-block.git: drivers/irqchip/irq-gic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while(0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	struct irq_chip chip;
	union gic_base dist_base;
	union gic_base cpu_base;
	void __iomem *raw_dist_base;
	void __iomem *raw_cpu_base;
	u32 percpu_offset;
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

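/*
 * When the bL switcher is in use, gic_raise_softirq() and
 * gic_migrate_target() can race on the CPU interface map below, so
 * updates are serialized with cpu_map_lock; on !BL_SWITCHER builds the
 * locks compile away.
 */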
#ifdef CONFIG_BL_SWITCHER

static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)

#else

#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)

#define gic_lock()			do { } while(0)
#define gic_unlock()			do { } while(0)

#endif

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
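/*
 * Each distributor register bank touched here (ENABLE, PENDING,
 * ACTIVE) is an array of 32-bit words with one bit per interrupt, so
 * an interrupt's word offset is (hwirq / 32) * 4 and its bit within
 * that word is (hwirq % 32).
 */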
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

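/*
 * In EOImode1 (split EOI/Deactivate), a write to GIC_CPU_EOI only
 * performs the priority drop; the interrupt stays active until a
 * separate write to GIC_CPU_DEACTIVATE. For an IRQ forwarded to a
 * vcpu, the guest's own EOI performs the deactivation instead, which
 * is why it is skipped below.
 */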
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}

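/*
 * The irqchip_state accessors below map the generic PENDING/ACTIVE/
 * MASKED states onto the distributor's write-1-to-set and
 * write-1-to-clear register pairs; note that MASKED is inverted,
 * since the hardware tracks "enabled" bits.
 */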
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && gicirq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d))
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

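/*
 * Top-level IRQ entry point: each read of GIC_CPU_INTACK acknowledges
 * the highest-priority pending interrupt and returns its ID in the
 * low bits. IDs 0-15 are SGIs (IPIs), 16-1019 are normal interrupts,
 * and IDs above that (typically 1023) mean "nothing pending", which
 * terminates the loop.
 */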
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1020)) {
			if (static_branch_likely(&supports_deactivate_key))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			isb();
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			if (static_branch_likely(&supports_deactivate_key))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
			/*
			 * Ensure any shared data written by the CPU sending
			 * the IPI is read after we've read the ACK register
			 * on the GIC.
			 *
			 * Pairs with the write barrier in gic_raise_softirq
			 */
			smp_rmb();
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
		handle_bad_irq(desc);
	} else {
		isb();
		generic_handle_irq(cascade_irq);
	}

 out:
	chained_irq_exit(chip, desc);
}

static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}

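/*
 * The first 32 bytes of GIC_DIST_TARGET are read-only and banked
 * per-CPU: each returns the CPU interface mask of the reading CPU.
 * Scan them until one returns a non-zero mask to discover which
 * interface bit belongs to us.
 */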
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static bool gic_check_gicv2(void __iomem *base)
{
	u32 val = readl_relaxed(base + GIC_CPU_IDENT);
	return (val & 0xff0fff) == 0x02043B;
}

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;
	int i;

	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
		mode = GIC_CPU_CTRL_EOImodeNS;

	if (gic_check_gicv2(cpu_base))
		for (i = 0; i < 4; i++)
			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}

#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			       dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			       dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			       dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			       dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

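/*
 * PPI and SGI state lives in the first 32 interrupt slots, which are
 * banked per CPU in the distributor, so it is saved and restored
 * through per-cpu pointers on whichever CPU is going down or coming
 * back up.
 */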
void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
					     sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
#else
static int gic_pm_init(struct gic_chip_data *gic)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
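/*
 * SPI routing is a single target byte per interrupt in
 * GIC_DIST_TARGET, so changing affinity is one writeb_relaxed() of
 * the chosen CPU's interface bit. The GIC can only route to the first
 * eight CPU interfaces (NR_GIC_CPU_IF).
 */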
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
	unsigned int cpu;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	writeb_relaxed(gic_cpu_map[cpu], reg);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}

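/*
 * IPIs are sent through GIC_DIST_SOFTINT (GICD_SGIR): the SGI number
 * goes in bits [3:0] and the target CPU interface mask in bits
 * [23:16]. Target-list-filter 2 in bits [25:24] means "this CPU
 * only", which is the self-IPI fast path below.
 */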
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | irq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}

static __init void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gic:starting",
				  gic_starting_cpu, NULL);
}
#else
#define gic_smp_init()		do { } while(0)
#define gic_set_affinity	NULL
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
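/*
 * Note on the ror32() below: cur_target_mask replicates the current
 * interface bit into each of the four target bytes of a word;
 * rotating those bits right by (cur_cpu_id - new_cpu_id) mod 32 moves
 * each byte's bit to the new interface position without disturbing
 * the other bytes.
 */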
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

static void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

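/*
 * hwirqs below 32 are the banked SGIs/PPIs and get the percpu-devid
 * flow; everything else is a shared SPI handled with the fasteoi
 * flow.
 */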
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;

	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	} else {
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}

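/*
 * The three-cell DT binding is <type number flags>: type 0 is SPI and
 * type 1 is PPI, with the number relative to the start of that range.
 * Hence +16 to skip the SGIs, and another +16 for SPIs, so the result
 * is a raw GIC hwirq.
 */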
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		/* Get the interrupt number and add 16 to skip over SGIs */
		*hwirq = fwspec->param[1] + 16;

		/*
		 * For SPIs, we need to add 16 more to get the GIC irq
		 * ID number
		 */
		if (!fwspec->param[0])
			*hwirq += 16;

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/* Make it clear that broken DTs are... broken */
		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};

static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
			  const char *name, bool use_eoimode1)
{
	/* Initialize irq_chip */
	gic->chip = gic_chip;
	gic->chip.name = name;
	gic->chip.parent_device = dev;

	if (use_eoimode1) {
		gic->chip.irq_mask = gic_eoimode1_mask_irq;
		gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
		gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
	}

	if (gic == &gic_data[0])
		gic->chip.irq_set_affinity = gic_set_affinity;
}

static int gic_init_bases(struct gic_chip_data *gic,
			  struct fwnode_handle *handle)
{
	int gic_irqs, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * No secondary GIC support whatsoever.
		 */
		int irq_base;

		gic_irqs -= 16; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(16, 16, gic_irqs,
					   numa_node_id());
		if (irq_base < 0) {
			WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
			irq_base = 16;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
						    16, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}

static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	char *name;
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) {
		name = kasprintf(GFP_KERNEL, "GICv2");
		gic_init_chip(gic, NULL, name, true);
	} else {
		name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic - &gic_data[0]));
		gic_init_chip(gic, NULL, name, false);
	}

	ret = gic_init_bases(gic, handle);
	if (ret)
		kfree(name);
	else if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}

void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_branch_disable(&supports_deactivate_key);

	gic = &gic_data[0];
	gic->raw_dist_base = dist_base;
	gic->raw_cpu_base = cpu_base;

	__gic_init_bases(gic, NULL);
}

static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;
static bool gicv2_force_probe;

static int __init gicv2_force_probe_cfg(char *buf)
{
	return strtobool(buf, &gicv2_force_probe);
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);

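/*
 * EOImode1 needs GICC_DIR, which lives 4kB into the CPU interface, so
 * the mapping must cover at least 8kB. The checks below work around
 * firmware that describes too small a region, or a GICv2 aliased over
 * 64kB pages.
 */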
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K) {
		void __iomem *alt;
		/*
		 * Check for a stupid firmware that only exposes the
		 * first page of a GICv2.
		 */
		if (!gic_check_gicv2(*base))
			return false;

		if (!gicv2_force_probe) {
			pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
			return false;
		}

		alt = ioremap(cpuif_res.start, SZ_8K);
		if (!alt)
			return false;
		if (!gic_check_gicv2(alt + SZ_4K)) {
			/*
			 * The first page was that of a GICv2, and
			 * the second was *something*. Let's trust it
			 * to be a GICv2, and update the mapping.
			 */
			pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
				&cpuif_res.start);
			iounmap(*base);
			*base = alt;
			return true;
		}

		/*
		 * We detected *two* initial GICv2 pages in a
		 * row. Could be a GICv2 aliased over two 64kB
		 * pages. Update the resource, map the iospace, and
		 * pray.
		 */
		iounmap(alt);
		alt = ioremap(cpuif_res.start, SZ_128K);
		if (!alt)
			return false;
		pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
			&cpuif_res.start);
		cpuif_res.end = cpuif_res.start + SZ_128K - 1;
		iounmap(*base);
		*base = alt;
	}
	if (resource_size(&cpuif_res) == SZ_128K) {
		/*
		 * Verify that we have the first 4kB of a GICv2
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		if (!gic_check_gicv2(*base) ||
		    !gic_check_gicv2(*base + 0xf000))
			return false;

		/*
		 * Move the base up by 60kB, so that we have a 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa\n",
			&cpuif_res.start);
	}

	return true;
}

static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
	if (!gic || !node)
		return -EINVAL;

	gic->raw_dist_base = of_iomap(node, 0);
	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
		goto error;

	gic->raw_cpu_base = of_iomap(node, 1);
	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
		goto error;

	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
		gic->percpu_offset = 0;

	return 0;

error:
	gic_teardown(gic);

	return -ENOMEM;
}

int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	int ret;

	if (!dev || !dev->of_node || !gic || !irq)
		return -EINVAL;

	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
	if (!*gic)
		return -ENOMEM;

	gic_init_chip(*gic, dev, dev->of_node->name, false);

	ret = gic_of_setup(*gic, dev->of_node);
	if (ret)
		return ret;

	ret = gic_init_bases(*gic, &dev->of_node->fwnode);
	if (ret) {
		gic_teardown(*gic);
		return ret;
	}

	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);

	return 0;
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	if (static_branch_likely(&supports_deactivate_key))
		gic_set_kvm_info(&gic_v2_kvm_info);
}

int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
#else
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	return -ENOTSUPP;
}
#endif

#ifdef CONFIG_ACPI
static struct
{
	phys_addr_t cpu_phys_base;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vctrl_base;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static int __init
gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 register in ACPI spec.
	 * All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}

/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}

static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}

#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	if (!acpi_data.vctrl_base)
		return;

	vctrl_res->flags = IORESOURCE_MEM;
	vctrl_res->start = acpi_data.vctrl_base;
	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;

	if (!acpi_data.vcpu_base)
		return;

	vcpu_res->flags = IORESOURCE_MEM;
	vcpu_res->start = acpi_data.vcpu_base;
	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v2_kvm_info.maint_irq = irq;

	gic_set_kvm_info(&gic_v2_kvm_info);
}

static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif