Commit | Line | Data |
---|---|---|
f27ecacc RK |
1 | /* |
2 | * linux/arch/arm/common/gic.c | |
3 | * | |
4 | * Copyright (C) 2002 ARM Limited, All Rights Reserved. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * Interrupt architecture for the GIC: | |
11 | * | |
12 | * o There is one Interrupt Distributor, which receives interrupts | |
13 | * from system devices and sends them to the Interrupt Controllers. | |
14 | * | |
15 | * o There is one CPU Interface per CPU, which sends interrupts sent | |
16 | * by the Distributor, and interrupts generated locally, to the | |
b3a1bde4 CM |
17 | * associated CPU. The base address of the CPU interface is usually |
18 | * aliased so that the same address points to different chips depending | |
19 | * on the CPU it is accessed from. | |
f27ecacc RK |
20 | * |
21 | * Note that IRQs 0-31 are special - they are local to each CPU. | |
22 | * As such, the enable set/clear, pending set/clear and active bit | |
23 | * registers are banked per-cpu for these sources. | |
24 | */ | |
25 | #include <linux/init.h> | |
26 | #include <linux/kernel.h> | |
f37a53cc | 27 | #include <linux/err.h> |
7e1efcf5 | 28 | #include <linux/module.h> |
f27ecacc RK |
29 | #include <linux/list.h> |
30 | #include <linux/smp.h> | |
254056f3 | 31 | #include <linux/cpu_pm.h> |
dcb86e8c | 32 | #include <linux/cpumask.h> |
fced80c7 | 33 | #include <linux/io.h> |
b3f7ed03 RH |
34 | #include <linux/of.h> |
35 | #include <linux/of_address.h> | |
36 | #include <linux/of_irq.h> | |
4294f8ba | 37 | #include <linux/irqdomain.h> |
292b293c MZ |
38 | #include <linux/interrupt.h> |
39 | #include <linux/percpu.h> | |
40 | #include <linux/slab.h> | |
f27ecacc RK |
41 | |
42 | #include <asm/irq.h> | |
562e0027 | 43 | #include <asm/exception.h> |
eb50439b | 44 | #include <asm/smp_plat.h> |
f27ecacc RK |
45 | #include <asm/mach/irq.h> |
46 | #include <asm/hardware/gic.h> | |
47 | ||
db0d4db2 MZ |
/*
 * The GIC register banks are either mapped at one common address for all
 * CPUs, or (on non-banked "Franken-GICs") at a different address per CPU.
 */
union gic_base {
	void __iomem *common_base;		/* single mapping shared by all CPUs */
	void __percpu __iomem **percpu_base;	/* per-CPU mappings (GIC_NON_BANKED) */
};

/* Per-controller state; one instance per GIC present in the system. */
struct gic_chip_data {
	unsigned int irq_offset;
	union gic_base dist_base;	/* Distributor registers */
	union gic_base cpu_base;	/* CPU interface registers */
#ifdef CONFIG_CPU_PM
	/*
	 * Save areas for distributor state across power transitions,
	 * sized for the architectural maximum of 1020 interrupts.
	 */
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	/* PPI/SGI registers are banked per CPU, so their save areas are too. */
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain domain;	/* hwirq <-> Linux irq translation */
#endif
	unsigned int gic_irqs;		/* number of interrupts this GIC implements */
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);	/* base-address accessor */
#endif
};

/* Serialises all distributor register read-modify-write sequences. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
db0d4db2 MZ |
#ifdef CONFIG_GIC_NON_BANKED
/* Resolve the calling CPU's private mapping of a register region. */
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

/* Resolve the single shared mapping used by a properly banked GIC. */
static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

/* Distributor base for @data, via whichever accessor was installed. */
static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

/* CPU interface base for @data, via whichever accessor was installed. */
static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

/* Select which of the two accessors above this GIC should use. */
static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
/* Banked GIC: one common mapping, no accessor indirection needed. */
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d,f)
#endif
125 | ||
7d1f4288 | 126 | static inline void __iomem *gic_dist_base(struct irq_data *d) |
b3a1bde4 | 127 | { |
7d1f4288 | 128 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
db0d4db2 | 129 | return gic_data_dist_base(gic_data); |
b3a1bde4 CM |
130 | } |
131 | ||
7d1f4288 | 132 | static inline void __iomem *gic_cpu_base(struct irq_data *d) |
b3a1bde4 | 133 | { |
7d1f4288 | 134 | struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); |
db0d4db2 | 135 | return gic_data_cpu_base(gic_data); |
b3a1bde4 CM |
136 | } |
137 | ||
/* Translate Linux irq_data back to the GIC hardware interrupt number. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
142 | ||
f27ecacc RK |
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);	/* bit within one 32-irq register */

	raw_spin_lock(&irq_controller_lock);
	/* ENABLE_CLEAR is write-1-to-disable; one register per 32 interrupts. */
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	/* Mask at the GIC first, then let the platform extension mask too. */
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}
156 | ||
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);	/* bit within one 32-irq register */

	raw_spin_lock(&irq_controller_lock);
	/* Mirror of gic_mask_irq: extension unmasks first, then the GIC. */
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	/* ENABLE_SET is write-1-to-enable; one register per 32 interrupts. */
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
167 | ||
1a01753e WD |
/* Signal end-of-interrupt to the CPU interface (fasteoi flow handler). */
static void gic_eoi_irq(struct irq_data *d)
{
	/* Only the extension hook needs the lock; the EOI write does not. */
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
178 | ||
/*
 * Configure the trigger type (level-high or rising-edge) of a GIC
 * interrupt.  Returns 0 on success, -EINVAL for SGIs or unsupported types.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	/* CONFIG packs 2 bits per interrupt; bit 1 of the pair selects edge. */
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* This driver only supports high-level and rising-edge triggering. */
	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	/* Re-enable only if we disabled it above. */
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
226 | ||
d7ed36a4 SS |
227 | static int gic_retrigger(struct irq_data *d) |
228 | { | |
229 | if (gic_arch_extn.irq_retrigger) | |
230 | return gic_arch_extn.irq_retrigger(d); | |
231 | ||
232 | return -ENXIO; | |
233 | } | |
234 | ||
#ifdef CONFIG_SMP
/*
 * Route an SPI to a single CPU chosen from @mask_val.  The TARGET
 * registers hold one byte per interrupt (four interrupts per word).
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	/* Word-aligned TARGET register containing this interrupt's byte. */
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	/* The target byte has 8 bits, so only 8 CPUs can be addressed. */
	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	/* The GIC wants the physical CPU number, not the logical one. */
	bit = 1 << (cpu_logical_map(cpu) + shift);

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
f27ecacc | 258 | |
d7ed36a4 SS |
#ifdef CONFIG_PM
/* Wake-up configuration is only available through the arch extension. */
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	if (!gic_arch_extn.irq_set_wake)
		return -ENXIO;

	return gic_arch_extn.irq_set_wake(d, on);
}

#else
#define gic_set_wake	NULL
#endif
273 | ||
562e0027 MZ |
/*
 * Low-level interrupt entry point: acknowledge and dispatch interrupts
 * from the primary GIC until it reports none pending.
 */
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		/* Strip the source-CPU field (bits 10-12 per the GIC spec). */
		irqnr = irqstat & ~0x1c00;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			/* Normal PPI/SPI: translate hwirq and dispatch. */
			irqnr = irq_domain_to_irq(&gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			/*
			 * SGI/IPI: EOI must include the source CPU bits,
			 * so write back the raw INTACK value.
			 */
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		/* 1021-1023: spurious/special IDs - nothing left to service. */
		break;
	} while (1);
}
299 | ||
/*
 * Chained handler for a secondary GIC cascaded into a parent interrupt:
 * acknowledge on the secondary, translate, and re-dispatch.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	/* Mask/ack the parent interrupt around the whole cascade. */
	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	/* 1023 is the spurious-interrupt ID: nothing pending. */
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
	/* SGIs/PPIs (<32) and special IDs (>1020) are invalid via cascade. */
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
326 | ||
/* irq_chip implementation backing every interrupt owned by a GIC. */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
339 | ||
b3a1bde4 CM |
340 | void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) |
341 | { | |
342 | if (gic_nr >= MAX_GIC_NR) | |
343 | BUG(); | |
6845664a | 344 | if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) |
b3a1bde4 | 345 | BUG(); |
6845664a | 346 | irq_set_chained_handler(irq, gic_handle_cascade_irq); |
b3a1bde4 CM |
347 | } |
348 | ||
/*
 * One-time initialisation of the distributor: configure every global
 * (SPI) interrupt to sane defaults and register the Linux irq handlers.
 * PPIs/SGIs are banked per CPU and handled in gic_cpu_init() instead.
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i, irq;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	struct irq_domain *domain = &gic->domain;
	void __iomem *base = gic_data_dist_base(gic);
	u32 cpu = cpu_logical_map(smp_processor_id());

	/* Replicate this CPU's target bit into all four bytes of the word. */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable the distributor while we reprogram it. */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	irq_domain_for_each_irq(domain, i, irq) {
		if (i < 32) {
			/* PPIs/SGIs: per-CPU, not auto-enabled. */
			irq_set_percpu_devid(irq);
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_percpu_devid_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
		} else {
			/* SPIs: normal fasteoi flow, probing allowed. */
			irq_set_chip_and_handler(irq, &gic_chip,
						 handle_fasteoi_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
		irq_set_chip_data(irq, gic);
	}

	/* Re-enable the distributor. */
	writel_relaxed(1, base + GIC_DIST_CTRL);
}
408 | ||
/*
 * Per-CPU initialisation: program the banked PPI/SGI registers and
 * enable this CPU's interface.  Called on the boot CPU from
 * gic_init_bases() and on secondaries via gic_secondary_init().
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* Accept all but the lowest priority level, then switch on. */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
431 | ||
254056f3 CC |
#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	/* Nothing to save for a GIC that was never mapped. */
	if (!dist_base)
		return;

	/* Trigger config: 16 interrupts per CONFIG register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	/* CPU routing: 4 interrupts per TARGET register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	/* Enable state: 32 interrupts per ENABLE register. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
466 | ||
/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	/* Keep the distributor off while its state is reloaded. */
	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities are not saved; reprogram the gic_dist_init() default. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
509 | ||
/*
 * Save this CPU's banked PPI/SGI enable and config registers into the
 * per-CPU save areas, ahead of a CPU power-down (CPU_PM_ENTER).
 */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	/* First 32 interrupts (PPIs/SGIs) only: one enable word... */
	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	/* ...and two config words (16 interrupts per CONFIG register). */
	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}
535 | ||
/*
 * Restore this CPU's banked PPI/SGI state and re-enable its CPU
 * interface after a power transition (CPU_PM_EXIT / ENTER_FAILED).
 */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities are not saved; reprogram the gic_cpu_init() default. */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
566 | ||
/*
 * CPU PM notifier: dispatch save/restore for every GIC on CPU and
 * cluster power transitions.  CPU events affect only banked (per-CPU)
 * state; cluster events save/restore the shared distributor.
 */
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}
597 | ||
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

/*
 * Allocate the per-CPU PPI save areas and hook the CPU PM notifier.
 * OOM here is fatal: PM save/restore cannot work without the buffers.
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	/* Register the notifier once, on behalf of all GICs. */
	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
/* !CONFIG_CPU_PM: no state to save, nothing to set up. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
620 | ||
b3f7ed03 RH |
#ifdef CONFIG_OF
/*
 * Translate a 3-cell devicetree interrupt specifier into a GIC hwirq
 * number and trigger type.  Cell 0 selects SPI (0) or PPI (1), cell 1
 * is the interrupt number relative to that class, cell 2 the flags.
 */
static int gic_irq_domain_dt_translate(struct irq_domain *d,
				       struct device_node *controller,
				       const u32 *intspec, unsigned int intsize,
				       unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
#endif
643 | ||
/* irq_domain ops; only DT translation is needed for this legacy domain. */
const struct irq_domain_ops gic_irq_domain_ops = {
#ifdef CONFIG_OF
	.dt_translate = gic_irq_domain_dt_translate,
#endif
};
649 | ||
db0d4db2 MZ |
/*
 * Probe and initialise GIC @gic_nr.
 *
 * @irq_start:	   first Linux irq to use, or -1 to let the core pick;
 * @dist_base:	   mapped distributor registers;
 * @cpu_base:	   mapped CPU interface registers;
 * @percpu_offset: per-CPU stride for non-banked GICs, 0 for sane ones.
 */
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset)
{
	struct gic_chip_data *gic;
	struct irq_domain *domain;
	int gic_irqs;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	domain = &gic->domain;
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			/* free_percpu(NULL) is a no-op, so this is safe. */
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		/* Each CPU sees the GIC at base + stride * physical id. */
		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	domain->hwirq_base = 32;
	if (gic_nr == 0) {
		if ((irq_start & 31) > 0) {
			domain->hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		}
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	domain->nr_irq = gic_irqs - domain->hwirq_base;
	domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq,
					   numa_node_id());
	if (IS_ERR_VALUE(domain->irq_base)) {
		/* Fall back to a platform-preallocated descriptor range. */
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		domain->irq_base = irq_start;
	}
	domain->priv = gic;
	domain->ops = &gic_irq_domain_ops;
	irq_domain_add(domain);

	/* Inherit flags (e.g. skip-set-wake) from the arch extension. */
	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
733 | ||
38489533 RK |
/* Bring up the banked per-CPU GIC state on a secondary CPU. */
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}
740 | ||
#ifdef CONFIG_SMP
/* Send software-generated interrupt (IPI) @irq to the CPUs in @mask. */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
b3f7ed03 RH |
761 | |
762 | #ifdef CONFIG_OF | |
763 | static int gic_cnt __initdata = 0; | |
764 | ||
765 | int __init gic_of_init(struct device_node *node, struct device_node *parent) | |
766 | { | |
767 | void __iomem *cpu_base; | |
768 | void __iomem *dist_base; | |
db0d4db2 | 769 | u32 percpu_offset; |
b3f7ed03 RH |
770 | int irq; |
771 | struct irq_domain *domain = &gic_data[gic_cnt].domain; | |
772 | ||
773 | if (WARN_ON(!node)) | |
774 | return -ENODEV; | |
775 | ||
776 | dist_base = of_iomap(node, 0); | |
777 | WARN(!dist_base, "unable to map gic dist registers\n"); | |
778 | ||
779 | cpu_base = of_iomap(node, 1); | |
780 | WARN(!cpu_base, "unable to map gic cpu registers\n"); | |
781 | ||
db0d4db2 MZ |
782 | if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) |
783 | percpu_offset = 0; | |
784 | ||
b3f7ed03 RH |
785 | domain->of_node = of_node_get(node); |
786 | ||
db0d4db2 | 787 | gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset); |
b3f7ed03 RH |
788 | |
789 | if (parent) { | |
790 | irq = irq_of_parse_and_map(node, 0); | |
791 | gic_cascade_irq(gic_cnt, irq); | |
792 | } | |
793 | gic_cnt++; | |
794 | return 0; | |
795 | } | |
796 | #endif |