irqchip: mips-gic: Make IPI bitmaps static
drivers/irqchip/irq-mips-gic.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

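/*
 * Trigger the shared interrupt backing this IPI by writing its hwirq number
 * to the GIC WEDGE register; routing to the target CPU was configured when
 * the IPI was allocated, so @cpu is not used here.
 */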
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

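/*
 * Compute the set of shared interrupts that are both pending and routed to
 * the current CPU, then dispatch each of them. When invoked from a chained
 * handler the virqs are handled via generic_handle_irq() rather than
 * do_IRQ().
 */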
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}

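/*
 * Program the polarity, trigger and dual-edge configuration of a shared
 * interrupt, and switch its irq_chip and flow handler between the level and
 * edge variants to match.
 */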
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
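/*
 * A shared interrupt is only ever routed to a single VP(E): pick the first
 * online CPU in @cpumask, re-route the interrupt to it and update the
 * per-CPU masks and effective affinity accordingly.
 */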
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name = "MIPS GIC",
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name = "MIPS GIC",
	.irq_ack = gic_ack_irq,
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
	.ipi_send_single = gic_send_ipi,
};

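/*
 * Dispatch every local (per-VP) interrupt that is both pending and unmasked
 * on the current CPU.
 */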
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name = "MIPS GIC Local",
	.irq_mask = gic_mask_local_irq,
	.irq_unmask = gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

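/*
 * irq_cpu_online() callback: replay the recorded pin map and mask state so
 * that "all VPEs" local interrupts are configured consistently on a CPU
 * that has just come online.
 */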
static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);

	write_gic_vl_map(intr, cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name = "MIPS GIC Local",
	.irq_mask = gic_mask_local_irq_all_vpes,
	.irq_unmask = gic_unmask_local_irq_all_vpes,
	.irq_cpu_online = gic_all_vpes_irq_cpu_online,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

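/*
 * Route shared interrupt @hw to @cpu via the configured CPU pin and mark it
 * deliverable there by setting its bit in that CPU's pcpu_masks entry.
 */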
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	gic_clear_pcpu_masks(intr);
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

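/*
 * Bind a GIC hwirq to a Linux virq. Shared interrupts get the level chip and
 * an initial routing to CPU 0; the timer, perfcounter and FDC local
 * interrupts use the "all VPEs" chip, while the remaining local interrupts
 * are set up as per-CPU devid interrupts. The chosen pin mapping is written
 * to every online VP(E).
 */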
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		/* fall-through */
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(intr, map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

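/*
 * Allocate a block of consecutive shared interrupts from the reserved IPI
 * range and map one to each CPU in @ipimask, installing the edge chip in
 * both the IPI domain and the parent GIC domain.
 */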
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

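/* Return the block of IPI hwirqs starting at @virq's hwirq to the pool. */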
void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
		break;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

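/*
 * CPU hotplug "starting" callback: put the GIC's per-VP(E) state into a known
 * configuration on a CPU that is coming online.
 */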
static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
	irq_cpu_online();

	return 0;
}

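/*
 * Probe the GIC from the device-tree: pick a free CPU vector, locate and map
 * the GIC register block, create the shared/local and IPI IRQ domains,
 * reserve IPI vectors and register the CPU hotplug startup callback.
 */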
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add GIC IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add GIC IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);