/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
union gic_base {
        void __iomem *common_base;
        void __percpu * __iomem *percpu_base;
};
struct gic_chip_data {
        union gic_base dist_base;
        union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
        u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
        u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
        u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
        u32 __percpu *saved_ppi_enable;
        u32 __percpu *saved_ppi_conf;
#endif
        struct irq_domain *domain;
        unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
        void __iomem *(*get_base)(union gic_base *);
#endif
};
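
/*
 * In the structure above, the saved_spi_* arrays hold the distributor
 * state for shared peripheral interrupts across a power-down, while the
 * saved_ppi_* pointers are per-cpu buffers for the banked SGI/PPI
 * registers, which must be saved and restored on each CPU separately.
 */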

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

#ifndef MAX_GIC_NR
#define MAX_GIC_NR      1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
        return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
        return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
        return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
        return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
                                         void __iomem *(*f)(union gic_base *))
{
        data->get_base = f;
}
#else
#define gic_data_dist_base(d)   ((d)->dist_base.common_base)
#define gic_data_cpu_base(d)    ((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
        return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}
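
/*
 * The distributor's enable, pending and active registers hold one bit
 * per interrupt, packed 32 interrupts to a 32-bit word: hwirq / 32
 * selects the word and hwirq % 32 the bit within it, which is the
 * arithmetic gic_poke_irq() and gic_peek_irq() perform above.
 */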

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
                break;

        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
                break;

        case IRQCHIP_STATE_MASKED:
                reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
                break;

        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
                break;

        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
                break;

        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
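
/*
 * The generic irqchip states map directly onto distributor registers:
 * PENDING uses the GICD pending set/clear registers, ACTIVE the active
 * set/clear registers, and MASKED is simply the inverse of the
 * enable-set bit for the interrupt.
 */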

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);

        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
                return -EINVAL;

        /* SPIs have restrictions on the supported types */
        if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
            type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        return gic_configure_irq(gicirq, type, base, NULL);
}
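
/*
 * GICD_ITARGETSRn holds one target byte per interrupt, four interrupts
 * to a 32-bit register; each byte is a bitmask of the CPU interfaces
 * the interrupt is forwarded to. Hence the (hwirq % 4) * 8 shift in
 * gic_set_affinity() below.
 */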

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
        unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
        u32 val, mask, bit;
        unsigned long flags;

        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
        else
                cpu = cpumask_first(mask_val);

        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;

        raw_spin_lock_irqsave(&irq_controller_lock, flags);
        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

        return IRQ_SET_MASK_OK;
}
#endif
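
/*
 * Reading GICC_IAR acknowledges the highest priority pending interrupt
 * and returns its ID. IDs 1020-1023 are reserved (1023 means spurious),
 * so the loop below dispatches IDs 16-1020 through the IRQ domain,
 * treats IDs below 16 as IPIs, and stops on anything else.
 */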

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u32 irqstat, irqnr;
        struct gic_chip_data *gic = &gic_data[0];
        void __iomem *cpu_base = gic_data_cpu_base(gic);

        do {
                irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
                irqnr = irqstat & GICC_IAR_INT_ID_MASK;

                if (likely(irqnr > 15 && irqnr < 1021)) {
                        handle_domain_irq(gic->domain, irqnr, regs);
                        continue;
                }
                if (irqnr < 16) {
                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#endif
                        continue;
                }
                break;
        } while (1);
}

static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
        struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int cascade_irq, gic_irq;
        unsigned long status;

        chained_irq_enter(chip, desc);

        raw_spin_lock(&irq_controller_lock);
        status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
        raw_spin_unlock(&irq_controller_lock);

        gic_irq = (status & GICC_IAR_INT_ID_MASK);
        if (gic_irq == GICC_INT_SPURIOUS)
                goto out;

        cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
        if (unlikely(gic_irq < 32 || gic_irq > 1020))
                handle_bad_irq(cascade_irq, desc);
        else
                generic_handle_irq(cascade_irq);

 out:
        chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
        .name                   = "GIC",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       = gic_set_affinity,
#endif
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
        if (gic_nr >= MAX_GIC_NR)
                BUG();
        irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
                                         &gic_data[gic_nr]);
}
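
/*
 * GICD_ITARGETSR0-7 cover the banked SGIs/PPIs and are read-only:
 * reading them returns a target mask with the bit of the CPU interface
 * doing the read set. This is how the driver discovers which GIC CPU
 * interface number the current CPU is connected to.
 */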

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
        void __iomem *base = gic_data_dist_base(gic);
        u32 mask, i;

        for (i = mask = 0; i < 32; i += 4) {
                mask = readl_relaxed(base + GIC_DIST_TARGET + i);
                mask |= mask >> 16;
                mask |= mask >> 8;
                if (mask)
                        break;
        }

        if (!mask && num_possible_cpus() > 1)
                pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

        return mask;
}

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
        void __iomem *cpu_base = gic_data_cpu_base(gic);
        u32 bypass = 0;

        /*
         * Preserve bypass disable bits to be written back later
         */
        bypass = readl(cpu_base + GIC_CPU_CTRL);
        bypass &= GICC_DIS_BYPASS_MASK;

        writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void __init gic_dist_init(struct gic_chip_data *gic)
{
        unsigned int i;
        u32 cpumask;
        unsigned int gic_irqs = gic->gic_irqs;
        void __iomem *base = gic_data_dist_base(gic);

        writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

        /*
         * Set all global interrupts to this CPU only.
         */
        cpumask = gic_get_cpumask(gic);
        cpumask |= cpumask << 8;
        cpumask |= cpumask << 16;
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

        gic_dist_config(base, gic_irqs, NULL);

        writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
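
/*
 * gic_dist_config() (in irq-gic-common.c) programs the SPIs to the
 * default trigger configuration and priority and leaves them disabled;
 * the target routing set up above is the only distributor state this
 * driver adds on top of that.
 */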

static void gic_cpu_init(struct gic_chip_data *gic)
{
        void __iomem *dist_base = gic_data_dist_base(gic);
        void __iomem *base = gic_data_cpu_base(gic);
        unsigned int cpu_mask, cpu = smp_processor_id();
        int i;

        /*
         * Setting up the CPU map is only relevant for the primary GIC
         * because any nested/secondary GICs do not directly interface
         * with the CPU(s).
         */
        if (gic == &gic_data[0]) {
                /*
                 * Get what the GIC says our CPU mask is.
                 */
                BUG_ON(cpu >= NR_GIC_CPU_IF);
                cpu_mask = gic_get_cpumask(gic);
                gic_cpu_map[cpu] = cpu_mask;

                /*
                 * Clear our mask from the other map entries in case they're
                 * still undefined.
                 */
                for (i = 0; i < NR_GIC_CPU_IF; i++)
                        if (i != cpu)
                                gic_cpu_map[i] &= ~cpu_mask;
        }

        gic_cpu_config(dist_base, NULL);

        writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
        gic_cpu_if_up(gic);
}

int gic_cpu_if_down(unsigned int gic_nr)
{
        void __iomem *cpu_base;
        u32 val = 0;

        if (gic_nr >= MAX_GIC_NR)
                return -EINVAL;

        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
        val = readl(cpu_base + GIC_CPU_CTRL);
        val &= ~GICC_ENABLE;
        writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

        return 0;
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
        unsigned int gic_irqs;
        void __iomem *dist_base;
        int i;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        gic_irqs = gic_data[gic_nr].gic_irqs;
        dist_base = gic_data_dist_base(&gic_data[gic_nr]);

        if (!dist_base)
                return;

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
                gic_data[gic_nr].saved_spi_conf[i] =
                        readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                gic_data[gic_nr].saved_spi_target[i] =
                        readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                gic_data[gic_nr].saved_spi_enable[i] =
                        readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
        unsigned int gic_irqs;
        unsigned int i;
        void __iomem *dist_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        gic_irqs = gic_data[gic_nr].gic_irqs;
        dist_base = gic_data_dist_base(&gic_data[gic_nr]);

        if (!dist_base)
                return;

        writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
                        dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                writel_relaxed(GICD_INT_DEF_PRI_X4,
                        dist_base + GIC_DIST_PRI + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
                        dist_base + GIC_DIST_TARGET + i * 4);

        for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
                writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
                        dist_base + GIC_DIST_ENABLE_SET + i * 4);

        writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
        int i;
        u32 *ptr;
        void __iomem *dist_base;
        void __iomem *cpu_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

        if (!dist_base || !cpu_base)
                return;

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
        int i;
        u32 *ptr;
        void __iomem *dist_base;
        void __iomem *cpu_base;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

        if (!dist_base || !cpu_base)
                return;

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
        for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
                writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

        ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
        for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
                writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

        for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
                writel_relaxed(GICD_INT_DEF_PRI_X4,
                               dist_base + GIC_DIST_PRI + i * 4);

        writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
        gic_cpu_if_up(&gic_data[gic_nr]);
}
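
/*
 * Note that SGI/PPI priorities are not saved: gic_cpu_restore() simply
 * reprograms the default priority, matching what gic_cpu_config() set
 * up at boot, before re-enabling the CPU interface.
 */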

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        int i;

        for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
                /* Skip over unused GICs */
                if (!gic_data[i].get_base)
                        continue;
#endif
                switch (cmd) {
                case CPU_PM_ENTER:
                        gic_cpu_save(i);
                        break;
                case CPU_PM_ENTER_FAILED:
                case CPU_PM_EXIT:
                        gic_cpu_restore(i);
                        break;
                case CPU_CLUSTER_PM_ENTER:
                        gic_dist_save(i);
                        break;
                case CPU_CLUSTER_PM_ENTER_FAILED:
                case CPU_CLUSTER_PM_EXIT:
                        gic_dist_restore(i);
                        break;
                }
        }

        return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
        .notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
        gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_enable);

        gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
                sizeof(u32));
        BUG_ON(!gic->saved_ppi_conf);

        if (gic == &gic_data[0])
                cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;
        unsigned long flags, map = 0;

        raw_spin_lock_irqsave(&irq_controller_lock, flags);

        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
                map |= gic_cpu_map[cpu];

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before they observe us issuing the IPI.
         */
        dmb(ishst);

        /* this always happens on GIC0 */
        writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

        raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
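
/*
 * In the GICD_SGIR write above, bits [23:16] carry the CPU interface
 * target list and bits [3:0] the SGI number; the target list filter in
 * bits [25:24] is left at zero, meaning "forward to the interfaces in
 * the target list".
 */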

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
        BUG_ON(cpu_id >= NR_GIC_CPU_IF);
        cpu_id = 1 << cpu_id;
        /* this always happens on GIC0 */
        writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
        unsigned int cpu_bit;

        if (cpu >= NR_GIC_CPU_IF)
                return -1;
        cpu_bit = gic_cpu_map[cpu];
        if (cpu_bit & (cpu_bit - 1))
                return -1;
        return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
        unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
        void __iomem *dist_base;
        int i, ror_val, cpu = smp_processor_id();
        u32 val, cur_target_mask, active_mask;

        if (gic_nr >= MAX_GIC_NR)
                BUG();

        dist_base = gic_data_dist_base(&gic_data[gic_nr]);
        if (!dist_base)
                return;

        gic_irqs = gic_data[gic_nr].gic_irqs;

        cur_cpu_id = __ffs(gic_cpu_map[cpu]);
        cur_target_mask = 0x01010101 << cur_cpu_id;
        ror_val = (cur_cpu_id - new_cpu_id) & 31;

        raw_spin_lock(&irq_controller_lock);

        /* Update the target interface for this logical CPU */
        gic_cpu_map[cpu] = 1 << new_cpu_id;

        /*
         * Find all the peripheral interrupts targeting the current
         * CPU interface and migrate them to the new CPU interface.
         * We skip DIST_TARGET 0 to 7 as they are read-only.
         */
        for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
                val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
                active_mask = val & cur_target_mask;
                if (active_mask) {
                        val &= ~active_mask;
                        val |= ror32(active_mask, ror_val);
                        writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
                }
        }

        raw_spin_unlock(&irq_controller_lock);

        /*
         * Now let's migrate and clear any potential SGIs that might be
         * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
         * is a banked register, we can only forward the SGI using
         * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
         * doesn't use that information anyway.
         *
         * For the same reason we do not adjust SGI source information
         * for previously sent SGIs by us to other CPUs either.
         */
        for (i = 0; i < 16; i += 4) {
                int j;
                val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
                if (!val)
                        continue;
                writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
                for (j = i; j < i + 4; j++) {
                        if (val & 0xff)
                                writel_relaxed((1 << (new_cpu_id + 16)) | j,
                                               dist_base + GIC_DIST_SOFTINT);
                        val >>= 8;
                }
        }
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
        if (!gic_dist_physaddr)
                return 0;
        return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
        struct resource res;
        if (of_address_to_resource(node, 0, &res) == 0) {
                gic_dist_physaddr = res.start;
                pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
        }
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        } else {
                irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}
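
/*
 * Device tree interrupt specifiers for the GIC use three cells: cell 0
 * selects the SPI (0) or PPI (1) space, cell 1 gives the interrupt
 * number within that space (so hwirq = number + 32 for SPIs and
 * number + 16 for PPIs), and cell 2 carries the trigger type flags.
 */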

static int gic_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *controller,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        unsigned long ret = 0;

        if (d->of_node != controller)
                return -EINVAL;
        if (intsize < 3)
                return -EINVAL;

        /* Get the interrupt number and add 16 to skip over SGIs */
        *out_hwirq = intspec[1] + 16;

        /* For SPIs, we need to add 16 more to get the GIC irq ID number */
        if (!intspec[0])
                *out_hwirq += 16;

        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

        return ret;
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
                              void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init(&gic_data[0]);
        return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
        .notifier_call = gic_secondary_init,
        .priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        struct of_phandle_args *irq_data = arg;

        ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
                                   irq_data->args_count, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++)
                gic_irq_domain_map(domain, virq + i, hwirq + i);

        return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
        .xlate = gic_irq_domain_xlate,
        .alloc = gic_irq_domain_alloc,
        .free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .unmap = gic_irq_domain_unmap,
        .xlate = gic_irq_domain_xlate,
};

void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                           void __iomem *dist_base, void __iomem *cpu_base,
                           u32 percpu_offset, struct device_node *node)
{
        irq_hw_number_t hwirq_base;
        struct gic_chip_data *gic;
        int gic_irqs, irq_base, i;

        BUG_ON(gic_nr >= MAX_GIC_NR);

        gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
        if (percpu_offset) { /* Franken-GIC without banked registers... */
                unsigned int cpu;

                gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
                gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
                if (WARN_ON(!gic->dist_base.percpu_base ||
                            !gic->cpu_base.percpu_base)) {
                        free_percpu(gic->dist_base.percpu_base);
                        free_percpu(gic->cpu_base.percpu_base);
                        return;
                }

                for_each_possible_cpu(cpu) {
                        u32 mpidr = cpu_logical_map(cpu);
                        u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        unsigned long offset = percpu_offset * core_id;
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }

                gic_set_base_accessor(gic, gic_get_percpu_base);
        } else
#endif
        {                       /* Normal, sane GIC... */
                WARN(percpu_offset,
                     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
                     percpu_offset);
                gic->dist_base.common_base = dist_base;
                gic->cpu_base.common_base = cpu_base;
                gic_set_base_accessor(gic, gic_get_common_base);
        }

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
         */
        gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
        gic_irqs = (gic_irqs + 1) * 32;
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic->gic_irqs = gic_irqs;

        if (node) {             /* DT case */
                gic->domain = irq_domain_add_linear(node, gic_irqs,
                                                    &gic_irq_domain_hierarchy_ops,
                                                    gic);
        } else {                /* Non-DT case */
                /*
                 * For primary GICs, skip over SGIs.
                 * For secondary GICs, skip over PPIs, too.
                 */
                if (gic_nr == 0 && (irq_start & 31) > 0) {
                        hwirq_base = 16;
                        if (irq_start != -1)
                                irq_start = (irq_start & ~31) + 16;
                } else {
                        hwirq_base = 32;
                }

                gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

                irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
                                           numa_node_id());
                if (IS_ERR_VALUE(irq_base)) {
                        WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
                             irq_start);
                        irq_base = irq_start;
                }

                gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
                                        hwirq_base, &gic_irq_domain_ops, gic);
        }

        if (WARN_ON(!gic->domain))
                return;

        if (gic_nr == 0) {
                /*
                 * Initialize the CPU interface map to all CPUs.
                 * It will be refined as each CPU probes its ID.
                 * This is only necessary for the primary GIC.
                 */
                for (i = 0; i < NR_GIC_CPU_IF; i++)
                        gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
                set_smp_cross_call(gic_raise_softirq);
                register_cpu_notifier(&gic_cpu_notifier);
#endif
                set_handle_irq(gic_handle_irq);
        }

        gic_dist_init(gic);
        gic_cpu_init(gic);
        gic_pm_init(gic);
}
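
/*
 * Device tree probing below: map the distributor and CPU interface
 * windows, hand them to gic_init_bases(), and treat any GIC that has a
 * parent interrupt as a secondary controller cascaded into it.
 */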

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *cpu_base;
        void __iomem *dist_base;
        u32 percpu_offset;
        int irq;

        if (WARN_ON(!node))
                return -ENODEV;

        dist_base = of_iomap(node, 0);
        WARN(!dist_base, "unable to map gic dist registers\n");

        cpu_base = of_iomap(node, 1);
        WARN(!cpu_base, "unable to map gic cpu registers\n");

        if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
                percpu_offset = 0;

        gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
        if (!gic_cnt)
                gic_init_physaddr(node);

        if (parent) {
                irq = irq_of_parse_and_map(node, 0);
                gic_cascade_irq(gic_cnt, irq);
        }

        if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
                gicv2m_of_init(node, gic_data[gic_cnt].domain);

        gic_cnt++;
        return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

#endif

#ifdef CONFIG_ACPI
static phys_addr_t dist_phy_base, cpu_phy_base __initdata;

static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
                        const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;
        phys_addr_t gic_cpu_base;
        static int cpu_base_assigned;

        processor = (struct acpi_madt_generic_interrupt *)header;

        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        /*
         * There is no support for non-banked GICv1/2 registers in the ACPI
         * spec. All CPU interface addresses have to be the same.
         */
        gic_cpu_base = processor->base_address;
        if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
                return -EINVAL;

        cpu_phy_base = gic_cpu_base;
        cpu_base_assigned = 1;
        return 0;
}

static int __init
gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
                                const unsigned long end)
{
        struct acpi_madt_generic_distributor *dist;

        dist = (struct acpi_madt_generic_distributor *)header;

        if (BAD_MADT_ENTRY(dist, end))
                return -EINVAL;

        dist_phy_base = dist->base_address;
        return 0;
}

int __init
gic_v2_acpi_init(struct acpi_table_header *table)
{
        void __iomem *cpu_base, *dist_base;
        int count;

        /* Collect CPU base addresses */
        count = acpi_parse_entries(ACPI_SIG_MADT,
                                   sizeof(struct acpi_table_madt),
                                   gic_acpi_parse_madt_cpu, table,
                                   ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
        if (count <= 0) {
                pr_err("No valid GICC entries exist\n");
                return -EINVAL;
        }

        /*
         * Find the distributor base address. We expect one distributor entry
         * since the ACPI 5.1 spec supports neither multi-GIC instances nor
         * GIC cascades.
         */
        count = acpi_parse_entries(ACPI_SIG_MADT,
                                   sizeof(struct acpi_table_madt),
                                   gic_acpi_parse_madt_distributor, table,
                                   ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
        if (count <= 0) {
                pr_err("No valid GICD entries exist\n");
                return -EINVAL;
        } else if (count > 1) {
                pr_err("More than one GICD entry detected\n");
                return -EINVAL;
        }

        cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
        if (!cpu_base) {
                pr_err("Unable to map GICC registers\n");
                return -ENOMEM;
        }

        dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
        if (!dist_base) {
                pr_err("Unable to map GICD registers\n");
                iounmap(cpu_base);
                return -ENOMEM;
        }

        /*
         * Initialize GIC instance zero (no multi-GIC support). Also, set GIC
         * as the default IRQ domain to allow for GSI registration and GSI to
         * IRQ number translation (see acpi_register_gsi() and
         * acpi_gsi_to_irq()).
         */
        gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
        irq_set_default_host(gic_data[0].domain);

        acpi_irq_model = ACPI_IRQ_MODEL_GIC;
        return 0;
}
#endif