controller, or the HW block containing it, is referred to occasionally
as "armctrl" in the SoC documentation, hence naming of this binding.
+The BCM2836 contains the same interrupt controller with the same
+interrupts, but the per-CPU interrupt controller is the root, and an
+interrupt there indicates that the ARMCTRL has an interrupt to handle.
+
Required properties:
-- compatible : should be "brcm,bcm2835-armctrl-ic"
+- compatible : should be "brcm,bcm2835-armctrl-ic" or
+ "brcm,bcm2836-armctrl-ic"
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
The 2nd cell contains the interrupt number within the bank. Valid values
are 0..7 for bank 0, and 0..31 for bank 1.
+Additional required properties for brcm,bcm2836-armctrl-ic:
+- interrupt-parent : Specifies the parent interrupt controller when this
+ controller is the second level.
+- interrupts : Specifies the interrupt on the parent for this interrupt
+ controller to handle.
+
The interrupt sources are as follows:
Bank 0:
Example:
+/* BCM2835, first level */
intc: interrupt-controller {
compatible = "brcm,bcm2835-armctrl-ic";
reg = <0x7e00b200 0x200>;
interrupt-controller;
#interrupt-cells = <2>;
};
+
+/* BCM2836, second level */
+intc: interrupt-controller {
+ compatible = "brcm,bcm2836-armctrl-ic";
+ reg = <0x7e00b200 0x200>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&local_intc>;
+ interrupts = <8>;
+};
--- /dev/null
+BCM2836 per-CPU interrupt controller
+
+The BCM2836 has a per-cpu interrupt controller for the timer, PMU
+events, and SMP IPIs. One of the CPUs may receive interrupts for the
+peripheral (GPU) events, which chain to the BCM2835-style interrupt
+controller.
+
+Required properties:
+
+- compatible: Should be "brcm,bcm2836-l1-intc"
+- reg: Specifies base physical address and size of the
+ registers
+- interrupt-controller: Identifies the node as an interrupt controller
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The value shall be 1.
+
+Please refer to interrupts.txt in this directory for details of the common
+interrupt controller bindings used by client devices.
+
+The interrupt sources are as follows:
+
+0: CNTPSIRQ
+1: CNTPNSIRQ
+2: CNTHPIRQ
+3: CNTVIRQ
+8: GPU_FAST
+9: PMU_FAST
+
+Example:
+
+local_intc: local_intc {
+ compatible = "brcm,bcm2836-l1-intc";
+ reg = <0x40000000 0x100>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&local_intc>;
+};
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
- cpumask_copy(data->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
chip->irq_set_affinity(data, cpumask_of(cpu), false);
return 0;
}
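
The hunk above is the first of many conversions from poking irq_data->affinity
directly to going through the genirq accessors, so the field's layout can later
change behind them. A minimal sketch of the idiom, assuming only core genirq
helpers (the function name is made up):

static void example_pin_irq_to_cpu(struct irq_data *data, unsigned int cpu)
{
	/* Fetch the mask via the accessor instead of data->affinity */
	struct cpumask *mask = irq_data_get_affinity_mask(data);

	cpumask_copy(mask, cpumask_of(cpu));
}

When only an irq number is at hand, irq_get_affinity_mask(irq) provides the
same indirection, as several later hunks show.
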
static int idu_first_irq;
-static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc)
+static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ unsigned int core_irq = irq_desc_get_irq(desc);
unsigned int idu_irq;
idu_irq = core_irq - idu_first_irq;
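
The same convention repeats in every chained flow handler touched below: the
irq argument is renamed with a leading "__" to mark it unused, and the handler
re-derives the irq number and its data from the descriptor, paving the way for
the argument to be dropped from the prototype entirely. A hedged sketch of the
resulting handler shape (EXAMPLE_FIRST_IRQ and all names are hypothetical):

#define EXAMPLE_FIRST_IRQ	32	/* hypothetical base of the parent range */

static void example_cascade_isr(unsigned int __irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	unsigned int irq = irq_desc_get_irq(desc);	/* descriptor, not argument */

	/* Demux the parent interrupt onto the child domain */
	generic_handle_irq(irq_find_mapping(domain, irq - EXAMPLE_FIRST_IRQ));
}
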
static void __init r8a7779_init_irq_dt(void)
{
- gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE);
-
irqchip_init();
/* route all interrupts to ARM */
struct device_node *np;
struct resource r;
- gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
irqchip_init();
np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
of_address_to_resource(np, 0, &r);
* to the CPU by disabling the GIC CPU IF to prevent wfi
* from completing execution behind power controller back
*/
- gic_cpu_if_down();
+ gic_cpu_if_down(0);
}
static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
static void __init zynq_irq_init(void)
{
- gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
irqchip_init();
}
irqd_set_trigger_type(d, flow_type);
if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else
- __irq_set_handler_locked(irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
return IRQ_SET_MASK_OK_NOCOPY;
}
struct pio_device *pio = irq_desc_get_chip_data(desc);
unsigned gpio_irq;
- gpio_irq = (unsigned) irq_get_handler_data(irq);
+ gpio_irq = (unsigned) irq_desc_get_handler_data(desc);
for (;;) {
u32 isr;
unsigned i;
irq_set_chip_data(irq, pio);
- irq_set_handler_data(irq, (void *)gpio_irq);
for (i = 0; i < 32; i++, gpio_irq++) {
irq_set_chip_data(gpio_irq, pio);
handle_simple_irq);
}
- irq_set_chained_handler(irq, gpio_irq_handler);
+ irq_set_chained_handler_and_data(irq, gpio_irq_handler,
+ (void *)gpio_irq);
}
/*--------------------------------------------------------------------------*/
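
Installing the handler data and the chained handler in two separate calls, as
the removed lines above did, leaves a window in which the chained handler can
run against stale or missing data; irq_set_chained_handler_and_data() updates
both under the descriptor lock. A sketch of the converted setup path, reusing
the example_cascade_isr shape from the earlier sketch (names illustrative):

static void example_cascade_isr(unsigned int __irq, struct irq_desc *desc);

static void example_setup_cascade(unsigned int irq, void *cascade_data)
{
	/* Flow handler and handler data are set in one locked operation */
	irq_set_chained_handler_and_data(irq, example_cascade_isr, cascade_data);
}
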
.irq_unmask = bf537_mac_rx_unmask_irq,
};
-static void bf537_demux_mac_rx_irq(unsigned int int_irq,
+static void bf537_demux_mac_rx_irq(unsigned int __int_irq,
struct irq_desc *desc)
{
+ unsigned int int_irq = irq_desc_get_irq(desc);
+
if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
bfin_handle_irq(IRQ_MAC_RX);
else
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
- bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
+ bfin_internal_unmask_irq_affinity(d->irq,
+ irq_data_get_affinity_mask(d));
}
static int bfin_internal_set_affinity(struct irq_data *d,
}
#endif
-static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
+static inline void bfin_set_irq_handler(struct irq_data *d, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
handle = handle_level_irq;
#endif
- __irq_set_handler_locked(irq, handle);
+ irq_set_handler_locked(d, handle);
}
#ifdef CONFIG_GPIO_ADI
}
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
- bfin_set_irq_handler(irq, handle_edge_irq);
+ bfin_set_irq_handler(d, handle_edge_irq);
else
- bfin_set_irq_handler(irq, handle_level_irq);
+ bfin_set_irq_handler(d, handle_level_irq);
return 0;
}
}
}
-void bfin_demux_gpio_irq(unsigned int inta_irq,
- struct irq_desc *desc)
+void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc)
{
+ unsigned int inta_irq = irq_desc_get_irq(desc);
unsigned int irq;
switch (inta_irq) {
.irq_unmask = unmask_megamod,
};
-static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc)
{
struct megamod_cascade_data *cascade;
struct megamod_pic *pic;
+ unsigned int irq;
u32 events;
int n, idx;
soc_writel(~0, &pic->regs->evtmask[i]);
soc_writel(~0, &pic->regs->evtclr[i]);
- irq_set_handler_data(irq, &cascade_data[i]);
- irq_set_chained_handler(irq, megamod_irq_cascade);
+ irq_set_chained_handler_and_data(irq, megamod_irq_cascade,
+ &cascade_data[i]);
}
/* Finally, set up the MUX registers */
chip->name, irq_type->name);
chip = irq_type;
}
- __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
- handle_edge_irq : handle_level_irq,
- NULL);
+ irq_set_chip_handler_name_locked(irq_get_irq_data(irq), chip,
+ trigger == IOSAPIC_EDGE ? handle_edge_irq : handle_level_irq,
+ NULL);
return 0;
}
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
- cpumask_setall(irq_get_irq_data(irq)->affinity);
+ cpumask_setall(irq_get_affinity_mask(irq));
#endif
/* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0;
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
if (irq < NR_IRQS) {
- cpumask_copy(irq_get_irq_data(irq)->affinity,
+ cpumask_copy(irq_get_affinity_mask(irq),
cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff);
}
if (irqd_is_per_cpu(data))
continue;
- if (cpumask_any_and(data->affinity, cpu_online_mask)
- >= nr_cpu_ids) {
+ if (cpumask_any_and(irq_data_get_affinity_mask(data),
+ cpu_online_mask) >= nr_cpu_ids) {
/*
* Save it for phase 2 processing
*/
if (irq_prepare_move(irq, cpu))
return -1;
- __get_cached_msi_msg(idata->msi_desc, &msg);
+ __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DEST_ID_MASK;
msg.data = data;
pci_write_msi_msg(irq, &msg);
- cpumask_copy(idata->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
return 0;
}
msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
- cpumask_copy(data->affinity, mask);
+ cpumask_copy(irq_data_get_affinity_mask(data), mask);
return 0;
}
* Release XIO resources for the old MSI PCI address
*/
- __get_cached_msi_msg(data->msi_desc, &msg);
+ __get_cached_msi_msg(irq_data_get_msi_desc(data), &msg);
sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
pdev = sn_pdev->pdi_linux_pcidev;
provider = SN_PCIDEV_BUSPROVIDER(pdev);
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
pci_write_msi_msg(irq, &msg);
- cpumask_copy(data->affinity, cpu_mask);
+ cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask);
return 0;
}
* We need to be careful with the masking/acking due to the side effects
* of masking an interrupt.
*/
-static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
+static void intc_external_irq(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
+
irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
handle_simple_irq(irq, desc);
}
* Handle miscellaneous OSS interrupts.
*/
-static void oss_irq(unsigned int irq, struct irq_desc *desc)
+static void oss_irq(unsigned int __irq, struct irq_desc *desc)
{
int events = oss->irq_pending &
- (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
+ (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
#ifdef DEBUG_IRQS
if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
+ unsigned int irq = irq_desc_get_irq(desc);
+
printk("oss_irq: irq %u events = 0x%04X\n", irq,
(int) oss->irq_pending);
}
* PSC interrupt handler. It's a lot like the VIA interrupt handler.
*/
-static void psc_irq(unsigned int irq, struct irq_desc *desc)
+static void psc_irq(unsigned int __irq, struct irq_desc *desc)
{
unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
int pIFR = pIFRbase + offset;
int pIER = pIERbase + offset;
int irq_num;
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/bug.h>
-#include "../../drivers/irqchip/irqchip.h"
-
static void __iomem *intc_baseaddr;
/* No one else should require these constants, so define them locally here. */
config SYS_SUPPORTS_HOTPLUG_CPU
bool
-config I8259
- bool
- select IRQ_DOMAIN
-
config MIPS_BONITO64
bool
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/of_irq.h>
-#include "../../../drivers/irqchip/irqchip.h"
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
irqchip_init();
}
-OF_DECLARE_2(irqchip, mips_cpu_intc, "mti,cpu-interrupt-controller",
+IRQCHIP_DECLARE(mips_cpu_intc, "mti,cpu-interrupt-controller",
mips_cpu_irq_of_init);
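
OF_DECLARE_2() open-coded what the irqchip layer already provides; with the
private "irqchip.h" gone, IRQCHIP_DECLARE() from <linux/irqchip.h> is the one
supported way to bind an init function to a compatible string. Its shape, with
a made-up compatible string and init function:

/* Registers example_intc_of_init for "vendor,example-intc" nodes */
IRQCHIP_DECLARE(example_intc, "vendor,example-intc", example_intc_of_init);
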
obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
-obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Code to handle x86 style IRQs plus some generic interrupt stuff.
- *
- * Copyright (C) 1992 Linus Torvalds
- * Copyright (C) 1994 - 2000 Ralf Baechle
- */
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/of_irq.h>
-#include <linux/spinlock.h>
-#include <linux/syscore_ops.h>
-#include <linux/irq.h>
-
-#include <asm/i8259.h>
-#include <asm/io.h>
-
-#include "../../drivers/irqchip/irqchip.h"
-
-/*
- * This is the 'legacy' 8259A Programmable Interrupt Controller,
- * present in the majority of PC/AT boxes.
- * plus some generic x86 specific things if generic specifics makes
- * any sense at all.
- * this file should become arch/i386/kernel/irq.c when the old irq.c
- * moves to arch independent land
- */
-
-static int i8259A_auto_eoi = -1;
-DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(struct irq_data *d);
-static void enable_8259A_irq(struct irq_data *d);
-static void mask_and_ack_8259A(struct irq_data *d);
-static void init_8259A(int auto_eoi);
-
-static struct irq_chip i8259A_chip = {
- .name = "XT-PIC",
- .irq_mask = disable_8259A_irq,
- .irq_disable = disable_8259A_irq,
- .irq_unmask = enable_8259A_irq,
- .irq_mask_ack = mask_and_ack_8259A,
-};
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- */
-static unsigned int cached_irq_mask = 0xffff;
-
-#define cached_master_mask (cached_irq_mask)
-#define cached_slave_mask (cached_irq_mask >> 8)
-
-static void disable_8259A_irq(struct irq_data *d)
-{
- unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
- unsigned long flags;
-
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- cached_irq_mask |= mask;
- if (irq & 8)
- outb(cached_slave_mask, PIC_SLAVE_IMR);
- else
- outb(cached_master_mask, PIC_MASTER_IMR);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-static void enable_8259A_irq(struct irq_data *d)
-{
- unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
- unsigned long flags;
-
- mask = ~(1 << irq);
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- cached_irq_mask &= mask;
- if (irq & 8)
- outb(cached_slave_mask, PIC_SLAVE_IMR);
- else
- outb(cached_master_mask, PIC_MASTER_IMR);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-int i8259A_irq_pending(unsigned int irq)
-{
- unsigned int mask;
- unsigned long flags;
- int ret;
-
- irq -= I8259A_IRQ_BASE;
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- if (irq < 8)
- ret = inb(PIC_MASTER_CMD) & mask;
- else
- ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- return ret;
-}
-
-void make_8259A_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
- enable_irq(irq);
-}
-
-/*
- * This function assumes to be called rarely. Switching between
- * 8259A registers is slow.
- * This has to be protected by the irq controller spinlock
- * before being called.
- */
-static inline int i8259A_irq_real(unsigned int irq)
-{
- int value;
- int irqmask = 1 << irq;
-
- if (irq < 8) {
- outb(0x0B, PIC_MASTER_CMD); /* ISR register */
- value = inb(PIC_MASTER_CMD) & irqmask;
- outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
- return value;
- }
- outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
- value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
- outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
- return value;
-}
-
-/*
- * Careful! The 8259A is a fragile beast, it pretty
- * much _has_ to be done exactly like this (mask it
- * first, _then_ send the EOI, and the order of EOI
- * to the two 8259s is important!
- */
-static void mask_and_ack_8259A(struct irq_data *d)
-{
- unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
- unsigned long flags;
-
- irqmask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- /*
- * Lightweight spurious IRQ detection. We do not want
- * to overdo spurious IRQ handling - it's usually a sign
- * of hardware problems, so we only do the checks we can
- * do without slowing down good hardware unnecessarily.
- *
- * Note that IRQ7 and IRQ15 (the two spurious IRQs
- * usually resulting from the 8259A-1|2 PICs) occur
- * even if the IRQ is masked in the 8259A. Thus we
- * can check spurious 8259A IRQs without doing the
- * quite slow i8259A_irq_real() call for every IRQ.
- * This does not cover 100% of spurious interrupts,
- * but should be enough to warn the user that there
- * is something bad going on ...
- */
- if (cached_irq_mask & irqmask)
- goto spurious_8259A_irq;
- cached_irq_mask |= irqmask;
-
-handle_real_irq:
- if (irq & 8) {
- inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
- outb(cached_slave_mask, PIC_SLAVE_IMR);
- outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
- outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
- } else {
- inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
- outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
- }
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
- return;
-
-spurious_8259A_irq:
- /*
- * this is the slow path - should happen rarely.
- */
- if (i8259A_irq_real(irq))
- /*
- * oops, the IRQ _is_ in service according to the
- * 8259A - not spurious, go handle it.
- */
- goto handle_real_irq;
-
- {
- static int spurious_irq_mask;
- /*
- * At this point we can be sure the IRQ is spurious,
- * lets ACK and report it. [once per IRQ]
- */
- if (!(spurious_irq_mask & irqmask)) {
- printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
- atomic_inc(&irq_err_count);
- /*
- * Theoretically we do not have to handle this IRQ,
- * but in Linux this does not cause problems and is
- * simpler for us.
- */
- goto handle_real_irq;
- }
-}
-
-static void i8259A_resume(void)
-{
- if (i8259A_auto_eoi >= 0)
- init_8259A(i8259A_auto_eoi);
-}
-
-static void i8259A_shutdown(void)
-{
- /* Put the i8259A into a quiescent state that
- * the kernel initialization code can get it
- * out of.
- */
- if (i8259A_auto_eoi >= 0) {
- outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
- }
-}
-
-static struct syscore_ops i8259_syscore_ops = {
- .resume = i8259A_resume,
- .shutdown = i8259A_shutdown,
-};
-
-static int __init i8259A_init_sysfs(void)
-{
- register_syscore_ops(&i8259_syscore_ops);
- return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
-static void init_8259A(int auto_eoi)
-{
- unsigned long flags;
-
- i8259A_auto_eoi = auto_eoi;
-
- raw_spin_lock_irqsave(&i8259A_lock, flags);
-
- outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
-
- /*
- * outb_p - this has to work on a wide range of PC hardware.
- */
- outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
- outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
- outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
- if (auto_eoi) /* master does Auto EOI */
- outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
- else /* master expects normal EOI */
- outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-
- outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
- outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
- outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
- outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
- if (auto_eoi)
- /*
- * In AEOI mode we just have to mask the interrupt
- * when acking.
- */
- i8259A_chip.irq_mask_ack = disable_8259A_irq;
- else
- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
-
- udelay(100); /* wait for 8259A to initialize */
-
- outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
- outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
-
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
- .handler = no_action,
- .name = "cascade",
- .flags = IRQF_NO_THREAD,
-};
-
-static struct resource pic1_io_resource = {
- .name = "pic1",
- .start = PIC_MASTER_CMD,
- .end = PIC_MASTER_IMR,
- .flags = IORESOURCE_BUSY
-};
-
-static struct resource pic2_io_resource = {
- .name = "pic2",
- .start = PIC_SLAVE_CMD,
- .end = PIC_SLAVE_IMR,
- .flags = IORESOURCE_BUSY
-};
-
-static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
- irq_set_probe(virq);
- return 0;
-}
-
-static struct irq_domain_ops i8259A_ops = {
- .map = i8259A_irq_domain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-/*
- * On systems with i8259-style interrupt controllers we assume for
- * driver compatibility reasons interrupts 0 - 15 to be the i8259
- * interrupts even if the hardware uses a different interrupt numbering.
- */
-struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
-{
- struct irq_domain *domain;
-
- insert_resource(&ioport_resource, &pic1_io_resource);
- insert_resource(&ioport_resource, &pic2_io_resource);
-
- init_8259A(0);
-
- domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
- &i8259A_ops, NULL);
- if (!domain)
- panic("Failed to add i8259 IRQ domain");
-
- setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
- return domain;
-}
-
-void __init init_i8259_irqs(void)
-{
- __init_i8259_irqs(NULL);
-}
-
-static void i8259_irq_dispatch(unsigned int irq, struct irq_desc *desc)
-{
- struct irq_domain *domain = irq_get_handler_data(irq);
- int hwirq = i8259_irq();
-
- if (hwirq < 0)
- return;
-
- irq = irq_linear_revmap(domain, hwirq);
- generic_handle_irq(irq);
-}
-
-int __init i8259_of_init(struct device_node *node, struct device_node *parent)
-{
- struct irq_domain *domain;
- unsigned int parent_irq;
-
- parent_irq = irq_of_parse_and_map(node, 0);
- if (!parent_irq) {
- pr_err("Failed to map i8259 parent IRQ\n");
- return -ENODEV;
- }
-
- domain = __init_i8259_irqs(node);
- irq_set_handler_data(parent_irq, domain);
- irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
- return 0;
-}
-IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
ret = arch_setup_msi_irq(dev, entry);
if (ret < 0)
return ret;
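
This is the first of the list walk conversions: for_each_pci_msi_entry() hides
where the descriptor list is anchored so it can later migrate from struct
pci_dev into the generic struct device (see the device_initialize() hunk
below). A hedged sketch of the iterator in use (the function is made up):

static int example_count_msi_vectors(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	int count = 0;

	/* Walk pdev's MSI descriptors without touching ->msi_list directly */
	for_each_pci_msi_entry(entry, pdev)
		count += entry->nvec_used;

	return count;
}
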
{
struct irq_data *data;
data = irq_get_irq_data(cd->irq);
- cpumask_copy(data->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
iact->flags |= IRQF_NOBALANCING;
}
#endif
tmp2 = GxICR(irq);
irq_affinity_online[irq] =
- cpumask_any_and(d->affinity, cpu_online_mask);
+ cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
CROSS_GxICR(irq, irq_affinity_online[irq]) =
(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
} else {
tmp = GxICR(irq);
- irq_affinity_online[irq] = cpumask_any_and(d->affinity,
+ irq_affinity_online[irq] = cpumask_any_and(irq_data_get_affinity_mask(d),
cpu_online_mask);
CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
self = smp_processor_id();
for (irq = 0; irq < NR_IRQS; irq++) {
struct irq_data *data = irq_get_irq_data(irq);
+ struct cpumask *mask = irq_data_get_affinity_mask(data);
if (irqd_is_per_cpu(data))
continue;
- if (cpumask_test_cpu(self, data->affinity) &&
+ if (cpumask_test_cpu(self, mask) &&
!cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
int cpu_id;
cpu_id = cpumask_first(cpu_online_mask);
- cpumask_set_cpu(cpu_id, data->affinity);
+ cpumask_set_cpu(cpu_id, mask);
}
/* We need to operate irq_affinity_online atomically. */
arch_local_cli_save(flags);
GxICR(irq) = x & GxICR_LEVEL;
tmp = GxICR(irq);
- new = cpumask_any_and(data->affinity,
- cpu_online_mask);
+ new = cpumask_any_and(mask, cpu_online_mask);
irq_affinity_online[irq] = new;
CROSS_GxICR(irq, new) =
if (cpu_dest < 0)
return -1;
- cpumask_copy(d->affinity, dest);
+ cpumask_copy(irq_data_get_affinity_mask(d), dest);
return 0;
}
{
#ifdef CONFIG_SMP
struct irq_data *d = irq_get_irq_data(irq);
- cpumask_copy(d->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
- struct irq_desc *desc;
+ struct irq_data *irq_data;
cpumask_t dest;
#endif
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
- desc = irq_to_desc(irq);
- cpumask_copy(&dest, desc->irq_data.affinity);
- if (irqd_is_per_cpu(&desc->irq_data) &&
+ irq_data = irq_get_irq_data(irq);
+ cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
+ if (irqd_is_per_cpu(irq_data) &&
!cpumask_test_cpu(smp_processor_id(), &dest)) {
int cpu = cpumask_first(&dest);
}
static int
-cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
+cpld_pic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return cpld_pic_node == node;
}
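
Every irq_domain ->match() callback gains an irq_domain_bus_token parameter so
callers can select a host by bus type as well as by device node; the
implementations converted in this series simply ignore it. A sketch of a match
callback that does consult the token (illustrative, not taken from any driver
here):

static int example_host_match(struct irq_domain *h, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	/* Require a node match, and a token match unless the caller said ANY */
	return h->of_node == node &&
	       (bus_token == DOMAIN_BUS_ANY || h->bus_token == bus_token);
}
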
return -ENODEV;
}
- entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ entry = first_pci_msi_entry(dev);
for (; dn; dn = of_get_next_parent(dn)) {
if (entry->msi_attrib.is_64) {
if (rc)
return rc;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
dev_warn(&dev->dev,
dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq == NO_IRQ)
continue;
#endif /* CONFIG_SMP */
-static int iic_host_match(struct irq_domain *h, struct device_node *node)
+static int iic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return of_device_is_compatible(node,
"IBM,CBEA-Internal-Interrupt-Controller");
return 0;
}
-static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
+static int flipper_pic_match(struct irq_domain *h, struct device_node *np,
+ enum irq_domain_bus_token bus_token)
{
return 1;
}
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
msg.address_hi = 0;
msg.address_lo = PASEMI_MSI_ADDR;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
/* Allocate 16 interrupts for now, since that's the grouping for
* affinity. This can be changed later if it turns out 32 is too
* few MSIs for someone, but restrictions will apply to how the
.name = "cascade",
};
-static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
/* We match all, we don't always have a node anyway */
return 1;
opal_handle_events(be64_to_cpu(last_outstanding_events));
}
-static int opal_event_match(struct irq_domain *h, struct device_node *node)
+static int opal_event_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return h->of_node == node;
}
if (pdev->no_64bit_msi && !phb->msi32_support)
return -ENODEV;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
pr_warn("%s: Supports only 64-bit MSIs\n",
pci_name(pdev));
if (WARN_ON(!phb))
return;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
return 0;
}
-static int ps3_host_match(struct irq_domain *h, struct device_node *np)
+static int ps3_host_match(struct irq_domain *h, struct device_node *np,
+ enum irq_domain_bus_token bus_token)
{
/* Match all */
return 1;
{
struct msi_desc *entry;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
* So we must reject such requests. */
expected = 0;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->msi_attrib.entry_nr != expected) {
pr_debug("rtas_msi: bad MSI-X entries.\n");
return -EINVAL;
}
i = 0;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
hwirq = rtas_query_irq_number(pdn, i++);
if (hwirq < 0) {
pr_debug("rtas_msi: error (%d) getting hwirq\n", rc);
return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}
-static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
+static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
/* Exact match, unless ehv_pic node is NULL */
return h->of_node == NULL || h->of_node == node;
struct msi_desc *entry;
struct fsl_msi *msi_data;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
msi_data = irq_get_chip_data(entry->irq);
}
}
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
/*
* Loop over all the MSI devices until we find one that has an
* available interrupt.
.flags = IORESOURCE_BUSY,
};
-static int i8259_host_match(struct irq_domain *h, struct device_node *node)
+static int i8259_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return h->of_node == NULL || h->of_node == node;
}
.irq_set_type = ipic_set_irq_type,
};
-static int ipic_host_match(struct irq_domain *h, struct device_node *node)
+static int ipic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
/* Exact match, unless ipic node is NULL */
return h->of_node == NULL || h->of_node == node;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
-static int mpic_host_match(struct irq_domain *h, struct device_node *node)
+static int mpic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
/* Exact match, unless mpic node is NULL */
return h->of_node == NULL || h->of_node == node;
{
struct msi_desc *entry;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
return -ENXIO;
}
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
if (hwirq < 0) {
pr_debug("u3msi: failed allocating hwirq\n");
return -EINVAL;
}
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
if (irq < 0) {
pr_debug("%s: Failed to allocate msi interrupt\n",
struct msi_desc *entry;
int irq;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq == NO_IRQ)
continue;
if (!msi_data->msi_virqs)
return -ENOMEM;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
if (int_no >= 0)
break;
dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
.irq_mask_ack = qe_ic_mask_irq,
};
-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
/* Exact match, unless qe_ic node is NULL */
return h->of_node == NULL || h->of_node == node;
* card, using the MSI mask bits. Firmware doesn't appear to unmask
* at that level, so we do it here by hand.
*/
- if (d->msi_desc)
+ if (irq_data_get_msi_desc(d))
pci_msi_unmask_irq(d);
#endif
* card, using the MSI mask bits. Firmware doesn't appear to unmask
* at that level, so we do it here by hand.
*/
- if (d->msi_desc)
+ if (irq_data_get_msi_desc(d))
pci_msi_unmask_irq(d);
#endif
/* unmask it */
}
#endif /* CONFIG_SMP */
-static int xics_host_match(struct irq_domain *h, struct device_node *node)
+static int xics_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
struct ics *ics;
/* Request MSI interrupts */
hwirq = 0;
- list_for_each_entry(msi, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(msi, pdev) {
rc = -EIO;
irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
if (irq < 0)
return (msi_vecs == nvec) ? 0 : msi_vecs;
out_msi:
- list_for_each_entry(msi, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(msi, pdev) {
if (hwirq-- == 0)
break;
irq_set_msi_desc(msi->irq, NULL);
return;
/* Release MSI interrupts */
- list_for_each_entry(msi, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(msi, pdev) {
if (msi->msi_attrib.is_msix)
__pci_msix_desc_mask_irq(msi, 1);
else
static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned long mask;
int bit;
static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned long mask;
int bit;
.irq_unmask = enable_se7724_irq,
};
-static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct fpga_irq set = get_fpga_irq(irq);
unsigned short intv = __raw_readw(set.sraddr);
unsigned int ext_irq = set.base;
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
unsigned long mask;
int pin;
for_each_active_irq(irq) {
struct irq_data *data = irq_get_irq_data(irq);
- if (data->node == cpu) {
- unsigned int newcpu = cpumask_any_and(data->affinity,
+ if (irq_data_get_node(data) == cpu) {
+ struct cpumask *mask = irq_data_get_affinity_mask(data);
+ unsigned int newcpu = cpumask_any_and(mask,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
irq, cpu);
- cpumask_setall(data->affinity);
+ cpumask_setall(mask);
}
- irq_set_affinity(irq, data->affinity);
+ irq_set_affinity(irq, mask);
}
}
}
static inline unsigned int irq_data_to_handle(struct irq_data *data)
{
- struct irq_handler_data *ihd = data->handler_data;
+ struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
return ihd->dev_handle;
}
static inline unsigned int irq_data_to_ino(struct irq_data *data)
{
- struct irq_handler_data *ihd = data->handler_data;
+ struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
return ihd->dev_ino;
}
static inline unsigned long irq_data_to_sysino(struct irq_data *data)
{
- struct irq_handler_data *ihd = data->handler_data;
+ struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
return ihd->sysino;
}
static void sun4u_irq_enable(struct irq_data *data)
{
- struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_handler_data *handler_data;
+ handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(data->irq, data->affinity);
+ cpuid = irq_choose_cpu(data->irq,
+ irq_data_get_affinity_mask(data));
imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
static int sun4u_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
- struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_handler_data *handler_data;
+ handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
static void sun4u_irq_eoi(struct irq_data *data)
{
- struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_handler_data *handler_data;
+ handler_data = irq_data_get_irq_handler_data(data);
if (likely(handler_data))
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
static void sun4v_irq_enable(struct irq_data *data)
{
- unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
+ unsigned long cpuid = irq_choose_cpu(data->irq,
+ irq_data_get_affinity_mask(data));
unsigned int ino = irq_data_to_sysino(data);
int err;
unsigned long cpuid;
int err;
- cpuid = irq_choose_cpu(data->irq, data->affinity);
+ cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
if (desc->action && !irqd_is_per_cpu(data)) {
if (data->chip->irq_set_affinity)
data->chip->irq_set_affinity(data,
- data->affinity,
- false);
+ irq_data_get_affinity_mask(data),
+ false);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
int oldcpu, newcpu;
mask = (unsigned long)data->chip_data;
- oldcpu = irq_choose_cpu(data->affinity);
+ oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
newcpu = irq_choose_cpu(dest);
if (oldcpu == newcpu)
int cpu;
mask = (unsigned long)data->chip_data;
- cpu = irq_choose_cpu(data->affinity);
+ cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
spin_lock_irqsave(&leon_irq_lock, flags);
oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
int cpu;
mask = (unsigned long)data->chip_data;
- cpu = irq_choose_cpu(data->affinity);
+ cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
spin_lock_irqsave(&leon_irq_lock, flags);
oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
void arch_teardown_msi_irq(unsigned int irq)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
- struct pci_dev *pdev = entry->dev;
+ struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
static void sun4d_mask_irq(struct irq_data *data)
{
- struct sun4d_handler_data *handler_data = data->handler_data;
+ struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
unsigned int real_irq;
#ifdef CONFIG_SMP
int cpuid = handler_data->cpuid;
static void sun4d_unmask_irq(struct irq_data *data)
{
- struct sun4d_handler_data *handler_data = data->handler_data;
+ struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
unsigned int real_irq;
#ifdef CONFIG_SMP
int cpuid = handler_data->cpuid;
static void sun4m_mask_irq(struct irq_data *data)
{
- struct sun4m_handler_data *handler_data = data->handler_data;
+ struct sun4m_handler_data *handler_data;
int cpu = smp_processor_id();
+ handler_data = irq_data_get_irq_handler_data(data);
if (handler_data->mask) {
unsigned long flags;
static void sun4m_unmask_irq(struct irq_data *data)
{
- struct sun4m_handler_data *handler_data = data->handler_data;
+ struct sun4m_handler_data *handler_data;
int cpu = smp_processor_id();
+ handler_data = irq_data_get_irq_handler_data(data);
if (handler_data->mask) {
unsigned long flags;
* to Linux which just calls handle_level_irq() after clearing the
* MAC INTx Assert status bit associated with this interrupt.
*/
-static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc)
{
struct pci_controller *controller = irq_desc_get_handler_data(desc);
gxio_trio_context_t *trio_context = controller->trio;
uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
int mac = controller->mac;
unsigned int reg_offset;
uint64_t level_mask;
/* MSI support starts here. */
static unsigned int tilegx_msi_startup(struct irq_data *d)
{
- if (d->msi_desc)
+ if (irq_data_get_msi_desc(d))
pci_msi_unmask_irq(d);
return 0;
* irq_controller_lock held, and IRQs disabled. Decode the IRQ
* and call the handler.
*/
-static void
-puv3_gpio_handler(unsigned int irq, struct irq_desc *desc)
+static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc)
{
- unsigned int mask;
+ unsigned int mask, irq;
mask = readl(GPIO_GEDR);
do {
if (ret)
goto error;
i = 0;
- list_for_each_entry(msidesc, &dev->msi_list, list) {
+ for_each_pci_msi_entry(msidesc, dev) {
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- list_for_each_entry(msidesc, &dev->msi_list, list) {
+ for_each_pci_msi_entry(msidesc, dev) {
__pci_read_msi_msg(msidesc, &msg);
pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
int ret = 0;
struct msi_desc *msidesc;
- list_for_each_entry(msidesc, &dev->msi_list, list) {
+ for_each_pci_msi_entry(msidesc, dev) {
struct physdev_map_pirq map_irq;
domid_t domid;
{
struct msi_desc *msidesc;
- msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ msidesc = first_pci_msi_entry(dev);
if (msidesc->msi_attrib.is_msix)
xen_pci_frontend_disable_msix(dev);
else
for_each_active_irq(i) {
struct irq_data *data = irq_get_irq_data(i);
+ struct cpumask *mask;
unsigned int newcpu;
if (irqd_is_per_cpu(data))
continue;
- if (!cpumask_test_cpu(cpu, data->affinity))
+ mask = irq_data_get_affinity_mask(data);
+ if (!cpumask_test_cpu(cpu, mask))
continue;
- newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+ newcpu = cpumask_any_and(mask, cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
i, cpu);
- cpumask_setall(data->affinity);
+ cpumask_setall(mask);
}
- irq_set_affinity(i, data->affinity);
+ irq_set_affinity(i, mask);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
obj-$(CONFIG_SOC_BUS) += soc.o
obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, -1);
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ INIT_LIST_HEAD(&dev->msi_list);
+#endif
}
EXPORT_SYMBOL_GPL(device_initialize);
--- /dev/null
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT 24
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+ irq_write_msi_msg_t write_msg;
+ int devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globally unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+ u32 devid;
+
+ devid = desc->platform.msi_priv_data->devid;
+
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+}
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg)
+{
+ struct irq_data *data;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ info->chip, info->chip_data);
+
+ /*
+ * Save the MSI descriptor in handler_data so that the
+ * irq_write_msi_msg callback can retrieve it (and the
+ * associated device).
+ */
+ data = irq_domain_get_irq_data(domain, virq);
+ data->handler_data = arg->desc;
+
+ return 0;
+}
+#else
+#define platform_msi_set_desc NULL
+#define platform_msi_init NULL
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ BUG_ON(!ops);
+
+ if (ops->msi_init == NULL)
+ ops->msi_init = platform_msi_init;
+ if (ops->set_desc == NULL)
+ ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data_get_irq_handler_data(data);
+ struct platform_msi_priv_data *priv_data;
+
+ priv_data = desc->platform.msi_priv_data;
+
+ priv_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_mask)
+ chip->irq_mask = irq_chip_mask_parent;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = irq_chip_unmask_parent;
+ if (!chip->irq_eoi)
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_set_affinity)
+ chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = platform_msi_write_msg;
+}
+
+static void platform_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+ struct platform_msi_priv_data *data)
+{
+ int i;
+
+ for (i = 0; i < nvec; i++) {
+ struct msi_desc *desc;
+
+ desc = alloc_msi_entry(dev);
+ if (!desc)
+ break;
+
+ desc->platform.msi_priv_data = data;
+ desc->platform.msi_index = i;
+ desc->nvec_used = 1;
+
+ list_add_tail(&desc->list, dev_to_msi_list(dev));
+ }
+
+ if (i != nvec) {
+ /* Clean up the mess */
+ platform_msi_free_descs(dev);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @np: Optional device-tree node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ platform_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ platform_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(np, info, parent);
+ if (domain)
+ domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
+
+ return domain;
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct platform_msi_priv_data *priv_data;
+ int err;
+
+ /*
+ * Limit the number of interrupts to 256 per device. Should we
+ * need to bump this up, DEV_ID_SHIFT should be adjusted
+ * accordingly (which would impact the max number of MSI
+ * capable devices).
+ */
+ if (!dev->msi_domain || !write_msi_msg || !nvec ||
+ nvec > (1 << (32 - DEV_ID_SHIFT)))
+ return -EINVAL;
+
+ if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ dev_err(dev, "Incompatible msi_domain, giving up\n");
+ return -EINVAL;
+ }
+
+ /* Already had a helping of MSI? Greed... */
+ if (!list_empty(dev_to_msi_list(dev)))
+ return -EBUSY;
+
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data)
+ return -ENOMEM;
+
+ priv_data->devid = ida_simple_get(&platform_msi_devid_ida,
+ 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ if (priv_data->devid < 0) {
+ err = priv_data->devid;
+ goto out_free_data;
+ }
+
+ priv_data->write_msg = write_msi_msg;
+
+ err = platform_msi_alloc_descs(dev, nvec, priv_data);
+ if (err)
+ goto out_free_id;
+
+ err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+ if (err)
+ goto out_free_desc;
+
+ return 0;
+
+out_free_desc:
+ platform_msi_free_descs(dev);
+out_free_id:
+ ida_simple_remove(&platform_msi_devid_ida, priv_data->devid);
+out_free_data:
+ kfree(priv_data);
+
+ return err;
+}
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+ struct msi_desc *desc;
+
+ desc = first_msi_entry(dev);
+ if (desc) {
+ struct platform_msi_priv_data *data;
+
+ data = desc->platform.msi_priv_data;
+
+ ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ kfree(data);
+ }
+
+ msi_domain_free_irqs(dev->msi_domain, dev);
+ platform_msi_free_descs(dev);
+}
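
Taken together, platform_msi_create_irq_domain() on the provider side and
platform_msi_domain_alloc_irqs()/platform_msi_domain_free_irqs() on the
consumer side give non-PCI devices a path to MSIs. A hedged consumer sketch,
assuming a platform driver whose device already has dev->msi_domain set up by
its irqchip; foo_write_msg(), foo_probe() and the doorbell programming are
illustrative only:

#include <linux/msi.h>
#include <linux/platform_device.h>

static void foo_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Program the device's doorbell from msg->address_hi/lo and msg->data */
}

static int foo_probe(struct platform_device *pdev)
{
	int err;

	/* Ask the platform MSI domain for four vectors */
	err = platform_msi_domain_alloc_irqs(&pdev->dev, 4, foo_write_msg);
	if (err)
		return err;

	/*
	 * Each msi_desc on the device now carries a Linux irq number, ready
	 * for request_irq(); undo with platform_msi_domain_free_irqs().
	 */
	return 0;
}
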
select MULTI_IRQ_HANDLER
select SPARSE_IRQ
+config I8259
+ bool
+ select IRQ_DOMAIN
+
config BCM7038_L1_IRQ
bool
select GENERIC_IRQ_CHIP
config RENESAS_H8S_INTC
bool
select IRQ_DOMAIN
+
+config IMX_GPCV2
+ bool
+ select IRQ_DOMAIN
+ help
+ Enables the wakeup IRQs for IMX platforms with GPCv2 block
obj-$(CONFIG_IRQCHIP) += irqchip.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
+obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
+obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_H8S_INTC) += irq-renesas-h8s.o
obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
+obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include "irqchip.h"
-
#define COMBINER_ENABLE_SET 0x0
#define COMBINER_ENABLE_CLEAR 0x4
#define COMBINER_INT_STATUS 0xC
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
-static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+static void combiner_handle_cascade_irq(unsigned int __irq,
+ struct irq_desc *desc)
{
- struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
unsigned int cascade_irq, combiner_irq;
unsigned long status;
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
unsigned int irq)
{
- if (irq_set_handler_data(irq, combiner_data) != 0)
- BUG();
- irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+ irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
+ combiner_data);
}
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
if (!combiner_data) {
- pr_warning("%s: could not allocate combiner data\n", __func__);
+ pr_warn("%s: could not allocate combiner data\n", __func__);
return;
}
combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
&combiner_irq_domain_ops, combiner_data);
if (WARN_ON(!combiner_irq_domain)) {
- pr_warning("%s: irq domain init failed\n", __func__);
+ pr_warn("%s: irq domain init failed\n", __func__);
return;
}
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long irqmap, irqn, irqsrc, cpuid;
unsigned int cascade_irq;
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>
#include "irq-atmel-aic-common.h"
-#include "irqchip.h"
/* Number of irq lines managed by AIC */
#define NR_AIC_IRQS 32
aic_common_rtt_irq_fixup(root);
}
-static const struct of_device_id __initdata aic_irq_fixups[] = {
+static const struct of_device_id aic_irq_fixups[] __initconst = {
{ .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup },
{ .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup },
{ .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>
#include "irq-atmel-aic-common.h"
-#include "irqchip.h"
/* Number of irq lines managed by AIC */
#define NR_AIC5_IRQS 128
aic_common_rtc_irq_fixup(root);
}
-static const struct of_device_id __initdata aic5_irq_fixups[] = {
+static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
{ /* sentinel */ },
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
#define HWIRQ_BANK(i) (i >> 5)
#define NR_BANKS 3
#define IRQS_PER_BANK 32
-static int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
-static int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
-static int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
-static int bank_irqs[] __initconst = { 8, 32, 32 };
+static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
+static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
+static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
+static const int bank_irqs[] __initconst = { 8, 32, 32 };
static const int shortcuts[] = {
7, 9, 10, 18, 19, /* Bank 1 */
static struct armctrl_ic intc __read_mostly;
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
+static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc);
static void armctrl_mask_irq(struct irq_data *d)
{
};
static int __init armctrl_of_init(struct device_node *node,
- struct device_node *parent)
+ struct device_node *parent,
+ bool is_2836)
{
void __iomem *base;
int irq, b, i;
}
}
- set_handle_irq(bcm2835_handle_irq);
+ if (is_2836) {
+ int parent_irq = irq_of_parse_and_map(node, 0);
+
+ if (!parent_irq) {
+ panic("%s: unable to get parent interrupt.\n",
+ node->full_name);
+ }
+ irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
+ } else {
+ set_handle_irq(bcm2835_handle_irq);
+ }
+
return 0;
}
+static int __init bcm2835_armctrl_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return armctrl_of_init(node, parent, false);
+}
+
+static int __init bcm2836_armctrl_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return armctrl_of_init(node, parent, true);
+}
+
/*
* Handle each interrupt across the entire interrupt controller. This reads the
* status register before handling each interrupt, which is necessary given that
* handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
-static void armctrl_handle_bank(int bank, struct pt_regs *regs)
+static u32 armctrl_translate_bank(int bank)
{
- u32 stat, irq;
+ u32 stat = readl_relaxed(intc.pending[bank]);
- while ((stat = readl_relaxed(intc.pending[bank]))) {
- irq = MAKE_HWIRQ(bank, ffs(stat) - 1);
- handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
- }
+ return MAKE_HWIRQ(bank, ffs(stat) - 1);
+}
+
+static u32 armctrl_translate_shortcut(int bank, u32 stat)
+{
+ return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
}
-static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
- u32 stat)
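+/* Returns the next pending hwirq, or ~0 when nothing is pending. */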
+static u32 get_next_armctrl_hwirq(void)
{
- u32 irq = MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
- handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
+ u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK;
+
+ if (stat == 0)
+ return ~0;
+ else if (stat & BANK0_HWIRQ_MASK)
+ return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
+ else if (stat & SHORTCUT1_MASK)
+ return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK);
+ else if (stat & SHORTCUT2_MASK)
+ return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK);
+ else if (stat & BANK1_HWIRQ)
+ return armctrl_translate_bank(1);
+ else if (stat & BANK2_HWIRQ)
+ return armctrl_translate_bank(2);
+ else
+ BUG();
}
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs)
{
- u32 stat, irq;
-
- while ((stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK)) {
- if (stat & BANK0_HWIRQ_MASK) {
- irq = MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
- handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
- } else if (stat & SHORTCUT1_MASK) {
- armctrl_handle_shortcut(1, regs, stat & SHORTCUT1_MASK);
- } else if (stat & SHORTCUT2_MASK) {
- armctrl_handle_shortcut(2, regs, stat & SHORTCUT2_MASK);
- } else if (stat & BANK1_HWIRQ) {
- armctrl_handle_bank(1, regs);
- } else if (stat & BANK2_HWIRQ) {
- armctrl_handle_bank(2, regs);
- } else {
- BUG();
- }
- }
+ u32 hwirq;
+
+ while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+ handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+}
+
+static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+ u32 hwirq;
+
+ while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+ generic_handle_irq(irq_linear_revmap(intc.domain, hwirq));
}
-IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
+ bcm2835_armctrl_of_init);
+IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic",
+ bcm2836_armctrl_of_init);
--- /dev/null
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <asm/exception.h>
+
+/*
+ * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
+ * next 2 bits identify the CPU that the GPU FIQ goes to.
+ */
+#define LOCAL_GPU_ROUTING 0x00c
+/* When setting bits 0-3, enables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_SET 0x010
+/* When setting bits 0-3, disables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_CLR 0x014
+/*
+ * The low 4 bits of this are the CPU's timer IRQ enables, and the
+ * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
+ * bits).
+ */
+#define LOCAL_TIMER_INT_CONTROL0 0x040
+/*
+ * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
+ * the next 4 bits are the CPU's per-mailbox FIQ enables (which
+ * override the IRQ bits).
+ */
+#define LOCAL_MAILBOX_INT_CONTROL0 0x050
+/*
+ * The CPU's interrupt status register. Bits are defined by the
+ * LOCAL_IRQ_* bits below.
+ */
+#define LOCAL_IRQ_PENDING0 0x060
+/* Same status bits as above, but for FIQ. */
+#define LOCAL_FIQ_PENDING0 0x070
+/*
+ * Mailbox0 write-to-set bits. There are 16 mailboxes, 4 per CPU,
+ * laid out as a block of 4 mailboxes per CPU (a 16-byte stride per
+ * CPU, which the IPI code below relies on). We use mailbox 0 for
+ * IPIs. The mailbox's interrupt is raised while any bit is set.
+ */
+#define LOCAL_MAILBOX0_SET0 0x080
+/* Mailbox0 write-to-clear bits. */
+#define LOCAL_MAILBOX0_CLR0 0x0c0
+
+#define LOCAL_IRQ_CNTPSIRQ 0
+#define LOCAL_IRQ_CNTPNSIRQ 1
+#define LOCAL_IRQ_CNTHPIRQ 2
+#define LOCAL_IRQ_CNTVIRQ 3
+#define LOCAL_IRQ_MAILBOX0 4
+#define LOCAL_IRQ_MAILBOX1 5
+#define LOCAL_IRQ_MAILBOX2 6
+#define LOCAL_IRQ_MAILBOX3 7
+#define LOCAL_IRQ_GPU_FAST 8
+#define LOCAL_IRQ_PMU_FAST 9
+#define LAST_IRQ LOCAL_IRQ_PMU_FAST
+
+struct bcm2836_arm_irqchip_intc {
+ struct irq_domain *domain;
+ void __iomem *base;
+};
+
+static struct bcm2836_arm_irqchip_intc intc __read_mostly;
+
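+/*
+ * The *_INT_CONTROL0 registers are one 32-bit word per CPU, hence
+ * the 4-byte stride in the helpers below.
+ */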
+static void bcm2836_arm_irqchip_mask_per_cpu_irq(unsigned int reg_offset,
+ unsigned int bit,
+ int cpu)
+{
+ void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+ writel(readl(reg) & ~BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_unmask_per_cpu_irq(unsigned int reg_offset,
+ unsigned int bit,
+ int cpu)
+{
+ void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+ writel(readl(reg) | BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_mask_timer_irq(struct irq_data *d)
+{
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+ d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+ smp_processor_id());
+}
+
+static void bcm2836_arm_irqchip_unmask_timer_irq(struct irq_data *d)
+{
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+ d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+ smp_processor_id());
+}
+
+static struct irq_chip bcm2836_arm_irqchip_timer = {
+ .name = "bcm2836-timer",
+ .irq_mask = bcm2836_arm_irqchip_mask_timer_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
+{
+ writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_CLR);
+}
+
+static void bcm2836_arm_irqchip_unmask_pmu_irq(struct irq_data *d)
+{
+ writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_SET);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_pmu = {
+ .name = "bcm2836-pmu",
+ .irq_mask = bcm2836_arm_irqchip_mask_pmu_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
+{
+}
+
+static void bcm2836_arm_irqchip_unmask_gpu_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_gpu = {
+ .name = "bcm2836-gpu",
+ .irq_mask = bcm2836_arm_irqchip_mask_gpu_irq,
+ .irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
+};
+
+static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
+{
+ int irq = irq_create_mapping(intc.domain, hwirq);
+
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
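+ /* Per-CPU IRQs get enabled on each CPU individually, not at request time. */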
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+}
+
+static void
+__exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ u32 stat;
+
+ stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
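+ /* Bit 4 (LOCAL_IRQ_MAILBOX0) indicates an IPI pending in mailbox 0. */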
+ if (stat & 0x10) {
+#ifdef CONFIG_SMP
+ void __iomem *mailbox0 = (intc.base +
+ LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+ u32 mbox_val = readl(mailbox0);
+ u32 ipi = ffs(mbox_val) - 1;
+
+ writel(1 << ipi, mailbox0);
+ handle_IPI(ipi, regs);
+#endif
+ } else {
+ u32 hwirq = ffs(stat) - 1;
+
+ handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+ }
+}
+
+#ifdef CONFIG_SMP
+static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
+ unsigned int ipi)
+{
+ int cpu;
+ void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
+
+ /*
+ * Ensure that stores to normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ for_each_cpu(cpu, mask) {
+ writel(1 << ipi, mailbox0_base + 16 * cpu);
+ }
+}
+
+/* Unmasks the IPI on the CPU when it's online. */
+static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
+ unsigned int mailbox = 0;
+
+ if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
+ else if (action == CPU_DYING)
+ bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
+ .notifier_call = bcm2836_arm_irqchip_cpu_notify,
+ .priority = 100,
+};
+#endif
+
+static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
+ .xlate = irq_domain_xlate_onecell
+};
+
+static void
+bcm2836_arm_irqchip_smp_init(void)
+{
+#ifdef CONFIG_SMP
+ /* Unmask IPIs to the boot CPU. */
+ bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
+ CPU_STARTING,
+ (void *)smp_processor_id());
+ register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+
+ set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+#endif
+}
+
+static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ intc.base = of_iomap(node, 0);
+ if (!intc.base) {
+ panic("%s: unable to map local interrupt registers\n",
+ node->full_name);
+ }
+
+ intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+ &bcm2836_arm_irqchip_intc_ops,
+ NULL);
+ if (!intc.domain)
+ panic("%s: unable to create IRQ domain\n", node->full_name);
+
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
+ &bcm2836_arm_irqchip_timer);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
+ &bcm2836_arm_irqchip_gpu);
+ bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
+ &bcm2836_arm_irqchip_pmu);
+
+ bcm2836_arm_irqchip_smp_init();
+
+ set_handle_irq(bcm2836_arm_irqchip_handle_irq);
+ return 0;
+}
+
+IRQCHIP_DECLARE(bcm2836_arm_irqchip_l1_intc, "brcm,bcm2836-l1-intc",
+ bcm2836_arm_irqchip_l1_intc_of_init);
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
-
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
#define MAX_WORDS 8
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
- irq_set_handler_data(parent_irq, intc);
- irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle);
+ irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
+ intc);
return 0;
}
#include <linux/irqdomain.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
-
/* Register offset in the L2 interrupt controller */
#define IRQEN 0x00
#define IRQSTAT 0x04
#define MAX_MAPPINGS (MAX_WORDS * 2)
#define IRQS_PER_WORD 32
+struct bcm7120_l1_intc_data {
+ struct bcm7120_l2_intc_data *b;
+ u32 irq_map_mask[MAX_WORDS];
+};
+
struct bcm7120_l2_intc_data {
unsigned int n_words;
void __iomem *map_base[MAX_MAPPINGS];
struct irq_domain *domain;
bool can_wake;
u32 irq_fwd_mask[MAX_WORDS];
- u32 irq_map_mask[MAX_WORDS];
+ struct bcm7120_l1_intc_data *l1_data;
int num_parent_irqs;
const __be32 *map_mask_prop;
};
static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
{
- struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc);
+ struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
+ struct bcm7120_l2_intc_data *b = data->b;
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
irq_gc_lock(gc);
pending = irq_reg_readl(gc, b->stat_offset[idx]) &
- gc->mask_cache;
+ gc->mask_cache &
+ data->irq_map_mask[idx];
irq_gc_unlock(gc);
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
chained_irq_exit(chip, desc);
}
-static void bcm7120_l2_intc_suspend(struct irq_data *d)
+static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct bcm7120_l2_intc_data *b = gc->private;
+ struct irq_chip_type *ct = gc->chip_types;
irq_gc_lock(gc);
if (b->can_wake)
irq_gc_unlock(gc);
}
-static void bcm7120_l2_intc_resume(struct irq_data *d)
+static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ struct irq_chip_type *ct = gc->chip_types;
/* Restore the saved mask */
irq_gc_lock(gc);
static int bcm7120_l2_intc_init_one(struct device_node *dn,
struct bcm7120_l2_intc_data *data,
- int irq)
+ int irq, u32 *valid_mask)
{
+ struct bcm7120_l1_intc_data *l1_data = &data->l1_data[irq];
int parent_irq;
unsigned int idx;
/* For multiple parent IRQs with multiple words, this looks like:
* <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
+ *
+ * We need to associate a given parent interrupt with its corresponding
+ * map_mask in order to mask the status register with it because we
+ * have the same handler being called for multiple parent interrupts.
+ *
+ * This is typically something needed on BCM7xxx (STB chips).
*/
for (idx = 0; idx < data->n_words; idx++) {
if (data->map_mask_prop) {
- data->irq_map_mask[idx] |=
+ l1_data->irq_map_mask[idx] |=
be32_to_cpup(data->map_mask_prop +
irq * data->n_words + idx);
} else {
- data->irq_map_mask[idx] = 0xffffffff;
+ l1_data->irq_map_mask[idx] = 0xffffffff;
}
+ valid_mask[idx] |= l1_data->irq_map_mask[idx];
}
- irq_set_handler_data(parent_irq, data);
- irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);
+ l1_data->b = data;
+ irq_set_chained_handler_and_data(parent_irq,
+ bcm7120_l2_intc_irq_handle, l1_data);
return 0;
}
struct irq_chip_type *ct;
int ret = 0;
unsigned int idx, irq, flags;
+ u32 valid_mask[MAX_WORDS] = { };
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out_unmap;
}
+ data->l1_data = kcalloc(data->num_parent_irqs, sizeof(*data->l1_data),
+ GFP_KERNEL);
+ if (!data->l1_data) {
+ ret = -ENOMEM;
+ goto out_free_l1_data;
+ }
+
ret = iomap_regs_fn(dn, data);
if (ret < 0)
- goto out_unmap;
+ goto out_free_l1_data;
for (idx = 0; idx < data->n_words; idx++) {
__raw_writel(data->irq_fwd_mask[idx],
}
for (irq = 0; irq < data->num_parent_irqs; irq++) {
- ret = bcm7120_l2_intc_init_one(dn, data, irq);
+ ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
if (ret)
- goto out_unmap;
+ goto out_free_l1_data;
}
data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
&irq_generic_chip_ops, NULL);
if (!data->domain) {
ret = -ENOMEM;
- goto out_unmap;
+ goto out_free_l1_data;
}
/* MIPS chips strapped for BE will automagically configure the
irq = idx * IRQS_PER_WORD;
gc = irq_get_domain_generic_chip(data->domain, irq);
- gc->unused = 0xffffffff & ~data->irq_map_mask[idx];
+ gc->unused = 0xffffffff & ~valid_mask[idx];
gc->private = data;
ct = gc->chip_types;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_ack = irq_gc_noop;
- ct->chip.irq_suspend = bcm7120_l2_intc_suspend;
- ct->chip.irq_resume = bcm7120_l2_intc_resume;
+ gc->suspend = bcm7120_l2_intc_suspend;
+ gc->resume = bcm7120_l2_intc_resume;
+
+ /*
+ * Initialize mask-cache, in case we need it for
+ * saving/restoring fwd mask even w/o any child interrupts
+ * installed
+ */
+ gc->mask_cache = irq_reg_readl(gc, ct->regs.mask);
if (data->can_wake) {
/* This IRQ chip can wake the system, set all
out_free_domain:
irq_domain_remove(data->domain);
+out_free_l1_data:
+ kfree(data->l1_data);
out_unmap:
for (idx = 0; idx < MAX_MAPPINGS; idx++) {
if (data->map_base[idx])
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
-
/* Register offsets in the L2 interrupt controller */
#define CPU_STATUS 0x00
#define CPU_SET 0x04
u32 saved_mask; /* for suspend/resume */
};
-static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
+ struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
u32 status;
chained_irq_enter(chip, desc);
}
/* Set the IRQ chaining logic */
- irq_set_handler_data(data->parent_irq, data);
- irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle);
+ irq_set_chained_handler_and_data(data->parent_irq,
+ brcmstb_l2_intc_irq_handle, data);
gc = irq_get_domain_generic_chip(data->domain, 0);
gc->reg_base = data->base;
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define CLPS711X_INTSR1 (0x0240)
#define CLPS711X_INTMR1 (0x0280)
#define CLPS711X_BLEOI (0x0600)
*/
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
-#include "irqchip.h"
-
#define IRQ_FREE -1
#define IRQ_RESERVED -2
#define IRQ_SKIP -3
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define UC_IRQ_CONTROL 0x04
#define IC_FLAG_CLEAR_LO 0x00
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include "irqchip.h"
-
#define APB_INT_ENABLE_L 0x00
#define APB_INT_ENABLE_H 0x04
#define APB_INT_MASK_L 0x08
#define APB_INT_MASK_H 0x0c
#define APB_INT_FINALSTATUS_L 0x30
#define APB_INT_FINALSTATUS_H 0x34
+#define APB_INT_BASE_OFFSET 0x04
static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = irq_get_chip(irq);
- struct irq_chip_generic *gc = irq_get_handler_data(irq);
- struct irq_domain *d = gc->private;
- u32 stat;
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int n;
chained_irq_enter(chip, desc);
- for (n = 0; n < gc->num_ct; n++) {
- stat = readl_relaxed(gc->reg_base +
- APB_INT_FINALSTATUS_L + 4 * n);
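+ /* Each 32-IRQ word has its own generic chip and status register. */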
+ for (n = 0; n < d->revmap_size; n += 32) {
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+ u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
while (stat) {
u32 hwirq = ffs(stat) - 1;
- generic_handle_irq(irq_find_mapping(d,
- gc->irq_base + hwirq + 32 * n));
+ u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);
+
+ generic_handle_irq(virq);
stat &= ~(1 << hwirq);
}
}
struct irq_domain *domain;
struct irq_chip_generic *gc;
void __iomem *iobase;
- int ret, nrirqs, irq;
+ int ret, nrirqs, irq, i;
u32 reg;
/* Map the parent interrupt for the chained handler */
goto err_unmap;
}
- ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
- np->name, handle_level_irq, clr, 0,
- IRQ_GC_MASK_CACHE_PER_TYPE |
+ ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
+ handle_level_irq, clr, 0,
IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
goto err_unmap;
}
- gc = irq_get_domain_generic_chip(domain, 0);
- gc->private = domain;
- gc->reg_base = iobase;
-
- gc->chip_types[0].regs.mask = APB_INT_MASK_L;
- gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
-
- if (nrirqs > 32) {
- gc->chip_types[1].regs.mask = APB_INT_MASK_H;
- gc->chip_types[1].regs.enable = APB_INT_ENABLE_H;
- gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
- gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
- gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume;
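+ /*
+ * One generic chip per 32 IRQs; offsetting reg_base by 4 bytes per
+ * chip makes the *_L register offsets hit the right word (_H = _L + 4).
+ */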
+ for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+ gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
+ gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+ gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
}
- irq_set_handler_data(irq, gc);
- irq_set_chained_handler(irq, dw_apb_ictl_handler);
+ irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);
return 0;
struct v2m_data {
spinlock_t msi_cnt_lock;
- struct msi_controller mchip;
struct resource res; /* GICv2m resource */
void __iomem *base; /* GICv2m virt address */
u32 spi_start; /* The SPI number that MSIs start */
u32 nr_spis; /* The number of SPIs for MSIs */
unsigned long *bm; /* MSI vector bitmap */
- struct irq_domain *domain;
};
static void gicv2m_mask_msi_irq(struct irq_data *d)
return true;
}
+static struct irq_chip gicv2m_pmsi_irq_chip = {
+ .name = "pMSI",
+};
+
+static struct msi_domain_ops gicv2m_pmsi_ops = {
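+	/* use the default domain ops (MSI_FLAG_USE_DEF_DOM_OPS) */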
+};
+
+static struct msi_domain_info gicv2m_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &gicv2m_pmsi_ops,
+ .chip = &gicv2m_pmsi_irq_chip,
+};
+
static int __init gicv2m_init_one(struct device_node *node,
struct irq_domain *parent)
{
int ret;
struct v2m_data *v2m;
+ struct irq_domain *inner_domain, *pci_domain, *plat_domain;
v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
if (!v2m) {
goto err_iounmap;
}
- v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
- if (!v2m->domain) {
+ inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m);
+ if (!inner_domain) {
pr_err("Failed to create GICv2m domain\n");
ret = -ENOMEM;
goto err_free_bm;
}
- v2m->domain->parent = parent;
- v2m->mchip.of_node = node;
- v2m->mchip.domain = pci_msi_create_irq_domain(node,
- &gicv2m_msi_domain_info,
- v2m->domain);
- if (!v2m->mchip.domain) {
- pr_err("Failed to create MSI domain\n");
+ inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+ inner_domain->parent = parent;
+ pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
+ inner_domain);
+ plat_domain = platform_msi_create_irq_domain(node,
+ &gicv2m_pmsi_domain_info,
+ inner_domain);
+ if (!pci_domain || !plat_domain) {
+ pr_err("Failed to create MSI domains\n");
ret = -ENOMEM;
goto err_free_domains;
}
spin_lock_init(&v2m->msi_cnt_lock);
- ret = of_pci_msi_chip_add(&v2m->mchip);
- if (ret) {
- pr_err("Failed to add msi_chip.\n");
- goto err_free_domains;
- }
-
pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
(unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
return 0;
err_free_domains:
- if (v2m->mchip.domain)
- irq_domain_remove(v2m->mchip.domain);
- if (v2m->domain)
- irq_domain_remove(v2m->domain);
+ if (plat_domain)
+ irq_domain_remove(plat_domain);
+ if (pci_domain)
+ irq_domain_remove(pci_domain);
+ if (inner_domain)
+ irq_domain_remove(inner_domain);
err_free_bm:
kfree(v2m->bm);
err_iounmap:
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "ITS-MSI",
+ .irq_unmask = its_unmask_msi_irq,
+ .irq_mask = its_mask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pci_msi_domain_write_msg,
+};
+
+struct its_pci_alias {
+ struct pci_dev *pdev;
+ u32 dev_id;
+ u32 count;
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev)
+{
+ int msi, msix;
+
+ msi = max(pci_msi_vec_count(pdev), 0);
+ msix = max(pci_msix_vec_count(pdev), 0);
+
+ return max(msi, msix);
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct its_pci_alias *dev_alias = data;
+
+ dev_alias->dev_id = alias;
+ if (pdev != dev_alias->pdev)
+ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+
+ return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct pci_dev *pdev;
+ struct its_pci_alias dev_alias;
+ struct msi_domain_info *msi_info;
+
+ if (!dev_is_pci(dev))
+ return -EINVAL;
+
+ msi_info = msi_get_domain_info(domain->parent);
+
+ pdev = to_pci_dev(dev);
+ dev_alias.pdev = pdev;
+ dev_alias.count = nvec;
+
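+ /*
+ * Walk the DMA aliases: its_get_pci_alias() records the last alias
+ * seen as the DeviceID and grows the vector count for each aliasing
+ * device encountered.
+ */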
+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_alias.dev_id;
+
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, dev_alias.count, info);
+}
+
+static struct msi_domain_ops its_pci_msi_ops = {
+ .msi_prepare = its_pci_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .ops = &its_pci_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_pci_msi_init(void)
+{
+ struct device_node *np;
+ struct irq_domain *parent;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n",
+ np->full_name);
+ continue;
+ }
+
+ if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info,
+ parent)) {
+ pr_err("%s: unable to create PCI domain\n",
+ np->full_name);
+ continue;
+ }
+
+ pr_info("PCI/MSI: %s domain created\n", np->full_name);
+ }
+
+ return 0;
+}
+early_initcall(its_pci_msi_init);
--- /dev/null
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+static struct irq_chip its_pmsi_irq_chip = {
+ .name = "ITS-pMSI",
+};
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ msi_info = msi_get_domain_info(domain->parent);
+
+ /* Suck the DeviceID out of the msi-parent property */
+ ret = of_property_read_u32_index(dev->of_node, "msi-parent",
+ 1, &dev_id);
+ if (ret)
+ return ret;
+
+ /* ITS specific DeviceID, as the core ITS ignores dev. */
+ info->scratchpad[0].ul = dev_id;
+
+ return msi_info->ops->msi_prepare(domain->parent,
+ dev, nvec, info);
+}
+
+static struct msi_domain_ops its_pmsi_ops = {
+ .msi_prepare = its_pmsi_prepare,
+};
+
+static struct msi_domain_info its_pmsi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &its_pmsi_ops,
+ .chip = &its_pmsi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+static int __init its_pmsi_init(void)
+{
+ struct device_node *np;
+ struct irq_domain *parent;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n",
+ np->full_name);
+ continue;
+ }
+
+ if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info,
+ parent)) {
+ pr_err("%s: unable to create platform domain\n",
+ np->full_name);
+ continue;
+ }
+
+ pr_info("Platform MSI: %s domain created\n", np->full_name);
+ }
+
+ return 0;
+}
+early_initcall(its_pmsi_init);
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
/*
* The ITS structure - contains most of the infrastructure, with the
- * msi_controller, the command queue, the collections, and the list of
- * devices writing to it.
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
*/
struct its_node {
raw_spinlock_t lock;
struct list_head entry;
- struct msi_controller msi_chip;
- struct irq_domain *domain;
void __iomem *base;
unsigned long phys_base;
struct its_cmd_block *cmd_base;
.irq_compose_msi_msg = its_irq_compose_msi_msg,
};
-static void its_mask_msi_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void its_unmask_msi_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip its_msi_irq_chip = {
- .name = "ITS-MSI",
- .irq_unmask = its_unmask_msi_irq,
- .irq_mask = its_mask_msi_irq,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
-};
-
/*
* How we allocate LPIs:
*
}
}
-static int its_alloc_tables(struct its_node *its)
+static int its_alloc_tables(const char *node_name, struct its_node *its)
{
int err;
int i;
if (order >= MAX_ORDER) {
order = MAX_ORDER - 1;
pr_warn("%s: Device Table too large, reduce its page order to %u\n",
- its->msi_chip.of_node->full_name, order);
+ node_name, order);
}
}
if (val != tmp) {
pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
- its->msi_chip.of_node->full_name, i,
+ node_name, i,
(unsigned long) val, (unsigned long) tmp);
err = -ENXIO;
goto out_free;
return 0;
}
-struct its_pci_alias {
- struct pci_dev *pdev;
- u32 dev_id;
- u32 count;
-};
-
-static int its_pci_msi_vec_count(struct pci_dev *pdev)
-{
- int msi, msix;
-
- msi = max(pci_msi_vec_count(pdev), 0);
- msix = max(pci_msix_vec_count(pdev), 0);
-
- return max(msi, msix);
-}
-
-static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- struct its_pci_alias *dev_alias = data;
-
- dev_alias->dev_id = alias;
- if (pdev != dev_alias->pdev)
- dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
-
- return 0;
-}
-
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *info)
{
- struct pci_dev *pdev;
struct its_node *its;
struct its_device *its_dev;
- struct its_pci_alias dev_alias;
-
- if (!dev_is_pci(dev))
- return -EINVAL;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
- pdev = to_pci_dev(dev);
- dev_alias.pdev = pdev;
- dev_alias.count = nvec;
+ /*
+ * We ignore "dev" entierely, and rely on the dev_id that has
+ * been passed via the scratchpad. This limits this domain's
+ * usefulness to upper layers that definitely know that they
+ * are built on top of the ITS.
+ */
+ dev_id = info->scratchpad[0].ul;
- pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
- its = domain->parent->host_data;
+ msi_info = msi_get_domain_info(domain);
+ its = msi_info->data;
- its_dev = its_find_device(its, dev_alias.dev_id);
+ its_dev = its_find_device(its, dev_id);
if (its_dev) {
/*
* We already have seen this ID, probably through
* another alias (PCI bridge of some sort). No need to
* create the device.
*/
- dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
+ pr_debug("Reusing ITT for devID %x\n", dev_id);
goto out;
}
- its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
+ its_dev = its_create_device(its, dev_id, nvec);
if (!its_dev)
return -ENOMEM;
- dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
- dev_alias.count, ilog2(dev_alias.count));
+ pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
info->scratchpad[0].ptr = its_dev;
- info->scratchpad[1].ptr = dev;
return 0;
}
-static struct msi_domain_ops its_pci_msi_ops = {
+static struct msi_domain_ops its_msi_domain_ops = {
.msi_prepare = its_msi_prepare,
};
-static struct msi_domain_info its_pci_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .ops = &its_pci_msi_ops,
- .chip = &its_msi_irq_chip,
-};
-
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
unsigned int virq,
irq_hw_number_t hwirq)
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq, &its_irq_chip, its_dev);
- dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
- (int)(hwirq - its_dev->event_map.lpi_base),
- (int)hwirq, virq + i);
+ pr_debug("ID:%d pID:%d vID:%d\n",
+ (int)(hwirq - its_dev->event_map.lpi_base),
+ (int) hwirq, virq + i);
}
return 0;
struct resource res;
struct its_node *its;
void __iomem *its_base;
+ struct irq_domain *inner_domain;
u32 val;
u64 baser, tmp;
int err;
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res.start;
- its->msi_chip.of_node = node;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
}
its->cmd_write = its->cmd_base;
- err = its_alloc_tables(its);
+ err = its_alloc_tables(node->full_name, its);
if (err)
goto out_free_cmd;
writeq_relaxed(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
- if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
- its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
- if (!its->domain) {
+ if (of_property_read_bool(node, "msi-controller")) {
+ struct msi_domain_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
err = -ENOMEM;
goto out_free_tables;
}
- its->domain->parent = parent;
-
- its->msi_chip.domain = pci_msi_create_irq_domain(node,
- &its_pci_msi_domain_info,
- its->domain);
- if (!its->msi_chip.domain) {
+ inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
+ if (!inner_domain) {
err = -ENOMEM;
- goto out_free_domains;
+ kfree(info);
+ goto out_free_tables;
}
- err = of_pci_msi_chip_add(&its->msi_chip);
- if (err)
- goto out_free_domains;
+ inner_domain->parent = parent;
+ inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+ info->ops = &its_msi_domain_ops;
+ info->data = its;
+ inner_domain->host_data = info;
}
spin_lock(&its_lock);
return 0;
-out_free_domains:
- if (its->msi_chip.domain)
- irq_domain_remove(its->msi_chip.domain);
- if (its->domain)
- irq_domain_remove(its->domain);
out_free_tables:
its_free_tables(its);
out_free_cmd:
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/cputype.h>
#include <asm/smp_plat.h>
#include "irq-gic-common.h"
-#include "irqchip.h"
struct redist_region {
void __iomem *redist_base;
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>
#include <asm/smp_plat.h>
#include "irq-gic-common.h"
-#include "irqchip.h"
union gic_base {
void __iomem *common_base;
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
- struct gic_chip_data *chip_data = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq, gic_irq;
unsigned long status;
#endif
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
if (gic_nr >= MAX_GIC_NR)
BUG();
- if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
- BUG();
- irq_set_chained_handler(irq, gic_handle_cascade_irq);
+ irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
+ &gic_data[gic_nr]);
}
static u8 gic_get_cpumask(struct gic_chip_data *gic)
return mask;
}
-static void gic_cpu_if_up(void)
+static void gic_cpu_if_up(struct gic_chip_data *gic)
{
- void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+ void __iomem *cpu_base = gic_data_cpu_base(gic);
u32 bypass = 0;
/*
int i;
/*
- * Get what the GIC says our CPU mask is.
+ * Setting up the CPU map is only relevant for the primary GIC
+ * because any nested/secondary GICs do not directly interface
+ * with the CPU(s).
*/
- BUG_ON(cpu >= NR_GIC_CPU_IF);
- cpu_mask = gic_get_cpumask(gic);
- gic_cpu_map[cpu] = cpu_mask;
+ if (gic == &gic_data[0]) {
+ /*
+ * Get what the GIC says our CPU mask is.
+ */
+ BUG_ON(cpu >= NR_GIC_CPU_IF);
+ cpu_mask = gic_get_cpumask(gic);
+ gic_cpu_map[cpu] = cpu_mask;
- /*
- * Clear our mask from the other map entries in case they're
- * still undefined.
- */
- for (i = 0; i < NR_GIC_CPU_IF; i++)
- if (i != cpu)
- gic_cpu_map[i] &= ~cpu_mask;
+ /*
+ * Clear our mask from the other map entries in case they're
+ * still undefined.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ if (i != cpu)
+ gic_cpu_map[i] &= ~cpu_mask;
+ }
gic_cpu_config(dist_base, NULL);
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
- gic_cpu_if_up();
+ gic_cpu_if_up(gic);
}
-void gic_cpu_if_down(void)
+int gic_cpu_if_down(unsigned int gic_nr)
{
- void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+ void __iomem *cpu_base;
u32 val = 0;
+ if (gic_nr >= MAX_GIC_NR)
+ return -EINVAL;
+
+ cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
val = readl(cpu_base + GIC_CPU_CTRL);
val &= ~GICC_ENABLE;
writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
+
+ return 0;
}
#ifdef CONFIG_CPU_PM
dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
- gic_cpu_if_up();
+ gic_cpu_if_up(&gic_data[gic_nr]);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
.xlate = gic_irq_domain_xlate,
};
-void gic_set_irqchip_flags(unsigned long flags)
-{
- gic_chip.flags |= flags;
-}
-
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
gic_set_base_accessor(gic, gic_get_common_base);
}
- /*
- * Initialize the CPU interface map to all CPUs.
- * It will be refined as each CPU probes its ID.
- */
- for (i = 0; i < NR_GIC_CPU_IF; i++)
- gic_cpu_map[i] = 0xff;
-
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
return;
if (gic_nr == 0) {
+ /*
+ * Initialize the CPU interface map to all CPUs.
+ * It will be refined as each CPU probes its ID.
+ * This is only necessary for the primary GIC.
+ */
+ for (i = 0; i < NR_GIC_CPU_IF; i++)
+ gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
register_cpu_notifier(&gic_cpu_notifier);
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/irq.h>
#include <asm/smp_plat.h>
#include "irq-gic-common.h"
-#include "irqchip.h"
#define HIP04_MAX_IRQS 510
#ifdef CONFIG_SMP
.irq_set_affinity = hip04_irq_set_affinity,
#endif
- .flags = IRQCHIP_SET_TYPE_MASKED,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
};
static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
+
+#include <asm/i8259.h>
+#include <asm/io.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ * plus some generic x86-specific things, if generic specifics make
+ * any sense at all.
+ * This file should become arch/i386/kernel/irq.c when the old irq.c
+ * moves to arch-independent land.
+ */
+
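+/* -1 until init_8259A() has run; the syscore hooks then do nothing. */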
+static int i8259A_auto_eoi = -1;
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
+static void init_8259A(int auto_eoi);
+
+static struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+ .irq_mask = disable_8259A_irq,
+ .irq_disable = disable_8259A_irq,
+ .irq_unmask = enable_8259A_irq,
+ .irq_mask_ack = mask_and_ack_8259A,
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+/*
+ * This contains the irq mask for both 8259A irq controllers.
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define cached_master_mask (cached_irq_mask)
+#define cached_slave_mask (cached_irq_mask >> 8)
+
+static void disable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ cached_irq_mask |= mask;
+ if (irq & 8)
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ else
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void enable_8259A_irq(struct irq_data *d)
+{
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ mask = ~(1 << irq);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ cached_irq_mask &= mask;
+ if (irq & 8)
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ else
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+int i8259A_irq_pending(unsigned int irq)
+{
+ unsigned int mask;
+ unsigned long flags;
+ int ret;
+
+ irq -= I8259A_IRQ_BASE;
+ mask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ if (irq < 8)
+ ret = inb(PIC_MASTER_CMD) & mask;
+ else
+ ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+ return ret;
+}
+
+void make_8259A_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ enable_irq(irq);
+}
+
+/*
+ * This function is expected to be called rarely, since switching
+ * between 8259A registers is slow.
+ * It must be called with the irq controller spinlock held.
+ */
+static inline int i8259A_irq_real(unsigned int irq)
+{
+ int value;
+ int irqmask = 1 << irq;
+
+ if (irq < 8) {
+ outb(0x0B, PIC_MASTER_CMD); /* ISR register */
+ value = inb(PIC_MASTER_CMD) & irqmask;
+ outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */
+ return value;
+ }
+ outb(0x0B, PIC_SLAVE_CMD); /* ISR register */
+ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+ outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */
+ return value;
+}
+
+/*
+ * Careful! The 8259A is a fragile beast, it pretty
+ * much _has_ to be done exactly like this (mask it
+ * first, _then_ send the EOI, and the order of EOI
+ * to the two 8259s is important!
+ */
+static void mask_and_ack_8259A(struct irq_data *d)
+{
+ unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
+ unsigned long flags;
+
+ irqmask = 1 << irq;
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+ /*
+ * Lightweight spurious IRQ detection. We do not want
+ * to overdo spurious IRQ handling - it's usually a sign
+ * of hardware problems, so we only do the checks we can
+ * do without slowing down good hardware unnecessarily.
+ *
+ * Note that IRQ7 and IRQ15 (the two spurious IRQs
+ * usually resulting from the 8259A-1|2 PICs) occur
+ * even if the IRQ is masked in the 8259A. Thus we
+ * can check spurious 8259A IRQs without doing the
+ * quite slow i8259A_irq_real() call for every IRQ.
+ * This does not cover 100% of spurious interrupts,
+ * but should be enough to warn the user that there
+ * is something bad going on ...
+ */
+ if (cached_irq_mask & irqmask)
+ goto spurious_8259A_irq;
+ cached_irq_mask |= irqmask;
+
+handle_real_irq:
+ if (irq & 8) {
+ inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+ } else {
+ inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
+ outb(cached_master_mask, PIC_MASTER_IMR);
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
+ }
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+ return;
+
+spurious_8259A_irq:
+ /*
+ * this is the slow path - should happen rarely.
+ */
+ if (i8259A_irq_real(irq))
+ /*
+ * oops, the IRQ _is_ in service according to the
+ * 8259A - not spurious, go handle it.
+ */
+ goto handle_real_irq;
+
+ {
+ static int spurious_irq_mask;
+ /*
+ * At this point we can be sure the IRQ is spurious,
+ * lets ACK and report it. [once per IRQ]
+ */
+ if (!(spurious_irq_mask & irqmask)) {
+ printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+ atomic_inc(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+ * simpler for us.
+ */
+ goto handle_real_irq;
+ }
+}
+
+static void i8259A_resume(void)
+{
+ if (i8259A_auto_eoi >= 0)
+ init_8259A(i8259A_auto_eoi);
+}
+
+static void i8259A_shutdown(void)
+{
+ /* Put the i8259A into a quiescent state that
+ * the kernel initialization code can get it
+ * out of.
+ */
+ if (i8259A_auto_eoi >= 0) {
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+ }
+}
+
+static struct syscore_ops i8259_syscore_ops = {
+ .resume = i8259A_resume,
+ .shutdown = i8259A_shutdown,
+};
+
+static int __init i8259A_init_sysfs(void)
+{
+ register_syscore_ops(&i8259_syscore_ops);
+ return 0;
+}
+
+device_initcall(i8259A_init_sysfs);
+
+static void init_8259A(int auto_eoi)
+{
+ unsigned long flags;
+
+ i8259A_auto_eoi = auto_eoi;
+
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
+
+ /*
+ * outb_p - this has to work on a wide range of PC hardware.
+ */
+ outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
+ outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */
+ if (auto_eoi) /* master does Auto EOI */
+ outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+ else /* master expects normal EOI */
+ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+ outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */
+ outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+ outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */
+ outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+ if (auto_eoi)
+ /*
+ * In AEOI mode we just have to mask the interrupt
+ * when acking.
+ */
+ i8259A_chip.irq_mask_ack = disable_8259A_irq;
+ else
+ i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+
+ udelay(100); /* wait for 8259A to initialize */
+
+ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
+
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+/*
+ * IRQ2 is the cascade interrupt to the second interrupt controller
+ */
+static struct irqaction irq2 = {
+ .handler = no_action,
+ .name = "cascade",
+ .flags = IRQF_NO_THREAD,
+};
+
+static struct resource pic1_io_resource = {
+ .name = "pic1",
+ .start = PIC_MASTER_CMD,
+ .end = PIC_MASTER_IMR,
+ .flags = IORESOURCE_BUSY
+};
+
+static struct resource pic2_io_resource = {
+ .name = "pic2",
+ .start = PIC_SLAVE_CMD,
+ .end = PIC_SLAVE_IMR,
+ .flags = IORESOURCE_BUSY
+};
+
+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+ irq_set_probe(virq);
+ return 0;
+}
+
+static struct irq_domain_ops i8259A_ops = {
+ .map = i8259A_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * On systems with i8259-style interrupt controllers we assume for
+ * driver compatibility reasons interrupts 0 - 15 to be the i8259
+ * interrupts even if the hardware uses a different interrupt numbering.
+ */
+struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
+{
+ struct irq_domain *domain;
+
+ insert_resource(&ioport_resource, &pic1_io_resource);
+ insert_resource(&ioport_resource, &pic2_io_resource);
+
+ init_8259A(0);
+
+ domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
+ &i8259A_ops, NULL);
+ if (!domain)
+ panic("Failed to add i8259 IRQ domain");
+
+ setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ return domain;
+}
+
+void __init init_i8259_irqs(void)
+{
+ __init_i8259_irqs(NULL);
+}
+
+static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ int hwirq = i8259_irq();
+ unsigned int irq;
+
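+ /* i8259_irq() returns a negative value when no interrupt is pending. */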
+ if (hwirq < 0)
+ return;
+
+ irq = irq_linear_revmap(domain, hwirq);
+ generic_handle_irq(irq);
+}
+
+int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ unsigned int parent_irq;
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map i8259 parent IRQ\n");
+ return -ENODEV;
+ }
+
+ domain = __init_i8259_irqs(node);
+ irq_set_handler_data(parent_irq, domain);
+ irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
+ return 0;
+}
+IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
return 0;
}
-static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
+static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct pdc_intc_priv *priv;
unsigned int i, irq_no;
/* Setup chained handlers for the peripheral IRQs */
for (i = 0; i < priv->nr_perips; ++i) {
irq = priv->perip_irqs[i];
- irq_set_handler_data(irq, priv);
- irq_set_chained_handler(irq, pdc_intc_perip_isr);
+ irq_set_chained_handler_and_data(irq, pdc_intc_perip_isr,
+ priv);
}
/* Setup chained handler for the syswake IRQ */
- irq_set_handler_data(priv->syswake_irq, priv);
- irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
+ irq_set_chained_handler_and_data(priv->syswake_irq,
+ pdc_intc_syswake_isr, priv);
dev_info(&pdev->dev,
"PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
--- /dev/null
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/syscore_ops.h>
+
+#define IMR_NUM 4
+#define GPC_MAX_IRQS (IMR_NUM * 32)
+
+#define GPC_IMR1_CORE0 0x30
+#define GPC_IMR1_CORE1 0x40
+
+struct gpcv2_irqchip_data {
+ struct raw_spinlock rlock;
+ void __iomem *gpc_base;
+ u32 wakeup_sources[IMR_NUM];
+ u32 saved_irq_mask[IMR_NUM];
+ u32 cpu2wakeup;
+};
+
+static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+
+/*
+ * Interface for the low level wakeup code.
+ */
+u32 imx_gpcv2_get_wakeup_source(u32 **sources)
+{
+ if (!imx_gpcv2_instance)
+ return 0;
+
+ if (sources)
+ *sources = imx_gpcv2_instance->wakeup_sources;
+
+ return IMR_NUM;
+}
+
+static int gpcv2_wakeup_source_save(void)
+{
+ struct gpcv2_irqchip_data *cd;
+ void __iomem *reg;
+ int i;
+
+ cd = imx_gpcv2_instance;
+ if (!cd)
+ return 0;
+
+ for (i = 0; i < IMR_NUM; i++) {
+ reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+ cd->saved_irq_mask[i] = readl_relaxed(reg);
+ writel_relaxed(cd->wakeup_sources[i], reg);
+ }
+
+ return 0;
+}
+
+static void gpcv2_wakeup_source_restore(void)
+{
+ struct gpcv2_irqchip_data *cd;
+ void __iomem *reg;
+ int i;
+
+ cd = imx_gpcv2_instance;
+ if (!cd)
+ return;
+
+ for (i = 0; i < IMR_NUM; i++) {
+ reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+ writel_relaxed(cd->saved_irq_mask[i], reg);
+ }
+}
+
+static struct syscore_ops imx_gpcv2_syscore_ops = {
+ .suspend = gpcv2_wakeup_source_save,
+ .resume = gpcv2_wakeup_source_restore,
+};
+
+static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ unsigned int idx = d->hwirq / 32;
+ unsigned long flags;
+ void __iomem *reg;
+ u32 mask, val;
+
+ raw_spin_lock_irqsave(&cd->rlock, flags);
+ reg = cd->gpc_base + cd->cpu2wakeup + idx * 4;
+ mask = 1 << d->hwirq % 32;
+ val = cd->wakeup_sources[idx];
+
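+ /* These are mask registers, so an enabled wakeup source has its bit cleared. */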
+ cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
+ raw_spin_unlock_irqrestore(&cd->rlock, flags);
+
+ /*
+ * Do *not* call into the parent, as the GIC doesn't have any
+ * wake-up facility...
+ */
+
+ return 0;
+}
+
+static void imx_gpcv2_irq_unmask(struct irq_data *d)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock(&cd->rlock);
+ reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+ val = readl_relaxed(reg);
+ val &= ~(1 << d->hwirq % 32);
+ writel_relaxed(val, reg);
+ raw_spin_unlock(&cd->rlock);
+
+ irq_chip_unmask_parent(d);
+}
+
+static void imx_gpcv2_irq_mask(struct irq_data *d)
+{
+ struct gpcv2_irqchip_data *cd = d->chip_data;
+ void __iomem *reg;
+ u32 val;
+
+ raw_spin_lock(&cd->rlock);
+ reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+ val = readl_relaxed(reg);
+ val |= 1 << (d->hwirq % 32);
+ writel_relaxed(val, reg);
+ raw_spin_unlock(&cd->rlock);
+
+ irq_chip_mask_parent(d);
+}
+
+static struct irq_chip gpcv2_irqchip_data_chip = {
+ .name = "GPCv2",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = imx_gpcv2_irq_mask,
+ .irq_unmask = imx_gpcv2_irq_unmask,
+ .irq_set_wake = imx_gpcv2_irq_set_wake,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int imx_gpcv2_domain_xlate(struct irq_domain *domain,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ /* Shouldn't happen, really... */
+ if (domain->of_node != controller)
+ return -EINVAL;
+
+ /* Not GIC compliant */
+ if (intsize != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (intspec[0] != 0)
+ return -EINVAL;
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2];
+ return 0;
+}
+
+static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct of_phandle_args *args = data;
+ struct of_phandle_args parent_args;
+ irq_hw_number_t hwirq;
+ int i;
+
+ /* Not GIC compliant */
+ if (args->args_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (args->args[0] != 0)
+ return -EINVAL;
+
+ /* Can't deal with this */
+ hwirq = args->args[1];
+ if (hwirq >= GPC_MAX_IRQS)
+ return -EINVAL;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
+ &gpcv2_irqchip_data_chip, domain->host_data);
+ }
+
+ parent_args = *args;
+ parent_args.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
+}
+
+static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
+ .xlate = imx_gpcv2_domain_xlate,
+ .alloc = imx_gpcv2_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init imx_gpcv2_irqchip_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct gpcv2_irqchip_data *cd;
+ int i;
+
+ if (!parent) {
+ pr_err("%s: no parent, giving up\n", node->full_name);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%s: unable to get parent domain\n", node->full_name);
+ return -ENXIO;
+ }
+
+ cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
+ if (!cd) {
+ pr_err("kzalloc failed!\n");
+ return -ENOMEM;
+ }
+
+ cd->gpc_base = of_iomap(node, 0);
+ if (!cd->gpc_base) {
+ pr_err("fsl-gpcv2: unable to map gpc registers\n");
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+ node, &gpcv2_irqchip_data_domain_ops, cd);
+ if (!domain) {
+ iounmap(cd->gpc_base);
+ kfree(cd);
+ return -ENOMEM;
+ }
+ irq_set_default_host(domain);
+
+ /* Initially mask all interrupts */
+ for (i = 0; i < IMR_NUM; i++) {
+ writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
+ writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
+ cd->wakeup_sources[i] = ~0;
+ }
+
+ /* Set CORE0 as the default CPU to be woken up by the GPC */
+ cd->cpu2wakeup = GPC_IMR1_CORE0;
+
+ /*
+ * Due to a hardware design flaw, the GPR interrupt (#32) must be
+ * kept unmasked during RUN mode to avoid entering DSM by mistake.
+ */
+ writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
+
+ imx_gpcv2_instance = cd;
+ register_syscore_ops(&imx_gpcv2_syscore_ops);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
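An illustrative device-tree fragment for this controller; the unit addresses and the client node are examples, not taken from this series:

	gpc: interrupt-controller@303a0000 {
		compatible = "fsl,imx7d-gpc";
		reg = <0x303a0000 0x10000>;
		interrupt-controller;
		#interrupt-cells = <3>;
		interrupt-parent = <&intc>;	/* the GIC */
	};

	uart1: serial@30860000 {
		/* GIC-style three-cell specifier, routed through the GPC */
		interrupt-parent = <&gpc>;
		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
	};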
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/ingenic.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/mach-jz4740/irq.h>
-#include "irqchip.h"
-
struct ingenic_intc_data {
void __iomem *base;
unsigned num_chips;
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include "irqchip.h"
-
/* The source ID bits start from 4 to 31 (total 28 bits)*/
#define BIT_OFS 4
/* nothing to do here */
}
-static void keystone_irq_handler(unsigned irq, struct irq_desc *desc)
+static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
unsigned long pending;
int src, virq;
#ifdef CONFIG_METAG_SUSPEND_MEM
struct meta_intc_priv *priv = &meta_intc_priv;
#endif
- unsigned int irq = data->irq;
irq_hw_number_t hw = data->hwirq;
unsigned int bit = 1 << meta_intc_offset(hw);
void __iomem *level_addr = meta_intc_level_addr(hw);
/* update the chip/handler */
if (flow_type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip,
- handle_level_irq, NULL);
+ irq_set_chip_handler_name_locked(data, &meta_intc_level_chip,
+ handle_level_irq, NULL);
else
- __irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip,
- handle_edge_irq, NULL);
+ irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip,
+ handle_edge_irq, NULL);
/* and clear/set the bit in HWLEVELEXT */
__global_lock2(flags);
int irq = tbisig_map(signum);
/* Register the multiplexed IRQ handler */
- irq_set_handler_data(irq, priv);
- irq_set_chained_handler(irq, metag_internal_irq_demux);
+ irq_set_chained_handler_and_data(irq, metag_internal_irq_demux, priv);
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
}
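This is the conversion pattern applied throughout the series; a minimal sketch of why the combined call is preferred (names illustrative):

	static void example_install_demux(unsigned int irq, void *priv,
					  irq_flow_handler_t demux_handler)
	{
		/*
		 * Old, racy two-step:
		 *	irq_set_handler_data(irq, priv);
		 *	irq_set_chained_handler(irq, demux_handler);
		 * The handler could run before its data was valid (or,
		 * on teardown, after it went away). The combined call
		 * installs both under the descriptor lock:
		 */
		irq_set_chained_handler_and_data(irq, demux_handler, priv);
	}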
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
#include <asm/mipsmtregs.h>
#include <asm/setup.h>
-#include "irqchip.h"
-
static inline void unmask_mips_irq(struct irq_data *d)
{
set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <dt-bindings/interrupt-controller/mips-gic.h>
-#include "irqchip.h"
-
unsigned int gic_present;
struct gic_pcpu_mask {
break;
}
- if (is_edge) {
- __irq_set_chip_handler_name_locked(d->irq,
- &gic_edge_irq_controller,
- handle_edge_irq, NULL);
- } else {
- __irq_set_chip_handler_name_locked(d->irq,
- &gic_level_irq_controller,
- handle_level_irq, NULL);
- }
+ if (is_edge)
+ irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
+ handle_edge_irq, NULL);
+ else
+ irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+ handle_level_irq, NULL);
spin_unlock_irqrestore(&gic_lock, flags);
return 0;
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
- cpumask_copy(d->affinity, cpumask);
+ cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK_NOCOPY;
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <asm/exception.h>
#include <asm/hardirq.h>
-#include "irqchip.h"
-
#define MAX_ICU_NR 16
#define PJ1_INT_SEL 0x10c
.irq_unmask = icu_unmask_irq,
};
-static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct irq_domain *domain;
struct icu_chip_data *data;
int i;
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define IRQ_SOURCE_REG 0
#define IRQ_MASK_REG 0x04
#define IRQ_CLEAR_REG 0x08
*/
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "irqchip.h"
-
struct mtk_sysirq_chip_data {
spinlock_t lock;
void __iomem *intpol_base;
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/stmp_device.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define HW_ICOLL_VECTOR 0x0000
#define HW_ICOLL_LEVELACK 0x0010
#define HW_ICOLL_CTRL 0x0020
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/v7m.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define NVIC_ISER 0x000
#define NVIC_ICER 0x080
#define NVIC_IPR 0x300
#include <linux/io.h>
#include <asm/exception.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include "irqchip.h"
-
/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE 0x480fe000
#define OMAP34XX_IC_BASE 0x48200000
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
- u32 irqnr = 0;
- int handled_irq = 0;
- int i;
-
- do {
- for (i = 0; i < omap_nr_pending; i++) {
- irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i));
- if (irqnr)
- goto out;
- }
-
-out:
- if (!irqnr)
- break;
+ u32 irqnr;
- irqnr = intc_readl(INTC_SIR);
- irqnr &= ACTIVEIRQ_MASK;
-
- if (irqnr) {
- handle_domain_irq(domain, irqnr, regs);
- handled_irq = 1;
- }
- } while (irqnr);
-
- /*
- * If an irq is masked or deasserted while active, we will
- * keep ending up here with no irq handled. So remove it from
- * the INTC with an ack.
- */
- if (!handled_irq)
- omap_ack_irq(NULL);
+ irqnr = intc_readl(INTC_SIR);
+ irqnr &= ACTIVEIRQ_MASK;
+ WARN_ONCE(!irqnr, "Spurious IRQ?\n");
+ handle_domain_irq(domain, irqnr, regs);
}
void __init omap3_init_irq(void)
*/
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include "irqchip.h"
-
/* OR1K PIC implementation */
struct or1k_pic_dev {
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
/*
* Orion SoC main interrupt controller
*/
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_domain *d = irq_get_handler_data(irq);
+ struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, orion_bridge_irq_handler);
+ irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
+ domain);
return 0;
}
#include <linux/of_irq.h>
#include <asm/io.h>
-#include "irqchip.h"
-
static const char ipr_bit[] = {
7, 6, 5, 5,
4, 4, 4, 4, 3, 3, 3, 3,
*/
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
-#include "irqchip.h"
static void *intc_baseaddr;
#define IPRA ((unsigned long)intc_baseaddr)
struct irqc_irq {
int hw_irq;
int requested_irq;
- int domain_irq;
struct irqc_priv *p;
};
static void irqc_dbg(struct irqc_irq *i, char *str)
{
- dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
- str, i->requested_irq, i->hw_irq, i->domain_irq);
+ dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
+ str, i->requested_irq, i->hw_irq);
}
static void irqc_irq_enable(struct irq_data *d)
if (ioread32(p->iomem + DETECT_STATUS) & bit) {
iowrite32(bit, p->iomem + DETECT_STATUS);
irqc_dbg(i, "demux2");
- generic_handle_irq(i->domain_irq);
+ generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
return IRQ_HANDLED;
}
return IRQ_NONE;
{
struct irqc_priv *p = h->host_data;
- p->irq[hw].domain_irq = virq;
- p->irq[hw].hw_irq = hw;
-
irqc_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID); /* kill me now */
return 0;
}
break;
p->irq[k].p = p;
+ p->irq[k].hw_irq = k;
p->irq[k].requested_irq = irq->start;
}
irq_chip->irq_set_wake = irqc_irq_set_wake;
irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
- p->number_of_irqs, 0,
+ p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ p->number_of_irqs,
&irqc_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
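The cached domain_irq member can go because irq_find_mapping() on the linear domain created above is a constant-time revmap lookup; a sketch of the equivalence (illustrative):

	/* Equivalent to the old generic_handle_irq(i->domain_irq): the
	 * linear revmap already stores the virq established at map time.
	 */
	static void example_demux(struct irqc_priv *p, int hw_irq)
	{
		generic_handle_irq(irq_find_mapping(p->irq_domain, hw_irq));
	}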
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <plat/regs-irqtype.h>
#include <plat/pm.h>
-#include "irqchip.h"
-
#define S3C_IRQTYPE_NONE 0
#define S3C_IRQTYPE_EINT 1
#define S3C_IRQTYPE_EDGE 2
.irq_set_type = s3c_irqext0_type,
};
-static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
struct s3c_irq_intc *intc = irq_data->intc;
struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
- unsigned long src;
- unsigned long msk;
- unsigned int n;
- unsigned int offset;
+ unsigned int n, offset, irq;
+ unsigned long src, msk;
/* we're using individual domains for the non-dt case
* and one big domain for the dt case where the subintc
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/syscore_ops.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>
-#include "irqchip.h"
-#define SIRFSOC_INT_RISC_MASK0 0x0018
-#define SIRFSOC_INT_RISC_MASK1 0x001C
-#define SIRFSOC_INT_RISC_LEVEL0 0x0020
-#define SIRFSOC_INT_RISC_LEVEL1 0x0024
+#define SIRFSOC_INT_RISC_MASK0 0x0018
+#define SIRFSOC_INT_RISC_MASK1 0x001C
+#define SIRFSOC_INT_RISC_LEVEL0 0x0020
+#define SIRFSOC_INT_RISC_LEVEL1 0x0024
#define SIRFSOC_INIT_IRQ_ID 0x0038
+#define SIRFSOC_INT_BASE_OFFSET 0x0004
#define SIRFSOC_NUM_IRQS 64
+#define SIRFSOC_NUM_BANKS (SIRFSOC_NUM_IRQS / 32)
static struct irq_domain *sirfsoc_irqdomain;
-static __init void
-sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
+static __init void sirfsoc_alloc_gc(void __iomem *base)
{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
- int ret;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
unsigned int set = IRQ_LEVEL;
-
- ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
- handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE);
-
- gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
- gc->reg_base = base;
- ct = gc->chip_types;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
- ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ int i;
+
+ irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc",
+ handle_level_irq, clr, set,
+ IRQ_GC_INIT_MASK_CACHE);
+
+ for (i = 0; i < SIRFSOC_NUM_BANKS; i++) {
+ gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32);
+ gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET;
+ ct = gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+ }
}
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
panic("unable to map intc cpu registers\n");
sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
- &irq_generic_chip_ops, base);
-
- sirfsoc_alloc_gc(base, 0, 32);
- sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
+ &irq_generic_chip_ops, base);
+ sirfsoc_alloc_gc(base);
writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
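With the per-bank loop above, the old explicit second-bank setup falls out of the 4-byte stride; an illustrative compile-time check of that arithmetic (BUILD_BUG_ON must sit in function scope):

	/* bank 1's mask register is one bank stride past bank 0's */
	BUILD_BUG_ON(SIRFSOC_INT_RISC_MASK0 + SIRFSOC_INT_BASE_OFFSET !=
		     SIRFSOC_INT_RISC_MASK1);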
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define SUN4I_IRQ_VECTOR_REG 0x00
#define SUN4I_IRQ_PROTECTION_REG 0x08
#define SUN4I_IRQ_NMI_CTRL_REG 0x0c
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int virq = irq_find_mapping(domain, 0);
chained_irq_enter(chip, desc);
sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);
+ irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
return 0;
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/bitops.h>
-#include "irqchip.h"
#define AB_IRQCTL_INT_ENABLE 0x00
#define AB_IRQCTL_INT_STATUS 0x04
return IRQ_SET_MASK_OK;
}
-static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
generic_handle_irq(irq_find_mapping(domain, irq));
}
for (i = 0; i < nrirqs; i++) {
unsigned int irq = irq_of_parse_and_map(ictl, i);
- irq_set_handler_data(irq, domain);
- irq_set_chained_handler(irq, tb10x_irq_cascade);
+ irq_set_chained_handler_and_data(irq, tb10x_irq_cascade,
+ domain);
}
ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "irqchip.h"
-
#define ICTLR_CPU_IEP_VFIQ 0x08
#define ICTLR_CPU_IEP_FIR 0x14
#define ICTLR_CPU_IEP_FIR_SET 0x18
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define IRQ_STATUS 0x00
#define IRQ_RAW_STATUS 0x04
#define IRQ_ENABLE_SET 0x08
writel(mask, f->base + IRQ_ENABLE_SET);
}
-static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc)
{
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
u32 status = readl(f->base + IRQ_STATUS);
if (status == 0) {
f->valid = valid;
if (parent_irq != -1) {
- irq_set_handler_data(parent_irq, f);
- irq_set_chained_handler(parent_irq, fpga_irq_handle);
+ irq_set_chained_handler_and_data(parent_irq, fpga_irq_handle,
+ f);
}
/* This will also allocate irq descriptors */
#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "irqchip.h"
-
#define MSCM_CPxNUM 0x4
#define MSCM_IRSPRC(n) (0x80 + 2 * (n))
#include <linux/list.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <asm/exception.h>
#include <asm/irq.h>
-#include "irqchip.h"
-
#define VIC_IRQ_STATUS 0x00
#define VIC_FIQ_STATUS 0x04
#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */
vic_id++;
if (parent_irq) {
- irq_set_handler_data(parent_irq, v);
- irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded);
+ irq_set_chained_handler_and_data(parent_irq,
+ vic_handle_irq_cascaded, v);
}
v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
-#include "irqchip.h"
-
#define VT8500_ICPC_IRQ 0x20
#define VT8500_ICPC_FIQ 0x24
#define VT8500_ICDC 0x40 /* Destination Control 64*u32 */
return -EINVAL;
case IRQF_TRIGGER_HIGH:
dctr |= VT8500_TRIGGER_HIGH;
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
break;
case IRQF_TRIGGER_FALLING:
dctr |= VT8500_TRIGGER_FALLING;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
case IRQF_TRIGGER_RISING:
dctr |= VT8500_TRIGGER_RISING;
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
break;
}
writeb(dctr, base + VT8500_ICDC + d->hwirq);
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <asm/mxregs.h>
-#include "irqchip.h"
-
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
-#include "irqchip.h"
-
unsigned int cached_irq_mask;
/*
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>
-#include "irqchip.h"
-
#define IO_STATUS 0x000
#define IO_RAW_STATUS 0x004
#define IO_ENABLE 0x008
+++ /dev/null
-/*
- * Copyright (C) 2012 Thomas Petazzoni
- *
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/irqchip.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
-#include "irqchip.h"
-
/*
* struct spear_shirq: shared irq structure
*
&spear320_shirq_intrcomm_ras,
};
-static void shirq_handler(unsigned irq, struct irq_desc *desc)
+static void shirq_handler(unsigned __irq, struct irq_desc *desc)
{
- struct spear_shirq *shirq = irq_get_handler_data(irq);
+ struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
u32 pend;
pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
* driver.
*/
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
kfree(desc);
}
}
+
+/**
+ * of_msi_configure - Set the msi_domain field of a device
+ * @dev: device structure to associate with an MSI irq domain
+ * @np: device node for that device
+ */
+void of_msi_configure(struct device *dev, struct device_node *np)
+{
+ struct device_node *msi_np;
+ struct irq_domain *d;
+
+ msi_np = of_parse_phandle(np, "msi-parent", 0);
+ if (!msi_np)
+ return;
+
+ d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI);
+ if (!d)
+ d = irq_find_host(msi_np);
+ dev_set_msi_domain(dev, d);
+}
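The property consumed here is the standard msi-parent phandle; an illustrative client node (all names are examples):

	dma@2e000000 {
		compatible = "vendor,example-dma";
		msi-parent = <&its>;
	};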
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
of_dma_deconfigure(&dev->dev);
if (dest_cpu < 0)
return -1;
- cpumask_copy(d->affinity, cpumask_of(dest_cpu));
+ cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags);
{
u32 offset, reg_offset, bit_pos;
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
- unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
- msi = irq_get_msi_desc(irq);
- pp = sys_to_pcie(msi->dev->bus->sysdata);
+ msi = irq_data_get_msi_desc(d);
+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
ks_pcie = to_keystone_pcie(pp);
- offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
return -EINVAL;
}
-static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
* Traverse through pending legacy interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(unsigned int __irq,
+ struct irq_desc *desc)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
/* Legacy IRQ */
for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
- irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
- ks_pcie_legacy_irq_handler);
+ irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+ ks_pcie_legacy_irq_handler,
+ ks_pcie);
}
ks_dw_pcie_enable_legacy_irqs(ks_pcie);
struct xgene_msi {
struct device_node *node;
- struct msi_controller mchip;
- struct irq_domain *domain;
+ struct irq_domain *inner_domain;
+ struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
static int xgene_allocate_domains(struct xgene_msi *msi)
{
- msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->domain)
+ msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+ &msi_domain_ops, msi);
+ if (!msi->inner_domain)
return -ENOMEM;
- msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
- &xgene_msi_domain_info,
- msi->domain);
+ msi->msi_domain = pci_msi_create_irq_domain(msi->node,
+ &xgene_msi_domain_info,
+ msi->inner_domain);
- if (!msi->mchip.domain) {
- irq_domain_remove(msi->domain);
+ if (!msi->msi_domain) {
+ irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
static void xgene_free_domains(struct xgene_msi *msi)
{
- if (msi->mchip.domain)
- irq_domain_remove(msi->mchip.domain);
- if (msi->domain)
- irq_domain_remove(msi->domain);
+ if (msi->msi_domain)
+ irq_domain_remove(msi->msi_domain);
+ if (msi->inner_domain)
+ irq_domain_remove(msi->inner_domain);
}
static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
* CPU0
*/
hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- virq = irq_find_mapping(xgene_msi->domain, hw_irq);
+ virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
WARN_ON(!virq);
if (virq != 0)
generic_handle_irq(virq);
for (i = 0; i < NR_HW_IRQS; i++) {
virq = msi->msi_groups[i].gic_irq;
- if (virq != 0) {
- irq_set_chained_handler(virq, NULL);
- irq_set_handler_data(virq, NULL);
- }
+ if (virq != 0)
+ irq_set_chained_handler_and_data(virq, NULL, NULL);
}
kfree(msi->msi_groups);
}
if (err) {
- irq_set_chained_handler(msi_group->gic_irq, NULL);
- irq_set_handler_data(msi_group->gic_irq, NULL);
+ irq_set_chained_handler_and_data(msi_group->gic_irq,
+ NULL, NULL);
return err;
}
}
if (!msi_group->gic_irq)
continue;
- irq_set_chained_handler(msi_group->gic_irq, NULL);
- irq_set_handler_data(msi_group->gic_irq, NULL);
+ irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+ NULL);
}
}
goto error;
}
xgene_msi->msi_addr = res->start;
-
+ xgene_msi->node = pdev->dev.of_node;
xgene_msi->num_cpus = num_possible_cpus();
rc = xgene_msi_init_allocator(xgene_msi);
cpu_notifier_register_done();
- xgene_msi->mchip.of_node = pdev->dev.of_node;
- rc = of_pci_msi_chip_add(&xgene_msi->mchip);
- if (rc) {
- dev_err(&pdev->dev, "failed to add MSI controller chip\n");
- goto error_notifier;
- }
-
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
-error_notifier:
- unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
error:
xgene_msi_remove(pdev);
return rc;
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
- struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+ struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
- struct msi_desc *msi = irq_data_get_msi(data);
- struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+ struct msi_desc *msi = irq_data_get_msi_desc(data);
+ struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
clear_irq_range(pp, irq, 1, data->hwirq);
}
*/
static void xilinx_pcie_destroy_msi(unsigned int irq)
{
- struct irq_desc *desc;
struct msi_desc *msi;
struct xilinx_pcie_port *port;
- desc = irq_to_desc(irq);
- msi = irq_desc_get_msi_desc(desc);
- port = sys_to_pcie(msi->dev->bus->sysdata);
-
- if (!test_bit(irq, msi_irq_in_use))
+ if (!test_bit(irq, msi_irq_in_use)) {
+ msi = irq_get_msi_desc(irq);
+ port = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
- else
+ } else {
clear_bit(irq, msi_irq_in_use);
+ }
}
/**
static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
{
- struct irq_domain *domain = NULL;
+ struct irq_domain *domain;
- if (dev->bus->msi)
- domain = dev->bus->msi->domain;
- if (!domain)
- domain = arch_get_pci_msi_domain(dev);
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain)
+ return domain;
- return domain;
+ return arch_get_pci_msi_domain(dev);
}
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
ret = arch_setup_msi_irq(dev, entry);
if (ret < 0)
return ret;
int i;
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
arch_teardown_msi_irq(entry->irq + i);
entry = NULL;
if (dev->msix_enabled) {
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (irq == entry->irq)
break;
}
mask_bits &= ~mask;
mask_bits |= flag;
- pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
+ mask_bits);
return mask_bits;
}
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
- struct msi_desc *desc = irq_data_get_msi(data);
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag);
{
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
default_restore_msi_irq(dev, entry->irq);
}
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
- BUG_ON(entry->dev->current_state != PCI_D0);
+ struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+ BUG_ON(dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
- struct pci_dev *dev = entry->dev;
int pos = dev->msi_cap;
u16 data;
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
- if (entry->dev->current_state != PCI_D0) {
+ struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+ if (dev->current_state != PCI_D0) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base;
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
- struct pci_dev *dev = entry->dev;
int pos = dev->msi_cap;
u16 msgctl;
static void free_msi_irqs(struct pci_dev *dev)
{
+ struct list_head *msi_list = dev_to_msi_list(&dev->dev);
struct msi_desc *entry, *tmp;
struct attribute **msi_attrs;
struct device_attribute *dev_attr;
int i, count = 0;
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
if (entry->irq)
for (i = 0; i < entry->nvec_used; i++)
BUG_ON(irq_has_action(entry->irq + i));
pci_msi_teardown_msi_irqs(dev);
- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ list_for_each_entry_safe(entry, tmp, msi_list, list) {
if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, &dev->msi_list))
+ if (list_is_last(&entry->list, msi_list))
iounmap(entry->mask_base);
}
}
}
-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
-{
- struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return NULL;
-
- INIT_LIST_HEAD(&desc->list);
- desc->dev = dev;
-
- return desc;
-}
-
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(&dev->msi_list));
+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
arch_restore_msi_irqs(dev);
- list_for_each_entry(entry, &dev->msi_list, list)
+ for_each_pci_msi_entry(entry, dev)
msix_mask_irq(entry, entry->masked);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
int count = 0;
/* Determine how many msi entries we have */
- list_for_each_entry(entry, &pdev->msi_list, list)
+ for_each_pci_msi_entry(entry, pdev)
++num_msi;
if (!num_msi)
return 0;
msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
if (!msi_attrs)
return -ENOMEM;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
if (!msi_dev_attr)
goto error_attrs;
struct msi_desc *entry;
/* MSI Entry Initialization */
- entry = alloc_msi_entry(dev);
+ entry = alloc_msi_entry(&dev->dev);
if (!entry)
return NULL;
{
struct msi_desc *entry;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (!dev->no_64bit_msi || !entry->msg.address_hi)
continue;
dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
mask = msi_mask(entry->msi_attrib.multi_cap);
msi_mask_irq(entry, mask, mask);
- list_add_tail(&entry->list, &dev->msi_list);
+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
int i;
for (i = 0; i < nvec; i++) {
- entry = alloc_msi_entry(dev);
+ entry = alloc_msi_entry(&dev->dev);
if (!entry) {
if (!i)
iounmap(base);
entry->mask_base = base;
entry->nvec_used = 1;
- list_add_tail(&entry->list, &dev->msi_list);
+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
}
return 0;
struct msi_desc *entry;
int i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
PCI_MSIX_ENTRY_VECTOR_CTRL;
struct msi_desc *entry;
int avail = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq != 0)
avail++;
}
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+ desc = first_pci_msi_entry(dev);
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
return;
/* Return the device with MSI-X masked as initial states */
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
__pci_msix_desc_mask_irq(entry, 1);
}
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
- INIT_LIST_HEAD(&dev->msi_list);
}
/**
}
EXPORT_SYMBOL(pci_enable_msix_range);
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+ return to_pci_dev(desc->dev);
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+ return dev->bus->sysdata;
+}
+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
+
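These helpers let irq-side code reach PCI specifics without assuming desc->dev is the pci_dev itself; a sketch mirroring the keystone and designware conversions above:

	static void *example_sysdata_from_irq(struct irq_data *d)
	{
		return msi_desc_to_pci_sysdata(irq_data_get_msi_desc(d));
	}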
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
* pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
*/
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
- struct msi_desc *desc = irq_data->msi_desc;
+ struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
/*
* For MSI-X desc->irq is always equal to irq_data->irq. For
struct msi_domain_info *info,
struct irq_domain *parent)
{
+ struct irq_domain *domain;
+
if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
pci_msi_domain_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
- return msi_create_irq_domain(node, info, parent);
+ domain = msi_create_irq_domain(node, info, parent);
+ if (!domain)
+ return NULL;
+
+ domain->bus_token = DOMAIN_BUS_PCI_MSI;
+ return domain;
}
/**
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
+
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_IRQ_DOMAIN
+ struct device_node *np;
+ struct irq_domain *d;
+
+ if (!bus->dev.of_node)
+ return NULL;
+
+ /* Start looking for a phandle to an MSI controller. */
+ np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+
+ /*
+ * If we don't have an msi-parent property, look for a domain
+ * directly attached to the host bridge.
+ */
+ if (!np)
+ np = bus->dev.of_node;
+
+ d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ if (d)
+ return d;
+
+ return irq_find_host(np);
+#else
+ return NULL;
+#endif
+}
}
}
+static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
+{
+ struct irq_domain *d;
+
+ /*
+ * Any firmware interface that can resolve the msi_domain
+ * should be called from here.
+ */
+ d = pci_host_bridge_of_msi_domain(bus);
+
+ return d;
+}
+
+static void pci_set_bus_msi_domain(struct pci_bus *bus)
+{
+ struct irq_domain *d;
+
+ /*
+ * Either the bus is the root, in which case we must obtain the
+ * domain from the firmware, or we inherit it from the bridge device.
+ */
+ if (pci_is_root_bus(bus))
+ d = pci_host_bridge_msi_domain(bus);
+ else
+ d = dev_get_msi_domain(&bus->self->dev);
+
+ dev_set_msi_domain(&bus->dev, d);
+}
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
bridge->subordinate = child;
add_dev:
+ pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
WARN_ON(ret < 0);
pci_enable_acs(dev);
}
+static void pci_set_msi_domain(struct pci_dev *dev)
+{
+ /*
+ * If no domain has been set through the pcibios_add_device
+ * callback, inherit the default from the bus device.
+ */
+ if (!dev_get_msi_domain(&dev->dev))
+ dev_set_msi_domain(&dev->dev,
+ dev_get_msi_domain(&dev->bus->dev));
+}
+
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
int ret;
ret = pcibios_add_device(dev);
WARN_ON(ret < 0);
+ /* Setup MSI irq domain */
+ pci_set_msi_domain(dev);
+
/* Notifier could use PCI capabilities */
dev->match_driver = false;
ret = device_add(&dev->dev);
b->bridge = get_device(&bridge->dev);
device_enable_async_suspend(b->bridge);
pci_set_bus_of_node(b);
+ pci_set_bus_msi_domain(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
}
i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
/* Vector is useless at this point. */
op.msix_entries[i].vector = -1;
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
- if (!cpumask_test_cpu(cpu, data->affinity))
+ if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
- if (!cpumask_test_cpu(cpu, data->affinity))
+ if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1;
- cpumask_copy(data->affinity, cpumask);
+ cpumask_copy(irq_data_get_affinity_mask(data), cpumask);
return IRQ_SET_MASK_OK_NOCOPY;
}
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
- generic_handle_irq((unsigned int)irq_get_handler_data(irq));
+ generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
}
static void __init intc_register_irq(struct intc_desc *desc,
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
- struct intc_virq_list **last, *entry;
- struct irq_data *data = irq_get_irq_data(irq);
+ struct intc_virq_list *entry;
+ struct intc_virq_list **last = NULL;
/* scan for duplicates */
- last = (struct intc_virq_list **)&data->handler_data;
- for_each_virq(entry, data->handler_data) {
+ for_each_virq(entry, irq_get_handler_data(irq)) {
if (entry->irq == virq)
return 0;
last = &entry->next;
entry->irq = virq;
- *last = entry;
+ if (last)
+ *last = entry;
+ else
+ irq_set_handler_data(irq, entry);
return 0;
}
-static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
+static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
{
- struct irq_data *data = irq_get_irq_data(irq);
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
struct intc_desc_int *d = get_intc_desc(irq);
for_each_virq(entry, vlist) {
unsigned long addr, handle;
+ struct irq_desc *vdesc = irq_to_desc(entry->irq);
- handle = (unsigned long)irq_get_handler_data(entry->irq);
- addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
-
- if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
- generic_handle_irq(entry->irq);
+ if (vdesc) {
+ handle = (unsigned long)irq_desc_get_handler_data(vdesc);
+ addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
+ if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
+ generic_handle_irq_desc(entry->irq, vdesc);
+ }
}
chip->irq_unmask(data);
static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
{
- struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *intr = pa->intr;
int first = pa->min_apid >> 5;
int last = pa->max_apid >> 5;
goto err_put_ctrl;
}
- irq_set_handler_data(pa->irq, pa);
- irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
+ irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
err = spmi_controller_add(ctrl);
if (err)
return 0;
err_domain_remove:
- irq_set_chained_handler(pa->irq, NULL);
- irq_set_handler_data(pa->irq, NULL);
+ irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
irq_domain_remove(pa->domain);
err_put_ctrl:
spmi_controller_put(ctrl);
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
spmi_controller_remove(ctrl);
- irq_set_chained_handler(pa->irq, NULL);
- irq_set_handler_data(pa->irq, NULL);
+ irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
irq_domain_remove(pa->domain);
spmi_controller_put(ctrl);
return 0;
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
* See Documentation/pinctrl.txt for details.
+ * @msi_list: Hosts MSI descriptors
+ * @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *msi_domain;
+#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct list_head msi_list;
+#endif
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
}
#endif
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ return dev->msi_domain;
+#else
+ return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ dev->msi_domain = d;
+#endif
+}
+
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
- * @irq_suspend: function called from core code on suspend once per chip
- * @irq_resume: function called from core code on resume once per chip
+ * @irq_suspend: function called from core code on suspend once per
+ * chip, when one or more interrupts are installed
+ * @irq_resume: function called from core code on resume once per chip,
+ * when one or more interrupts are installed
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_calc_mask: Optional function to set irq_data.mask for special cases
* @irq_print_chip: optional to print special chip info in show_interrupts
#endif
/* Handling of unhandled and spurious interrupts: */
-extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret);
+extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
/* Enable/disable irq debugging output: */
return d ? d->msi_desc : NULL;
}
-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
return d->msi_desc;
}
* @reg_base: Register base address (virtual)
* @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
* @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
+ * @suspend: Function called from core code on suspend once per
+ * chip; can be useful instead of irq_chip::suspend to
+ * handle chip details even when no interrupts are in use
+ * @resume: Function called from core code on resume once per chip;
+ * can be useful instead of irq_chip::resume to handle
+ * chip details even when no interrupts are in use
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
+ void (*suspend)(struct irq_chip_generic *gc);
+ void (*resume)(struct irq_chip_generic *gc);
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
+#include <asm/msi.h>
/*
* We need a value to serve as a irq-type for LPIs. Choose one that will
struct device_node;
-void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_cpu_if_down(void);
+int gic_cpu_if_down(unsigned int gic_nr);
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+};
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
* @match: Match an interrupt controller device node to a host, returns
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
/* Optional data */
struct device_node *of_node;
+ enum irq_domain_bus_token bus_token;
struct irq_domain_chip_generic *gc;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
struct irq_domain *parent;
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_host(struct device_node *node);
+extern struct irq_domain *irq_find_matching_host(struct device_node *node,
+ enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
+static inline struct irq_domain *irq_find_host(struct device_node *node)
+{
+ return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+}
+
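A caller that wants the bus-specific flavour of a node's domain can fall back to the generic one; a sketch mirroring the lookups added elsewhere in this series (of_msi_configure, pci_host_bridge_of_msi_domain):

	static struct irq_domain *example_find_msi_domain(struct device_node *np)
	{
		struct irq_domain *d;

		d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
		if (!d)
			d = irq_find_host(np);	/* DOMAIN_BUS_ANY */
		return d;
	}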
/**
* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
/* Helper functions */
struct irq_data;
struct msi_desc;
+struct pci_dev;
+struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
+ struct msi_msg *msg);
+
+/**
+ * struct platform_msi_desc - Platform device specific msi descriptor data
+ * @msi_priv_data: Pointer to platform private data
+ * @msi_index: The index of the MSI descriptor for multi MSI
+ */
+struct platform_msi_desc {
+ struct platform_msi_priv_data *msi_priv_data;
+ u16 msi_index;
+};
+
+/**
+ * struct msi_desc - Descriptor structure for MSI based interrupts
+ * @list: List head for management
+ * @irq: The base interrupt number
+ * @nvec_used: The number of vectors used
+ * @dev: Pointer to the device which uses this descriptor
+ * @msg: The last set MSI message cached for reuse
+ *
+ * @masked: [PCI MSI/X] Mask bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
+ * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
+ * @platform: [platform] Platform device specific msi descriptor data
+ */
struct msi_desc {
- struct {
- __u8 is_msix : 1;
- __u8 multiple: 3; /* log2 num of messages allocated */
- __u8 multi_cap : 3; /* log2 num of messages supported */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u16 entry_nr; /* specific enabled entry */
- unsigned default_irq; /* default pre-assigned irq */
- } msi_attrib;
-
- u32 masked; /* mask bits */
- unsigned int irq;
- unsigned int nvec_used; /* number of messages */
- struct list_head list;
+ /* Shared device/bus type independent data */
+ struct list_head list;
+ unsigned int irq;
+ unsigned int nvec_used;
+ struct device *dev;
+ struct msi_msg msg;
union {
- void __iomem *mask_base;
- u8 mask_pos;
- };
- struct pci_dev *dev;
+ /* PCI MSI/X specific data */
+ struct {
+ u32 masked;
+ struct {
+ __u8 is_msix : 1;
+ __u8 multiple : 3;
+ __u8 multi_cap : 3;
+ __u8 maskbit : 1;
+ __u8 is_64 : 1;
+ __u16 entry_nr;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
+ };
- /* Last set MSI message */
- struct msi_msg msg;
+ /*
+ * Non-PCI variants add their data structures here; new
+ * entries must use a named structure, as we want proper
+ * name spaces. The PCI part stays anonymous for now, since
+ * naming it would require an immediate tree-wide cleanup.
+ */
+ struct platform_msi_desc platform;
+ };
};
/* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
-#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define msi_desc_to_dev(desc) ((desc)->dev)
+#define dev_to_msi_list(dev) (&(dev)->msi_list)
#define first_msi_entry(dev) \
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
#define for_each_pci_msi_entry(desc, pdev) \
for_each_msi_entry((desc), &(pdev)->dev)
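Usage sketch for the new iterator, equivalent to the open-coded list walks converted above (function name illustrative):

	static int example_count_msi_vectors(struct pci_dev *pdev)
	{
		struct msi_desc *entry;
		int count = 0;

		for_each_pci_msi_entry(entry, pdev)
			if (entry->irq)
				count += entry->nvec_used;
		return count;
	}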
-static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
- return desc->dev;
+ return NULL;
}
#endif /* CONFIG_PCI_MSI */
+struct msi_desc *alloc_msi_entry(struct device *dev);
+void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct device *dev;
struct device_node *of_node;
struct list_head list;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *domain;
-#endif
int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
struct msi_desc *desc);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg);
+void platform_msi_domain_free_irqs(struct device *dev);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
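A hypothetical platform-MSI consumer; the callback body and vector count are illustrative, only the API calls come from this series:

	static void example_write_msi_msg(struct msi_desc *desc,
					  struct msi_msg *msg)
	{
		/*
		 * Program the device's per-vector doorbell here, using
		 * desc->platform.msi_index and msg->address_hi,
		 * msg->address_lo and msg->data.
		 */
	}

	static int example_probe(struct platform_device *pdev)
	{
		/* four vectors; each descriptor then carries a valid irq */
		return platform_msi_domain_alloc_irqs(&pdev->dev, 4,
						      example_write_msi_msg);
	}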
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
*/
extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern void of_msi_configure(struct device *dev, struct device_node *np);
#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
#ifdef CONFIG_PCI_MSI
- struct list_head msi_list;
const struct attribute_group **msi_irq_groups;
#endif
struct pci_vpd *vpd;
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
+struct irq_domain;
void pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif /* CONFIG_OF */
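A brief sketch of how the PCI core might consume the new OF helper while
scanning a host bridge; the surrounding scan code and the availability of
dev_set_msi_domain() on this baseline are assumptions:

	struct irq_domain *d;

	d = pci_host_bridge_of_msi_domain(bus);
	if (d)
		dev_set_msi_domain(&bus->dev, d);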
#ifdef CONFIG_EEH
return -EINVAL;
type &= IRQ_TYPE_SENSE_MASK;
- ret = __irq_set_trigger(desc, irq, type);
+ ret = __irq_set_trigger(desc, type);
irq_put_desc_busunlock(desc, flags);
return ret;
}
irq_enable(desc);
}
if (resend)
- check_irq_resend(desc, desc->irq_data.irq);
+ check_irq_resend(desc);
return ret;
}
raw_spin_lock_irq(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
action = desc->action;
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(desc, action_ret);
raw_spin_lock_irq(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
/*
	 * If it's disabled or no action available
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
/*
	 * If it's disabled or no action available
goto out_unlock;
}
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
/* Start handling the irq */
desc->irq_data.chip->irq_ack(&desc->irq_data);
goto out_eoi;
}
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
do {
if (unlikely(!desc->action))
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
irqreturn_t res;
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
/**
* irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
* @data: Pointer to interrupt specific data
- * @dest: The vcpu affinity information
+ * @vcpu_info: The vcpu affinity information
*/
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
if (data)
ct->chip.irq_suspend(data);
}
+
+ if (gc->suspend)
+ gc->suspend(gc);
}
return 0;
}
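A sketch of how an irqchip driver might hook the new per-chip suspend/resume
callbacks; the state structure, register offset, and init snippet are
hypothetical:

	struct my_intc_state {
		u32 saved_mask;
	};

	static void my_gc_suspend(struct irq_chip_generic *gc)
	{
		struct my_intc_state *st = gc->private;

		/* Save the mask register before the block loses state */
		st->saved_mask = irq_reg_readl(gc, MY_MASK_OFFSET);
	}

	static void my_gc_resume(struct irq_chip_generic *gc)
	{
		struct my_intc_state *st = gc->private;

		/* Restore the saved mask on the way back up */
		irq_reg_writel(gc, st->saved_mask, MY_MASK_OFFSET);
	}

	/* At init time, after allocating the generic chip: */
	gc->private = &my_state;
	gc->suspend = my_gc_suspend;
	gc->resume  = my_gc_resume;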
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
+ if (gc->resume)
+ gc->resume(gc);
+
if (ct->chip.irq_resume) {
struct irq_data *data = irq_gc_get_irq_data(gc);
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
- kstat_incr_irqs_this_cpu(irq, desc);
+ kstat_incr_irqs_this_cpu(desc);
ack_bad_irq(irq);
}
add_interrupt_randomness(irq, flags);
if (!noirqdebug)
- note_interrupt(irq, desc, retval);
+ note_interrupt(desc, retval);
return retval;
}
#include "debug.h"
#include "settings.h"
-extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
- unsigned long flags);
-extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
-extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
+extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc);
+extern void __enable_irq(struct irq_desc *desc);
extern int irq_startup(struct irq_desc *desc, bool resend);
extern void irq_shutdown(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);
/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+void check_irq_resend(struct irq_desc *desc);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
return __irqd_to_state(d) & mask;
}
-static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
+static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
__this_cpu_inc(*desc->kstat_irqs);
__this_cpu_inc(kstat.irqs_sum);
void kstat_incr_irq_this_cpu(unsigned int irq)
{
- kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+ kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}
/**
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
- * irq_find_host() - Locates a domain for a given device node
+ * irq_find_matching_host() - Locates a domain for a given device node
* @node: device-tree node of the interrupt controller
+ * @bus_token: bus token to match; DOMAIN_BUS_ANY matches any domain
*/
-struct irq_domain *irq_find_host(struct device_node *node)
+struct irq_domain *irq_find_matching_host(struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
struct irq_domain *h, *found = NULL;
int rc;
* it might potentially be set to match all interrupts in
	 * the absence of a device node. This hasn't been a problem
	 * so far, though...
+ *
+ * bus_token == DOMAIN_BUS_ANY matches any domain, any other
+ * values must generate an exact match for the domain to be
+ * selected.
*/
mutex_lock(&irq_domain_mutex);
list_for_each_entry(h, &irq_domain_list, link) {
if (h->ops->match)
- rc = h->ops->match(h, node);
+ rc = h->ops->match(h, node, bus_token);
else
- rc = (h->of_node != NULL) && (h->of_node == node);
+ rc = ((h->of_node != NULL) && (h->of_node == node) &&
+ ((bus_token == DOMAIN_BUS_ANY) ||
+ (h->bus_token == bus_token)));
if (rc) {
found = h;
mutex_unlock(&irq_domain_mutex);
return found;
}
-EXPORT_SYMBOL_GPL(irq_find_host);
+EXPORT_SYMBOL_GPL(irq_find_matching_host);
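For callers that only care about the device node, passing DOMAIN_BUS_ANY
preserves the old irq_find_host() behaviour. A brief illustrative lookup
(the node pointer np is assumed to come from the caller):

	struct irq_domain *d;

	d = irq_find_matching_host(np, DOMAIN_BUS_ANY);
	if (!d)
		return -EPROBE_DEFER;	/* domain not registered yet */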
/**
* irq_set_default_host() - Set a "default" irq domain
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
+static int __irq_can_set_affinity(struct irq_desc *desc)
+{
+ if (!desc || !irqd_can_balance(&desc->irq_data) ||
+ !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+ return 0;
+ return 1;
+}
+
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
*/
int irq_can_set_affinity(unsigned int irq)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (!desc || !irqd_can_balance(&desc->irq_data) ||
- !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
- return 0;
-
- return 1;
+ return __irq_can_set_affinity(irq_to_desc(irq));
}
/**
/*
* Generic version of the affinity autoselector.
*/
-static int
-setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
struct cpumask *set = irq_default_affinity;
int node = irq_desc_get_node(desc);
/* Excludes PER_CPU and NO_BALANCE interrupts */
- if (!irq_can_set_affinity(irq))
+ if (!__irq_can_set_affinity(desc))
return 0;
/*
return 0;
}
#else
-static inline int
-setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
+/* Wrapper for ALPHA-specific affinity selector magic */
+static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
- return irq_select_affinity(irq);
+ return irq_select_affinity(irq_desc_get_irq(d));
}
#endif
int ret;
raw_spin_lock_irqsave(&desc->lock, flags);
- ret = setup_affinity(irq, desc, mask);
+ ret = setup_affinity(desc, mask);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
#else
static inline int
-setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
return 0;
}
#endif
-void __disable_irq(struct irq_desc *desc, unsigned int irq)
+void __disable_irq(struct irq_desc *desc)
{
if (!desc->depth++)
irq_disable(desc);
if (!desc)
return -EINVAL;
- __disable_irq(desc, irq);
+ __disable_irq(desc);
irq_put_desc_busunlock(desc, flags);
return 0;
}
}
EXPORT_SYMBOL_GPL(disable_hardirq);
-void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc)
{
switch (desc->depth) {
case 0:
err_out:
- WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+ WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
+ irq_desc_get_irq(desc));
break;
case 1: {
if (desc->istate & IRQS_SUSPENDED)
/* Prevent probing on this irq: */
irq_settings_set_noprobe(desc);
irq_enable(desc);
- check_irq_resend(desc, irq);
+ check_irq_resend(desc);
/* fall-through */
}
default:
KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
goto out;
- __enable_irq(desc, irq);
+ __enable_irq(desc);
out:
irq_put_desc_busunlock(desc, flags);
}
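The depth counting above means disable_irq()/enable_irq() pairs nest; only
the transition back to depth 0 unmasks the line. An illustrative sequence:

	disable_irq(irq);	/* depth 0 -> 1, interrupt masked */
	disable_irq(irq);	/* depth 1 -> 2, still masked */
	enable_irq(irq);	/* depth 2 -> 1, still masked */
	enable_irq(irq);	/* depth 1 -> 0, interrupt unmasked */

Calling enable_irq() once more at depth 0 hits the "Unbalanced enable"
warning shown above.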
return canrequest;
}
-int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
- unsigned long flags)
+int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
struct irq_chip *chip = desc->irq_data.chip;
int ret, unmask = 0;
* IRQF_TRIGGER_* but the PIC does not support multiple
* flow-types?
*/
- pr_debug("No set_type function for IRQ %d (%s)\n", irq,
+ pr_debug("No set_type function for IRQ %d (%s)\n",
+ irq_desc_get_irq(desc),
chip ? (chip->name ? : "unknown") : "unknown");
return 0;
}
break;
default:
pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
- flags, irq, chip->irq_set_type);
+ flags, irq_desc_get_irq(desc), chip->irq_set_type);
}
if (unmask)
unmask_irq(desc);
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
- ret = __irq_set_trigger(desc, irq,
- new->flags & IRQF_TRIGGER_MASK);
+ ret = __irq_set_trigger(desc,
+ new->flags & IRQF_TRIGGER_MASK);
if (ret)
goto out_mask;
}
/* Set default affinity mask once everything is setup */
- setup_affinity(irq, desc, mask);
+ setup_affinity(desc, mask);
} else if (new->flags & IRQF_TRIGGER_MASK) {
unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
*/
if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
desc->istate &= ~IRQS_SPURIOUS_DISABLED;
- __enable_irq(desc, irq);
+ __enable_irq(desc);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (type != IRQ_TYPE_NONE) {
int ret;
- ret = __irq_set_trigger(desc, irq, type);
+ ret = __irq_set_trigger(desc, type);
if (ret) {
WARN(1, "failed to set type for IRQ%d\n", irq);
irq_put_desc_busunlock(desc, flags);
return err;
}
+EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
/**
* irq_set_irqchip_state - set the state of a forwarded interrupt.
irq_put_desc_busunlock(desc, flags);
return err;
}
+EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
 /* Temporary solution for building, will be removed later */
#include <linux/pci.h>
+struct msi_desc *alloc_msi_entry(struct device *dev)
+{
+ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->list);
+ desc->dev = dev;
+
+ return desc;
+}
+
+void free_msi_entry(struct msi_desc *entry)
+{
+ kfree(entry);
+}
+
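A minimal sketch of the new helpers as a non-PCI MSI provider might use them;
the descriptor setup between allocation and list insertion is omitted:

	struct msi_desc *desc;

	desc = alloc_msi_entry(dev);
	if (!desc)
		return -ENOMEM;

	/* Fill in the descriptor, then queue it on the device's list */
	list_add_tail(&desc->list, dev_to_msi_list(dev));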
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
*msg = entry->msg;
desc->cond_suspend_depth--;
}
-static bool suspend_device_irq(struct irq_desc *desc, int irq)
+static bool suspend_device_irq(struct irq_desc *desc)
{
if (!desc->action || desc->no_suspend_depth)
return false;
}
desc->istate |= IRQS_SUSPENDED;
- __disable_irq(desc, irq);
+ __disable_irq(desc);
/*
* Hardware which has no wakeup source configuration facility
if (irq_settings_is_nested_thread(desc))
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
- sync = suspend_device_irq(desc, irq);
+ sync = suspend_device_irq(desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (sync)
}
EXPORT_SYMBOL_GPL(suspend_device_irqs);
-static void resume_irq(struct irq_desc *desc, int irq)
+static void resume_irq(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
desc->depth++;
resume:
desc->istate &= ~IRQS_SUSPENDED;
- __enable_irq(desc, irq);
+ __enable_irq(desc);
}
static void resume_irqs(bool want_early)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
- resume_irq(desc, irq);
+ resume_irq(desc);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
*
* Is called with interrupts disabled and desc->lock held.
*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+void check_irq_resend(struct irq_desc *desc)
{
/*
* We do not resend level type interrupts. Level type
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
+ unsigned int irq = irq_desc_get_irq(desc);
+
/*
* If the interrupt is running in the thread
* context of the parent irq we need to be
/*
* Recovery handler for misrouted interrupts.
*/
-static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+static int try_one_irq(struct irq_desc *desc, bool force)
{
irqreturn_t ret = IRQ_NONE;
struct irqaction *action;
if (i == irq) /* Already tried */
continue;
- if (try_one_irq(i, desc, false))
+ if (try_one_irq(desc, false))
ok = 1;
}
out:
continue;
local_irq_disable();
- try_one_irq(i, desc, true);
+ try_one_irq(desc, true);
local_irq_enable();
}
out:
 * (The other 100-of-100,000 interrupts may have come from a correctly
 * functioning device sharing an IRQ with the failing one.)
*/
-static void
-__report_bad_irq(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret)
+static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
+ unsigned int irq = irq_desc_get_irq(desc);
struct irqaction *action;
unsigned long flags;
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
-static void
-report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
static int count = 100;
if (count > 0) {
count--;
- __report_bad_irq(irq, desc, action_ret);
+ __report_bad_irq(desc, action_ret);
}
}
#define SPURIOUS_DEFERRED 0x80000000
-void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret)
+void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
+ unsigned int irq;
+
if (desc->istate & IRQS_POLL_INPROGRESS ||
irq_settings_is_polled(desc))
return;
if (bad_action_ret(action_ret)) {
- report_bad_irq(irq, desc, action_ret);
+ report_bad_irq(desc, action_ret);
return;
}
desc->last_unhandled = jiffies;
}
+ irq = irq_desc_get_irq(desc);
if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
int ok = misrouted_irq(irq);
if (action_ret == IRQ_NONE)
/*
* The interrupt is stuck
*/
- __report_bad_irq(irq, desc, action_ret);
+ __report_bad_irq(desc, action_ret);
/*
* Now kill the IRQ
*/