// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner
 *
 * This file contains the IRQ-resend code
 *
 * If the interrupt is waiting to be processed, we try to re-run it.
 * We can't directly run it from here since the caller might be in an
 * interrupt-protected region. Not all irq controller chips can
 * retrigger interrupts at the hardware level, so in those cases
 * we allow the resending of IRQs via a tasklet.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>

#include "internals.h"

#ifdef CONFIG_HARDIRQS_SW_RESEND

/* hlist_head to handle software resend of interrupts: */
static HLIST_HEAD(irq_resend_list);
static DEFINE_RAW_SPINLOCK(irq_resend_lock);

/*
 * Run software resends of IRQs
 */
static void resend_irqs(struct tasklet_struct *unused)
{
	guard(raw_spinlock_irq)(&irq_resend_lock);
	while (!hlist_empty(&irq_resend_list)) {
		struct irq_desc *desc;

		desc = hlist_entry(irq_resend_list.first, struct irq_desc, resend_node);
		hlist_del_init(&desc->resend_node);

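		/*
		 * Drop the list lock across the handler invocation: the
		 * flow handler takes desc->lock, and irq_sw_resend()
		 * queues entries on irq_resend_list with desc->lock held,
		 * so keeping irq_resend_lock here would invert the lock
		 * order. Interrupts stay disabled for the whole loop.
		 */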
		raw_spin_unlock(&irq_resend_lock);
		desc->handle_irq(desc);
		raw_spin_lock(&irq_resend_lock);
	}
}

/* Tasklet to handle resend: */
static DECLARE_TASKLET(resend_tasklet, resend_irqs);

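/*
 * Queue the interrupt for a software replay from tasklet context instead
 * of invoking it directly in the caller's context. Fails for interrupts
 * which must only be handled from real interrupt context and for nested
 * threaded interrupts without a valid parent interrupt.
 */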
static int irq_sw_resend(struct irq_desc *desc)
{
	/*
	 * Validate whether this interrupt can be safely injected from
	 * non-interrupt context
	 */
	if (irqd_is_handle_enforce_irqctx(&desc->irq_data))
		return -EINVAL;

	/*
	 * If the interrupt is running in the thread context of the parent
	 * irq we need to be careful, because we cannot trigger it
	 * directly.
	 */
	if (irq_settings_is_nested_thread(desc)) {
		/*
		 * If the parent_irq is valid, we retrigger the parent,
		 * otherwise we do nothing.
		 */
		if (!desc->parent_irq)
			return -EINVAL;

		desc = irq_to_desc(desc->parent_irq);
		if (!desc)
			return -EINVAL;
	}

	/* Add to resend_list and activate the softirq: */
	scoped_guard(raw_spinlock, &irq_resend_lock) {
		if (hlist_unhashed(&desc->resend_node))
			hlist_add_head(&desc->resend_node, &irq_resend_list);
	}
	tasklet_schedule(&resend_tasklet);
	return 0;
}

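/*
 * Remove the descriptor from the software resend list, e.g. when the
 * interrupt is shut down, so a queued replay cannot fire afterwards.
 */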
void clear_irq_resend(struct irq_desc *desc)
{
	guard(raw_spinlock)(&irq_resend_lock);
	hlist_del_init(&desc->resend_node);
}

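/* Initialize the resend list node when the descriptor is set up */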
void irq_resend_init(struct irq_desc *desc)
{
	INIT_HLIST_NODE(&desc->resend_node);
}
#else
void clear_irq_resend(struct irq_desc *desc) {}
void irq_resend_init(struct irq_desc *desc) {}

static int irq_sw_resend(struct irq_desc *desc)
{
	return -EINVAL;
}
#endif

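/*
 * Try to retrigger the interrupt in hardware: prefer the chip's
 * irq_retrigger() callback and fall back to walking the irqdomain
 * hierarchy when that is available.
 */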
static int try_retrigger(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_retrigger)
		return desc->irq_data.chip->irq_retrigger(&desc->irq_data);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	return irq_chip_retrigger_hierarchy(&desc->irq_data);
#else
	return 0;
#endif
}

/*
 * IRQ resend
 *
 * Is called with interrupts disabled and desc->lock held.
 *
 * When @inject is true, a replay is attempted even if IRQS_PENDING is not
 * set. This is used by irq_inject_interrupt() below.
 */
int check_irq_resend(struct irq_desc *desc, bool inject)
{
	int err = 0;

	/*
	 * We do not resend level type interrupts. Level type interrupts
	 * are resent by hardware when they are still active. Clear the
	 * pending bit so suspend/resume does not get confused.
	 */
	if (irq_settings_is_level(desc)) {
		desc->istate &= ~IRQS_PENDING;
		return -EINVAL;
	}

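	/*
	 * A previous resend of this interrupt is still in flight.
	 * IRQS_REPLAY is cleared again once the interrupt has actually
	 * been handled.
	 */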
	if (desc->istate & IRQS_REPLAY)
		return -EBUSY;

	if (!(desc->istate & IRQS_PENDING) && !inject)
		return 0;

	desc->istate &= ~IRQS_PENDING;

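	/*
	 * Try a hardware retrigger first and fall back to the software
	 * resend mechanism if the hardware cannot do it.
	 */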
	if (!try_retrigger(desc))
		err = irq_sw_resend(desc);

	/* If the retrigger was successful, mark it with the REPLAY bit */
	if (!err)
		desc->istate |= IRQS_REPLAY;
	return err;
}

#ifdef CONFIG_GENERIC_IRQ_INJECTION
/**
 * irq_inject_interrupt - Inject an interrupt for testing/error injection
 * @irq:	The interrupt number
 *
 * This function must only be used for debug and testing purposes!
 *
 * Especially on x86 this can cause a premature completion of an interrupt
 * affinity change causing the interrupt line to become stale. Very
 * unlikely, but possible.
 *
 * The injection can fail for various reasons:
 * - Interrupt is not activated
 * - Interrupt is NMI type or currently replaying
 * - Interrupt is level type
 * - Interrupt does not support hardware retrigger and software resend is
 *   either not enabled or not possible for the interrupt.
 *
 * Return: 0 on success, a negative error number on failure.
 */
int irq_inject_interrupt(unsigned int irq)
{
	int err = -EINVAL;

	/* Try the state injection hardware interface first */
	if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true))
		return 0;

	/* That failed, try via the resend mechanism */
	scoped_irqdesc_get_and_buslock(irq, 0) {
		struct irq_desc *desc = scoped_irqdesc;

		/*
		 * Only try to inject when the interrupt is:
		 *  - not NMI type
		 *  - activated
		 */
		if (!irq_is_nmi(desc) && irqd_is_activated(&desc->irq_data))
			err = check_irq_resend(desc, true);
	}
	return err;
}
EXPORT_SYMBOL_GPL(irq_inject_interrupt);
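
/*
 * Usage sketch (not part of this file's build): a test or error injection
 * module could replay a line it owns roughly like this, where "test_irq"
 * is a hypothetical, already requested interrupt number:
 *
 *	int ret = irq_inject_interrupt(test_irq);
 *
 *	if (ret)
 *		pr_warn("IRQ %u injection failed: %d\n", test_irq, ret);
 */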
#endif