// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

static void wake_irq_workd(void)
{
        struct task_struct *tsk = __this_cpu_read(irq_workd);

        if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
                wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
        wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
        IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{
        return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
        /*
         * If the work is already pending, no need to raise the IPI.
         * The pairing smp_mb() in irq_work_single() makes sure
         * everything we did before is visible.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;
        return true;
}

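/*
 * Flag lifecycle, for orientation (IRQ_WORK_CLAIMED is defined in
 * <linux/irq_work.h> as IRQ_WORK_PENDING | IRQ_WORK_BUSY): a successful
 * claim sets both bits. irq_work_single() clears PENDING before invoking
 * the callback, so the work can be re-queued from within its own callback,
 * and clears BUSY afterwards unless the work was claimed again meanwhile.
 */
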
void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

static __always_inline void irq_work_raise(struct irq_work *work)
{
        if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
                trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);

        arch_irq_work_raise();
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        struct llist_head *list;
        bool rt_lazy_work = false;
        bool lazy_work = false;
        int work_flags;

        work_flags = atomic_read(&work->node.a_flags);
        if (work_flags & IRQ_WORK_LAZY)
                lazy_work = true;
        else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                 !(work_flags & IRQ_WORK_HARD_IRQ))
                rt_lazy_work = true;

        if (lazy_work || rt_lazy_work)
                list = this_cpu_ptr(&lazy_list);
        else
                list = this_cpu_ptr(&raised_list);

        if (!llist_add(&work->node.llist, list))
                return;

        /* If the work is "lazy", handle it from next tick if any */
        if (!lazy_work || tick_nohz_tick_stopped())
                irq_work_raise(work);
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
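
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * must not sleep or take locks, e.g. an NMI or tracing hook, defers work
 * and lets the callback run later from IRQ-work context. The names
 * report_event() and nmi_hook() are hypothetical.
 */
#if 0   /* example only */
#include <linux/irq_work.h>
#include <linux/printk.h>

static void report_event(struct irq_work *work)
{
        /* Runs later in hard-IRQ context (or in irq_work/%u on PREEMPT_RT). */
        pr_info("deferred event handled\n");
}

static struct irq_work report_work = IRQ_WORK_INIT(report_event);

static void nmi_hook(void)
{
        /* NMI-safe: claiming is one atomic op and the enqueue is an llist_add(). */
        irq_work_queue(&report_work);
}
#endif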

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        kasan_record_aux_stack_noalloc(work);

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backends aren't NMI safe */
                WARN_ON_ONCE(in_nmi());

                /*
                 * On PREEMPT_RT the items which are not marked as
                 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
                 * item is used on the remote CPU to wake the thread.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

                        if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
                                goto out;

                        work = &per_cpu(irq_work_wakeup, cpu);
                        if (!irq_work_claim(work))
                                goto out;
                }

                __smp_call_single_queue(cpu, &work->node.llist);
        } else {
                __irq_work_queue_local(work);
        }
out:
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}

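/*
 * Usage sketch (illustrative only, not part of this file): raising work on a
 * chosen CPU so per-CPU state is processed where it lives, mirroring the
 * per-CPU irq_work_wakeup pattern above. do_cleanup(), cleanup_work and
 * poke_cpu() are hypothetical names.
 */
#if 0   /* example only */
static void do_cleanup(struct irq_work *work)
{
        /* Runs on the target CPU, from its IRQ-work path. */
}

static DEFINE_PER_CPU(struct irq_work, cleanup_work) = IRQ_WORK_INIT(do_cleanup);

static void poke_cpu(int cpu)
{
        /* Returns false if that CPU's item is still pending from an earlier call. */
        irq_work_queue_on(&per_cpu(cleanup_work, cpu), cpu);
}
#endif
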
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work can be re-used.
         * The PENDING bit acts as a lock, and we own it, so we can clear it
         * without atomic ops.
         */
        flags = atomic_read(&work->node.a_flags);
        flags &= ~IRQ_WORK_PENDING;
        atomic_set(&work->node.a_flags, flags);

        /*
         * See irq_work_claim().
         */
        smp_mb();

        lockdep_irq_work_enter(flags);
        work->func(work);
        lockdep_irq_work_exit(flags);

        /*
         * Clear the BUSY bit, if set, and return to the free state if no-one
         * else claimed it meanwhile.
         */
        (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt())
                rcuwait_wake_up(&work->irqwait);
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        /*
         * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
         * in a per-CPU thread in preemptible context. Only the items which are
         * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
         */
        BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, node.llist)
                irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

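/*
 * Orientation note: irq_work_tick() is expected to be driven from the
 * periodic timer tick (update_process_times() in the timer code); on
 * architectures without a dedicated irq_work interrupt this is what
 * eventually runs raised_list.
 */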
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);

        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}

/*
 * Synchronize against the irq_work @work; ensures the work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();
        might_sleep();

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt()) {
                rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
                                   TASK_UNINTERRUPTIBLE);
                return;
        }

        while (irq_work_is_busy(work))
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
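
/*
 * Usage sketch (illustrative only, not part of this file): pairing
 * irq_work_queue() with irq_work_sync() before freeing the object that
 * embeds the irq_work, so the callback can never run on freed memory.
 * struct my_obj and my_obj_teardown() are hypothetical.
 */
#if 0   /* example only */
#include <linux/slab.h>

struct my_obj {
        struct irq_work work;
        /* ... payload ... */
};

static void my_obj_teardown(struct my_obj *obj)
{
        /* May sleep; waits for a possibly in-flight callback to finish. */
        irq_work_sync(&obj->work);
        kfree(obj);
}
#endif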

static void run_irq_workd(unsigned int cpu)
{
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

static void irq_workd_setup(unsigned int cpu)
{
        sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
        .store                  = &irq_workd,
        .setup                  = irq_workd_setup,
        .thread_should_run      = irq_workd_should_run,
        .thread_fn              = run_irq_workd,
        .thread_comm            = "irq_work/%u",
};

static __init int irq_work_init_threads(void)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
        return 0;
}
early_initcall(irq_work_init_threads);