// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
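
/*
 * Illustrative sketch (editor's addition, not part of this file): how the
 * per-CPU model above looks to a softirq user.  A vector is registered
 * once at boot with open_softirq() and marked pending with raise_softirq();
 * the action then runs on the raising CPU.  MY_SOFTIRQ is a hypothetical
 * entry in the fixed softirq enum - new softirq vectors are strongly
 * discouraged in favor of tasklets, timers and workqueues.
 */
static void my_softirq_action(struct softirq_action *h)
{
	/* Runs in softirq context, on the CPU that raised MY_SOFTIRQ. */
}

static void __init my_softirq_example_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);
	raise_softirq(MY_SOFTIRQ);	/* mark pending on this CPU */
}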

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirq_disable_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable() or local_bh_enable().
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
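
/*
 * Illustration (editor's addition, not part of this file): because
 * SOFTIRQ_DISABLE_OFFSET is 2 * SOFTIRQ_OFFSET, a BH disabled section
 * makes softirq_count() non-zero without setting the SOFTIRQ_OFFSET bit
 * that marks actual softirq processing:
 */
static void softirq_count_demo(void)
{
	local_bh_disable();		/* count += SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!in_softirq());		/* bh disabled: softirq_count() != 0 */
	WARN_ON(in_serving_softirq());	/* ...but not processing a softirq */
	local_bh_enable();		/* count -= SOFTIRQ_DISABLE_OFFSET */
}
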
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirq_disable_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirq() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check from the idle task whether BH processing is blocked
 *
 * Returns false if the per CPU softirq_ctrl::cnt is 0 otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() & al work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
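
/*
 * Example (editor's addition, illustrative only): from a hardirq handler
 * interrupts are already disabled and in_interrupt() is true, so the
 * pending bit can be set directly and the softirq runs on irq_exit().
 * This is roughly the raise path NAPI uses for NET_RX; a real driver goes
 * through napi_schedule(), which also queues the device before raising.
 * The function name here is hypothetical.
 */
static irqreturn_t my_nic_interrupt(int irq, void *dev_id)
{
	/* ack the hardware, then defer the heavy lifting to NET_RX */
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	return IRQ_HANDLED;
}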

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	workqueue_softirq_action(false);
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	workqueue_softirq_action(true);
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);
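
/*
 * Example (editor's addition, illustrative only): a driver-style user of
 * tasklet_setup() and the modern callback signature.  struct my_dev and
 * all function names are hypothetical; from_tasklet() is the
 * container_of() helper from <linux/interrupt.h>.
 */
struct my_dev {
	struct tasklet_struct tl;
	void __iomem *regs;
};

static void my_dev_do_bh(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, tl);

	/* bottom half work for dev, runs in softirq context */
}

static void my_dev_bh_init(struct my_dev *dev)
{
	tasklet_setup(&dev->tl, my_dev_do_bh);	/* once, at probe time */
}

static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	tasklet_schedule(&dev->tl);	/* defer work out of hardirq context */
	return IRQ_HANDLED;
}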

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when the current task has
			 * preempted soft interrupt processing or is
			 * preventing ksoftirqd from running. If the tasklet
			 * runs on a different CPU, this has no effect other
			 * than doing the BH disable/enable dance for
			 * nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);
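
/*
 * Example (editor's addition, illustrative only): teardown for the
 * hypothetical struct my_dev sketched above.  tasklet_kill() can sleep,
 * so call it from process context, after the interrupt source has been
 * quiesced and before the memory holding the tasklet is freed.
 */
static void my_dev_bh_teardown(struct my_dev *dev)
{
	tasklet_kill(&dev->tl);
	/* dev->tl is now idle and unscheduled; dev can be freed safely */
}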

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	workqueue_softirq_dead(cpu);

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}