// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
 * - No shared variables, all the data are CPU local.
 * - If a softirq needs serialization, let it serialize itself
 *   by its own spinlocks.
 * - Even if softirq is serialized, only the local cpu is marked for
 *   execution. Hence, we get something sort of weak cpu binding.
 *   Though it is still not clear whether this results in better
 *   locality or not.
 *
 * Examples:
 * - NET RX softirq. It is multithreaded and does not require
 *   any global serialization.
 * - NET TX softirq. It kicks software netdevice queues, hence
 *   it is logically serialized per device, but this serialization
 *   is invisible to common code.
 * - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

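/*
 * Worked example (illustrative, using the offsets defined in
 * <linux/preempt.h>): after local_bh_disable(), softirq_count() is
 * nonzero but the SOFTIRQ_OFFSET bit stays clear, so
 * in_serving_softirq() is false; while __do_softirq() runs, only
 * SOFTIRQ_OFFSET is added, so in_serving_softirq() is true.
 */
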
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
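
/*
 * Usage sketch (illustrative, not part of this file): process context that
 * shares data with a softirq handler brackets the access with the
 * bh-disable pair; the enable path above then runs any softirqs raised in
 * between.
 *
 *	local_bh_disable();
 *	...access state shared with a softirq handler...
 *	local_bh_enable();	// may end up in do_softirq()
 */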

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
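
/*
 * For example (illustrative arithmetic): with HZ=1000 the 2 ms budget is
 * two jiffies, while with HZ=100 msecs_to_jiffies(2) rounds up to a single
 * jiffy; in either case the loop also bails out once MAX_SOFTIRQ_RESTART
 * passes have run or need_resched() becomes true.
 */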

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}
	__irq_enter();
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
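
/*
 * Example (illustrative sketch, not part of this file): a hardirq handler
 * already runs with interrupts disabled, so it can use the _irqoff variant
 * directly; this is effectively what NAPI drivers do via __napi_schedule(),
 * which raises NET_RX_SOFTIRQ. The handler below is hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		...acknowledge the device...
 *		raise_softirq_irqoff(NET_RX_SOFTIRQ);
 *		return IRQ_HANDLED;
 *	}
 */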

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
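
/*
 * Example (illustrative): subsystems register their handler once at boot,
 * the same way softirq_init() below wires up the tasklet vectors; e.g. the
 * timer code does open_softirq(TIMER_SOFTIRQ, run_timer_softirq) and the
 * block layer does open_softirq(BLOCK_SOFTIRQ, blk_done_softirq).
 */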

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				if (t->use_callback)
					t->callback(t);
				else
					t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
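
/*
 * Usage sketch (illustrative, not part of this file): new code uses the
 * callback-style API above; "struct my_dev" and its fields are
 * hypothetical.
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		...deferred bottom-half work...
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_tasklet_fn);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// typically from the hardirq handler
 */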

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
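
/*
 * Teardown sketch (illustrative): a driver normally quiesces the interrupt
 * source first, so the tasklet cannot be re-scheduled, and only then waits
 * it out; "dev" is hypothetical.
 *
 *	free_irq(dev->irq, dev);
 *	tasklet_kill(&dev->tasklet);
 */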

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}