1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/softirq.c
5 * Copyright (C) 1992 Linus Torvalds
7 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/export.h>
13 #include <linux/kernel_stat.h>
14 #include <linux/interrupt.h>
15 #include <linux/init.h>
16 #include <linux/local_lock.h>
18 #include <linux/notifier.h>
19 #include <linux/percpu.h>
20 #include <linux/cpu.h>
21 #include <linux/freezer.h>
22 #include <linux/kthread.h>
23 #include <linux/rcupdate.h>
24 #include <linux/ftrace.h>
25 #include <linux/smp.h>
26 #include <linux/smpboot.h>
27 #include <linux/tick.h>
28 #include <linux/irq.h>
29 #include <linux/wait_bit.h>
30 #include <linux/workqueue.h>
32 #include <asm/softirq_stack.h>
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/irq.h>
38 - No shared variables, all the data are CPU local.
39 - If a softirq needs serialization, let it serialize itself
41   - Even if softirq is serialized, only the local CPU is marked for
42     execution. Hence, we get a sort of weak CPU binding.
43     Though it is still not clear whether it will result in better locality
47 - NET RX softirq. It is multithreaded and does not require
48 any global serialization.
49 - NET TX softirq. It kicks software netdevice queues, hence
50 it is logically serialized per device, but this serialization
51 is invisible to common code.
52   - Tasklets: each is serialized with respect to itself.
55 #ifndef __ARCH_IRQ_STAT
56 DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
57 EXPORT_PER_CPU_SYMBOL(irq_stat);
60 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
62 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
64 const char * const softirq_to_name[NR_SOFTIRQS] = {
65 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
66 "TASKLET", "SCHED", "HRTIMER", "RCU"
70 * we cannot loop indefinitely here to avoid userspace starvation,
71 * but we also don't want to introduce a worst case 1/HZ latency
72  * to the pending events, so let the scheduler balance
73 * the softirq load for us.
75 static void wakeup_softirqd(void)
77 /* Interrupts are disabled: no need to stop preemption */
78 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
84 #ifdef CONFIG_TRACE_IRQFLAGS
85 DEFINE_PER_CPU(int, hardirqs_enabled);
86 DEFINE_PER_CPU(int, hardirq_context);
87 EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
88 EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
92 * SOFTIRQ_OFFSET usage:
94 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
95  * to a per CPU counter and to task::softirq_disable_cnt.
97 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
100 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
101 * on local_bh_disable or local_bh_enable.
103 * This lets us distinguish between whether we are currently processing
104 * softirq and whether we just have bh disabled.
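/*
 * Illustrative sketch (not part of the original file): how the two offsets
 * are told apart by callers. SOFTIRQ_OFFSET is added only while a softirq
 * handler runs, SOFTIRQ_DISABLE_OFFSET while BH is merely disabled, so
 * in_serving_softirq() and in_softirq() answer differently in the two
 * situations. The helper name below is hypothetical.
 */
static inline void example_softirq_count_usage(void)
{
	if (in_serving_softirq()) {
		/* a softirq handler is running on this CPU right now */
	} else if (in_softirq()) {
		/* no handler running, but local_bh_disable() is in effect */
	}
}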
106 #ifdef CONFIG_PREEMPT_RT
109  * RT accounts for BH disabled sections in task::softirq_disable_cnt and
110 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
111 * softirq disabled section to be preempted.
113 * The per task counter is used for softirq_count(), in_softirq() and
114  * in_serving_softirq() because these counts are only valid when the task
115 * holding softirq_ctrl::lock is running.
117  * The per CPU counter prevents pointless wakeups of ksoftirqd in case
118 * the task which is in a softirq disabled section is preempted or blocks.
120 struct softirq_ctrl {
125 static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
126 .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
130  * local_bh_blocked() - Check from the idle task whether BH processing is blocked
132  * Returns false if the per CPU softirq_ctrl::cnt is 0, otherwise true.
134 * This is invoked from the idle task to guard against false positive
135 * softirq pending warnings, which would happen when the task which holds
136 * softirq_ctrl::lock was the only running task on the CPU and blocks on
139 bool local_bh_blocked(void)
141 return __this_cpu_read(softirq_ctrl.cnt) != 0;
144 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
149 WARN_ON_ONCE(in_hardirq());
151 /* First entry of a task into a BH disabled section? */
152 if (!current->softirq_disable_cnt) {
154 local_lock(&softirq_ctrl.lock);
155 /* Required to meet the RCU bottomhalf requirements. */
158 DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
163 * Track the per CPU softirq disabled state. On RT this is per CPU
164 * state to allow preemption of bottom half disabled sections.
166 newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
168 * Reflect the result in the task state to prevent recursion on the
169  * local lock and to make softirq_count() et al. work.
171 current->softirq_disable_cnt = newcnt;
173 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
174 raw_local_irq_save(flags);
175 lockdep_softirqs_off(ip);
176 raw_local_irq_restore(flags);
179 EXPORT_SYMBOL(__local_bh_disable_ip);
181 static void __local_bh_enable(unsigned int cnt, bool unlock)
186 DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
187 this_cpu_read(softirq_ctrl.cnt));
189 if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
190 raw_local_irq_save(flags);
191 lockdep_softirqs_on(_RET_IP_);
192 raw_local_irq_restore(flags);
195 newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
196 current->softirq_disable_cnt = newcnt;
198 if (!newcnt && unlock) {
200 local_unlock(&softirq_ctrl.lock);
204 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
206 bool preempt_on = preemptible();
211 WARN_ON_ONCE(in_hardirq());
212 lockdep_assert_irqs_enabled();
214 local_irq_save(flags);
215 curcnt = __this_cpu_read(softirq_ctrl.cnt);
218 * If this is not reenabling soft interrupts, no point in trying to
224 pending = local_softirq_pending();
229  * If this was called from non-preemptible context, wake up the
238 * Adjust softirq count to SOFTIRQ_OFFSET which makes
239 * in_serving_softirq() become true.
241 cnt = SOFTIRQ_OFFSET;
242 __local_bh_enable(cnt, false);
246 __local_bh_enable(cnt, preempt_on);
247 local_irq_restore(flags);
249 EXPORT_SYMBOL(__local_bh_enable_ip);
252  * Invoked from run_ksoftirqd() outside of the interrupt disabled section
253 * to acquire the per CPU local lock for reentrancy protection.
255 static inline void ksoftirqd_run_begin(void)
257 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
261 /* Counterpart to ksoftirqd_run_begin() */
262 static inline void ksoftirqd_run_end(void)
264 __local_bh_enable(SOFTIRQ_OFFSET, true);
265 WARN_ON_ONCE(in_interrupt());
269 static inline void softirq_handle_begin(void) { }
270 static inline void softirq_handle_end(void) { }
272 static inline bool should_wake_ksoftirqd(void)
274 return !this_cpu_read(softirq_ctrl.cnt);
277 static inline void invoke_softirq(void)
279 if (should_wake_ksoftirqd())
284 * flush_smp_call_function_queue() can raise a soft interrupt in a function
285 * call. On RT kernels this is undesired and the only known functionality
286 * in the block layer which does this is disabled on RT. If soft interrupts
287 * get raised which haven't been raised before the flush, warn so it can be
290 void do_softirq_post_smp_call_flush(unsigned int was_pending)
292 if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
296 #else /* CONFIG_PREEMPT_RT */
299 * This one is for softirq.c-internal use, where hardirqs are disabled
302 #ifdef CONFIG_TRACE_IRQFLAGS
303 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
307 WARN_ON_ONCE(in_hardirq());
309 raw_local_irq_save(flags);
311 * The preempt tracer hooks into preempt_count_add and will break
312 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
313 * is set and before current->softirq_enabled is cleared.
314 * We must manually increment preempt_count here and manually
315 * call the trace_preempt_off later.
317 __preempt_count_add(cnt);
319 * Were softirqs turned off above:
321 if (softirq_count() == (cnt & SOFTIRQ_MASK))
322 lockdep_softirqs_off(ip);
323 raw_local_irq_restore(flags);
325 if (preempt_count() == cnt) {
326 #ifdef CONFIG_DEBUG_PREEMPT
327 current->preempt_disable_ip = get_lock_parent_ip();
329 trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
332 EXPORT_SYMBOL(__local_bh_disable_ip);
333 #endif /* CONFIG_TRACE_IRQFLAGS */
335 static void __local_bh_enable(unsigned int cnt)
337 lockdep_assert_irqs_disabled();
339 if (preempt_count() == cnt)
340 trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
342 if (softirq_count() == (cnt & SOFTIRQ_MASK))
343 lockdep_softirqs_on(_RET_IP_);
345 __preempt_count_sub(cnt);
349 * Special-case - softirqs can safely be enabled by __do_softirq(),
350 * without processing still-pending softirqs:
352 void _local_bh_enable(void)
354 WARN_ON_ONCE(in_hardirq());
355 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
357 EXPORT_SYMBOL(_local_bh_enable);
359 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
361 WARN_ON_ONCE(in_hardirq());
362 lockdep_assert_irqs_enabled();
363 #ifdef CONFIG_TRACE_IRQFLAGS
367 * Are softirqs going to be turned on now:
369 if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
370 lockdep_softirqs_on(ip);
372 * Keep preemption disabled until we are done with
373 * softirq processing:
375 __preempt_count_sub(cnt - 1);
377 if (unlikely(!in_interrupt() && local_softirq_pending())) {
379  * Run softirqs if any are pending, and do it on their own stack,
380  * as we may be calling this deep in a task call stack already.
386 #ifdef CONFIG_TRACE_IRQFLAGS
389 preempt_check_resched();
391 EXPORT_SYMBOL(__local_bh_enable_ip);
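/*
 * Usage sketch (illustrative only): the caller-facing wrappers built on the
 * helpers above are local_bh_disable()/local_bh_enable(). A hypothetical
 * user protecting data that is also touched from softirq context on this
 * CPU would bracket the access like this; all names are made up.
 */
static inline void example_bh_protected_update(int *counter)
{
	local_bh_disable();	/* softirqs cannot run on this CPU in between */
	(*counter)++;		/* safe against the local softirq handler */
	local_bh_enable();	/* may process softirqs that became pending */
}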
393 static inline void softirq_handle_begin(void)
395 __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
398 static inline void softirq_handle_end(void)
400 __local_bh_enable(SOFTIRQ_OFFSET);
401 WARN_ON_ONCE(in_interrupt());
404 static inline void ksoftirqd_run_begin(void)
409 static inline void ksoftirqd_run_end(void)
414 static inline bool should_wake_ksoftirqd(void)
419 static inline void invoke_softirq(void)
421 if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
422 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
424 * We can safely execute softirq on the current stack if
425 * it is the irq stack, because it should be near empty
431  * Otherwise, irq_exit() is called on the task stack, which can
432  * already be quite deep. So run softirqs on their own stack
433  * to prevent any overrun.
435 do_softirq_own_stack();
442 asmlinkage __visible void do_softirq(void)
450 local_irq_save(flags);
452 pending = local_softirq_pending();
455 do_softirq_own_stack();
457 local_irq_restore(flags);
460 #endif /* !CONFIG_PREEMPT_RT */
463 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
464 * but break the loop if need_resched() is set or after 2 ms.
465 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
466 * certain cases, such as stop_machine(), jiffies may cease to
467 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
468 * well to make sure we eventually return from this method.
470 * These limits have been established via experimentation.
471  * The two things to balance are latency and fairness -
472 * we want to handle softirqs as soon as possible, but they
473 * should not be able to lock up the box.
475 #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
476 #define MAX_SOFTIRQ_RESTART 10
478 #ifdef CONFIG_TRACE_IRQFLAGS
480 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
481 * to keep the lockdep irq context tracking as tight as possible in order to
482  * not mis-qualify lock contexts and miss possible deadlocks.
485 static inline bool lockdep_softirq_start(void)
487 bool in_hardirq = false;
489 if (lockdep_hardirq_context()) {
491 lockdep_hardirq_exit();
494 lockdep_softirq_enter();
499 static inline void lockdep_softirq_end(bool in_hardirq)
501 lockdep_softirq_exit();
504 lockdep_hardirq_enter();
507 static inline bool lockdep_softirq_start(void) { return false; }
508 static inline void lockdep_softirq_end(bool in_hardirq) { }
511 asmlinkage __visible void __softirq_entry __do_softirq(void)
513 unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
514 unsigned long old_flags = current->flags;
515 int max_restart = MAX_SOFTIRQ_RESTART;
516 struct softirq_action *h;
522 * Mask out PF_MEMALLOC as the current task context is borrowed for the
523  * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
524 * again if the socket is related to swapping.
526 current->flags &= ~PF_MEMALLOC;
528 pending = local_softirq_pending();
530 softirq_handle_begin();
531 in_hardirq = lockdep_softirq_start();
532 account_softirq_enter(current);
535 /* Reset the pending bitmask before enabling irqs */
536 set_softirq_pending(0);
542 while ((softirq_bit = ffs(pending))) {
546 h += softirq_bit - 1;
548 vec_nr = h - softirq_vec;
549 prev_count = preempt_count();
551 kstat_incr_softirqs_this_cpu(vec_nr);
553 trace_softirq_entry(vec_nr);
555 trace_softirq_exit(vec_nr);
556 if (unlikely(prev_count != preempt_count())) {
557 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
558 vec_nr, softirq_to_name[vec_nr], h->action,
559 prev_count, preempt_count());
560 preempt_count_set(prev_count);
563 pending >>= softirq_bit;
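	/*
	 * Worked example (illustrative): with pending == 0x0a, i.e. TIMER
	 * (bit 1) and NET_RX (bit 3) set, the first ffs() returns 2, so the
	 * TIMER vector is handled; shifting the scanned bits out then lets
	 * the next pass find NET_RX, so handlers run in ascending vector
	 * order within one scan of the bitmask.
	 */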
566 if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
567 __this_cpu_read(ksoftirqd) == current)
572 pending = local_softirq_pending();
574 if (time_before(jiffies, end) && !need_resched() &&
581 account_softirq_exit(current);
582 lockdep_softirq_end(in_hardirq);
583 softirq_handle_end();
584 current_restore_flags(old_flags, PF_MEMALLOC);
588 * irq_enter_rcu - Enter an interrupt context with RCU watching
590 void irq_enter_rcu(void)
594 if (tick_nohz_full_cpu(smp_processor_id()) ||
595 (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
598 account_hardirq_enter(current);
602 * irq_enter - Enter an interrupt context including RCU update
610 static inline void tick_irq_exit(void)
612 #ifdef CONFIG_NO_HZ_COMMON
613 int cpu = smp_processor_id();
615 /* Make sure that timer wheel updates are propagated */
616 if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
618 tick_nohz_irq_exit();
623 static inline void __irq_exit_rcu(void)
625 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
628 lockdep_assert_irqs_disabled();
630 account_hardirq_exit(current);
631 preempt_count_sub(HARDIRQ_OFFSET);
632 if (!in_interrupt() && local_softirq_pending())
639 * irq_exit_rcu() - Exit an interrupt context without updating RCU
641 * Also processes softirqs if needed and possible.
643 void irq_exit_rcu(void)
647 lockdep_hardirq_exit();
651 * irq_exit - Exit an interrupt context, update RCU and lockdep
653 * Also processes softirqs if needed and possible.
660 lockdep_hardirq_exit();
664 * This function must run with irqs disabled!
666 inline void raise_softirq_irqoff(unsigned int nr)
668 __raise_softirq_irqoff(nr);
671 * If we're in an interrupt or softirq, we're done
672 * (this also catches softirq-disabled code). We will
673 * actually run the softirq once we return from
674 * the irq or softirq.
676 * Otherwise we wake up ksoftirqd to make sure we
677 * schedule the softirq soon.
679 if (!in_interrupt() && should_wake_ksoftirqd())
683 void raise_softirq(unsigned int nr)
687 local_irq_save(flags);
688 raise_softirq_irqoff(nr);
689 local_irq_restore(flags);
692 void __raise_softirq_irqoff(unsigned int nr)
694 lockdep_assert_irqs_disabled();
695 trace_softirq_raise(nr);
696 or_softirq_pending(1UL << nr);
699 void open_softirq(int nr, void (*action)(struct softirq_action *))
701 softirq_vec[nr].action = action;
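/*
 * Illustrative sketch (not part of the original file): a softirq vector is
 * wired up once with open_softirq() and marked pending with raise_softirq().
 * The handler and the use of HI_SOFTIRQ below are purely for illustration;
 * real users own one of the fixed NR_SOFTIRQS entries, as softirq_init()
 * does further down for TASKLET_SOFTIRQ and HI_SOFTIRQ.
 */
static void example_action(struct softirq_action *h)
{
	/* runs in BH context on the CPU where the softirq was raised */
}

static __maybe_unused void example_open_and_raise(void)
{
	open_softirq(HI_SOFTIRQ, example_action);	/* typically done at boot */
	raise_softirq(HI_SOFTIRQ);			/* request execution on this CPU */
}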
707 struct tasklet_head {
708 struct tasklet_struct *head;
709 struct tasklet_struct **tail;
712 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
713 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
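/*
 * Each list is singly linked through tasklet_struct::next; ::tail always
 * points at the location holding the last ::next pointer (or at ::head when
 * the list is empty), so __tasklet_schedule_common() below can append with
 * two stores and no traversal.
 */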
715 static void __tasklet_schedule_common(struct tasklet_struct *t,
716 struct tasklet_head __percpu *headp,
717 unsigned int softirq_nr)
719 struct tasklet_head *head;
722 local_irq_save(flags);
723 head = this_cpu_ptr(headp);
726 head->tail = &(t->next);
727 raise_softirq_irqoff(softirq_nr);
728 local_irq_restore(flags);
731 void __tasklet_schedule(struct tasklet_struct *t)
733 __tasklet_schedule_common(t, &tasklet_vec,
736 EXPORT_SYMBOL(__tasklet_schedule);
738 void __tasklet_hi_schedule(struct tasklet_struct *t)
740 __tasklet_schedule_common(t, &tasklet_hi_vec,
743 EXPORT_SYMBOL(__tasklet_hi_schedule);
745 static bool tasklet_clear_sched(struct tasklet_struct *t)
747 if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
748 wake_up_var(&t->state);
752 WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
753 t->use_callback ? "callback" : "func",
754 t->use_callback ? (void *)t->callback : (void *)t->func);
759 static void tasklet_action_common(struct softirq_action *a,
760 struct tasklet_head *tl_head,
761 unsigned int softirq_nr)
763 struct tasklet_struct *list;
766 list = tl_head->head;
767 tl_head->head = NULL;
768 tl_head->tail = &tl_head->head;
772 struct tasklet_struct *t = list;
776 if (tasklet_trylock(t)) {
777 if (!atomic_read(&t->count)) {
778 if (tasklet_clear_sched(t)) {
779 if (t->use_callback) {
780 trace_tasklet_entry(t, t->callback);
782 trace_tasklet_exit(t, t->callback);
784 trace_tasklet_entry(t, t->func);
786 trace_tasklet_exit(t, t->func);
798 tl_head->tail = &t->next;
799 __raise_softirq_irqoff(softirq_nr);
804 static __latent_entropy void tasklet_action(struct softirq_action *a)
806 workqueue_softirq_action(false);
807 tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
810 static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
812 workqueue_softirq_action(true);
813 tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
816 void tasklet_setup(struct tasklet_struct *t,
817 void (*callback)(struct tasklet_struct *))
821 atomic_set(&t->count, 0);
822 t->callback = callback;
823 t->use_callback = true;
826 EXPORT_SYMBOL(tasklet_setup);
828 void tasklet_init(struct tasklet_struct *t,
829 void (*func)(unsigned long), unsigned long data)
833 atomic_set(&t->count, 0);
835 t->use_callback = false;
838 EXPORT_SYMBOL(tasklet_init);
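/*
 * Usage sketch (illustrative, not part of the original file): the preferred
 * interface is tasklet_setup() with a callback that recovers its container
 * via from_tasklet(). All names below are hypothetical.
 */
struct example_device {
	struct tasklet_struct tlet;
	int pending_events;
};

static void example_tasklet_fn(struct tasklet_struct *t)
{
	struct example_device *dev = from_tasklet(dev, t, tlet);

	dev->pending_events = 0;	/* BH context, serialized against itself */
}

static __maybe_unused void example_device_start(struct example_device *dev)
{
	tasklet_setup(&dev->tlet, example_tasklet_fn);
	tasklet_schedule(&dev->tlet);	/* queue on the local CPU's tasklet_vec */
}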
840 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
842 * Do not use in new code. Waiting for tasklets from atomic contexts is
843 * error prone and should be avoided.
845 void tasklet_unlock_spin_wait(struct tasklet_struct *t)
847 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
848 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
850  * Prevent a live lock if the current task has preempted soft
851  * interrupt processing or is preventing ksoftirqd from
852 * running. If the tasklet runs on a different CPU
853 * then this has no effect other than doing the BH
854 * disable/enable dance for nothing.
863 EXPORT_SYMBOL(tasklet_unlock_spin_wait);
866 void tasklet_kill(struct tasklet_struct *t)
869 pr_notice("Attempt to kill tasklet from interrupt\n");
871 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
872 wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
874 tasklet_unlock_wait(t);
875 tasklet_clear_sched(t);
877 EXPORT_SYMBOL(tasklet_kill);
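/*
 * Teardown sketch continuing the hypothetical example_device above
 * (illustrative only): once nothing can schedule the tasklet anymore, e.g.
 * after the interrupt source has been shut down, tasklet_kill() waits until
 * any queued or running instance has finished.
 */
static __maybe_unused void example_device_stop(struct example_device *dev)
{
	/* the caller must have stopped whatever schedules dev->tlet */
	tasklet_kill(&dev->tlet);	/* wait for SCHED to clear and RUN to drop */
	/* now it is safe to free or reuse dev */
}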
879 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
880 void tasklet_unlock(struct tasklet_struct *t)
882 smp_mb__before_atomic();
883 clear_bit(TASKLET_STATE_RUN, &t->state);
884 smp_mb__after_atomic();
885 wake_up_var(&t->state);
887 EXPORT_SYMBOL_GPL(tasklet_unlock);
889 void tasklet_unlock_wait(struct tasklet_struct *t)
891 wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
893 EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
896 void __init softirq_init(void)
900 for_each_possible_cpu(cpu) {
901 per_cpu(tasklet_vec, cpu).tail =
902 &per_cpu(tasklet_vec, cpu).head;
903 per_cpu(tasklet_hi_vec, cpu).tail =
904 &per_cpu(tasklet_hi_vec, cpu).head;
907 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
908 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
911 static int ksoftirqd_should_run(unsigned int cpu)
913 return local_softirq_pending();
916 static void run_ksoftirqd(unsigned int cpu)
918 ksoftirqd_run_begin();
919 if (local_softirq_pending()) {
921  * We can safely run softirqs inline on this stack, as we are not deep
922  * in the task stack here.
932 #ifdef CONFIG_HOTPLUG_CPU
933 static int takeover_tasklets(unsigned int cpu)
935 workqueue_softirq_dead(cpu);
937 /* CPU is dead, so no lock needed. */
940 /* Find end, append list for that CPU. */
941 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
942 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
943 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
944 per_cpu(tasklet_vec, cpu).head = NULL;
945 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
947 raise_softirq_irqoff(TASKLET_SOFTIRQ);
949 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
950 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
951 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
952 per_cpu(tasklet_hi_vec, cpu).head = NULL;
953 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
955 raise_softirq_irqoff(HI_SOFTIRQ);
961 #define takeover_tasklets NULL
962 #endif /* CONFIG_HOTPLUG_CPU */
964 static struct smp_hotplug_thread softirq_threads = {
966 .thread_should_run = ksoftirqd_should_run,
967 .thread_fn = run_ksoftirqd,
968 .thread_comm = "ksoftirqd/%u",
971 static __init int spawn_ksoftirqd(void)
973 cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
975 BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
979 early_initcall(spawn_ksoftirqd);
982 * [ These __weak aliases are kept in a separate compilation unit, so that
983 * GCC does not inline them incorrectly. ]
986 int __init __weak early_irq_init(void)
991 int __init __weak arch_probe_nr_irqs(void)
993 return NR_IRQS_LEGACY;
996 int __init __weak arch_early_irq_init(void)
1001 unsigned int __weak arch_dynirq_lower_bound(unsigned int from)