/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
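
/*
 * Illustrative sketch (not part of the original file): how the two offsets
 * above are visible through the in_softirq()/in_serving_softirq() helpers.
 * The function name is hypothetical; assumes process context with
 * interrupts enabled.
 */
#if 0
static void example_bh_nesting(void)
{
        local_bh_disable();             /* adds SOFTIRQ_DISABLE_OFFSET */
        WARN_ON(!in_softirq());         /* softirq_count() is non-zero */
        WARN_ON(in_serving_softirq());  /* but no softirq is actually running */
        local_bh_enable();              /* drops SOFTIRQ_DISABLE_OFFSET and may
                                         * run pending softirqs */
}
#endif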

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
#endif
                trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
        }
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        lockdep_assert_irqs_disabled();

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirqs if any are pending, and do it on the softirq's
                 * own stack, as we may already be deep in the task call stack.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (trace_hardirq_context(current)) {
                in_hardirq = true;
                trace_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC, as the current task context is borrowed for
         * the softirq. A softirq handler such as network RX might set
         * PF_MEMALLOC again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        rcu_bh_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending && !ksoftirqd_running())
                do_softirq_own_stack();

        local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }

        __irq_enter();
}
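
/*
 * Illustrative sketch (not part of the original file): how architecture
 * interrupt entry code typically brackets handler dispatch with
 * irq_enter()/irq_exit(). The function name and the hwirq-to-Linux-irq
 * mapping helper are hypothetical.
 */
#if 0
static void example_arch_do_IRQ(unsigned int hwirq)
{
        irq_enter();
        generic_handle_irq(example_hwirq_to_irq(hwirq)); /* hypothetical mapping */
        irq_exit();     /* may run pending softirqs via invoke_softirq() */
}
#endif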

static inline void invoke_softirq(void)
{
        if (ksoftirqd_running())
                return;

        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack, which
                 * can potentially be deep already. So call softirq on its
                 * own stack to prevent any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        lockdep_assert_irqs_disabled();
#endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
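
/*
 * Illustrative sketch (not part of the original file): how a softirq vector
 * is registered and raised. EXAMPLE_SOFTIRQ and the example_* functions are
 * hypothetical; softirq numbers come from the fixed enum in
 * <linux/interrupt.h>, and new vectors are rarely added - most code should
 * use tasklets instead.
 */
#if 0
static void example_softirq_action(struct softirq_action *h)
{
        /* runs in bh context, interrupts enabled, on the CPU that raised it */
}

static int __init example_softirq_setup(void)
{
        open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
        return 0;
}

/* later, typically from hardirq context: */
static void example_raise(void)
{
        raise_softirq(EXAMPLE_SOFTIRQ);
}
#endif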

/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
                                      struct tasklet_head __percpu *headp,
                                      unsigned int softirq_nr)
{
        struct tasklet_head *head;
        unsigned long flags;

        local_irq_save(flags);
        head = this_cpu_ptr(headp);
        t->next = NULL;
        *head->tail = t;
        head->tail = &(t->next);
        raise_softirq_irqoff(softirq_nr);
        local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_vec,
                                  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        __tasklet_schedule_common(t, &tasklet_hi_vec,
                                  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
                                  struct tasklet_head *tl_head,
                                  unsigned int softirq_nr)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = tl_head->head;
        tl_head->head = NULL;
        tl_head->tail = &tl_head->head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *tl_head->tail = t;
                tl_head->tail = &t->next;
                __raise_softirq_irqoff(softirq_nr);
                local_irq_enable();
        }
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
        tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
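
/*
 * Illustrative sketch (not part of the original file): the usual tasklet
 * life cycle in a driver. struct example_dev, example_isr() and the field
 * names are hypothetical.
 */
#if 0
struct example_dev {
        struct tasklet_struct bh;
        /* ... device state ... */
};

static void example_dev_bh(unsigned long data)
{
        struct example_dev *dev = (struct example_dev *)data;

        /* deferred work runs here in softirq (tasklet) context */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_dev *dev = dev_id;

        tasklet_schedule(&dev->bh);     /* defer the heavy lifting */
        return IRQ_HANDLED;
}

static void example_dev_setup(struct example_dev *dev)
{
        tasklet_init(&dev->bh, example_dev_bh, (unsigned long)dev);
}

static void example_dev_teardown(struct example_dev *dev)
{
        tasklet_kill(&dev->bh);         /* wait for any scheduled run */
}
#endif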

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
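
/*
 * Illustrative sketch (not part of the original file): arming a
 * tasklet_hrtimer so the callback runs in softirq context. The example_*
 * names are hypothetical; tasklet_hrtimer_start() and
 * tasklet_hrtimer_cancel() are the inline helpers from <linux/interrupt.h>.
 */
#if 0
static struct tasklet_hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
        /* runs from HI_SOFTIRQ (tasklet_hi) context */
        return HRTIMER_NORESTART;
}

static void example_timer_setup(void)
{
        tasklet_hrtimer_init(&example_timer, example_timer_fn,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&example_timer, ms_to_ktime(10),
                              HRTIMER_MODE_REL);
}

static void example_timer_teardown(void)
{
        tasklet_hrtimer_cancel(&example_timer);
}
#endif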

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirqs on the current stack, as we are
                 * not deep in the task stack here.
                 */
                __do_softirq();
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
        return 0;
}
#else
#define takeover_tasklets       NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
                                  takeover_tasklets);
        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}