/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in
     better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

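/*
 * Illustrative sketch: the common pattern for protecting data shared between
 * process context and softirq/tasklet context is to bracket the process
 * context access with local_bh_disable()/local_bh_enable(). The names
 * example_lock, example_list and example_add_entry below are hypothetical.
 */
#if 0	/* example only, not compiled */
static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void example_add_entry(struct list_head *entry)
{
	/* Softirqs cannot run on this CPU while bottom halves are disabled. */
	local_bh_disable();
	spin_lock(&example_lock);
	list_add_tail(entry, &example_list);
	spin_unlock(&example_lock);
	/* Re-enables bottom halves and runs any softirq that became pending. */
	local_bh_enable();
}
#endif
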
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();
			kstat_incr_softirqs_this_cpu(h - softirq_vec);

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	rcu_irq_exit();
#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

PZ
348/*
349 * Tasklets
350 */
1da177e4
LT
351struct tasklet_head
352{
48f20a9a
OJ
353 struct tasklet_struct *head;
354 struct tasklet_struct **tail;
1da177e4
LT
355};
356
4620b49f
VN
357static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
358static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
1da177e4 359
7ad5b3a5 360void __tasklet_schedule(struct tasklet_struct *t)
1da177e4
LT
361{
362 unsigned long flags;
363
364 local_irq_save(flags);
48f20a9a
OJ
365 t->next = NULL;
366 *__get_cpu_var(tasklet_vec).tail = t;
367 __get_cpu_var(tasklet_vec).tail = &(t->next);
1da177e4
LT
368 raise_softirq_irqoff(TASKLET_SOFTIRQ);
369 local_irq_restore(flags);
370}
371
372EXPORT_SYMBOL(__tasklet_schedule);
373
7ad5b3a5 374void __tasklet_hi_schedule(struct tasklet_struct *t)
1da177e4
LT
375{
376 unsigned long flags;
377
378 local_irq_save(flags);
48f20a9a
OJ
379 t->next = NULL;
380 *__get_cpu_var(tasklet_hi_vec).tail = t;
381 __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
1da177e4
LT
382 raise_softirq_irqoff(HI_SOFTIRQ);
383 local_irq_restore(flags);
384}
385
386EXPORT_SYMBOL(__tasklet_hi_schedule);
387
7c692cba
VN
388void __tasklet_hi_schedule_first(struct tasklet_struct *t)
389{
390 BUG_ON(!irqs_disabled());
391
392 t->next = __get_cpu_var(tasklet_hi_vec).head;
393 __get_cpu_var(tasklet_hi_vec).head = t;
394 __raise_softirq_irqoff(HI_SOFTIRQ);
395}
396
397EXPORT_SYMBOL(__tasklet_hi_schedule_first);
398
1da177e4
LT
399static void tasklet_action(struct softirq_action *a)
400{
401 struct tasklet_struct *list;
402
403 local_irq_disable();
48f20a9a
OJ
404 list = __get_cpu_var(tasklet_vec).head;
405 __get_cpu_var(tasklet_vec).head = NULL;
406 __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
1da177e4
LT
407 local_irq_enable();
408
409 while (list) {
410 struct tasklet_struct *t = list;
411
412 list = list->next;
413
414 if (tasklet_trylock(t)) {
415 if (!atomic_read(&t->count)) {
416 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
417 BUG();
418 t->func(t->data);
419 tasklet_unlock(t);
420 continue;
421 }
422 tasklet_unlock(t);
423 }
424
425 local_irq_disable();
48f20a9a
OJ
426 t->next = NULL;
427 *__get_cpu_var(tasklet_vec).tail = t;
428 __get_cpu_var(tasklet_vec).tail = &(t->next);
1da177e4
LT
429 __raise_softirq_irqoff(TASKLET_SOFTIRQ);
430 local_irq_enable();
431 }
432}
433
434static void tasklet_hi_action(struct softirq_action *a)
435{
436 struct tasklet_struct *list;
437
438 local_irq_disable();
48f20a9a
OJ
439 list = __get_cpu_var(tasklet_hi_vec).head;
440 __get_cpu_var(tasklet_hi_vec).head = NULL;
441 __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
1da177e4
LT
442 local_irq_enable();
443
444 while (list) {
445 struct tasklet_struct *t = list;
446
447 list = list->next;
448
449 if (tasklet_trylock(t)) {
450 if (!atomic_read(&t->count)) {
451 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
452 BUG();
453 t->func(t->data);
454 tasklet_unlock(t);
455 continue;
456 }
457 tasklet_unlock(t);
458 }
459
460 local_irq_disable();
48f20a9a
OJ
461 t->next = NULL;
462 *__get_cpu_var(tasklet_hi_vec).tail = t;
463 __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
1da177e4
LT
464 __raise_softirq_irqoff(HI_SOFTIRQ);
465 local_irq_enable();
466 }
467}
468
469
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

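/*
 * Illustrative sketch: a typical tasklet life cycle as seen from a driver.
 * example_tasklet, example_isr and example_remove are hypothetical names.
 */
#if 0	/* example only, not compiled */
static void example_tasklet_fn(unsigned long data)
{
	/* Deferred bottom-half work runs here, in softirq context. */
}

static DECLARE_TASKLET(example_tasklet, example_tasklet_fn, 0);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* Mark the tasklet pending; it runs at most once per schedule. */
	tasklet_schedule(&example_tasklet);
	return IRQ_HANDLED;
}

static void example_remove(void)
{
	/* Wait for any scheduled or running instance to finish. */
	tasklet_kill(&example_tasklet);
}
#endif
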
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

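/*
 * Illustrative sketch: a user of the combo initializes it once, arms the
 * underlying hrtimer, and receives the callback in softirq context.
 * example_thr, example_timer_cb and example_setup are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct tasklet_hrtimer example_thr;

static enum hrtimer_restart example_timer_cb(struct hrtimer *timer)
{
	/* Called from tasklet/softirq context, not from hard interrupt context. */
	return HRTIMER_NORESTART;
}

static void example_setup(void)
{
	tasklet_hrtimer_init(&example_thr, example_timer_cb,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&example_thr, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}
#endif
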
/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

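/*
 * Illustrative sketch: a caller embeds a struct call_single_data in its own
 * work item and asks for a softirq to be triggered on a chosen remote CPU;
 * if that CPU is offline the work is queued locally instead. struct
 * example_work, example_queue_on() and EXAMPLE_SOFTIRQ are hypothetical, and
 * the handler for that softirq is expected to walk this CPU's
 * softirq_work_list[] entry.
 */
#if 0	/* example only, not compiled */
struct example_work {
	struct call_single_data csd;	/* must stay alive until the handler runs */
	int payload;
};

static void example_queue_on(struct example_work *work, int target_cpu)
{
	/* Interrupt disabling and current-cpu lookup are done by the helper. */
	send_remote_softirq(&work->csd, target_cpu, EXAMPLE_SOFTIRQ);
}
#endif
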
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_note_context_switch((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
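/*
 * Illustrative sketch: on_each_cpu() runs the given function on every online
 * CPU, including the caller's, where it is invoked with interrupts disabled.
 * example_flush_local and example_flush_all_cpus are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_flush_local(void *info)
{
	/* Per-CPU work; runs once on each online CPU. */
}

static void example_flush_all_cpus(void)
{
	/* wait=1: do not return until every CPU has finished the callback. */
	on_each_cpu(example_flush_local, NULL, 1);
}
#endif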

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
	return 0;
}