/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
#include <trace/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

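/*
 * Human-readable softirq names, indexed by softirq number; used by the
 * preempt-count imbalance printk in __do_softirq() below and by the
 * softirq entry/exit tracepoints.
 */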
char *softirq_to_name[NR_SOFTIRQS] = {
	"HI_SOFTIRQ", "TIMER_SOFTIRQ", "NET_TX_SOFTIRQ", "NET_RX_SOFTIRQ",
	"BLOCK_SOFTIRQ", "TASKLET_SOFTIRQ", "SCHED_SOFTIRQ", "HRTIMER_SOFTIRQ",
	"RCU_SOFTIRQ"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
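
/*
 * Note on the accounting above: SOFTIRQ_OFFSET is one unit of the
 * softirq nesting field inside preempt_count() (0x100 with the classic
 * 8-bit sub-counts), so softirq_count() == SOFTIRQ_OFFSET means that
 * softirqs went from enabled to disabled by exactly this call, which
 * is the only transition lockdep and the tracers care about.
 */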

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

DEFINE_TRACE(softirq_entry);
DEFINE_TRACE(softirq_exit);

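/*
 * __do_softirq() runs with hardirqs enabled while the handlers execute,
 * but with SOFTIRQ_OFFSET held in preempt_count() throughout, so the
 * softirq context can never be re-entered on this cpu; a hardirq that
 * raises a softirq meanwhile merely sets a pending bit for the loop.
 */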
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

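/*
 * On CONFIG_NO_HZ kernels an interrupt may arrive while the periodic
 * tick is stopped in a long idle sleep; the tick_check_idle() call in
 * irq_enter() below is presumably there to let the tick code catch
 * jiffies up before the handler runs.
 */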
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

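/*
 * When the architecture guarantees that irq_exit() is entered with
 * hardirqs disabled, the do_softirq() wrapper above (with its extra
 * irq save/restore and in_interrupt() check) is redundant and
 * __do_softirq() can be called directly.
 */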
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

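/*
 * Registration is one-way: handlers are installed once at boot and
 * never removed.  E.g. softirq_init() below does
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 */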
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

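/*
 * Each per-cpu tasklet list is singly linked through
 * tasklet_struct::next; keeping a tail pointer makes appending O(1)
 * and preserves FIFO execution order.
 */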
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

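/*
 * Run the per-cpu tasklet list.  tasklet_trylock() ensures that a
 * tasklet runs on at most one cpu at a time; if it is already running
 * elsewhere, or is disabled (count != 0), it is re-queued at the tail
 * and the softirq is raised again for a later pass.
 */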
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

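/*
 * Typical driver usage, as a sketch (DECLARE_TASKLET and
 * tasklet_schedule() come from <linux/interrupt.h>; my_func and my_dev
 * are placeholder names):
 *
 *	DECLARE_TASKLET(my_tasklet, my_func, (unsigned long)&my_dev);
 *	...
 *	tasklet_schedule(&my_tasklet);	(usually from the irq handler)
 *
 * tasklet_kill() below must be called from process context, since it
 * may yield() while waiting for a scheduled or running tasklet to
 * finish.
 */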
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

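/*
 * Without the generic SMP helpers there is no way to IPI the work
 * over, so __try_remote_softirq() always reports failure and the
 * callers below queue the work on the local cpu instead.
 */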
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

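/*
 * In both variants @cp carries the IPI payload and the softirq list
 * linkage, so the caller must not reuse or free it until the handler
 * on the target cpu has consumed the entry.
 */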
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

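/*
 * Per-cpu ksoftirqd thread: sleeps until wakeup_softirqd() wakes it,
 * then drains pending softirqs, calling cond_resched() between rounds
 * so that heavy softirq load cannot starve userspace.
 */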
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

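/*
 * Splice the dead cpu's tasklet lists onto the current cpu so that no
 * scheduled tasklet is lost across a hot-unplug.
 */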
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
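/*
 * Note: @func runs on the remote cpus via IPI and then on the local
 * cpu with interrupts disabled; with @wait set, the call does not
 * return until every cpu has finished.
 */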
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}