/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

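/*
 * Editorial sketch, not original kernel documentation: with the offsets
 * above, bh-disable sections nest as follows.
 *
 *	local_bh_disable();	// softirq_count() == SOFTIRQ_DISABLE_OFFSET
 *	local_bh_disable();	// softirq_count() == 2 * SOFTIRQ_DISABLE_OFFSET
 *	local_bh_enable();	// still bh-disabled, pending softirqs stay pending
 *	local_bh_enable();	// count reaches zero, pending softirqs may run
 *
 * Because __do_softirq() adds only SOFTIRQ_OFFSET, in_serving_softirq()
 * can test that low bit to distinguish "a softirq handler is running"
 * from "bottom halves are merely disabled".
 */
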
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	preempt_count_add(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any is pending. And do it on its own stack,
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip(_RET_IP_);
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

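/*
 * Editorial usage sketch, not part of the original file: a typical use
 * of the pair above is protecting data that is also touched from softirq
 * context. The per-CPU variable here is hypothetical:
 *
 *	local_bh_disable();
 *	__this_cpu_inc(my_counter);	// cannot race with local softirqs
 *	local_bh_enable();
 *
 * Softirqs raised on this CPU in the meantime stay pending and are run
 * from the local_bh_enable() path, or handed to ksoftirqd.
 */
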
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

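/*
 * Editorial note: MAX_SOFTIRQ_TIME is expressed in jiffies, so its real
 * resolution depends on CONFIG_HZ. With HZ=1000 the budget is two ticks
 * (2 ms); with HZ=100, msecs_to_jiffies(2) rounds up to one 10 ms tick,
 * so the effective time limit is coarser than the nominal 2 ms.
 */
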
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Convoluted means of passing __do_softirq() a message through the various
 * architecture execute_on_stack() bits.
 *
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */
static DEFINE_PER_CPU(int, softirq_from_hardirq);

static inline void lockdep_softirq_from_hardirq(void)
{
	this_cpu_write(softirq_from_hardirq, 1);
}

static inline void lockdep_softirq_start(void)
{
	if (this_cpu_read(softirq_from_hardirq))
		trace_hardirq_exit();
	lockdep_softirq_enter();
}

static inline void lockdep_softirq_end(void)
{
	lockdep_softirq_exit();
	if (this_cpu_read(softirq_from_hardirq)) {
		this_cpu_write(softirq_from_hardirq, 0);
		trace_hardirq_enter();
	}
}

#else
static inline void lockdep_softirq_from_hardirq(void) { }
static inline void lockdep_softirq_start(void) { }
static inline void lockdep_softirq_end(void) { }
#endif

asmlinkage void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	__u32 pending;
	int cpu;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
	lockdep_softirq_start();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end();
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
		lockdep_softirq_from_hardirq();
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can potentially be deep already. So call softirq on its
		 * own stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

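/*
 * Editorial note: force_irqthreads is enabled by the "threadirqs" boot
 * parameter (CONFIG_IRQ_FORCED_THREADING); in that mode pending softirqs
 * are always deferred to ksoftirqd instead of running on irq exit.
 */
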
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

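/*
 * Editorial usage sketch: softirq handlers are registered once at boot
 * by the owning subsystem. The networking core, for instance, does
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 * (see net/core/dev.c). The vector numbers come from the fixed enum in
 * <linux/interrupt.h>; there is deliberately no "allocate a softirq"
 * interface.
 */
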
/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

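/*
 * Editorial usage sketch: a driver typically declares a tasklet either
 * statically or via tasklet_init(), then schedules it from its hard
 * interrupt handler. The names below are hypothetical:
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		// runs in softirq context on the scheduling CPU
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	// from the ISR:
 *	tasklet_schedule(&my_tasklet);
 *
 * tasklet_kill() below must be called before the tasklet's data goes
 * away, e.g. on driver teardown.
 */
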
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

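/*
 * Editorial usage sketch (hypothetical names): a user embeds a
 * struct tasklet_hrtimer, initializes it once, and arms it with
 * tasklet_hrtimer_start(); the callback then runs in softirq context:
 *
 *	tasklet_hrtimer_init(&my_th, my_callback,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */
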
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb,
			unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}