rcu: Deconfuse dynticks entry-exit tracing
kernel/rcutiny.c

/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!idle_cpu(smp_processor_id())) {
		WARN_ON_ONCE(1);	/* must be idle task! */
		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
	}
	rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting = 0;
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting--;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!idle_cpu(smp_processor_id())) {
		WARN_ON_ONCE(1);	/* must be idle task! */
		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(oldval != 0);
	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

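/*
 * A sketch of how rcu_dynticks_nesting evolves across idle entry and exit
 * (illustrative only; the values follow directly from the functions above).
 * Starting from the idle task with nesting == DYNTICK_TASK_NESTING:
 *
 *	rcu_idle_enter()	nesting -> 0			(traced "Start")
 *	rcu_irq_enter()		nesting -> 1			(traced "End")
 *	rcu_irq_exit()		nesting -> 0			(traced "Start")
 *	rcu_idle_exit()		nesting -> DYNTICK_TASK_NESTING	(traced "End")
 *
 * Interrupts that do not begin or end the idle period (the counter stays
 * nonzero) only increment and decrement the counter, which shows up as the
 * "++=" and "--=" trace events rather than as an extended-quiescent-state
 * transition.
 */
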
#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Called with irqs disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

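/*
 * Sketch of the callback list manipulated above (an illustration based on
 * the rcu_ctrlblk fields used in this file):
 *
 *	rcucblist --> cb1 --> cb2 --> cb3 --> NULL
 *
 *	donetail == &cb2->next	cb1 and cb2 have waited out a grace period
 *				and are ready to invoke
 *	curtail  == &cb3->next	new callbacks are appended here by __call_rcu()
 *
 * Advancing ->donetail to ->curtail in rcu_qsctr_help() marks every
 * currently queued callback as ready, which is safe because a quiescent
 * state has just been observed on this (the only) CPU.  When the list is
 * empty, both tail pointers reference &rcp->rcucblist.
 */
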
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting,
 * so that rcu_qsctr_help() runs on both ctrlblks even when the first
 * call returns nonzero.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}

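/*
 * rcu_process_callbacks() is the softirq-context entry point for callback
 * invocation.  invoke_rcu_callbacks(), supplied by rcutiny_plugin.h, is
 * presumed to arrange for it to run, either by raising RCU_SOFTIRQ or, in
 * RCU_BOOST configurations, by waking an RCU kthread; see rcutiny_plugin.h
 * for the exact mechanism.
 */
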
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

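/*
 * A minimal updater-side sketch (the "foo" names below are made up for
 * illustration and are not part of this file):
 *
 *	old = foo_ptr;
 *	rcu_assign_pointer(foo_ptr, new);
 *	synchronize_sched();	wait for pre-existing rcu_read_lock_sched()
 *				(or otherwise preempt-disabled) readers
 *	kfree(old);		no reader can still hold a reference
 */
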
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
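
/*
 * A minimal caller-side sketch for the callback-based interface (the names
 * below are made up for illustration and are not part of this file).  The
 * rcu_head is embedded in the protected structure, so posting the callback
 * needs no extra allocation:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	old = foo_ptr;
 *	rcu_assign_pointer(foo_ptr, new);
 *	call_rcu_sched(&old->rcu, foo_reclaim);
 *
 * Unlike synchronize_sched(), call_rcu_sched() and call_rcu_bh() do not
 * block; the callback is queued on the matching rcu_ctrlblk and invoked by
 * __rcu_process_callbacks() after a quiescent state has been recorded.
 */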