rcu: "Tiny RCU", The Bloatwatch Edition
kernel/rcupdate.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

int rcu_scheduler_active __read_mostly;
/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

#ifndef CONFIG_TINY_RCU

#ifdef CONFIG_TREE_PREEMPT_RCU

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	/* Early boot: only one task can run, so grace periods are no-ops. */
	if (!rcu_scheduler_active)
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
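/*
 * Usage sketch for synchronize_rcu(): a minimal reader/updater pair.
 * All names below (struct foo, global_foo, example_reader(),
 * example_updater()) are hypothetical, and kfree() assumes
 * <linux/slab.h>.  Readers run lock-free inside
 * rcu_read_lock()/rcu_read_unlock(); the updater publishes a new
 * version, then waits for a grace period before freeing the old one.
 */
#if 0	/* illustrative example only, compiled out */
struct foo {
	int a;
};
static struct foo *global_foo;

static int example_reader(void)
{
	int val;

	rcu_read_lock();			/* begin read-side critical section */
	val = rcu_dereference(global_foo)->a;	/* fetch current version */
	rcu_read_unlock();			/* end read-side critical section */
	return val;
}

static void example_updater(struct foo *new_foo)
{
	struct foo *old_foo = global_foo;

	rcu_assign_pointer(global_foo, new_foo);	/* publish new version */
	synchronize_rcu();	/* wait for all pre-existing readers */
	kfree(old_foo);		/* no reader can still hold a reference */
}
#endif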
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
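/*
 * Usage sketch for synchronize_sched(): with the rcu-sched flavor, any
 * preemption-disabled region acts as a read-side critical section, so
 * code using plain preempt_disable() can be waited on.  All names below
 * (struct bar, sched_protected, and both functions) are hypothetical,
 * and kfree() assumes <linux/slab.h>.
 */
#if 0	/* illustrative example only, compiled out */
struct bar {
	int b;
};
static struct bar *sched_protected;

static int example_sched_reader(void)
{
	int val;

	preempt_disable();	/* serves as rcu_read_lock_sched() */
	val = rcu_dereference(sched_protected)->b;
	preempt_enable();	/* serves as rcu_read_unlock_sched() */
	return val;
}

static void example_sched_updater(struct bar *new_bar)
{
	struct bar *old_bar = sched_protected;

	rcu_assign_pointer(sched_protected, new_bar);
	synchronize_sched();	/* every preempt-disabled region (including
				 * hard-IRQ and NMI handlers) in progress at
				 * the call has completed by the time this
				 * returns */
	kfree(old_bar);		/* safe: no such region can still see old_bar */
}
#endif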
/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */
void synchronize_rcu_bh(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_bh(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
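/*
 * Usage sketch for the rcu_bh flavor: its read-side critical sections
 * suit softirq-heavy paths such as networking, since rcu_read_lock_bh()
 * also disables softirqs locally.  The names bh_protected and
 * example_bh_reader() are hypothetical.
 */
#if 0	/* illustrative example only, compiled out */
static int *bh_protected;

static int example_bh_reader(void)
{
	int val;

	rcu_read_lock_bh();	/* begin rcu_bh read-side critical section */
	val = *rcu_dereference(bh_protected);
	rcu_read_unlock_bh();	/* end rcu_bh read-side critical section */
	return val;
}
#endif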
#endif /* #ifndef CONFIG_TINY_RCU */
static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
		unsigned long action, void *hcpu)
{
	return rcu_cpu_notify(self, action, hcpu);
}

void __init rcu_init(void)
{
	int i;

	__rcu_init();
	cpu_notifier(rcu_barrier_cpu_hotplug, 0);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	for_each_online_cpu(i)
		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
}
/*
 * Mark the start of run time: the scheduler is about to become active,
 * so RCU must begin waiting for real grace periods.  Before this point,
 * only one task can be running, which is why synchronize_rcu() above
 * may return immediately when !rcu_scheduler_active.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}