/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
       (ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
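
/*
 * Typical usage (an illustrative sketch, not part of the original header):
 * embed a struct rcu_head in the RCU-protected structure, and have the
 * callback recover the enclosing structure with container_of().  The
 * "struct foo" type and foo_reclaim() below are hypothetical names, and
 * kfree() additionally assumes <linux/slab.h>:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 * An updater then hands the callback to call_rcu(&fp->rcu, foo_reclaim),
 * declared near the bottom of this file.
 */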


/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.                      */
	long	completed;	/* Number of the last completed batch         */
	int	next_pending;	/* Is the next batch already waiting?         */

	int	signaled;

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask; /* CPUs that need to switch in order    */
				 /* for current batch to proceed.        */
} ____cacheline_internodealigned_in_smp;

/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}
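
/*
 * Illustrative note (not in the original header): the signed subtraction
 * above keeps these comparisons correct across counter wraparound, given
 * the two's-complement arithmetic the kernel relies on.  For example, if
 * batch a == LONG_MAX and batch b is its successor (which wraps to
 * LONG_MIN), then a - b evaluates to -1, so rcu_batch_before(a, b) still
 * reports that a precedes b, whereas a naive "a < b" comparison would not.
 */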

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	/* Batch # for grace period */
	int		passed_quiesc;	/* User-mode/idle loop etc. */
	int		qs_pending;	/* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		/* Batch # for current RCU batch */
	struct rcu_head	*nxtlist;
	struct rcu_head	**nxttail;
	long		qlen;		/* # of queued callbacks */
	struct rcu_head	*curlist;
	struct rcu_head	**curtail;
	struct rcu_head	*donelist;
	struct rcu_head	**donetail;
	long		blimit;		/* Upper limit on a processed batch */
	int		cpu;
	struct rcu_head	barrier;
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent state counter.
 * The counter is somewhat degenerate: we do not need to know
 * how many quiescent states passed, just whether there was at least
 * one since the start of the grace period.  Thus it is just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, synchronize_rcu() is
 * guaranteed to block until after all the other CPUs exit their
 * critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() \
	do { \
		preempt_disable(); \
		__acquire(RCU); \
		rcu_read_acquire(); \
	} while (0)

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock() \
	do { \
		rcu_read_release(); \
		__release(RCU); \
		preempt_enable(); \
	} while (0)
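
/*
 * Example read-side usage (an illustrative sketch, not part of the
 * original header).  "gp" is a hypothetical global RCU-protected pointer
 * to the hypothetical struct foo shown earlier; do_something_with()
 * stands in for the reader's own code, and rcu_dereference() is defined
 * later in this file:
 *
 *	struct foo *fp;
 *
 *	rcu_read_lock();
 *	fp = rcu_dereference(gp);
 *	if (fp != NULL)
 *		do_something_with(fp->data);
 *	rcu_read_unlock();
 *
 * The pointer fetched by rcu_dereference() may be dereferenced only
 * within the enclosing critical section.
 */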

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
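
/*
 * A minimal update-side sketch (illustrative, not part of the original
 * header): writers serialize among themselves with an ordinary spinlock,
 * publish the new version with rcu_assign_pointer() (defined later in
 * this file), and defer reclamation of the old version to a grace
 * period.  "foo_lock", "gp", "new_fp" (a fully initialized replacement),
 * and foo_reclaim() are the hypothetical names from the earlier sketches:
 *
 *	struct foo *old_fp;
 *
 *	spin_lock(&foo_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&foo_lock);
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 *
 * Concurrent readers see either the old or the new version, never a
 * partially initialized one.
 */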

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() \
	do { \
		local_bh_disable(); \
		__acquire(RCU_BH); \
		rcu_read_acquire(); \
	} while (0)

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() \
	do { \
		rcu_read_release(); \
		__release(RCU_BH); \
		local_bh_enable(); \
	} while (0)
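
/*
 * Example (an illustrative sketch, not part of the original header): a
 * process-context reader of data whose updaters use call_rcu_bh() must
 * use the _bh variants, since only softirq completion is treated as a
 * quiescent state there.  "gp_bh" is a hypothetical RCU-protected
 * pointer to the hypothetical struct foo:
 *
 *	struct foo *fp;
 *
 *	rcu_read_lock_bh();
 *	fp = rcu_dereference(gp_bh);
 *	if (fp != NULL)
 *		do_something_with(fp->data);
 *	rcu_read_unlock_bh();
 */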

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
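
/*
 * Example (an illustrative sketch, not part of the original header):
 * without ACCESS_ONCE(), the compiler could legally hoist the load out
 * of the loop below and spin forever on a stale value.  The
 * "need_to_stop" flag is hypothetical:
 *
 *	while (!ACCESS_ONCE(need_to_stop))
 *		cpu_relax();
 */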

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	({ \
					smp_wmb(); \
					(p) = (v); \
				})

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() synchronize_rcu()
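
/*
 * Example (an illustrative sketch, not part of the original header): an
 * updater that can block may wait for a grace period in line with
 * synchronize_rcu() (declared below) instead of registering a callback,
 * and then free the old version directly.  Names are the hypothetical
 * ones from the earlier sketches; kfree() assumes <linux/slab.h>:
 *
 *	struct foo *old_fp;
 *
 *	spin_lock(&foo_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 *
 * The synchronize_rcu() call blocks until all pre-existing readers have
 * finished, after which old_fp may safely be freed.
 */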

extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
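
/*
 * Example (an illustrative sketch, not part of the original header): a
 * module that queues callbacks with call_rcu() should invoke
 * rcu_barrier() on its unload path, so that all of its outstanding
 * callbacks have run before the module text disappears.  foo_exit() is
 * a hypothetical module-exit function:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rcu_barrier();
 *	}
 *
 * The module must also have stopped posting new callbacks before this
 * point; rcu_barrier() only waits for callbacks already queued.
 */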

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */