/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
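
/*
 * Illustrative sketch (not part of the kernel API; "struct foo" and
 * foo_reclaim() are hypothetical names): an RCU-protected object
 * typically embeds a struct rcu_head and recovers itself in the
 * callback via container_of():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 * After fp has been unlinked from every reader-visible structure, the
 * update side hands it to the callback machinery with:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */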

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.              */
	long	completed;	/* Number of the last completed batch */
	int	next_pending;	/* Is the next batch already waiting? */

	int	signaled;

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask; /* CPUs that need to switch in order */
				 /* for current batch to proceed.     */
} ____cacheline_internodealigned_in_smp;

/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}
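
/*
 * A brief worked example of the signed-difference trick above (a sketch,
 * not kernel code): with a = 2 and b = 5, (a - b) == -3 < 0, so batch 2
 * is "before" batch 5.  Because only the sign of the difference is
 * examined, the comparison remains correct when the batch counters wrap
 * around LONG_MAX, provided the two batch numbers are less than LONG_MAX
 * apart.
 */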

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	/* Batch # for grace period */
	int		passed_quiesc;	/* User-mode/idle loop etc. */
	int		qs_pending;	/* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		/* Batch # for current RCU batch */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail;
	long		qlen;		/* # of queued callbacks */
	struct rcu_head *curlist;
	struct rcu_head **curtail;
	struct rcu_head *donelist;
	struct rcu_head **donetail;
	long		blimit;		/* Upper limit on a processed batch */
	int		cpu;
	struct rcu_head barrier;
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent state counter.
 * The counter is somewhat degenerate: we do not need to know
 * how many quiescent states passed, just whether there was at least
 * one since the start of the grace period.  Thus it is just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() \
	do { \
		preempt_disable(); \
		__acquire(RCU); \
	} while (0)

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock() \
	do { \
		__release(RCU); \
		preempt_enable(); \
	} while (0)
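
/*
 * Illustrative reader-side sketch ("fp", "foo_list" and the "list" member
 * are hypothetical; list_for_each_entry_rcu() is declared in
 * <linux/list.h>):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(fp, &foo_list, list) {
 *		if (fp->key == key) {
 *			do_something_with(fp);
 *			break;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * No blocking is permitted between rcu_read_lock() and rcu_read_unlock(),
 * and pointers obtained inside the section must not be used after
 * rcu_read_unlock() returns.
 */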

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() \
	do { \
		local_bh_disable(); \
		__acquire(RCU_BH); \
	} while (0)

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() \
	do { \
		__release(RCU_BH); \
		local_bh_enable(); \
	} while (0)
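
/*
 * Illustrative sketch of a process-context reader of data whose updates
 * are published with call_rcu_bh() ("gp" is a hypothetical RCU-protected
 * pointer):
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		handle(p->data);
 *	rcu_read_unlock_bh();
 */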

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = p; \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
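
/*
 * Typical use (a sketch; "gp" is again a hypothetical global RCU-protected
 * pointer):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something(p->a, p->b);
 *	rcu_read_unlock();
 *
 * Both the rcu_dereference() and every use of the fetched pointer must
 * stay within the enclosing read-side critical section.
 */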

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	({ \
					smp_wmb(); \
					(p) = (v); \
					})
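
/*
 * Typical update-side use (a sketch; the names are hypothetical, and the
 * spinlock is just one way for writers to coordinate with each other):
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	new_fp->a = 1;
 *	new_fp->b = 2;
 *	spin_lock(&foo_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&foo_lock);
 *
 * The smp_wmb() in rcu_assign_pointer() ensures that a reader that sees
 * the new value of gp also sees the fully initialized structure.
 */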

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() synchronize_rcu()

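/*
 * Illustrative blocking-update sketch using synchronize_rcu() (names are
 * hypothetical): replace the old version, wait for a full grace period so
 * that all pre-existing readers have finished with it, then free it.
 *
 *	spin_lock(&foo_lock);
 *	old_fp = gp;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 */
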
extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);
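
/*
 * Illustrative module-exit sketch: code that posts RCU callbacks from a
 * module must wait for all of them to be invoked before the module text
 * is freed ("foo_exit" is a hypothetical exit handler; the elided steps
 * stop new callbacks from being posted):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		...
 *		rcu_barrier();
 *		...
 *	}
 */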

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */