/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);

EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

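/*
 * Batching tunables, settable as module parameters (see the
 * module_param() calls at the bottom of this file): rcu_do_batch()
 * invokes at most blimit callbacks per softirq pass.  When a CPU
 * queues up more than qhimark callbacks, the per-cpu limit is lifted
 * and a quiescent state is forced; the limit is restored to blimit
 * once the queue drains below qlowmark.
 */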
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        cpumask_t cpumask;
        set_need_resched();
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 */
                cpumask = rcp->cpumask;
                cpu_clear(rdp->cpu, cpumask);
                for_each_cpu_mask(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
        local_irq_restore(flags);
}

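/*
 * Illustrative sketch, not part of this file: a typical call_rcu()
 * user embeds a struct rcu_head in the protected structure and frees
 * the structure from the callback once a grace period has elapsed.
 * struct foo, foo_reclaim() and foo_release() are hypothetical names;
 * foo_release() is called with the update-side lock held.
 *
 *      struct foo {
 *              struct list_head list;
 *              int key;
 *              int data;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *head)
 *      {
 *              kfree(container_of(head, struct foo, rcu));
 *      }
 *
 *      static void foo_release(struct foo *fp)
 *      {
 *              list_del_rcu(&fp->list);
 *              call_rcu(&fp->rcu, foo_reclaim);
 *      }
 *
 * Readers traversing the list under rcu_read_lock() may still hold a
 * reference to *fp when foo_release() returns; foo_reclaim() runs only
 * after they have all left their read-side critical sections.
 */
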
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_bh_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_bh_ctrlblk);
        }

        local_irq_restore(flags);
}

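/*
 * Illustrative sketch, not part of this file: readers paired with
 * call_rcu_bh() must use rcu_read_lock_bh()/rcu_read_unlock_bh() in
 * process context, since the _bh variant treats softirq completion as
 * a quiescent state.  foo_list and the struct foo fields reuse the
 * hypothetical example above.
 *
 *      static int foo_lookup(int key)
 *      {
 *              struct foo *fp;
 *              int data = -1;
 *
 *              rcu_read_lock_bh();
 *              list_for_each_entry_rcu(fp, &foo_list, list)
 *                      if (fp->key == key) {
 *                              data = fp->data;
 *                              break;
 *                      }
 *              rcu_read_unlock_bh();
 *              return data;
 *      }
 */
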
/*
 * Return the number of RCU batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}

/*
 * Return the number of RCU bh batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}

static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
        int cpu = smp_processor_id();
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_head *head;

        head = &rdp->barrier;
        atomic_inc(&rcu_barrier_cpu_count);
        call_rcu(head, rcu_barrier_callback);
}

/**
 * rcu_barrier - Wait until all in-flight RCU callbacks have completed.
 */
void rcu_barrier(void)
{
        BUG_ON(in_interrupt());
        /* Take rcu_barrier_mutex to protect against CPU hotplug */
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        atomic_set(&rcu_barrier_cpu_count, 0);
        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

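/*
 * Illustrative sketch, not part of this file: a module that passes
 * its own functions to call_rcu() must call rcu_barrier() on unload,
 * otherwise a still-queued callback could run after the module text
 * has been freed.  foo_exit() and foo_cache are hypothetical.
 *
 *      static void __exit foo_exit(void)
 *      {
 *              ... unpublish all foo structures, stop queueing callbacks ...
 *              rcu_barrier();
 *              kmem_cache_destroy(foo_cache);
 *      }
 *
 * The rcu_barrier() call waits for every outstanding foo_reclaim()
 * callback to finish before the kmem_cache (and the module) goes away.
 */
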
/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
        raise_softirq(RCU_SOFTIRQ);
        /*
         * The smp_mb() here is required to ensure that this cpu's
         * __rcu_process_callbacks() reads the most recently updated
         * value of rcp->cur.
         */
        smp_mb();
}

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_disable();
        rdp->qlen -= count;
        local_irq_enable();
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick it up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the
 *   beginning of the grace period; if so, they update
 *   rcu_ctrlblk.cpumask. If the bitmap is empty, then the grace period
 *   is completed. rcu_check_quiescent_state calls rcu_start_batch to
 *   start the next grace period (if necessary).
 */
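/*
 * Worked example of the two steps above, with arbitrary batch numbers:
 * assume rcp->cur == rcp->completed == 5 and some CPU queues new
 * callbacks.  __rcu_process_callbacks() assigns them rdp->batch = 6,
 * sets next_pending and calls rcu_start_batch(), which advances
 * rcp->cur to 6 and snapshots the online CPUs into rcp->cpumask.
 * Each CPU then sees rdp->quiescbatch != rcp->cur in
 * rcu_check_quiescent_state() and starts watching for a quiescent
 * state; once it passes through one, cpu_quiet() clears its bit.
 * When the mask empties, rcp->completed becomes 6, and the callbacks
 * queued for batch 6 become eligible for invocation.
 */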
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcp->lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->next_pending &&
                        rcp->completed == rcp->cur) {
                rcp->next_pending = 0;
                /*
                 * next_pending == 0 must be visible in
                 * __rcu_process_callbacks() before it can see the new value
                 * of rcp->cur.
                 */
                smp_wmb();
                rcp->cur++;

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
                 * barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock(&rcp->lock);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock(&rcp->lock);
}


#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu. Do not use elsewhere without reviewing
 * the locking requirements: the list it pulls from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail)
{
        local_irq_disable();
        *this_rdp->nxttail = list;
        if (list)
                this_rdp->nxttail = tail;
        local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* If the cpu going offline owns the grace period,
         * we can block indefinitely waiting for it, so flush
         * it here.
         */
        spin_lock_bh(&rcp->lock);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        spin_unlock_bh(&rcp->lock);
        rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                          &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                          &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
                *rdp->donetail = rdp->curlist;
                rdp->donetail = rdp->curtail;
                rdp->curlist = NULL;
                rdp->curtail = &rdp->curlist;
        }

        if (rdp->nxtlist && !rdp->curlist) {
                local_irq_disable();
                rdp->curlist = rdp->nxtlist;
                rdp->curtail = rdp->nxttail;
                rdp->nxtlist = NULL;
                rdp->nxttail = &rdp->nxtlist;
                local_irq_enable();

                /*
                 * start the next batch of callbacks
                 */

                /* determine batch number */
                rdp->batch = rcp->cur + 1;
                /* see the comment and corresponding wmb() in
                 * rcu_start_batch()
                 */
                smp_rmb();

                if (!rcp->next_pending) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rcp->lock);
                        rcp->next_pending = 1;
                        rcu_start_batch(rcp);
                        spin_unlock(&rcp->lock);
                }
        }

        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* This cpu has pending rcu entries and the grace period
         * for them has completed.
         */
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
                return 1;

        /* This cpu has no pending entries, but there are new entries */
        if (!rdp->curlist && rdp->nxtlist)
                return 1;

        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so. This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so. This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

        return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}

void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);
        } else if (!in_softirq())
                rcu_bh_qsctr_inc(cpu);
        raise_rcu_softirq();
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        memset(rdp, 0, sizeof(*rdp));
        rdp->curtail = &rdp->curlist;
        rdp->nxttail = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
}

static void __cpuinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
        .notifier_call = rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism. Assumed to be called early, that is
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up. Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/* Because of the FASTCALL declaration of complete(), we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished */
        call_rcu(&rcu.head, wakeme_after_rcu);

        /* Wait for it */
        wait_for_completion(&rcu.completion);
}

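/*
 * Illustrative sketch, not part of this file: an updater that wants
 * synchronous reclamation publishes the new version with
 * rcu_assign_pointer(), waits for a grace period, then frees the old
 * version directly instead of queueing a call_rcu() callback.
 * gbl_foo, foo_lock and foo_update() are hypothetical.
 *
 *      static void foo_update(int new_data)
 *      {
 *              struct foo *newp, *oldp;
 *
 *              newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *              spin_lock(&foo_lock);
 *              oldp = gbl_foo;
 *              *newp = *oldp;
 *              newp->data = new_data;
 *              rcu_assign_pointer(gbl_foo, newp);
 *              spin_unlock(&foo_lock);
 *
 *              synchronize_rcu();
 *              kfree(oldp);
 *      }
 *
 * Because synchronize_rcu() blocks, this pattern is only usable from
 * process context; atomic contexts must use call_rcu() instead.
 */
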
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);