Commit | Line | Data |
---|---|---|
dad81a20 PM |
1 | /* |
2 | * Sleepable Read-Copy Update mechanism for mutual exclusion. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, you can access it online at | |
16 | * http://www.gnu.org/licenses/gpl-2.0.html. | |
17 | * | |
18 | * Copyright (C) IBM Corporation, 2006 | |
19 | * Copyright (C) Fujitsu, 2012 | |
20 | * | |
21 | * Author: Paul McKenney <paulmck@us.ibm.com> | |
22 | * Lai Jiangshan <laijs@cn.fujitsu.com> | |
23 | * | |
24 | * For detailed explanation of Read-Copy Update mechanism see - | |
25 | * Documentation/RCU/ *.txt | |
26 | * | |
27 | */ | |
28 | ||
29 | #include <linux/export.h> | |
30 | #include <linux/mutex.h> | |
31 | #include <linux/percpu.h> | |
32 | #include <linux/preempt.h> | |
33 | #include <linux/rcupdate_wait.h> | |
34 | #include <linux/sched.h> | |
35 | #include <linux/smp.h> | |
36 | #include <linux/delay.h> | |
22607d66 | 37 | #include <linux/module.h> |
dad81a20 PM |
38 | #include <linux/srcu.h> |
39 | ||
dad81a20 | 40 | #include "rcu.h" |
45753c5f | 41 | #include "rcu_segcblist.h" |
dad81a20 | 42 | |
0c8e0e3c PM |
43 | /* Holdoff in nanoseconds for auto-expediting. */ |
44 | #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000) | |
45 | static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF; | |
22607d66 PM |
46 | module_param(exp_holdoff, ulong, 0444); |
47 | ||
c350c008 PM |
48 | /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */ |
49 | static ulong counter_wrap_check = (ULONG_MAX >> 2); | |
50 | module_param(counter_wrap_check, ulong, 0444); | |
51 | ||
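/*
 * Illustrative note (not part of the original file): both knobs above are
 * declared with mode 0444, so they are read-only via sysfs at run time but
 * can be set at boot.  Assuming this file is built in as srcutree.o, the
 * parameters would take a "srcutree." prefix on the kernel command line,
 * for example:
 *
 *	srcutree.exp_holdoff=50000 srcutree.counter_wrap_check=262143
 *
 * (Values shown are illustrative; counter_wrap_check is used as a mask,
 * so a power of two minus one preserves the intended behavior.)
 */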
da915ad5 PM |
52 | static void srcu_invoke_callbacks(struct work_struct *work); |
53 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); | |
0d8a1e83 | 54 | static void process_srcu(struct work_struct *work); |
da915ad5 | 55 | |
d6331980 PM |
56 | /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ |
57 | #define spin_lock_rcu_node(p) \ | |
58 | do { \ | |
59 | spin_lock(&ACCESS_PRIVATE(p, lock)); \ | |
60 | smp_mb__after_unlock_lock(); \ | |
61 | } while (0) | |
62 | ||
63 | #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock)) | |
64 | ||
65 | #define spin_lock_irq_rcu_node(p) \ | |
66 | do { \ | |
67 | spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \ | |
68 | smp_mb__after_unlock_lock(); \ | |
69 | } while (0) | |
70 | ||
71 | #define spin_unlock_irq_rcu_node(p) \ | |
72 | spin_unlock_irq(&ACCESS_PRIVATE(p, lock)) | |
73 | ||
74 | #define spin_lock_irqsave_rcu_node(p, flags) \ | |
75 | do { \ | |
76 | spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \ | |
77 | smp_mb__after_unlock_lock(); \ | |
78 | } while (0) | |
79 | ||
80 | #define spin_unlock_irqrestore_rcu_node(p, flags) \ | |
81 | spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) | |
82 | ||
da915ad5 PM |
83 | /* |
84 | * Initialize SRCU combining tree. Note that statically allocated | |
85 | * srcu_struct structures might already have srcu_read_lock() and | |
86 | * srcu_read_unlock() running against them. So if the is_static parameter | |
87 | * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. | |
88 | */ | |
89 | static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) | |
dad81a20 | 90 | { |
da915ad5 PM |
91 | int cpu; |
92 | int i; | |
93 | int level = 0; | |
94 | int levelspread[RCU_NUM_LVLS]; | |
95 | struct srcu_data *sdp; | |
96 | struct srcu_node *snp; | |
97 | struct srcu_node *snp_first; | |
98 | ||
99 | /* Work out the overall tree geometry. */ | |
100 | sp->level[0] = &sp->node[0]; | |
101 | for (i = 1; i < rcu_num_lvls; i++) | |
102 | sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; | |
103 | rcu_init_levelspread(levelspread, num_rcu_lvl); | |
104 | ||
105 | /* Each pass through this loop initializes one srcu_node structure. */ | |
106 | rcu_for_each_node_breadth_first(sp, snp) { | |
d6331980 | 107 | spin_lock_init(&ACCESS_PRIVATE(snp, lock)); |
c7e88067 PM |
108 | WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != |
109 | ARRAY_SIZE(snp->srcu_data_have_cbs)); | |
110 | for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) { | |
da915ad5 | 111 | snp->srcu_have_cbs[i] = 0; |
c7e88067 PM |
112 | snp->srcu_data_have_cbs[i] = 0; |
113 | } | |
1e9a038b | 114 | snp->srcu_gp_seq_needed_exp = 0; |
da915ad5 PM |
115 | snp->grplo = -1; |
116 | snp->grphi = -1; | |
117 | if (snp == &sp->node[0]) { | |
118 | /* Root node, special case. */ | |
119 | snp->srcu_parent = NULL; | |
120 | continue; | |
121 | } | |
122 | ||
123 | /* Non-root node. */ | |
124 | if (snp == sp->level[level + 1]) | |
125 | level++; | |
126 | snp->srcu_parent = sp->level[level - 1] + | |
127 | (snp - sp->level[level]) / | |
128 | levelspread[level - 1]; | |
129 | } | |
130 | ||
131 | /* | |
132 | * Initialize the per-CPU srcu_data array, which feeds into the | |
133 | * leaves of the srcu_node tree. | |
134 | */ | |
135 | WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != | |
136 | ARRAY_SIZE(sdp->srcu_unlock_count)); | |
137 | level = rcu_num_lvls - 1; | |
138 | snp_first = sp->level[level]; | |
139 | for_each_possible_cpu(cpu) { | |
140 | sdp = per_cpu_ptr(sp->sda, cpu); | |
d6331980 | 141 | spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); |
da915ad5 PM |
142 | rcu_segcblist_init(&sdp->srcu_cblist); |
143 | sdp->srcu_cblist_invoking = false; | |
144 | sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; | |
1e9a038b | 145 | sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; |
da915ad5 PM |
146 | sdp->mynode = &snp_first[cpu / levelspread[level]]; |
147 | for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { | |
148 | if (snp->grplo < 0) | |
149 | snp->grplo = cpu; | |
150 | snp->grphi = cpu; | |
151 | } | |
152 | sdp->cpu = cpu; | |
153 | INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); | |
154 | sdp->sp = sp; | |
c7e88067 | 155 | sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); |
da915ad5 PM |
156 | if (is_static) |
157 | continue; | |
158 | ||
159 | /* Dynamically allocated, better be no srcu_read_locks()! */ | |
160 | for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) { | |
161 | sdp->srcu_lock_count[i] = 0; | |
162 | sdp->srcu_unlock_count[i] = 0; | |
163 | } | |
164 | } | |
165 | } | |
166 | ||
167 | /* | |
168 | * Initialize non-compile-time initialized fields, including the | |
169 | * associated srcu_node and srcu_data structures. The is_static | |
170 | * parameter is passed through to init_srcu_struct_nodes(), and | |
171 | * also tells us that ->sda has already been wired up to srcu_data. | |
172 | */ | |
173 | static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) | |
174 | { | |
175 | mutex_init(&sp->srcu_cb_mutex); | |
176 | mutex_init(&sp->srcu_gp_mutex); | |
177 | sp->srcu_idx = 0; | |
dad81a20 | 178 | sp->srcu_gp_seq = 0; |
da915ad5 PM |
179 | sp->srcu_barrier_seq = 0; |
180 | mutex_init(&sp->srcu_barrier_mutex); | |
181 | atomic_set(&sp->srcu_barrier_cpu_cnt, 0); | |
dad81a20 | 182 | INIT_DELAYED_WORK(&sp->work, process_srcu); |
da915ad5 PM |
183 | if (!is_static) |
184 | sp->sda = alloc_percpu(struct srcu_data); | |
185 | init_srcu_struct_nodes(sp, is_static); | |
1e9a038b | 186 | sp->srcu_gp_seq_needed_exp = 0; |
22607d66 | 187 | sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
da915ad5 PM |
188 | smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ |
189 | return sp->sda ? 0 : -ENOMEM; | |
dad81a20 PM |
190 | } |
191 | ||
192 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | |
193 | ||
194 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, | |
195 | struct lock_class_key *key) | |
196 | { | |
197 | /* Don't re-initialize a lock while it is held. */ | |
198 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); | |
199 | lockdep_init_map(&sp->dep_map, name, key, 0); | |
d6331980 | 200 | spin_lock_init(&ACCESS_PRIVATE(sp, lock)); |
da915ad5 | 201 | return init_srcu_struct_fields(sp, false); |
dad81a20 PM |
202 | } |
203 | EXPORT_SYMBOL_GPL(__init_srcu_struct); | |
204 | ||
205 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
206 | ||
207 | /** | |
208 | * init_srcu_struct - initialize a sleep-RCU structure | |
209 | * @sp: structure to initialize. | |
210 | * | |
211 | * Must invoke this on a given srcu_struct before passing that srcu_struct | |
212 | * to any other function. Each srcu_struct represents a separate domain | |
213 | * of SRCU protection. | |
214 | */ | |
215 | int init_srcu_struct(struct srcu_struct *sp) | |
216 | { | |
d6331980 | 217 | spin_lock_init(&ACCESS_PRIVATE(sp, lock)); |
da915ad5 | 218 | return init_srcu_struct_fields(sp, false); |
dad81a20 PM |
219 | } |
220 | EXPORT_SYMBOL_GPL(init_srcu_struct); | |
221 | ||
222 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
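/*
 * Illustrative sketch (not part of the original file): minimal lifecycle of
 * a dynamically initialized SRCU domain.  The my_device/my_config names are
 * hypothetical, and <linux/slab.h> is assumed for kzalloc()/kfree().
 */
struct my_device {
	struct srcu_struct srcu;	/* one SRCU domain per device */
	struct my_config __rcu *cfg;	/* pointer protected by that domain */
};

static struct my_device *my_device_create(void)
{
	struct my_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	if (init_srcu_struct(&dev->srcu)) {	/* allocates ->sda, may fail */
		kfree(dev);
		return NULL;
	}
	return dev;
}

/* Statically allocated domains can instead use DEFINE_SRCU(my_srcu); */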
223 | ||
224 | /* | |
da915ad5 PM |
225 | * First-use initialization of statically allocated srcu_struct |
226 | * structure. Wiring up the combining tree is more than can be | |
227 | * done with compile-time initialization, so this check is added | |
a3883df3 | 228 | * to each update-side SRCU primitive. Use sp->lock, which -is- |
da915ad5 PM |
229 | * compile-time initialized, to resolve races involving multiple |
230 | * CPUs trying to garner first-use privileges. | |
231 | */ | |
232 | static void check_init_srcu_struct(struct srcu_struct *sp) | |
233 | { | |
234 | unsigned long flags; | |
235 | ||
236 | WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT); | |
237 | /* The smp_load_acquire() pairs with the smp_store_release(). */ | |
238 | if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ | |
239 | return; /* Already initialized. */ | |
d6331980 | 240 | spin_lock_irqsave_rcu_node(sp, flags); |
da915ad5 | 241 | if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { |
d6331980 | 242 | spin_unlock_irqrestore_rcu_node(sp, flags); |
da915ad5 PM |
243 | return; |
244 | } | |
245 | init_srcu_struct_fields(sp, true); | |
d6331980 | 246 | spin_unlock_irqrestore_rcu_node(sp, flags); |
da915ad5 PM |
247 | } |
248 | ||
249 | /* | |
250 | * Returns approximate total of the readers' ->srcu_lock_count[] values | |
251 | * for the rank of per-CPU counters specified by idx. | |
dad81a20 PM |
252 | */ |
253 | static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) | |
254 | { | |
255 | int cpu; | |
256 | unsigned long sum = 0; | |
257 | ||
258 | for_each_possible_cpu(cpu) { | |
da915ad5 | 259 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
dad81a20 | 260 | |
da915ad5 | 261 | sum += READ_ONCE(cpuc->srcu_lock_count[idx]); |
dad81a20 PM |
262 | } |
263 | return sum; | |
264 | } | |
265 | ||
266 | /* | |
da915ad5 PM |
267 | * Returns approximate total of the readers' ->srcu_unlock_count[] values |
268 | * for the rank of per-CPU counters specified by idx. | |
dad81a20 PM |
269 | */ |
270 | static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) | |
271 | { | |
272 | int cpu; | |
273 | unsigned long sum = 0; | |
274 | ||
275 | for_each_possible_cpu(cpu) { | |
da915ad5 | 276 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
dad81a20 | 277 | |
da915ad5 | 278 | sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); |
dad81a20 PM |
279 | } |
280 | return sum; | |
281 | } | |
282 | ||
283 | /* | |
284 | * Return true if the number of pre-existing readers is determined to | |
285 | * be zero. | |
286 | */ | |
287 | static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) | |
288 | { | |
289 | unsigned long unlocks; | |
290 | ||
291 | unlocks = srcu_readers_unlock_idx(sp, idx); | |
292 | ||
293 | /* | |
294 | * Make sure that a lock is always counted if the corresponding | |
295 | * unlock is counted. Needs to be a smp_mb() as the read side may | |
296 | * contain a read from a variable that is written to before the | |
297 | * synchronize_srcu() in the write side. In this case smp_mb()s | |
298 | * A and B act like the store buffering pattern. | |
299 | * | |
300 | * This smp_mb() also pairs with smp_mb() C to prevent accesses | |
301 | * after the synchronize_srcu() from being executed before the | |
302 | * grace period ends. | |
303 | */ | |
304 | smp_mb(); /* A */ | |
305 | ||
306 | /* | |
307 | * If the locks are the same as the unlocks, then there must have | |
308 | * been no readers on this index at some time in between. This does | |
309 | * not mean that there are no more readers, as one could have read | |
310 | * the current index but not have incremented the lock counter yet. | |
311 | * | |
881ec9d2 PM |
312 | * So suppose that the updater is preempted here for so long |
313 | * that more than ULONG_MAX non-nested readers come and go in | |
314 | * the meantime. It turns out that this cannot result in overflow | |
315 | * because if a reader modifies its unlock count after we read it | |
316 | * above, then that reader's next load of ->srcu_idx is guaranteed | |
317 | * to get the new value, which will cause it to operate on the | |
318 | * other bank of counters, where it cannot contribute to the | |
319 | * overflow of these counters. This means that there is a maximum | |
320 | * of 2*NR_CPUS increments, which cannot overflow given current | |
321 | * systems, especially not on 64-bit systems. | |
322 | * | |
323 | * OK, how about nesting? This does impose a limit on nesting | |
324 | * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, | |
325 | * especially on 64-bit systems. | |
dad81a20 PM |
326 | */ |
327 | return srcu_readers_lock_idx(sp, idx) == unlocks; | |
328 | } | |
329 | ||
330 | /** | |
331 | * srcu_readers_active - returns true if there are readers, and false | |
332 | * otherwise | |
333 | * @sp: which srcu_struct to count active readers (holding srcu_read_lock). | |
334 | * | |
335 | * Note that this is not an atomic primitive, and can therefore suffer | |
336 | * severe errors when invoked on an active srcu_struct. That said, it | |
337 | * can be useful as an error check at cleanup time. | |
338 | */ | |
339 | static bool srcu_readers_active(struct srcu_struct *sp) | |
340 | { | |
341 | int cpu; | |
342 | unsigned long sum = 0; | |
343 | ||
344 | for_each_possible_cpu(cpu) { | |
da915ad5 | 345 | struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
dad81a20 | 346 | |
da915ad5 PM |
347 | sum += READ_ONCE(cpuc->srcu_lock_count[0]); |
348 | sum += READ_ONCE(cpuc->srcu_lock_count[1]); | |
349 | sum -= READ_ONCE(cpuc->srcu_unlock_count[0]); | |
350 | sum -= READ_ONCE(cpuc->srcu_unlock_count[1]); | |
dad81a20 PM |
351 | } |
352 | return sum; | |
353 | } | |
354 | ||
355 | #define SRCU_INTERVAL 1 | |
356 | ||
1e9a038b PM |
357 | /* |
358 | * Return grace-period delay, zero if there are expedited grace | |
359 | * periods pending, SRCU_INTERVAL otherwise. | |
360 | */ | |
361 | static unsigned long srcu_get_delay(struct srcu_struct *sp) | |
362 | { | |
363 | if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), | |
364 | READ_ONCE(sp->srcu_gp_seq_needed_exp))) | |
365 | return 0; | |
366 | return SRCU_INTERVAL; | |
367 | } | |
368 | ||
dad81a20 PM |
369 | /** |
370 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure | |
371 | * @sp: structure to clean up. | |
372 | * | |
373 | * Must invoke this after you are finished using a given srcu_struct that | |
374 | * was initialized via init_srcu_struct(), else you leak memory. | |
375 | */ | |
376 | void cleanup_srcu_struct(struct srcu_struct *sp) | |
377 | { | |
da915ad5 PM |
378 | int cpu; |
379 | ||
1e9a038b PM |
380 | if (WARN_ON(!srcu_get_delay(sp))) |
381 | return; /* Leakage unless caller handles error. */ | |
dad81a20 PM |
382 | if (WARN_ON(srcu_readers_active(sp))) |
383 | return; /* Leakage unless caller handles error. */ | |
dad81a20 | 384 | flush_delayed_work(&sp->work); |
da915ad5 PM |
385 | for_each_possible_cpu(cpu) |
386 | flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); | |
387 | if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || | |
388 | WARN_ON(srcu_readers_active(sp))) { | |
389 | pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); | |
dad81a20 PM |
390 | return; /* Caller forgot to stop doing call_srcu()? */ |
391 | } | |
da915ad5 PM |
392 | free_percpu(sp->sda); |
393 | sp->sda = NULL; | |
dad81a20 PM |
394 | } |
395 | EXPORT_SYMBOL_GPL(cleanup_srcu_struct); | |
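/*
 * Illustrative sketch (not part of the original file): the teardown ordering
 * that cleanup_srcu_struct() expects, using the hypothetical my_device type
 * from the earlier sketch.  The caller must first prevent new readers and
 * new call_srcu() invocations, then flush outstanding callbacks, and only
 * then clean up and free.
 */
static void my_device_destroy(struct my_device *dev)
{
	/* Caller guarantees no new srcu_read_lock() or call_srcu() here. */
	srcu_barrier(&dev->srcu);	 /* wait for in-flight callbacks */
	cleanup_srcu_struct(&dev->srcu); /* frees ->sda; warns if still active */
	kfree(dev);
}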
396 | ||
397 | /* | |
398 | * Counts the new reader in the appropriate per-CPU element of the | |
cdf7abc4 | 399 | * srcu_struct. |
dad81a20 PM |
400 | * Returns an index that must be passed to the matching srcu_read_unlock(). |
401 | */ | |
402 | int __srcu_read_lock(struct srcu_struct *sp) | |
403 | { | |
404 | int idx; | |
405 | ||
da915ad5 | 406 | idx = READ_ONCE(sp->srcu_idx) & 0x1; |
cdf7abc4 | 407 | this_cpu_inc(sp->sda->srcu_lock_count[idx]); |
dad81a20 PM |
408 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
409 | return idx; | |
410 | } | |
411 | EXPORT_SYMBOL_GPL(__srcu_read_lock); | |
412 | ||
413 | /* | |
414 | * Removes the count for the old reader from the appropriate per-CPU | |
415 | * element of the srcu_struct. Note that this may well be a different | |
416 | * CPU than that which was incremented by the corresponding srcu_read_lock(). | |
dad81a20 PM |
417 | */ |
418 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) | |
419 | { | |
420 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ | |
da915ad5 | 421 | this_cpu_inc(sp->sda->srcu_unlock_count[idx]); |
dad81a20 PM |
422 | } |
423 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); | |
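/*
 * Illustrative sketch (not part of the original file): typical reader-side
 * usage.  Callers normally use the srcu_read_lock()/srcu_read_unlock()
 * wrappers from include/linux/srcu.h, which funnel into the two functions
 * above.  The my_srcu/my_ptr/my_data names are hypothetical.
 */
DEFINE_STATIC_SRCU(my_srcu);
static struct my_data __rcu *my_ptr;

static bool my_reader(void)
{
	struct my_data *p;
	bool found;
	int idx;

	idx = srcu_read_lock(&my_srcu);		/* sleeping is legal in here */
	p = srcu_dereference(my_ptr, &my_srcu);	/* fetch the protected pointer */
	found = (p != NULL);			/* ... p stays valid until unlock */
	srcu_read_unlock(&my_srcu, idx);	/* must pass back the same idx */
	return found;
}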
424 | ||
425 | /* | |
426 | * We use an adaptive strategy for synchronize_srcu() and especially for | |
427 | * synchronize_srcu_expedited(). We spin for a fixed time period | |
428 | * (defined below) to allow SRCU readers to exit their read-side critical | |
429 | * sections. If there are still some readers after a few microseconds, | |
430 | * we repeatedly block for 1-millisecond time periods. | |
431 | */ | |
432 | #define SRCU_RETRY_CHECK_DELAY 5 | |
433 | ||
434 | /* | |
435 | * Start an SRCU grace period. | |
436 | */ | |
437 | static void srcu_gp_start(struct srcu_struct *sp) | |
438 | { | |
da915ad5 | 439 | struct srcu_data *sdp = this_cpu_ptr(sp->sda); |
dad81a20 PM |
440 | int state; |
441 | ||
a3883df3 | 442 | lockdep_assert_held(&sp->lock); |
da915ad5 PM |
443 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); |
444 | rcu_segcblist_advance(&sdp->srcu_cblist, | |
445 | rcu_seq_current(&sp->srcu_gp_seq)); | |
446 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, | |
447 | rcu_seq_snap(&sp->srcu_gp_seq)); | |
2da4b2a7 | 448 | smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ |
dad81a20 PM |
449 | rcu_seq_start(&sp->srcu_gp_seq); |
450 | state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); | |
451 | WARN_ON_ONCE(state != SRCU_STATE_SCAN1); | |
452 | } | |
453 | ||
da915ad5 PM |
454 | /* |
455 | * Track online CPUs to guide callback workqueue placement. | |
456 | */ | |
457 | DEFINE_PER_CPU(bool, srcu_online); | |
458 | ||
459 | void srcu_online_cpu(unsigned int cpu) | |
460 | { | |
461 | WRITE_ONCE(per_cpu(srcu_online, cpu), true); | |
462 | } | |
463 | ||
464 | void srcu_offline_cpu(unsigned int cpu) | |
465 | { | |
466 | WRITE_ONCE(per_cpu(srcu_online, cpu), false); | |
467 | } | |
468 | ||
469 | /* | |
470 | * Place the workqueue handler on the specified CPU if online, otherwise | |
471 | * just run it wherever. This is useful for placing workqueue handlers | |
472 | * that are to invoke the specified CPU's callbacks. | |
473 | */ | |
474 | static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |
475 | struct delayed_work *dwork, | |
476 | unsigned long delay) | |
477 | { | |
478 | bool ret; | |
479 | ||
480 | preempt_disable(); | |
481 | if (READ_ONCE(per_cpu(srcu_online, cpu))) | |
482 | ret = queue_delayed_work_on(cpu, wq, dwork, delay); | |
483 | else | |
484 | ret = queue_delayed_work(wq, dwork, delay); | |
485 | preempt_enable(); | |
486 | return ret; | |
487 | } | |
488 | ||
489 | /* | |
490 | * Schedule callback invocation for the specified srcu_data structure, | |
491 | * if possible, on the corresponding CPU. | |
492 | */ | |
493 | static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) | |
494 | { | |
495 | srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq, | |
496 | &sdp->work, delay); | |
497 | } | |
498 | ||
499 | /* | |
500 | * Schedule callback invocation for all srcu_data structures associated | |
c7e88067 PM |
501 | * with the specified srcu_node structure that have callbacks for the |
502 | * just-completed grace period, the one corresponding to idx. If possible, | |
503 | * schedule this invocation on the corresponding CPUs. | |
da915ad5 | 504 | */ |
c7e88067 | 505 | static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, |
1e9a038b | 506 | unsigned long mask, unsigned long delay) |
da915ad5 PM |
507 | { |
508 | int cpu; | |
509 | ||
c7e88067 PM |
510 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
511 | if (!(mask & (1 << (cpu - snp->grplo)))) | |
512 | continue; | |
1e9a038b | 513 | srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); |
c7e88067 | 514 | } |
da915ad5 PM |
515 | } |
516 | ||
517 | /* | |
518 | * Note the end of an SRCU grace period. Initiates callback invocation | |
519 | * and starts a new grace period if needed. | |
520 | * | |
521 | * The ->srcu_cb_mutex acquisition does not protect any data, but | |
522 | * instead prevents more than one grace period from starting while we | |
523 | * are initiating callback invocation. This allows the ->srcu_have_cbs[] | |
524 | * array to have a finite number of elements. | |
525 | */ | |
526 | static void srcu_gp_end(struct srcu_struct *sp) | |
527 | { | |
1e9a038b | 528 | unsigned long cbdelay; |
da915ad5 | 529 | bool cbs; |
c350c008 PM |
530 | int cpu; |
531 | unsigned long flags; | |
da915ad5 PM |
532 | unsigned long gpseq; |
533 | int idx; | |
534 | int idxnext; | |
c7e88067 | 535 | unsigned long mask; |
c350c008 | 536 | struct srcu_data *sdp; |
da915ad5 PM |
537 | struct srcu_node *snp; |
538 | ||
539 | /* Prevent more than one additional grace period. */ | |
540 | mutex_lock(&sp->srcu_cb_mutex); | |
541 | ||
542 | /* End the current grace period. */ | |
d6331980 | 543 | spin_lock_irq_rcu_node(sp); |
da915ad5 PM |
544 | idx = rcu_seq_state(sp->srcu_gp_seq); |
545 | WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); | |
1e9a038b | 546 | cbdelay = srcu_get_delay(sp); |
22607d66 | 547 | sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
da915ad5 PM |
548 | rcu_seq_end(&sp->srcu_gp_seq); |
549 | gpseq = rcu_seq_current(&sp->srcu_gp_seq); | |
1e9a038b PM |
550 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) |
551 | sp->srcu_gp_seq_needed_exp = gpseq; | |
d6331980 | 552 | spin_unlock_irq_rcu_node(sp); |
da915ad5 PM |
553 | mutex_unlock(&sp->srcu_gp_mutex); |
554 | /* A new grace period can start at this point. But only one. */ | |
555 | ||
556 | /* Initiate callback invocation as needed. */ | |
557 | idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); | |
558 | idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs); | |
559 | rcu_for_each_node_breadth_first(sp, snp) { | |
d6331980 | 560 | spin_lock_irq_rcu_node(snp); |
da915ad5 PM |
561 | cbs = false; |
562 | if (snp >= sp->level[rcu_num_lvls - 1]) | |
563 | cbs = snp->srcu_have_cbs[idx] == gpseq; | |
564 | snp->srcu_have_cbs[idx] = gpseq; | |
565 | rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); | |
1e9a038b PM |
566 | if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq)) |
567 | snp->srcu_gp_seq_needed_exp = gpseq; | |
c7e88067 PM |
568 | mask = snp->srcu_data_have_cbs[idx]; |
569 | snp->srcu_data_have_cbs[idx] = 0; | |
d6331980 | 570 | spin_unlock_irq_rcu_node(snp); |
a3883df3 | 571 | if (cbs) |
1e9a038b | 572 | srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); |
c350c008 PM |
573 | |
574 | /* Occasionally prevent srcu_data counter wrap. */ | |
575 | if (!(gpseq & counter_wrap_check)) | |
576 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { | |
577 | sdp = per_cpu_ptr(sp->sda, cpu); | |
d6331980 | 578 | spin_lock_irqsave_rcu_node(sdp, flags); |
c350c008 PM |
579 | if (ULONG_CMP_GE(gpseq, |
580 | sdp->srcu_gp_seq_needed + 100)) | |
581 | sdp->srcu_gp_seq_needed = gpseq; | |
d6331980 | 582 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
c350c008 | 583 | } |
da915ad5 PM |
584 | } |
585 | ||
586 | /* Callback initiation done, allow grace periods after next. */ | |
587 | mutex_unlock(&sp->srcu_cb_mutex); | |
588 | ||
589 | /* Start a new grace period if needed. */ | |
d6331980 | 590 | spin_lock_irq_rcu_node(sp); |
da915ad5 PM |
591 | gpseq = rcu_seq_current(&sp->srcu_gp_seq); |
592 | if (!rcu_seq_state(gpseq) && | |
593 | ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { | |
594 | srcu_gp_start(sp); | |
d6331980 | 595 | spin_unlock_irq_rcu_node(sp); |
da915ad5 | 596 | /* Throttle expedited grace periods: Should be rare! */ |
1e9a038b PM |
597 | srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff |
598 | ? 0 : SRCU_INTERVAL); | |
da915ad5 | 599 | } else { |
d6331980 | 600 | spin_unlock_irq_rcu_node(sp); |
da915ad5 PM |
601 | } |
602 | } | |
603 | ||
1e9a038b PM |
604 | /* |
605 | * Funnel-locking scheme to scalably mediate many concurrent expedited | |
606 | * grace-period requests. This function is invoked for the first known | |
607 | * expedited request for a grace period that has already been requested, | |
608 | * but without expediting. To start a completely new grace period, | |
609 | * whether expedited or not, use srcu_funnel_gp_start() instead. | |
610 | */ | |
611 | static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, | |
612 | unsigned long s) | |
613 | { | |
614 | unsigned long flags; | |
615 | ||
616 | for (; snp != NULL; snp = snp->srcu_parent) { | |
617 | if (rcu_seq_done(&sp->srcu_gp_seq, s) || | |
618 | ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) | |
619 | return; | |
d6331980 | 620 | spin_lock_irqsave_rcu_node(snp, flags); |
1e9a038b | 621 | if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) { |
d6331980 | 622 | spin_unlock_irqrestore_rcu_node(snp, flags); |
1e9a038b PM |
623 | return; |
624 | } | |
625 | WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); | |
d6331980 | 626 | spin_unlock_irqrestore_rcu_node(snp, flags); |
1e9a038b | 627 | } |
d6331980 | 628 | spin_lock_irqsave_rcu_node(sp, flags); |
1e9a038b PM |
629 | if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) |
630 | sp->srcu_gp_seq_needed_exp = s; | |
d6331980 | 631 | spin_unlock_irqrestore_rcu_node(sp, flags); |
1e9a038b PM |
632 | } |
633 | ||
da915ad5 PM |
634 | /* |
635 | * Funnel-locking scheme to scalably mediate many concurrent grace-period | |
636 | * requests. The winner has to do the work of actually starting grace | |
637 | * period s. Losers must either ensure that their desired grace-period | |
638 | * number is recorded on at least their leaf srcu_node structure, or they | |
639 | * must take steps to invoke their own callbacks. | |
640 | */ | |
1e9a038b PM |
641 | static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, |
642 | unsigned long s, bool do_norm) | |
da915ad5 PM |
643 | { |
644 | unsigned long flags; | |
645 | int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); | |
646 | struct srcu_node *snp = sdp->mynode; | |
647 | unsigned long snp_seq; | |
648 | ||
649 | /* Each pass through the loop does one level of the srcu_node tree. */ | |
650 | for (; snp != NULL; snp = snp->srcu_parent) { | |
651 | if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) | |
652 | return; /* GP already done and CBs recorded. */ | |
d6331980 | 653 | spin_lock_irqsave_rcu_node(snp, flags); |
da915ad5 PM |
654 | if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { |
655 | snp_seq = snp->srcu_have_cbs[idx]; | |
c7e88067 PM |
656 | if (snp == sdp->mynode && snp_seq == s) |
657 | snp->srcu_data_have_cbs[idx] |= sdp->grpmask; | |
d6331980 | 658 | spin_unlock_irqrestore_rcu_node(snp, flags); |
da915ad5 | 659 | if (snp == sdp->mynode && snp_seq != s) { |
1e9a038b PM |
660 | srcu_schedule_cbs_sdp(sdp, do_norm |
661 | ? SRCU_INTERVAL | |
662 | : 0); | |
663 | return; | |
da915ad5 | 664 | } |
1e9a038b PM |
665 | if (!do_norm) |
666 | srcu_funnel_exp_start(sp, snp, s); | |
da915ad5 PM |
667 | return; |
668 | } | |
669 | snp->srcu_have_cbs[idx] = s; | |
c7e88067 PM |
670 | if (snp == sdp->mynode) |
671 | snp->srcu_data_have_cbs[idx] |= sdp->grpmask; | |
1e9a038b PM |
672 | if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) |
673 | snp->srcu_gp_seq_needed_exp = s; | |
d6331980 | 674 | spin_unlock_irqrestore_rcu_node(snp, flags); |
da915ad5 PM |
675 | } |
676 | ||
677 | /* Top of tree, must ensure the grace period will be started. */ | |
d6331980 | 678 | spin_lock_irqsave_rcu_node(sp, flags); |
da915ad5 PM |
679 | if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { |
680 | /* | |
681 | * Record need for grace period s. Pair with load | |
682 | * acquire setting up for initialization. | |
683 | */ | |
684 | smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ | |
685 | } | |
1e9a038b PM |
686 | if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) |
687 | sp->srcu_gp_seq_needed_exp = s; | |
da915ad5 PM |
688 | |
689 | /* If grace period not already done and none in progress, start it. */ | |
690 | if (!rcu_seq_done(&sp->srcu_gp_seq, s) && | |
691 | rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { | |
692 | WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); | |
693 | srcu_gp_start(sp); | |
694 | queue_delayed_work(system_power_efficient_wq, &sp->work, | |
1e9a038b | 695 | srcu_get_delay(sp)); |
da915ad5 | 696 | } |
d6331980 | 697 | spin_unlock_irqrestore_rcu_node(sp, flags); |
da915ad5 PM |
698 | } |
699 | ||
dad81a20 PM |
700 | /* |
701 | * Wait until all readers counted by array index idx complete, but | |
702 | * loop an additional time if there is an expedited grace period pending. | |
da915ad5 | 703 | * The caller must ensure that ->srcu_idx is not changed while checking. |
dad81a20 PM |
704 | */ |
705 | static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) | |
706 | { | |
707 | for (;;) { | |
708 | if (srcu_readers_active_idx_check(sp, idx)) | |
709 | return true; | |
1e9a038b | 710 | if (--trycount + !srcu_get_delay(sp) <= 0) |
dad81a20 PM |
711 | return false; |
712 | udelay(SRCU_RETRY_CHECK_DELAY); | |
713 | } | |
714 | } | |
715 | ||
716 | /* | |
da915ad5 PM |
717 | * Increment the ->srcu_idx counter so that future SRCU readers will |
718 | * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows | |
dad81a20 PM |
719 | * us to wait for pre-existing readers in a starvation-free manner. |
720 | */ | |
721 | static void srcu_flip(struct srcu_struct *sp) | |
722 | { | |
881ec9d2 PM |
723 | /* |
724 | * Ensure that if this updater saw a given reader's increment | |
725 | * from __srcu_read_lock(), that reader was using an old value | |
726 | * of ->srcu_idx. Also ensure that if a given reader sees the | |
727 | * new value of ->srcu_idx, this updater's earlier scans cannot | |
728 | * have seen that reader's increments (which is OK, because this | |
729 | * grace period need not wait on that reader). | |
730 | */ | |
731 | smp_mb(); /* E */ /* Pairs with B and C. */ | |
732 | ||
da915ad5 | 733 | WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); |
dad81a20 PM |
734 | |
735 | /* | |
736 | * Ensure that if the updater misses an __srcu_read_unlock() | |
737 | * increment, that task's next __srcu_read_lock() will see the | |
738 | * above counter update. Note that both this memory barrier | |
739 | * and the one in srcu_readers_active_idx_check() provide the | |
740 | * guarantee for __srcu_read_lock(). | |
741 | */ | |
742 | smp_mb(); /* D */ /* Pairs with C. */ | |
743 | } | |
744 | ||
2da4b2a7 PM |
745 | /* |
746 | * If SRCU is likely idle, return true, otherwise return false. | |
747 | * | |
748 | * Note that it is OK for several concurrent from-idle requests for a new | |
749 | * grace period to specify expediting because they will all end | |
750 | * up requesting the same grace period anyhow. So no loss. | |
751 | * | |
752 | * Note also that if any CPU (including the current one) is still invoking | |
753 | * callbacks, this function will nevertheless say "idle". This is not | |
754 | * ideal, but the overhead of checking all CPUs' callback lists is even | |
755 | * less ideal, especially on large systems. Furthermore, the wakeup | |
756 | * can happen before the callback is fully removed, so we have no choice | |
757 | * but to accept this type of error. | |
758 | * | |
759 | * This function is also subject to counter-wrap errors, but let's face | |
760 | * it, if this function was preempted for enough time for the counters | |
761 | * to wrap, it really doesn't matter whether or not we expedite the grace | |
762 | * period. The extra overhead of a needlessly expedited grace period is | |
763 | * negligible when amortized over that time period, and the extra latency | |
764 | * of a needlessly non-expedited grace period is similarly negligible. | |
765 | */ | |
766 | static bool srcu_might_be_idle(struct srcu_struct *sp) | |
767 | { | |
22607d66 | 768 | unsigned long curseq; |
2da4b2a7 PM |
769 | unsigned long flags; |
770 | struct srcu_data *sdp; | |
22607d66 | 771 | unsigned long t; |
2da4b2a7 PM |
772 | |
773 | /* If the local srcu_data structure has callbacks, not idle. */ | |
774 | local_irq_save(flags); | |
775 | sdp = this_cpu_ptr(sp->sda); | |
776 | if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { | |
777 | local_irq_restore(flags); | |
778 | return false; /* Callbacks already present, so not idle. */ | |
779 | } | |
780 | local_irq_restore(flags); | |
781 | ||
782 | /* | |
783 | * No local callbacks, so probabilistically probe global state. | |
784 | * Exact information would require acquiring locks, which would | |
785 | * kill scalability, hence the probabilistic nature of the probe. | |
786 | */ | |
22607d66 PM |
787 | |
788 | /* First, see if enough time has passed since the last GP. */ | |
789 | t = ktime_get_mono_fast_ns(); | |
790 | if (exp_holdoff == 0 || | |
791 | time_in_range_open(t, sp->srcu_last_gp_end, | |
792 | sp->srcu_last_gp_end + exp_holdoff)) | |
793 | return false; /* Too soon after last GP. */ | |
794 | ||
795 | /* Next, check for probable idleness. */ | |
2da4b2a7 PM |
796 | curseq = rcu_seq_current(&sp->srcu_gp_seq); |
797 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ | |
798 | if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) | |
799 | return false; /* Grace period in progress, so not idle. */ | |
800 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ | |
801 | if (curseq != rcu_seq_current(&sp->srcu_gp_seq)) | |
802 | return false; /* GP # changed, so not idle. */ | |
803 | return true; /* With reasonable probability, idle! */ | |
804 | } | |
805 | ||
a602538e PM |
806 | /* |
807 | * SRCU callback function to leak a callback. | |
808 | */ | |
809 | static void srcu_leak_callback(struct rcu_head *rhp) | |
810 | { | |
811 | } | |
812 | ||
dad81a20 | 813 | /* |
da915ad5 PM |
814 | * Enqueue an SRCU callback on the srcu_data structure associated with |
815 | * the current CPU and the specified srcu_struct structure, initiating | |
816 | * grace-period processing if it is not already running. | |
dad81a20 PM |
817 | * |
818 | * Note that all CPUs must agree that the grace period extended beyond | |
819 | * all pre-existing SRCU read-side critical sections. On systems with | |
820 | * more than one CPU, this means that when "func()" is invoked, each CPU | |
821 | * is guaranteed to have executed a full memory barrier since the end of | |
822 | * its last corresponding SRCU read-side critical section whose beginning | |
823 | * preceded the call to call_srcu(). It also means that each CPU executing | |
824 | * an SRCU read-side critical section that continues beyond the start of | |
825 | * "func()" must have executed a memory barrier after the call_srcu() | |
826 | * but before the beginning of that SRCU read-side critical section. | |
827 | * Note that these guarantees include CPUs that are offline, idle, or | |
828 | * executing in user mode, as well as CPUs that are executing in the kernel. | |
829 | * | |
830 | * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the | |
831 | * resulting SRCU callback function "func()", then both CPU A and CPU | |
832 | * B are guaranteed to execute a full memory barrier during the time | |
833 | * interval between the call to call_srcu() and the invocation of "func()". | |
834 | * This guarantee applies even if CPU A and CPU B are the same CPU (but | |
835 | * again only if the system has more than one CPU). | |
836 | * | |
837 | * Of course, these guarantees apply only for invocations of call_srcu(), | |
838 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same | |
839 | * srcu_struct structure. | |
840 | */ | |
1e9a038b PM |
841 | void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
842 | rcu_callback_t func, bool do_norm) | |
dad81a20 PM |
843 | { |
844 | unsigned long flags; | |
1e9a038b | 845 | bool needexp = false; |
da915ad5 PM |
846 | bool needgp = false; |
847 | unsigned long s; | |
848 | struct srcu_data *sdp; | |
849 | ||
850 | check_init_srcu_struct(sp); | |
a602538e PM |
851 | if (debug_rcu_head_queue(rhp)) { |
852 | /* Probable double call_srcu(), so leak the callback. */ | |
853 | WRITE_ONCE(rhp->func, srcu_leak_callback); | |
854 | WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); | |
855 | return; | |
856 | } | |
da915ad5 PM |
857 | rhp->func = func; |
858 | local_irq_save(flags); | |
859 | sdp = this_cpu_ptr(sp->sda); | |
d6331980 | 860 | spin_lock_rcu_node(sdp); |
da915ad5 PM |
861 | rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); |
862 | rcu_segcblist_advance(&sdp->srcu_cblist, | |
863 | rcu_seq_current(&sp->srcu_gp_seq)); | |
864 | s = rcu_seq_snap(&sp->srcu_gp_seq); | |
865 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); | |
866 | if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { | |
867 | sdp->srcu_gp_seq_needed = s; | |
868 | needgp = true; | |
dad81a20 | 869 | } |
1e9a038b PM |
870 | if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { |
871 | sdp->srcu_gp_seq_needed_exp = s; | |
872 | needexp = true; | |
873 | } | |
d6331980 | 874 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
da915ad5 | 875 | if (needgp) |
1e9a038b PM |
876 | srcu_funnel_gp_start(sp, sdp, s, do_norm); |
877 | else if (needexp) | |
878 | srcu_funnel_exp_start(sp, sdp->mynode, s); | |
879 | } | |
880 | ||
5a0465e1 PM |
881 | /** |
882 | * call_srcu() - Queue a callback for invocation after an SRCU grace period | |
883 | * @sp: srcu_struct in which to queue the callback | |
27fdb35f | 884 | * @rhp: structure to be used for queueing the SRCU callback. |
5a0465e1 PM |
885 | * @func: function to be invoked after the SRCU grace period |
886 | * | |
887 | * The callback function will be invoked some time after a full SRCU | |
888 | * grace period elapses, in other words after all pre-existing SRCU | |
889 | * read-side critical sections have completed. However, the callback | |
890 | * function might well execute concurrently with other SRCU read-side | |
891 | * critical sections that started after call_srcu() was invoked. SRCU | |
892 | * read-side critical sections are delimited by srcu_read_lock() and | |
893 | * srcu_read_unlock(), and may be nested. | |
894 | * | |
895 | * The callback will be invoked from process context, but must nevertheless | |
896 | * be fast and must not block. | |
897 | */ | |
1e9a038b PM |
898 | void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
899 | rcu_callback_t func) | |
900 | { | |
901 | __call_srcu(sp, rhp, func, true); | |
dad81a20 PM |
902 | } |
903 | EXPORT_SYMBOL_GPL(call_srcu); | |
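/*
 * Illustrative sketch (not part of the original file): deferred freeing of
 * an SRCU-protected object via call_srcu().  The my_config name is
 * hypothetical and <linux/slab.h> is assumed for kfree().
 */
struct my_config {
	struct rcu_head rh;	/* storage for call_srcu() */
	int value;
};

static void my_config_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_config, rh));
}

/* Publish a new version and free the old one after an SRCU grace period. */
static void my_config_update(struct my_config __rcu **slot,
			     struct my_config *newcfg,
			     struct srcu_struct *sp)
{
	struct my_config *old;

	old = rcu_dereference_protected(*slot, 1); /* caller serializes updates */
	rcu_assign_pointer(*slot, newcfg);
	if (old)
		call_srcu(sp, &old->rh, my_config_free_cb);
}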
904 | ||
dad81a20 PM |
905 | /* |
906 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). | |
907 | */ | |
1e9a038b | 908 | static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) |
dad81a20 PM |
909 | { |
910 | struct rcu_synchronize rcu; | |
dad81a20 PM |
911 | |
912 | RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || | |
913 | lock_is_held(&rcu_bh_lock_map) || | |
914 | lock_is_held(&rcu_lock_map) || | |
915 | lock_is_held(&rcu_sched_lock_map), | |
916 | "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); | |
917 | ||
918 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) | |
919 | return; | |
920 | might_sleep(); | |
da915ad5 | 921 | check_init_srcu_struct(sp); |
dad81a20 | 922 | init_completion(&rcu.completion); |
da915ad5 | 923 | init_rcu_head_on_stack(&rcu.head); |
1e9a038b | 924 | __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); |
dad81a20 | 925 | wait_for_completion(&rcu.completion); |
da915ad5 | 926 | destroy_rcu_head_on_stack(&rcu.head); |
35732cf9 PM |
927 | |
928 | /* | |
929 | * Make sure that later code is ordered after the SRCU grace | |
d6331980 | 930 | * period. This pairs with the spin_lock_irq_rcu_node() |
35732cf9 PM |
931 | * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed |
932 | * because the current CPU might have been totally uninvolved with | |
933 | * (and thus unordered against) that grace period. | |
934 | */ | |
935 | smp_mb(); | |
dad81a20 PM |
936 | } |
937 | ||
938 | /** | |
939 | * synchronize_srcu_expedited - Brute-force SRCU grace period | |
940 | * @sp: srcu_struct with which to synchronize. | |
941 | * | |
942 | * Wait for an SRCU grace period to elapse, but be more aggressive about | |
943 | * spinning rather than blocking when waiting. | |
944 | * | |
945 | * Note that synchronize_srcu_expedited() has the same deadlock and | |
946 | * memory-ordering properties as does synchronize_srcu(). | |
947 | */ | |
948 | void synchronize_srcu_expedited(struct srcu_struct *sp) | |
949 | { | |
1e9a038b | 950 | __synchronize_srcu(sp, rcu_gp_is_normal()); |
dad81a20 PM |
951 | } |
952 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); | |
953 | ||
954 | /** | |
955 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | |
956 | * @sp: srcu_struct with which to synchronize. | |
957 | * | |
958 | * Wait for the counts of both indexes to drain to zero. To avoid the | |
959 | * possible starvation of synchronize_srcu(), it waits for the count of | |
da915ad5 PM |
960 | * the index=((->srcu_idx & 1) ^ 1) to drain to zero first, | |
961 | * and then flips ->srcu_idx and waits for the count of the other index. | |
dad81a20 PM |
962 | * |
963 | * Can block; must be called from process context. | |
964 | * | |
965 | * Note that it is illegal to call synchronize_srcu() from the corresponding | |
966 | * SRCU read-side critical section; doing so will result in deadlock. | |
967 | * However, it is perfectly legal to call synchronize_srcu() on one | |
968 | * srcu_struct from some other srcu_struct's read-side critical section, | |
969 | * as long as the resulting graph of srcu_structs is acyclic. | |
970 | * | |
971 | * There are memory-ordering constraints implied by synchronize_srcu(). | |
972 | * On systems with more than one CPU, when synchronize_srcu() returns, | |
973 | * each CPU is guaranteed to have executed a full memory barrier since | |
974 | * the end of its last corresponding SRCU read-side critical section | |
975 | * whose beginning preceded the call to synchronize_srcu(). In addition, | |
976 | * each CPU having an SRCU read-side critical section that extends beyond | |
977 | * the return from synchronize_srcu() is guaranteed to have executed a | |
978 | * full memory barrier after the beginning of synchronize_srcu() and before | |
979 | * the beginning of that SRCU read-side critical section. Note that these | |
980 | * guarantees include CPUs that are offline, idle, or executing in user mode, | |
981 | * as well as CPUs that are executing in the kernel. | |
982 | * | |
983 | * Furthermore, if CPU A invoked synchronize_srcu(), which returned | |
984 | * to its caller on CPU B, then both CPU A and CPU B are guaranteed | |
985 | * to have executed a full memory barrier during the execution of | |
986 | * synchronize_srcu(). This guarantee applies even if CPU A and CPU B | |
987 | * are the same CPU, but again only if the system has more than one CPU. | |
988 | * | |
989 | * Of course, these memory-ordering guarantees apply only when | |
990 | * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are | |
991 | * passed the same srcu_struct structure. | |
2da4b2a7 PM |
992 | * |
993 | * If SRCU is likely idle, expedite the first request. This semantic | |
994 | * was provided by Classic SRCU, and is relied upon by its users, so TREE | |
995 | * SRCU must also provide it. Note that detecting idleness is heuristic | |
996 | * and subject to both false positives and negatives. | |
dad81a20 PM |
997 | */ |
998 | void synchronize_srcu(struct srcu_struct *sp) | |
999 | { | |
2da4b2a7 | 1000 | if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) |
dad81a20 PM |
1001 | synchronize_srcu_expedited(sp); |
1002 | else | |
1e9a038b | 1003 | __synchronize_srcu(sp, true); |
dad81a20 PM |
1004 | } |
1005 | EXPORT_SYMBOL_GPL(synchronize_srcu); | |
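/*
 * Illustrative sketch (not part of the original file): the synchronous
 * update pattern described above, with hypothetical names.  The updater
 * unpublishes the old object, waits for all pre-existing readers, and only
 * then frees it.
 */
static void my_config_remove_sync(struct my_config __rcu **slot,
				  struct srcu_struct *sp)
{
	struct my_config *old;

	old = rcu_dereference_protected(*slot, 1); /* caller serializes updates */
	rcu_assign_pointer(*slot, NULL);	   /* new readers now see NULL */
	synchronize_srcu(sp);			   /* wait out pre-existing readers */
	kfree(old);				   /* no reader can still reference it */
}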
1006 | ||
da915ad5 PM |
1007 | /* |
1008 | * Callback function for srcu_barrier() use. | |
1009 | */ | |
1010 | static void srcu_barrier_cb(struct rcu_head *rhp) | |
1011 | { | |
1012 | struct srcu_data *sdp; | |
1013 | struct srcu_struct *sp; | |
1014 | ||
1015 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); | |
1016 | sp = sdp->sp; | |
1017 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) | |
1018 | complete(&sp->srcu_barrier_completion); | |
1019 | } | |
1020 | ||
dad81a20 PM |
1021 | /** |
1022 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. | |
1023 | * @sp: srcu_struct on which to wait for in-flight callbacks. | |
1024 | */ | |
1025 | void srcu_barrier(struct srcu_struct *sp) | |
1026 | { | |
da915ad5 PM |
1027 | int cpu; |
1028 | struct srcu_data *sdp; | |
1029 | unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); | |
1030 | ||
1031 | check_init_srcu_struct(sp); | |
1032 | mutex_lock(&sp->srcu_barrier_mutex); | |
1033 | if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { | |
1034 | smp_mb(); /* Force ordering following return. */ | |
1035 | mutex_unlock(&sp->srcu_barrier_mutex); | |
1036 | return; /* Someone else did our work for us. */ | |
1037 | } | |
1038 | rcu_seq_start(&sp->srcu_barrier_seq); | |
1039 | init_completion(&sp->srcu_barrier_completion); | |
1040 | ||
1041 | /* Initial count prevents reaching zero until all CBs are posted. */ | |
1042 | atomic_set(&sp->srcu_barrier_cpu_cnt, 1); | |
1043 | ||
1044 | /* | |
1045 | * Each pass through this loop enqueues a callback, but only | |
1046 | * on CPUs already having callbacks enqueued. Note that if | |
1047 | * a CPU already has callbacks enqueued, it must have already | |
1048 | * registered the need for a future grace period, so all we | |
1049 | * need do is enqueue a callback that will use the same | |
1050 | * grace period as the last callback already in the queue. | |
1051 | */ | |
1052 | for_each_possible_cpu(cpu) { | |
1053 | sdp = per_cpu_ptr(sp->sda, cpu); | |
d6331980 | 1054 | spin_lock_irq_rcu_node(sdp); |
da915ad5 PM |
1055 | atomic_inc(&sp->srcu_barrier_cpu_cnt); |
1056 | sdp->srcu_barrier_head.func = srcu_barrier_cb; | |
a602538e | 1057 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
da915ad5 | 1058 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
a602538e PM |
1059 | &sdp->srcu_barrier_head, 0)) { |
1060 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); | |
da915ad5 | 1061 | atomic_dec(&sp->srcu_barrier_cpu_cnt); |
a602538e | 1062 | } |
d6331980 | 1063 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1064 | } |
1065 | ||
1066 | /* Remove the initial count, at which point reaching zero can happen. */ | |
1067 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) | |
1068 | complete(&sp->srcu_barrier_completion); | |
1069 | wait_for_completion(&sp->srcu_barrier_completion); | |
1070 | ||
1071 | rcu_seq_end(&sp->srcu_barrier_seq); | |
1072 | mutex_unlock(&sp->srcu_barrier_mutex); | |
dad81a20 PM |
1073 | } |
1074 | EXPORT_SYMBOL_GPL(srcu_barrier); | |
1075 | ||
1076 | /** | |
1077 | * srcu_batches_completed - return batches completed. | |
1078 | * @sp: srcu_struct on which to report batch completion. | |
1079 | * | |
1080 | * Report the number of batches, correlated with, but not necessarily | |
1081 | * precisely the same as, the number of grace periods that have elapsed. | |
1082 | */ | |
1083 | unsigned long srcu_batches_completed(struct srcu_struct *sp) | |
1084 | { | |
da915ad5 | 1085 | return sp->srcu_idx; |
dad81a20 PM |
1086 | } |
1087 | EXPORT_SYMBOL_GPL(srcu_batches_completed); | |
1088 | ||
1089 | /* | |
da915ad5 PM |
1090 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
1091 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has | |
1092 | * completed in that state. | |
dad81a20 | 1093 | */ |
da915ad5 | 1094 | static void srcu_advance_state(struct srcu_struct *sp) |
dad81a20 PM |
1095 | { |
1096 | int idx; | |
1097 | ||
da915ad5 PM |
1098 | mutex_lock(&sp->srcu_gp_mutex); |
1099 | ||
dad81a20 PM |
1100 | /* |
1101 | * Because readers might be delayed for an extended period after | |
da915ad5 | 1102 | * fetching ->srcu_idx for their index, at any point in time there |
dad81a20 PM |
1103 | * might well be readers using both idx=0 and idx=1. We therefore |
1104 | * need to wait for readers to clear from both index values before | |
1105 | * invoking a callback. | |
1106 | * | |
1107 | * The load-acquire ensures that we see the accesses performed | |
1108 | * by the prior grace period. | |
1109 | */ | |
1110 | idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ | |
1111 | if (idx == SRCU_STATE_IDLE) { | |
d6331980 | 1112 | spin_lock_irq_rcu_node(sp); |
da915ad5 PM |
1113 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
1114 | WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); | |
d6331980 | 1115 | spin_unlock_irq_rcu_node(sp); |
da915ad5 | 1116 | mutex_unlock(&sp->srcu_gp_mutex); |
dad81a20 PM |
1117 | return; |
1118 | } | |
1119 | idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); | |
1120 | if (idx == SRCU_STATE_IDLE) | |
1121 | srcu_gp_start(sp); | |
d6331980 | 1122 | spin_unlock_irq_rcu_node(sp); |
da915ad5 PM |
1123 | if (idx != SRCU_STATE_IDLE) { |
1124 | mutex_unlock(&sp->srcu_gp_mutex); | |
dad81a20 | 1125 | return; /* Someone else started the grace period. */ |
da915ad5 | 1126 | } |
dad81a20 PM |
1127 | } |
1128 | ||
1129 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { | |
da915ad5 PM |
1130 | idx = 1 ^ (sp->srcu_idx & 1); |
1131 | if (!try_check_zero(sp, idx, 1)) { | |
1132 | mutex_unlock(&sp->srcu_gp_mutex); | |
dad81a20 | 1133 | return; /* readers present, retry later. */ |
da915ad5 | 1134 | } |
dad81a20 PM |
1135 | srcu_flip(sp); |
1136 | rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); | |
1137 | } | |
1138 | ||
1139 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { | |
1140 | ||
1141 | /* | |
1142 | * SRCU read-side critical sections are normally short, | |
1143 | * so check at least twice in quick succession after a flip. | |
1144 | */ | |
da915ad5 PM |
1145 | idx = 1 ^ (sp->srcu_idx & 1); |
1146 | if (!try_check_zero(sp, idx, 2)) { | |
1147 | mutex_unlock(&sp->srcu_gp_mutex); | |
1148 | return; /* readers present, retry later. */ | |
1149 | } | |
1150 | srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ | |
dad81a20 PM |
1151 | } |
1152 | } | |
1153 | ||
1154 | /* | |
1155 | * Invoke a limited number of SRCU callbacks that have passed through | |
1156 | * their grace period. If there are more to do, SRCU will reschedule | |
1157 | * the workqueue. Note that needed memory barriers have been executed | |
1158 | * in this task's context by srcu_readers_active_idx_check(). | |
1159 | */ | |
da915ad5 | 1160 | static void srcu_invoke_callbacks(struct work_struct *work) |
dad81a20 | 1161 | { |
da915ad5 | 1162 | bool more; |
dad81a20 PM |
1163 | struct rcu_cblist ready_cbs; |
1164 | struct rcu_head *rhp; | |
da915ad5 PM |
1165 | struct srcu_data *sdp; |
1166 | struct srcu_struct *sp; | |
dad81a20 | 1167 | |
da915ad5 PM |
1168 | sdp = container_of(work, struct srcu_data, work.work); |
1169 | sp = sdp->sp; | |
dad81a20 | 1170 | rcu_cblist_init(&ready_cbs); |
d6331980 | 1171 | spin_lock_irq_rcu_node(sdp); |
da915ad5 PM |
1172 | rcu_segcblist_advance(&sdp->srcu_cblist, |
1173 | rcu_seq_current(&sp->srcu_gp_seq)); | |
1174 | if (sdp->srcu_cblist_invoking || | |
1175 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { | |
d6331980 | 1176 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1177 | return; /* Someone else on the job or nothing to do. */ |
1178 | } | |
1179 | ||
1180 | /* We are on the job! Extract and invoke ready callbacks. */ | |
1181 | sdp->srcu_cblist_invoking = true; | |
1182 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); | |
d6331980 | 1183 | spin_unlock_irq_rcu_node(sdp); |
dad81a20 PM |
1184 | rhp = rcu_cblist_dequeue(&ready_cbs); |
1185 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { | |
a602538e | 1186 | debug_rcu_head_unqueue(rhp); |
dad81a20 PM |
1187 | local_bh_disable(); |
1188 | rhp->func(rhp); | |
1189 | local_bh_enable(); | |
1190 | } | |
da915ad5 PM |
1191 | |
1192 | /* | |
1193 | * Update counts, accelerate new callbacks, and if needed, | |
1194 | * schedule another round of callback invocation. | |
1195 | */ | |
d6331980 | 1196 | spin_lock_irq_rcu_node(sdp); |
da915ad5 PM |
1197 | rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); |
1198 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, | |
1199 | rcu_seq_snap(&sp->srcu_gp_seq)); | |
1200 | sdp->srcu_cblist_invoking = false; | |
1201 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); | |
d6331980 | 1202 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1203 | if (more) |
1204 | srcu_schedule_cbs_sdp(sdp, 0); | |
dad81a20 PM |
1205 | } |
1206 | ||
1207 | /* | |
1208 | * Finished one round of SRCU grace period. Start another if there are | |
1209 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. | |
1210 | */ | |
1211 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) | |
1212 | { | |
da915ad5 | 1213 | bool pushgp = true; |
dad81a20 | 1214 | |
d6331980 | 1215 | spin_lock_irq_rcu_node(sp); |
da915ad5 PM |
1216 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
1217 | if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { | |
1218 | /* All requests fulfilled, time to go idle. */ | |
1219 | pushgp = false; | |
1220 | } | |
1221 | } else if (!rcu_seq_state(sp->srcu_gp_seq)) { | |
1222 | /* Outstanding request and no GP. Start one. */ | |
1223 | srcu_gp_start(sp); | |
dad81a20 | 1224 | } |
d6331980 | 1225 | spin_unlock_irq_rcu_node(sp); |
dad81a20 | 1226 | |
da915ad5 | 1227 | if (pushgp) |
dad81a20 PM |
1228 | queue_delayed_work(system_power_efficient_wq, &sp->work, delay); |
1229 | } | |
1230 | ||
1231 | /* | |
1232 | * This is the work-queue function that handles SRCU grace periods. | |
1233 | */ | |
0d8a1e83 | 1234 | static void process_srcu(struct work_struct *work) |
dad81a20 PM |
1235 | { |
1236 | struct srcu_struct *sp; | |
1237 | ||
1238 | sp = container_of(work, struct srcu_struct, work.work); | |
1239 | ||
da915ad5 | 1240 | srcu_advance_state(sp); |
1e9a038b | 1241 | srcu_reschedule(sp, srcu_get_delay(sp)); |
dad81a20 | 1242 | } |
7f6733c3 PM |
1243 | |
1244 | void srcutorture_get_gp_data(enum rcutorture_type test_type, | |
1e9a038b PM |
1245 | struct srcu_struct *sp, int *flags, |
1246 | unsigned long *gpnum, unsigned long *completed) | |
7f6733c3 PM |
1247 | { |
1248 | if (test_type != SRCU_FLAVOR) | |
1249 | return; | |
1250 | *flags = 0; | |
1251 | *completed = rcu_seq_ctr(sp->srcu_gp_seq); | |
1252 | *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed); | |
1253 | } | |
1254 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); | |
1f4f6da1 | 1255 | |
115a1a52 PM |
1256 | void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) |
1257 | { | |
1258 | int cpu; | |
1259 | int idx; | |
ac3748c6 | 1260 | unsigned long s0 = 0, s1 = 0; |
115a1a52 PM |
1261 | |
1262 | idx = sp->srcu_idx & 0x1; | |
1263 | pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx); | |
1264 | for_each_possible_cpu(cpu) { | |
1265 | unsigned long l0, l1; | |
1266 | unsigned long u0, u1; | |
1267 | long c0, c1; | |
1268 | struct srcu_data *counts; | |
1269 | ||
1270 | counts = per_cpu_ptr(sp->sda, cpu); | |
1271 | u0 = counts->srcu_unlock_count[!idx]; | |
1272 | u1 = counts->srcu_unlock_count[idx]; | |
1273 | ||
1274 | /* | |
1275 | * Make sure that a lock is always counted if the corresponding | |
1276 | * unlock is counted. | |
1277 | */ | |
1278 | smp_rmb(); | |
1279 | ||
1280 | l0 = counts->srcu_lock_count[!idx]; | |
1281 | l1 = counts->srcu_lock_count[idx]; | |
1282 | ||
1283 | c0 = l0 - u0; | |
1284 | c1 = l1 - u1; | |
1285 | pr_cont(" %d(%ld,%ld)", cpu, c0, c1); | |
ac3748c6 PM |
1286 | s0 += c0; |
1287 | s1 += c1; | |
115a1a52 | 1288 | } |
ac3748c6 | 1289 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
115a1a52 PM |
1290 | } |
1291 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); | |
1292 | ||
1f4f6da1 PM |
1293 | static int __init srcu_bootup_announce(void) |
1294 | { | |
1295 | pr_info("Hierarchical SRCU implementation.\n"); | |
0c8e0e3c PM |
1296 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
1297 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); | |
1f4f6da1 PM |
1298 | return 0; |
1299 | } | |
1300 | early_initcall(srcu_bootup_announce); |