/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <linux/slab.h>
#include <trace/events/rcu.h>

/*
 * Grace-period counter management.
 *
 * The two least significant bits contain the control flags.
 * The most significant bits contain the grace-period sequence counter.
 *
 * When both control flags are zero, no grace period is in progress.
 * When either bit is non-zero, a grace period has started and is in
 * progress. When the grace period completes, the control flags are reset
 * to 0 and the grace-period sequence counter is incremented.
 *
 * However, some specific RCU usages make use of custom values.
 *
 * SRCU special control values:
 *
 *	SRCU_SNP_INIT_SEQ	:	Invalid/init value set when SRCU node
 *					is initialized.
 *
 *	SRCU_STATE_IDLE		:	No SRCU gp is in progress.
 *
 *	SRCU_STATE_SCAN1	:	State set by rcu_seq_start(). Indicates
 *					we are scanning the readers on the slot
 *					defined as inactive (there might well
 *					be pending readers that will use that
 *					index, but their number is bounded).
 *
 *	SRCU_STATE_SCAN2	:	State set manually via rcu_seq_set_state().
 *					Indicates we are flipping the readers
 *					index and then scanning the readers on the
 *					slot newly designated as inactive (again,
 *					the number of pending readers that will use
 *					this inactive index is bounded).
 *
 * RCU polled GP special control value:
 *
 *	RCU_GET_STATE_COMPLETED	:	State value indicating that the polled
 *					GP has already completed. This value
 *					covers both the state and the counter
 *					of the grace-period sequence number.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

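/*
 * Worked example (illustrative): with RCU_SEQ_CTR_SHIFT == 2,
 * RCU_SEQ_STATE_MASK is 0x3, so a gp_seq value of 0x9 (binary 10 01)
 * decodes to rcu_seq_ctr() == 2 and rcu_seq_state() == 1, meaning that
 * the grace period with counter 2 is in progress.  rcu_seq_start()
 * takes 0x8 to 0x9, and rcu_seq_end() takes 0x9 to 0xc, clearing the
 * state bits and advancing the counter to 3.
 */
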
/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED	0x1

extern int sysctl_sched_rt_runtime;

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time. Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

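/*
 * Worked example for rcu_seq_snap() (illustrative): if *sp == 8 (idle),
 * then s = (8 + 7) & ~0x3 = 12, the value *sp reaches once the next
 * full grace period completes.  If *sp == 9 (a grace period is in
 * progress), that grace period may have started before the current
 * readers, so s = (9 + 7) & ~0x3 = 16 also waits for the grace period
 * after it.
 */
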
/* Return the current value of the update side's sequence number, with no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

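/*
 * Illustrative example: for a rcu_seq_snap() return value s == 16,
 * (s - 1) & ~RCU_SEQ_STATE_MASK is 12, so rcu_seq_started() returns
 * true once *sp exceeds 12, that is, once the grace period that will
 * end with *sp == 16 has started.
 */
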
/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
	unsigned long cur_s = READ_ONCE(*sp);

	return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_STATE_MASK + 1));
}

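/*
 * Note on the guard band (illustrative): rcu_seq_snap() returns at most
 * its argument plus 2 * RCU_SEQ_STATE_MASK + 1, so a cur_s more than
 * that far below s can only be the result of counter wrap, in which
 * case the grace period corresponding to s completed long ago.
 */
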
/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}

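/*
 * Worked example (illustrative): for new == 16 and old == 9,
 * rnd_diff = (16 & ~0x3) - ((9 + 0x3) & ~0x3) + 1 = 16 - 12 + 1 = 5,
 * which exceeds RCU_SEQ_STATE_MASK, so the result is
 * ((5 - 0x3 - 1) >> 2) + 2 = 2: roughly two full grace periods.
 */
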
/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

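/*
 * A NULL ->func likely indicates a mishandled rcu_head, for example a
 * double call_rcu() or corruption of the callback before invocation,
 * so dump whatever the slab allocator knows about the enclosing object.
 */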
static inline void debug_rcu_head_callback(struct rcu_head *rhp)
{
	if (unlikely(!rhp->func))
		kmem_dump_obj(rhp);
}

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

extern int rcu_cpu_stall_notifiers;

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
extern int rcu_exp_cpu_stall_timeout;
extern int rcu_cpu_stall_cputime;
extern bool rcu_exp_stall_task_details __read_mostly;
int rcu_jiffies_till_stall_check(void);
int rcu_exp_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

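/*
 * In the macro above, the atomic_read() provides a cheap fast path once
 * the dump has already happened, and the atomic_xchg() guarantees that
 * exactly one caller wins any race to perform the dump.
 */
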
void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}

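/*
 * Worked example (illustrative): with rcu_fanout_exact == 0,
 * nr_cpu_ids == 16, rcu_num_lvls == 2, and levelcnt[] = { 1, 4 },
 * the loop above computes levelspread[1] = (16 + 4 - 1) / 4 = 4 (each
 * leaf covers four CPUs) and levelspread[0] = (4 + 1 - 1) / 1 = 4 (the
 * root fans out to all four leaves).
 */
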
extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define _rcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	_rcu_for_each_node_breadth_first(&rcu_state, rnp)
#define srcu_for_each_node_breadth_first(ssp, rnp) \
	_rcu_for_each_node_breadth_first(ssp->srcu_sup, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure. It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

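/*
 * For example (illustrative), a leaf rcu_node with ->grplo == 4 and
 * ->grphi == 7 covers CPUs 4 through 7, and the loop above visits each
 * of those CPUs that is set in cpu_possible_mask.
 */
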
/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

#endif /* !defined(CONFIG_TINY_RCU) */

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these
 * wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
do { \
	lockdep_assert_irqs_disabled(); \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \
} while (0)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

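/*
 * Typical usage of the wrappers above (illustrative):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	... operate on the rcu_node structure ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *
 * The smp_mb__after_unlock_lock() in each acquire wrapper upgrades the
 * lock operation to a full memory barrier, restoring the tree-level
 * ordering described above.
 */
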
#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

#endif // #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_TASKS_RCU_GENERIC)

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline bool rcu_async_should_hurry(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_async_hurry(void) { }
static inline void rcu_async_relax(void) { }
static inline bool rcu_cpu_online(int cpu) { return true; }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
bool rcu_async_should_hurry(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcu_async_hurry(void);
void rcu_async_relax(void);
void rcupdate_announce_bootup_oddness(void);
bool rcu_cpu_online(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_TASKS_RCU
struct task_struct *get_rcu_tasks_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU
struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
#endif // #ifdef CONFIG_TASKS_RUDE_RCU

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_RCU_LAZY)
unsigned long rcu_lazy_get_jiffies_till_flush(void);
void rcu_lazy_set_jiffies_till_flush(unsigned long j);
#else
static inline unsigned long rcu_lazy_get_jiffies_till_flush(void) { return 0; }
static inline void rcu_lazy_set_jiffies_till_flush(unsigned long j) { }
#endif

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
static inline void rcu_gp_slow_register(atomic_t *rgssp) { }
static inline void rcu_gp_slow_unregister(atomic_t *rgssp) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
void rcu_gp_slow_register(atomic_t *rgssp);
void rcu_gp_slow_unregister(atomic_t *rgssp);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_bind_current_to_nocb(void);
#else
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_cpu_beenfullyonline(int cpu) { return true; }
#else
bool rcu_cpu_beenfullyonline(int cpu);
#endif

#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
int rcu_stall_notifier_call_chain(unsigned long val, void *v);
#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
static inline int rcu_stall_notifier_call_chain(unsigned long val, void *v) { return NOTIFY_DONE; }
#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)

#endif /* __LINUX_RCU_H */