/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/context_tracking_irq.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void synchronize_rcu(void);

struct rcu_gp_oldstate;
unsigned long get_completed_synchronize_rcu(void);
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);

// Maximum number of unsigned long values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2

/**
 * same_state_synchronize_rcu - Are two old-state values identical?
 * @oldstate1: First old-state value.
 * @oldstate2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
 * get_completed_synchronize_rcu().  Returns @true if the two values are
 * identical and @false otherwise.  This allows structures whose lifetimes
 * are tracked by old-state values to push these values to a list header,
 * allowing those structures to be slightly smaller.
 */
static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
{
	return oldstate1 == oldstate2;
}
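
/*
 * For example (an illustrative sketch only, not part of this header's
 * kernel-doc), a caller might combine the polled grace-period interfaces
 * declared elsewhere (get_state_synchronize_rcu() and
 * poll_state_synchronize_rcu()) to skip a redundant grace-period wait:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	// ... time passes, possibly including a full grace period ...
 *
 *	if (!poll_state_synchronize_rcu(cookie))
 *		synchronize_rcu();	// Grace period not yet over, so wait.
 */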

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		rcu_read_unlock_strict();
	preempt_enable();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_LAZY
void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
#else
static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}
#endif

/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
static __always_inline void rcu_irq_work_resched(void) { }
#endif

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);

#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }

#define RCU_NOCB_LOCKDEP_WARN(c, s)

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Note a quasi-voluntary context switch for RCU Tasks' benefit.
 * This is a macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU_GENERIC

# ifdef CONFIG_TASKS_RCU
# define rcu_tasks_classic_qs(t, preempt)				\
	do {								\
		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout))	\
			WRITE_ONCE((t)->rcu_tasks_holdout, false);	\
	} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
# define synchronize_rcu_tasks synchronize_rcu
# endif

# ifdef CONFIG_TASKS_TRACE_RCU
// Bits for ->trc_reader_special.b.need_qs field.
#define TRC_NEED_QS		0x1  // Task needs a quiescent state.
#define TRC_NEED_QS_CHECKED	0x2  // Task has been checked for needing quiescent state.

u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
void rcu_tasks_trace_qs_blkd(struct task_struct *t);

# define rcu_tasks_trace_qs(t)							\
	do {									\
		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting);	\
										\
		if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
		    likely(!___rttq_nesting)) {					\
			rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN &&	\
			   !READ_ONCE((t)->trc_reader_special.b.blocked)) {	\
			rcu_tasks_trace_qs_blkd(t);				\
		}								\
	} while (0)
void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif

#define rcu_tasks_qs(t, preempt)					\
	do {								\
		rcu_tasks_classic_qs((t), (preempt));			\
		rcu_tasks_trace_qs(t);					\
	} while (0)

# ifdef CONFIG_TASKS_RUDE_RCU
void synchronize_rcu_tasks_rude(void);
void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif

#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

/**
 * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
 *
 * As an accident of implementation, an RCU Tasks Trace grace period also
 * acts as an RCU grace period.  However, this could change at any time.
 * Code relying on this accident must call this function to verify that
 * this accident is still happening.
 *
 * You have been warned!
 */
static inline bool rcu_trace_implies_rcu_gp(void) { return true; }

/**
 * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPTION kernels.
 */
#define cond_resched_tasks_rcu_qs()					\
	do {								\
		rcu_tasks_qs(current, false);				\
		cond_resched();						\
	} while (0)
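
/*
 * For example (an illustrative sketch, where do_one_unit_of_work() is a
 * hypothetical helper), a long-running kthread loop can report potential
 * quiescent states once per pass:
 *
 *	while (!kthread_should_stop()) {
 *		do_one_unit_of_work();
 *		cond_resched_tasks_rcu_qs();
 *	}
 */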

/**
 * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
 * @old_ts: jiffies at start of processing.
 *
 * This helper is for long-running softirq handlers, such as NAPI threads in
 * networking.  The caller should initialize the variable passed in as @old_ts
 * at the beginning of the softirq handler.  When invoked frequently, this macro
 * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
 * provide both RCU and RCU-Tasks quiescent states.  Note that this macro
 * modifies its old_ts argument.
 *
 * Because regions of code that have disabled softirq act as RCU read-side
 * critical sections, this macro should be invoked with softirq (and
 * preemption) enabled.
 *
 * The macro is not needed when CONFIG_PREEMPT_RT is defined.  RT kernels have
 * more opportunities to invoke schedule() and thus to provide the necessary
 * quiescent states.  By contrast, calling cond_resched() alone won't achieve
 * the same effect because cond_resched() does not provide RCU-Tasks quiescent
 * states.
 */
#define rcu_softirq_qs_periodic(old_ts)					\
	do {								\
		if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&			\
		    time_after(jiffies, (old_ts) + HZ / 10)) {		\
			preempt_disable();				\
			rcu_softirq_qs();				\
			preempt_enable();				\
			(old_ts) = jiffies;				\
		}							\
	} while (0)
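
/*
 * Usage sketch (illustrative only, with more_work_pending() and
 * napi_poll_one() being hypothetical helpers): snapshot jiffies once,
 * then invoke the macro from within the processing loop:
 *
 *	unsigned long timestamp = jiffies;
 *
 *	while (more_work_pending()) {
 *		napi_poll_one();
 *		rcu_softirq_qs_periodic(timestamp);
 *	}
 */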

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
 * are needed for dynamic initialization and destruction of rcu_head
 * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
 * dynamic initialization and destruction of statically allocated rcu_head
 * structures.  However, rcu_head structures allocated dynamically in the
 * heap don't need any initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head) { }
static inline void destroy_rcu_head(struct rcu_head *head) { }
static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
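
/*
 * For example (an illustrative sketch, with my_done_cb() being a
 * hypothetical callback), code waiting in place on an on-stack rcu_head
 * pairs the init and destroy calls around its use:
 *
 *	struct rcu_head rh;
 *
 *	init_rcu_head_on_stack(&rh);
 *	call_rcu(&rh, my_done_cb);
 *	// ... block until my_done_cb() has run, e.g., via a completion ...
 *	destroy_rcu_head_on_stack(&rh);
 */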

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_try_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, _THIS_IP_);
}

int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_try_lock_acquire(a)	do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return !preemptible();
}

static inline int rcu_read_lock_any_held(void)
{
	return !preemptible();
}

static inline int debug_lockdep_rcu_enabled(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 *
 * This checks debug_lockdep_rcu_enabled() before checking (c) to
 * prevent early boot splats due to lockdep not yet being initialized,
 * and rechecks it after checking (c) to prevent false-positive splats
 * due to races with lockdep being disabled.  See commit 3066820034b5dd
 * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
 */
#define RCU_LOCKDEP_WARN(c, s)						\
	do {								\
		static bool __section(".data..unlikely") __warned;	\
		if (debug_lockdep_rcu_enabled() && (c) &&		\
		    debug_lockdep_rcu_enabled() && !__warned) {		\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

#ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else // #ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void) { }
#endif // #else // #ifndef CONFIG_PREEMPT_RCU

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))			\
			RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
					 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

// See RCU_LOCKDEP_WARN() for an explanation of the double call to
// debug_lockdep_rcu_enabled().
static inline bool lockdep_assert_rcu_helper(bool c)
{
	return debug_lockdep_rcu_enabled() &&
	       (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
	       debug_lockdep_rcu_enabled();
}

/**
 * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
 *
 * Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
 */
#define lockdep_assert_in_rcu_read_lock() \
	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))

/**
 * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
 *
 * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
 * Note that local_bh_disable() and friends do not suffice here; instead, an
 * actual rcu_read_lock_bh() is required.
 */
#define lockdep_assert_in_rcu_read_lock_bh() \
	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))

/**
 * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
 *
 * Splats if lockdep is enabled and there is no rcu_read_lock_sched()
 * in effect.  Note that preempt_disable() and friends do not suffice here;
 * instead, an actual rcu_read_lock_sched() is required.
 */
#define lockdep_assert_in_rcu_read_lock_sched() \
	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))

/**
 * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
 *
 * Splats if lockdep is enabled and there is no RCU reader of any
 * type in effect.  Note that regions of code protected by things like
 * preempt_disable(), local_bh_disable(), and local_irq_disable() all qualify
 * as RCU readers.
 *
 * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
 * kernels that are not also built with PREEMPT_COUNT.  But if you have
 * lockdep enabled, you might as well also enable PREEMPT_COUNT.
 */
#define lockdep_assert_in_rcu_reader()						\
	WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) &&	\
					       !lock_is_held(&rcu_bh_lock_map) && \
					       !lock_is_held(&rcu_sched_lock_map) && \
					       preemptible()))

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)

#define lockdep_assert_in_rcu_read_lock() do { } while (0)
#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
#define lockdep_assert_in_rcu_reader() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple pointers markings to match different RCU implementations
 * (e.g., __srcu), should this make sense in the future.
 */

#ifdef __CHECKER__
#define rcu_check_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __unrcu_pointer(p, local)					\
({									\
	typeof(*p) *local = (typeof(*p) *__force)(p);			\
	rcu_check_sparse(p, __rcu);					\
	((typeof(*p) __force __kernel *)(local));			\
})
/**
 * unrcu_pointer - mark a pointer as not being RCU protected
 * @p: pointer needing to lose its __rcu property
 *
 * Converts @p from an __rcu pointer to a __kernel pointer.
 * This allows an __rcu pointer to be used with xchg() and friends.
 */
#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))
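
/*
 * For example (an illustrative sketch, with gp being a hypothetical
 * __rcu-annotated global pointer), unrcu_pointer() strips the __rcu
 * annotation from the value returned by xchg(), keeping sparse happy:
 *
 *	struct foo __rcu *gp;
 *
 *	struct foo *old = unrcu_pointer(xchg(&gp, NULL));
 *
 * Once readers can no longer reach the old structure, it may be passed
 * to kfree_rcu() or similar.
 */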

#define __rcu_access_pointer(p, local, space) \
({ \
	typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_check(p, local, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_protected(p, local, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})
#define __rcu_dereference_raw(p, local) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(p) local = READ_ONCE(p); \
	((typeof(*p) __force __kernel *)(local)); \
})
#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other is visible only to sparse (__CHECKER__),
 * neither of which actually executes the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v)					\
do {									\
	uintptr_t _r_a_p__v = (uintptr_t)(v);				\
	rcu_check_sparse(p, __rcu);					\
									\
	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)	\
		WRITE_ONCE((p), (typeof(p))(_r_a_p__v));		\
	else								\
		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
} while (0)
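
/*
 * For example (an illustrative sketch, with struct foo and the global
 * pointer gp both being hypothetical), the classic publication pattern
 * fully initializes the structure before making it visible to readers:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (p) {
 *		p->a = 1;
 *		p->b = 2;
 *		rcu_assign_pointer(gp, p);	// Readers now see ->a and ->b initialized.
 *	}
 */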

/**
 * rcu_replace_pointer() - replace an RCU pointer, returning its old value
 * @rcu_ptr: RCU pointer, whose old value is returned
 * @ptr: regular pointer
 * @c: the lockdep conditions under which the dereference will take place
 *
 * Perform a replacement, where @rcu_ptr is an RCU-annotated
 * pointer and @c is the lockdep argument that is passed to the
 * rcu_dereference_protected() call used to read that pointer.  The old
 * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
 */
#define rcu_replace_pointer(rcu_ptr, ptr, c)				\
({									\
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	__tmp;								\
})
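
/*
 * For example (an illustrative sketch, with gp, newp, my_lock, and the
 * rh field all being hypothetical), an updater holding the lock that
 * guards gp can swap in a new structure and defer freeing the old one:
 *
 *	struct foo *old;
 *
 *	old = rcu_replace_pointer(gp, newp, lockdep_is_held(&my_lock));
 *	if (old)
 *		kfree_rcu(old, rh);	// rh is old's embedded struct rcu_head.
 */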

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * lockdep checks for being in an RCU read-side critical section.  This is
 * useful when the value of this pointer is accessed, but the pointer is
 * not dereferenced, for example, when testing an RCU-protected pointer
 * against NULL.  Although rcu_access_pointer() may also be used in cases
 * where update-side locks prevent the value of the pointer from changing,
 * you should instead use rcu_dereference_protected() for this use case.
 * Within an RCU read-side critical section, there is little reason to
 * use rcu_access_pointer().
 *
 * It is usually best to test the rcu_access_pointer() return value
 * directly in order to avoid accidental dereferences being introduced
 * by later inattentive changes.  In other words, assigning the
 * rcu_access_pointer() return value to a local variable results in an
 * accident waiting to happen.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as is
 * the case in the context of the RCU callback that is freeing up the data,
 * or after a synchronize_rcu() returns.  This can be useful when tearing
 * down multi-linked structures after a grace period has elapsed.  However,
 * rcu_dereference_protected() is normally preferred for this use case.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
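
/*
 * For example (an illustrative sketch, with gp and handle_nonnull_gp()
 * being hypothetical), testing the return value directly avoids the
 * accidental dereferences warned about above:
 *
 *	if (rcu_access_pointer(gp))
 *		handle_nonnull_gp();	// Must not dereference gp itself here.
 */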

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *				    atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().  However,
 * please note that starting in v5.0 kernels, vanilla RCU grace periods
 * wait for local_bh_disable() regions of code in addition to regions of
 * code demarked by rcu_read_lock() and rcu_read_unlock().  This means
 * that synchronize_rcu(), call_rcu(), and friends all take not only
 * rcu_read_lock() but also rcu_read_lock_bh() into account.
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 * However, please note that starting in v5.0 kernels, vanilla RCU grace
 * periods wait for preempt_disable() regions of code in addition to
 * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
 * This means that synchronize_rcu(), call_rcu(), and friends all take not
 * only rcu_read_lock() but also rcu_read_lock_sched() into account.
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || rcu_read_lock_sched_held(), \
				__rcu)

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_check(p) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE().  This is useful in cases where update-side locks
 * prevent the value of the pointer from changing.  Please note that this
 * primitive does *not* prevent the compiler from repeating this reference
 * or combining it with other references, so it should not be used without
 * protection of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)
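
/*
 * For example (an illustrative sketch, with gp and my_lock being
 * hypothetical), an updater already holding the lock that excludes all
 * changes to gp may safely omit the READ_ONCE() and lockdep checks:
 *
 *	spin_lock(&my_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	if (p)
 *		p->a++;		// Safe: my_lock prevents concurrent updates to gp.
 *	spin_unlock(&my_lock);
 */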


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking.  In C11, it would map to
 * kill_dependency().  It could be used as follows::
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Both synchronize_rcu() and call_rcu() also wait for regions of code
 * with preemption disabled, including regions of code with interrupts or
 * softirqs disabled.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPTION kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static __always_inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}
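
/*
 * For example (an illustrative sketch, with gp and do_something_with()
 * being hypothetical), a typical reader brackets its accesses as follows:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);	// Must not block in a !PREEMPTION kernel.
 *	rcu_read_unlock();
 */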

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In almost all situations, rcu_read_unlock() is immune from deadlock.
 * This deadlock immunity also extends to the scheduler's runqueue
 * and priority-inheritance spinlocks, courtesy of the quiescent-state
 * deferral that is carried out when rcu_read_unlock() is invoked with
 * interrupts disabled.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
	__release(RCU);
	__rcu_read_unlock();
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is equivalent to rcu_read_lock(), but also disables softirqs.
 * Note that anything else that disables softirqs can also serve as an RCU
 * read-side critical section.  However, please note that this equivalence
 * applies only to v5.0 and later.  Before v5.0, rcu_read_lock() and
 * rcu_read_lock_bh() were unrelated.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/**
 * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is equivalent to rcu_read_lock(), but also disables preemption.
 * Read-side critical sections can also be introduced by anything else that
 * disables preemption, including local_irq_disable() and friends.  However,
 * please note that the equivalence to rcu_read_lock() applies only to
 * v5.0 and later.  Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
 * were unrelated.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/**
 * rcu_read_unlock_sched() - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer *or*
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() *and*
 *
 *	a.	You have not made *any* reader-visible changes to
 *		this structure since then *or*
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer *after* you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_check_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
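
/*
 * For example (an illustrative sketch, with struct foo, its ->next
 * pointer, and gp all being hypothetical), internal pointers of a
 * not-yet-published structure may use RCU_INIT_POINTER(), but the final
 * publishing store must use rcu_assign_pointer():
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (p) {
 *		RCU_INIT_POINTER(p->next, NULL);	// Not yet reader-visible.
 *		p->a = 42;
 *		rcu_assign_pointer(gp, p);		// Publish with full ordering.
 *	}
 */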

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr: pointer to kfree for double-argument invocations.
 * @rhf: the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  In order to have a universal
 * callback function handling different offsets of rcu_head, the callback needs
 * to determine the starting address of the freed object, which can be a large
 * kmalloc or vmalloc allocation.  To allow simply aligning the pointer down to
 * page boundary for those, only offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in kvfree_rcu_arg_2().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * The object to be freed can be allocated either by kmalloc() or
 * kmem_cache_alloc().
 *
 * Note that the allowable offset might decrease in the future.
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
#define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
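
/*
 * For example (an illustrative sketch, struct foo being hypothetical),
 * embedding a struct rcu_head lets an updater free an object after a
 * grace period without writing a dedicated callback:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rh;
 *	};
 *
 *	// Instead of call_rcu(&p->rh, some_kfree_callback):
 *	kfree_rcu(p, rh);
 */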

/**
 * kfree_rcu_mightsleep() - kfree an object after a grace period.
 * @ptr: pointer to kfree for single-argument invocations.
 *
 * In the head-less variant, only one argument is passed: a pointer
 * that is to be freed after a grace period.  The semantics are therefore
 *
 *	kfree_rcu_mightsleep(ptr);
 *
 * where @ptr is the pointer to be freed by kvfree().
 *
 * Please note that this head-less way of freeing may be used only from
 * a context that is allowed to sleep, as indicated by the might_sleep()
 * annotation.  Otherwise, please switch to embedding an rcu_head
 * structure within the type of @ptr.
 */
#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)

/*
 * In mm/slab_common.c, no suitable header to include here.
 */
void kvfree_call_rcu(struct rcu_head *head, void *ptr);

/*
 * The BUILD_BUG_ON() makes sure the rcu_head offset can be handled.  See the
 * comment of kfree_rcu() for details.
 */
#define kvfree_rcu_arg_2(ptr, rhf)					\
do {									\
	typeof (ptr) ___p = (ptr);					\
									\
	if (___p) {							\
		BUILD_BUG_ON(offsetof(typeof(*(ptr)), rhf) >= 4096);	\
		kvfree_call_rcu(&((___p)->rhf), (void *) (___p));	\
	}								\
} while (0)

#define kvfree_rcu_arg_1(ptr)					\
do {								\
	typeof(ptr) ___p = (ptr);				\
								\
	if (___p)						\
		kvfree_call_rcu(NULL, (void *) (___p));		\
} while (0)

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */


/* Has the specified rcu_head structure been handed to call_rcu()? */

/**
 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
 * @rhp: The rcu_head structure to initialize.
 *
 * If you intend to invoke rcu_head_after_call_rcu() to test whether a
 * given rcu_head structure has already been passed to call_rcu(), then
 * you must also invoke this rcu_head_init() function on it just after
 * allocating that structure.  Calls to this function must not race with
 * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
 */
static inline void rcu_head_init(struct rcu_head *rhp)
{
	rhp->func = (rcu_callback_t)~0L;
}

/**
 * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
 * @rhp: The rcu_head structure to test.
 * @f: The function passed to call_rcu() along with @rhp.
 *
 * Returns @true if the @rhp has been passed to call_rcu() with @f,
 * and @false otherwise.  Emits a warning in any other case, including
 * the case where @rhp has already been invoked after a grace period.
 * Calls to this function must not race with callback invocation.  One way
 * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
 * in an RCU read-side critical section that includes a read-side fetch
 * of the pointer to the structure containing @rhp.
 */
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
	rcu_callback_t func = READ_ONCE(rhp->func);

	if (func == f)
		return true;
	WARN_ON_ONCE(func != (rcu_callback_t)~0L);
	return false;
}
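
/*
 * For example (an illustrative sketch, with struct foo, its rh field,
 * and foo_callback() all being hypothetical), rcu_head_init() at
 * allocation time enables a later rcu_head_after_call_rcu() query:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p)
 *		rcu_head_init(&p->rh);
 *	// ... later, possibly: call_rcu(&p->rh, foo_callback); ...
 *
 *	rcu_read_lock();	// Per the kernel-doc above, the reader should
 *				// also fetch the pointer to the enclosing struct.
 *	if (rcu_head_after_call_rcu(&p->rh, foo_callback))
 *		pr_debug("already queued for foo_callback()\n");
 *	rcu_read_unlock();
 */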

/* kernel/ksysfs.c definitions */
extern int rcu_expedited;
extern int rcu_normal;

DEFINE_LOCK_GUARD_0(rcu,
	do {
		rcu_read_lock();
		/*
		 * sparse doesn't call the cleanup function,
		 * so just release immediately and don't track
		 * the context.  We don't need to anyway, since
		 * the whole point of the guard is to not need
		 * the explicit unlock.
		 */
		__release(RCU);
	} while (0),
	rcu_read_unlock())
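
/*
 * With the guard defined above, readers can rely on the scope-based
 * cleanup machinery from <linux/cleanup.h> instead of an explicit
 * rcu_read_unlock().  For example (an illustrative sketch, with gp and
 * do_something_with() being hypothetical):
 *
 *	{
 *		guard(rcu)();			// rcu_read_lock() here ...
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something_with(p);
 *	}					// ... rcu_read_unlock() at scope exit.
 */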

#endif /* __LINUX_RCUPDATE_H */