/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a kthread worker handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct kthread_work rew_work;
};
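
/*
 * Illustrative sketch (the real handlers live in tree_exp.h): ->rew_s
 * carries the expedited sequence number, and the handler recovers the
 * enclosing structure with container_of().  Here example_handler() and
 * do_something_with() are placeholders, not kernel symbols:
 *
 *	static void example_handler(struct kthread_work *wp)
 *	{
 *		struct rcu_exp_work *rewp =
 *			container_of(wp, struct rcu_exp_work, rew_work);
 *
 *		do_something_with(rewp->rew_s);
 *	}
 *
 *	kthread_init_work(&rew->rew_work, example_handler);
 *	kthread_queue_work(worker, &rew->rew_work);
 */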

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  the following fields. */
	unsigned long gp_seq;	/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed; /* Track furthest future GP request. */
	unsigned long completedqs; /* All QSes done for this node. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;	/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	struct kthread_worker *exp_kworker;
				/* Workers performing per-node expedited GP */
				/*  initialization. */
	unsigned long cbovldmask;
				/* CPUs experiencing callback overload. */
	unsigned long ffmask;	/* Fully functional CPUs. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU here. */
	int	grphi;		/* highest-numbered CPU here. */
	u8	grpnum;		/* group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there can be no such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct mutex kthread_mutex;
				/* Exclusion for thread spawning and affinity */
				/*  manipulation. */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_boosts;	/* Number of boosts for this rcu_node structure. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait for GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;	/* Need to flush workitem? */
	raw_spinlock_t exp_poll_lock;
				/* Lock and data for polled expedited grace periods. */
	unsigned long exp_seq_poll_rq;
	struct work_struct exp_poll_wq;
} ____cacheline_internodealigned_in_smp;
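
/*
 * Because ->lock is marked __private, code normally takes it through the
 * wrapper macros defined in rcu.h rather than directly, for example
 * (sketch):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	...update rnp->qsmask and friends...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */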

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
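
/*
 * For example (sketch), testing whether @cpu still owes its leaf node a
 * quiescent state for the current grace period:
 *
 *	if (READ_ONCE(rnp->qsmask) & leaf_node_cpu_bit(rnp, cpu))
 *		...the CPU has not yet checked in...
 */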

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
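
/*
 * For example (sketch), a single load of ->s tells whether this CPU still
 * owes a quiescent state to either flavor of grace period:
 *
 *	if (!rdp->cpu_no_qs.s)
 *		return;
 *
 * while ->b.norm and ->b.exp can still be set and cleared individually.
 */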

/*
 * Record the snapshot of the core stats at half of the first RCU stall timeout.
 * The member gp_seq is used to ensure that all members are updated only once
 * during the sampling period.  The snapshot is taken only if this gp_seq is not
 * equal to rdp->gp_seq.
 */
struct rcu_snap_record {
	unsigned long	gp_seq;		/* Track rdp->gp_seq counter */
	u64		cputime_irq;	/* Accumulated cputime of hard irqs */
	u64		cputime_softirq;/* Accumulated cputime of soft irqs */
	u64		cputime_system; /* Accumulated cputime of kernel tasks */
	unsigned long	nr_hardirqs;	/* Accumulated number of hard irqs */
	unsigned int	nr_softirqs;	/* Accumulated number of soft irqs */
	unsigned long long nr_csw;	/* Accumulated number of task switches */
	unsigned long	jiffies;	/* Track jiffies value */
};
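
/*
 * A sketch of the once-per-GP guard described above (illustrative only):
 *
 *	struct rcu_snap_record *rsrp = &rdp->snap_record;
 *
 *	if (rsrp->gp_seq != rdp->gp_seq) {
 *		...take the snapshot...
 *		rsrp->gp_seq = rdp->gp_seq;
 *		rsrp->jiffies = jiffies;
 *	}
 */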

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
	bool		cpu_started;	/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/*  different callbacks waiting for */
					/*  different grace periods. */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int	watching_snap;		/* Per-GP tracking for dynticks. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/*  ... provide QS to expedited GP. */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	unsigned long barrier_seq_snap;	/* Snap of rcu_state.barrier_sequence. */
	struct rcu_head barrier_head;
	int exp_watching_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct swait_queue_head nocb_state_wq; /* For offloading state changes */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
	struct mutex nocb_gp_kthread_mutex; /* Exclusion for nocb gp kthread */
					    /*  spawning */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct list_head nocb_head_rdp; /*
					 * Head of rcu_data list in wakeup chain,
					 * if rdp_gp.
					 */
	struct list_head nocb_entry_rdp; /* rcu_data node in wakeup chain. */
	struct rcu_data *nocb_toggling_rdp; /* rdp queued for (de-)offloading */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;
	unsigned long rcuc_activity;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_state;		/* ->gp_state at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_state;		/* ->gp_state at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
	unsigned long last_sched_clock;	/* Jiffies of last rcu_sched_clock_irq(). */
	struct rcu_snap_record snap_record; /* Snapshot of core stats at half of */
					    /*  the first RCU stall timeout */

	long lazy_len;			/* Length of buffered lazy callbacks. */
	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE_BYPASS	1
#define RCU_NOCB_WAKE_LAZY	2
#define RCU_NOCB_WAKE		3
#define RCU_NOCB_WAKE_FORCE	4
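
/*
 * These values are ordered by increasing urgency, so deferred-wakeup
 * checks of the form (sketch, assuming an offloaded rdp)
 *
 *	if (READ_ONCE(rdp->nocb_defer_wakeup) >= RCU_NOCB_WAKE)
 *		...do the wakeup...
 *
 * fire for the given level and anything stronger.
 */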

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
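
/*
 * Worked examples of the formula above: HZ=100 or HZ=250 gives 1 jiffy,
 * HZ=300 gives 2, and HZ=1000 gives 3, so faster-ticking kernels wait a
 * few more (shorter) ticks before forcing quiescent states.
 */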

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
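
/*
 * Example use of rcu_wait() (illustrative; "done" is a placeholder flag
 * set by some other thread):
 *
 *	static bool done;
 *
 *	rcu_wait(READ_ONCE(done));
 */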

/*
 * A maximum threshold for synchronize_rcu() users that are awakened
 * directly by the rcu_gp_kthread().  The remainder are deferred to the
 * main worker.
 */
#define SR_MAX_USERS_WAKE_FROM_GP 5
#define SR_NORMAL_GP_WAIT_HEAD_MAX 5

struct sr_wait_node {
	atomic_t inuse;
	struct llist_node node;
};
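
/*
 * A sketch of the resulting wakeup policy (illustrative only; done_list,
 * wake_one_user() and defer_rest_to_worker() are placeholders):
 *
 *	struct llist_node *pos, *next;
 *	int n = 0;
 *
 *	llist_for_each_safe(pos, next, done_list) {
 *		if (++n <= SR_MAX_USERS_WAKE_FROM_GP) {
 *			wake_one_user(pos);
 *		} else {
 *			defer_rest_to_worker(pos);
 *			break;
 *		}
 *	}
 */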

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */
	int n_online_cpus;			/* # CPUs online for RCU. */

	/* The following fields are guarded by the root rcu_node's lock. */

	unsigned long gp_seq ____cacheline_internodealigned_in_smp;
						/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
	unsigned long gp_seq_polled;		/* GP seq for polled API. */
	unsigned long gp_seq_polled_snap;	/* ->gp_seq_polled at normal GP start. */
	unsigned long gp_seq_polled_exp_snap;	/* ->gp_seq_polled at expedited GP start. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	raw_spinlock_t barrier_lock;		/* Protects ->barrier_seq_snap. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	int nr_fqs_jiffies_stall;		/* Number of fqs loops after
						 * which read jiffies and set
						 * jiffies_stall. Stall
						 * warnings disabled if !0. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */

	/* synchronize_rcu() part. */
	struct llist_head srs_next;	/* Users requesting a GP. */
	struct llist_node *srs_wait_tail; /* Tail of users waiting for a GP. */
	struct llist_node *srs_done_tail; /* Tail of users whose GP is done. */
	struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
	struct work_struct srs_cleanup_work;
	atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */

#ifdef CONFIG_RCU_NOCB_CPU
	struct mutex nocb_mutex;	/* Guards (de-)offloading */
	int nocb_is_setup;		/* nocb is set up at boot */
#endif
};
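
/*
 * Because the hierarchy is stored breadth-first in ->node[], a whole-tree
 * scan is just a linear walk of that array, as in this sketch (the
 * kernel's own traversal helpers in rcu.h follow the same pattern;
 * do_something_with() is a placeholder):
 *
 *	struct rcu_node *rnp;
 *
 *	for (rnp = &rcu_state.node[0];
 *	     rnp < &rcu_state.node[NUM_RCU_NODES]; rnp++)
 *		do_something_with(rnp);
 */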

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */
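
/*
 * These are OR-able command bits, normally updated while holding the root
 * rcu_node's lock, for example (sketch):
 *
 *	WRITE_ONCE(rcu_state.gp_flags,
 *		   rcu_state.gp_flags | RCU_GP_FLAG_FQS);
 */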

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF	 3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT	 4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS  5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy);
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy);
static void __maybe_unused __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
						unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level);
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);

/*
 * Disable IRQs before checking offloaded state so that local
 * locking is safe against concurrent de-offloading.
 */
#define rcu_nocb_lock_irqsave(rdp, flags)			\
do {								\
	local_irq_save(flags);					\
	if (rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		raw_spin_lock(&(rdp)->nocb_lock);		\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
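
/*
 * Typical pairing (sketch): rcu_nocb_unlock_irqrestore(), declared above,
 * undoes rcu_nocb_lock_irqsave() and also handles the not-offloaded case,
 * where only the IRQ state is restored:
 *
 *	unsigned long flags;
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	...manipulate rdp->cblist...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 */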

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);

/* Forward declarations for tree_exp.h. */
static void sync_rcu_do_polled_gp(struct work_struct *wp);