/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
void rcu_request_urgent_qs_task(struct task_struct *t);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes. The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(void)
{
	rcu_note_context_switch(false);
}
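
/*
 * Illustrative sketch (not part of this header; "flags" is a
 * hypothetical local variable): because the caller must have
 * interrupts disabled, a typical call site would look like:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	rcu_virt_note_context_switch();
 *	local_irq_restore(flags);
 */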

void synchronize_rcu_expedited(void);

void rcu_barrier(void);
void rcu_momentary_eqs(void);

struct rcu_gp_oldstate {
	unsigned long rgos_norm;
	unsigned long rgos_exp;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4

/**
 * same_state_synchronize_rcu_full - Are two old-state values identical?
 * @rgosp1: First old-state value.
 * @rgosp2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or get_completed_synchronize_rcu_full().  Returns @true if the two
 * values are identical and @false otherwise.  This allows structures
 * whose lifetimes are tracked by old-state values to push these values
 * to a list header, allowing those structures to be slightly smaller.
 *
 * Note that equality is judged on a bitwise basis, so that an
 * @rcu_gp_oldstate structure with an already-completed state in one field
 * will compare not-equal to a structure with an already-completed state
 * in the other field.  After all, the @rcu_gp_oldstate structure is opaque
 * so how did such a situation come to pass in the first place?
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
}
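
/*
 * Illustrative sketch (not part of this header; struct foo, foop, and
 * list_rgos are hypothetical): a structure whose lifetime is tracked
 * by a full old-state value can defer to an identical value already
 * pushed to its list header:
 *
 *	struct foo {
 *		struct rcu_gp_oldstate foo_rgos;
 *		struct list_head foo_list;
 *	};
 *
 *	if (same_state_synchronize_rcu_full(&list_rgos, &foop->foo_rgos))
 *		// Identical values: the list header's copy tracks the
 *		// same grace period, so the per-structure field could
 *		// be omitted to shrink struct foo.
 */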

unsigned long start_poll_synchronize_rcu_expedited(void);
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
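
/*
 * Illustrative sketch (hypothetical caller; "p" points to an object
 * already made unreachable to readers): the polled grace-period API
 * lets a caller snapshot grace-period state, check it without blocking
 * via poll_state_synchronize_rcu(), and block only for whatever part
 * of the grace period has not yet elapsed:
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	// ... other work ...
 *	if (!poll_state_synchronize_rcu(cookie))
 *		cond_synchronize_rcu(cookie);	// block for the remainder
 *	kfree(p);				// readers guaranteed done
 */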

#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

struct task_struct;
void rcu_preempt_deferred_qs(struct task_struct *t);

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPT_RCU
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
void rcutree_report_cpu_starting(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
#else
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
#define rcutree_offline_cpu NULL
#endif

void rcutree_migrate_callbacks(int cpu);

/* Called from hotplug and also arm64 early secondary boot failure */
void rcutree_report_cpu_dead(void);

#endif /* __LINUX_RCUTREE_H */