/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * Documentation/RCU.
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);

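/*
 * Polled grace-period interface.  get_state_synchronize_rcu() returns
 * a cookie without starting a new grace period, while
 * start_poll_synchronize_rcu() also starts one if needed; either
 * cookie can later be handed to poll_state_synchronize_rcu() to test
 * whether a full grace period has since elapsed.  Illustrative use
 * (not part of this header):
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	// ... later ...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kvfree(old_ptr);	// hypothetical pointer
 *
 * Because Tiny RCU runs on a single CPU, a caller that is allowed to
 * block has necessarily passed through a quiescent state, so
 * cond_synchronize_rcu() below need only assert that sleeping is legal.
 */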
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

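/*
 * With only one CPU there is nothing to expedite: the expedited
 * variants map directly onto their ordinary counterparts.
 */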
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

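/* rcu_barrier() waits for the callbacks of all prior call_rcu() invocations. */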
extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

/*
 * Declare kvfree() again here rather than including <linux/mm.h>,
 * where it lives: pulling that header into this one causes a large
 * number of compile errors.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		call_rcu(head, func);
		return;
	}

	// kvfree_rcu(one_arg) call: "func" is really the pointer to be
	// freed, so wait out a grace period and then free it directly.
	might_sleep();
	synchronize_rcu();
	kvfree((void *) func);
}

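/*
 * Under generic KASAN the wrapper is out of line (defined in
 * kernel/rcu/tiny.c), presumably so that the free can be recorded in
 * the object's KASAN aux stack; otherwise the inline version above
 * suffices.
 */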
#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__kvfree_call_rcu(head, func);
}
#endif

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

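/*
 * A context switch is an RCU quiescent state and, unless it is a
 * preemption, an RCU Tasks quiescent state as well.
 */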
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

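/*
 * Called from the tick/idle code: returning zero reports that RCU
 * never needs the scheduler tick retained on this CPU.
 */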
static inline int rcu_needs_cpu(void)
{
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
#define rcu_is_idle_cpu(cpu) \
	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
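/*
 * Tiny SRCU consults rcu_scheduler_active, so a real
 * rcu_scheduler_starting() is needed only when SRCU is configured
 * (an inference from the #ifdef below, not stated in this header).
 */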
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCU tree CPU-hotplug events: unused by Tiny RCU. */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */