// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wakeup is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and possibly also interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock-held section.
 */
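/*
 * Illustrative usage (the lock variable is hypothetical, not part of this
 * file): with these substitutions a spin_lock() caller on RT may sleep on
 * contention, yet the lock-held section still behaves like a
 * non-migratable RCU read-side critical section:
 *
 *	spin_lock(&some_lock);		// may block on the underlying rtmutex
 *	// ... migration disabled, RCU read-side critical section ...
 *	spin_unlock(&some_lock);
 */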
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

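/*
 * Lock fast path: try to acquire the underlying rtmutex with a
 * NULL -> current cmpxchg; fall back to the rtlock slow path on
 * contention.
 */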
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

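/*
 * After the sleeping-lock debug check and the acquisition of the
 * rtlock, emulate the implicit side effects of a non RT spinlock:
 * enter an RCU read-side critical section and pin the task to the
 * current CPU for the duration of the lock-held section.
 */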
static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	___might_sleep(__FILE__, __LINE__, 0);
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

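/*
 * Unlock in the exact reverse order of rt_spin_lock(): drop the lockdep
 * annotation, reenable migration, leave the RCU read-side critical
 * section and finally release the underlying rtmutex (cmpxchg fast
 * path, slow path when there are waiters).
 */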
void __sched rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);

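/*
 * Trylock: attempt the cmpxchg fast path, fall back to the rtmutex
 * trylock slow path, and only take the RCU read lock and disable
 * migration when the lock was actually acquired.
 */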
static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

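/*
 * Bottom halves are disabled before the trylock attempt to mirror the
 * non RT spin_trylock_bh() semantics. On failure they are reenabled
 * again, so a successful return leaves BH disabled as callers expect.
 */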
int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
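/*
 * rwbase_rt.c is a common template shared with the RT rw_semaphore
 * implementation. The macros and inlines below supply the rtlock
 * specific primitives which the template expects.
 */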
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()

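/*
 * The state argument is ignored: rwlock waiters always sleep in
 * TASK_RTLOCK_WAIT state and cannot be interrupted, hence the
 * unconditional success return value.
 */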
static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
	rtlock_slowlock_locked(rtm);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}

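/*
 * rtlock waits are not interruptible: the template's signal pending
 * check is stubbed out to 0 and blocked tasks schedule via the rtlock
 * specific schedule_rtlock() variant.
 */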
#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_schedule()				\
	schedule_rtlock()

#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

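/*
 * The blocking lock operations mirror rt_spin_lock(): sleeping-lock
 * debug check and lockdep annotation first, then block on the rwbase
 * in TASK_RTLOCK_WAIT state, and finally enter the RCU read-side
 * critical section with migration disabled.
 */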
void __sched rt_read_lock(rwlock_t *rwlock)
{
	___might_sleep(__FILE__, __LINE__, 0);
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock)
{
	___might_sleep(__FILE__, __LINE__, 0);
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

void __sched rt_read_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
{
	return rw_base_is_contended(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_rwlock_is_contended);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif