// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
use macros::pin_data;

pub mod mutex;
pub mod spinlock;

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
///   lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
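///
/// # Implementation sketch
///
/// A backend is typically a thin wrapper around one of the C locking primitives; the real
/// implementations live in the [`mutex`] and [`spinlock`] modules. The rough shape is sketched
/// below using a made-up `foo_lock` primitive: the `FooBackend` type and the `bindings::foo_*`
/// helpers are hypothetical and only illustrate how the trait methods map onto C calls.
///
/// ```ignore
/// struct FooBackend;
///
/// // SAFETY: `foo_lock` provides mutual exclusion between `foo_lock_acquire` and
/// // `foo_lock_release`, and the default `relock` implementation uses the same locking method.
/// unsafe impl Backend for FooBackend {
///     type State = bindings::foo_lock;
///     type GuardState = ();
///
///     unsafe fn init(
///         ptr: *mut Self::State,
///         name: *const core::ffi::c_char,
///         key: *mut bindings::lock_class_key,
///     ) {
///         // SAFETY: The safety requirements of `init` guarantee that `ptr` is valid for write.
///         unsafe { bindings::foo_lock_init(ptr, name, key) }
///     }
///
///     unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
///         // SAFETY: The safety requirements of `lock` guarantee that the lock is initialised.
///         unsafe { bindings::foo_lock_acquire(ptr) }
///     }
///
///     unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
///         // SAFETY: The safety requirements of `unlock` guarantee that the caller owns the lock.
///         unsafe { bindings::foo_lock_release(ptr) }
///     }
/// }
/// ```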
pub unsafe trait Backend {
    /// The state required by the lock.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }
}

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
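///
/// # Examples
///
/// A minimal usage sketch, assuming the mutex backend from the [`mutex`] module and its
/// `new_mutex!` initialiser macro; the `Example` struct below is made up purely for illustration.
///
/// ```
/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex};
///
/// #[pin_data]
/// struct Example {
///     #[pin]
///     value: Mutex<u32>,
/// }
///
/// impl Example {
///     fn new() -> impl PinInit<Self> {
///         pin_init!(Self {
///             value <- new_mutex!(0),
///         })
///     }
/// }
///
/// // Allocate a pinned `Example`, then access the protected data through the lock.
/// let e = Box::pin_init(Example::new())?;
/// *e.value.lock() += 1;
/// assert_eq!(*e.value.lock(), 1);
/// ```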
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    pub(crate) data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
        pin_init!(Self {
            data: UnsafeCell::new(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
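    ///
    /// The data remains accessible for as long as the returned [`Guard`] is alive; the lock is
    /// released when the guard is dropped. A sketch, assuming a `Mutex`-backed lock created with
    /// the `new_mutex!` macro from the [`mutex`] module:
    ///
    /// ```
    /// use kernel::{init::InPlaceInit, new_mutex};
    ///
    /// let m = Box::pin_init(new_mutex!(0u32))?;
    /// {
    ///     let mut guard = m.lock();
    ///     *guard += 1;
    /// } // The lock is released here, when `guard` goes out of scope.
    /// assert_eq!(*m.lock(), 1);
    /// ```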
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }
}

/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
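    // A raw-pointer `PhantomData` keeps the guard `!Send`: some backends (e.g., mutexes) require
    // that the lock be released by the same thread/CPU that acquired it, so the guard must not be
    // transferable to another thread.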
    _not_send: PhantomData<*mut ()>,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
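    /// Temporarily releases the lock, runs `cb` while it is unlocked, and then reacquires the
    /// lock before returning, so the guard remains valid for the caller.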
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        // SAFETY: The lock was just unlocked above and is being relocked now.
        let _relock =
            ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }
}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        Self {
            lock,
            state,
            _not_send: PhantomData,
        }
    }
}