/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                 INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                      <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, &r->refs);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
 * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
 * With the current PID limit, if no batched refcounting operations are used and
 * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
 * operations, this makes it impossible for a saturated refcount to leave the
 * saturation range, even if it is possible for multiple uses of the same
 * refcount to nest in the context of a single task:
 *
 *	(UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *	0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release ordering, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>

struct mutex;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

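/*
 * Illustrative usage sketch (not part of this header; 'struct foo', kzalloc()
 * and the error handling are assumptions): an object typically embeds a
 * refcount_t and is created holding a single reference:
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (p)
 *			refcount_set(&p->ref, 1);
 *		return p;
 *	}
 *
 * A statically allocated object can instead use REFCOUNT_INIT(1) in its
 * initializer.
 */
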
static inline __must_check __signed_wrap
bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return __refcount_add_not_zero(i, r, NULL);
}

static inline __signed_wrap
void __refcount_add(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	__refcount_add(i, r, NULL);
}

static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero(1, r, oldp);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return __refcount_inc_not_zero(r, NULL);
}

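/*
 * Illustrative lookup sketch (not part of this header; 'foo_table' and the
 * RCU usage are assumptions): a lockless reader must use
 * refcount_inc_not_zero() because the object it finds may already have
 * dropped its last reference and be about to be freed:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_table[i]);
 *	if (p && !refcount_inc_not_zero(&p->ref))
 *		p = NULL;	// lost the race against the final put
 *	rcu_read_unlock();
 *
 * On success, the control dependency described above orders later stores to
 * the object after the increment.
 */
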
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
	__refcount_add(1, r, oldp);
}

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	__refcount_inc(r, NULL);
}

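/*
 * Illustrative sketch (not part of this header; the workqueue usage is an
 * assumption): a caller that already holds a reference takes an extra one
 * before publishing the object to another context that will drop it later:
 *
 *	refcount_inc(&p->ref);
 *	queue_work(system_wq, &p->work);	// the worker does the matching put
 */
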
static inline __must_check __signed_wrap
bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return __refcount_sub_and_test(i, r, NULL);
}

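/*
 * Illustrative sketch (not part of this header; the caller holding 'n'
 * references and kfree() are assumptions): dropping a batch of references
 * in one operation:
 *
 *	if (refcount_sub_and_test(n, &p->ref))
 *		kfree(p);
 */
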
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
	return __refcount_sub_and_test(1, r, oldp);
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return __refcount_dec_and_test(r, NULL);
}

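/*
 * Illustrative release sketch (not part of this header; foo_put() and kfree()
 * are assumptions): the release ordering of the decrement, combined with the
 * acquire ordering on the final 1->0 transition, makes it safe to free the
 * object from the caller that observed the count hit zero:
 *
 *	static void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test(&p->ref))
 *			kfree(p);
 *	}
 */
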
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(old <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	__refcount_dec(r, NULL);
}

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
							spinlock_t *lock,
							unsigned long *flags) __cond_acquires(lock);
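
/*
 * Illustrative sketch (not part of this header; 'foo_lock', the list linkage
 * and kfree() are assumptions): refcount_dec_and_lock() takes the lock only
 * when it is about to drop the last reference, so the object can be unlinked
 * and freed without racing against new lookups:
 *
 *	if (refcount_dec_and_lock(&p->ref, &foo_lock)) {
 *		list_del(&p->node);
 *		spin_unlock(&foo_lock);
 *		kfree(p);
 *	}
 */
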
#endif /* _LINUX_REFCOUNT_H */