/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all prior loads and stores
 * will be issued before; they also provide a control dependency, which will
 * order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */

#include <linux/refcount.h>
#include <linux/bug.h>

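/*
 * Minimal usage sketch, assuming a hypothetical "struct obj"; it is not part
 * of this file and exists only to illustrate the API below.
 */
#if 0	/* illustration only, not compiled */
struct obj {
	refcount_t		ref;
	struct list_head	node;	/* on a global, mutex-protected list */
	/* ... payload ... */
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->ref, 1);	/* caller owns the initial reference */
}
#endif
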
/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;
		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL_GPL(refcount_add_not_zero);

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);
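
/*
 * Sketch, assuming the hypothetical "struct obj" from above: take several
 * references in one go when handing the object to 'n' users that will each
 * drop their own reference later.
 */
#if 0	/* illustration only, not compiled */
static void obj_share(struct obj *o, unsigned int n)
{
	refcount_add(n, &o->ref);	/* caller must already hold a reference */
}
#endif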

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL_GPL(refcount_inc_not_zero);

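/*
 * Sketch of the lockless lookup pattern described above; obj_lookup() is a
 * hypothetical RCU-protected lookup. The dependent load in the lookup
 * provides the ordering; refcount_inc_not_zero() only guarantees we never
 * resurrect an object whose count already dropped to 0.
 */
#if 0	/* illustration only, not compiled */
static struct obj *obj_get_rcu(unsigned long key)
{
	struct obj *o;

	rcu_read_lock();
	o = obj_lookup(key);			/* RCU-protected lookup */
	if (o && !refcount_inc_not_zero(&o->ref))
		o = NULL;			/* lost the race with the final put */
	rcu_read_unlock();

	return o;
}
#endif
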
/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_inc);

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false, and
 * ultimately leak on underflow, and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return !new;
}
EXPORT_SYMBOL_GPL(refcount_sub_and_test);

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL_GPL(refcount_dec_and_test);

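/*
 * Sketch of the canonical put() pattern; obj_free() is hypothetical. The
 * release ordering of the decrement ensures all our prior stores to the
 * object are visible before another CPU can observe the count hitting 0.
 */
#if 0	/* illustration only, not compiled */
static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		obj_free(o);			/* we dropped the last reference */
}
#endif
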
/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; we provide this explicit case
 * rather than a generic cmpxchg because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}
EXPORT_SYMBOL_GPL(refcount_dec_if_one);

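/*
 * Sketch of the try-delete pattern: teardown proceeds only if we held the
 * sole reference, so no other user can be touching the object. obj_free()
 * is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static bool obj_try_delete(struct obj *o)
{
	if (!refcount_dec_if_one(&o->ref))
		return false;			/* other references remain */

	obj_free(o);
	return true;
}
#endif
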
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);

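/*
 * Sketch of the dec-and-lock pattern: the final put must also unlink the
 * object from a mutex-protected list; obj_list_lock and obj_free() are
 * hypothetical. On success we return holding the mutex, so the unlink and
 * the 0 count are atomic with respect to concurrent lookups.
 */
#if 0	/* illustration only, not compiled */
static void obj_put_and_unlink(struct obj *o)
{
	if (!refcount_dec_and_mutex_lock(&o->ref, &obj_list_lock))
		return;				/* not the last reference */

	list_del(&o->node);			/* unlink under the mutex */
	mutex_unlock(&obj_list_lock);
	obj_free(o);
}
#endif
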
/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_and_lock);