/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref. After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit
 * call_rcu() to synchronize with the RCU protected lookup_ioctx().
 * percpu_ref operations don't imply RCU grace periods of any kind and if a
 * user wants to combine percpu_ref with RCU protection, it must be done
 * explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be
 * dropped once. percpu_ref_kill() must be called precisely once (note
 * that it returns void), so the caller has to provide that
 * synchronization through some other mechanism.
 */
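
/*
 * Illustrative sketch of this lifecycle (hypothetical names, not the
 * actual aio code):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		kfree(container_of(ref, struct foo, ref));
 *	}
 *
 *	// creation: percpu_ref_init() takes the initial ref
 *	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	// hot-path users
 *	percpu_ref_get(&foo->ref);
 *	... use foo ...
 *	percpu_ref_put(&foo->ref);
 *
 *	// shutdown: kill _before_ dropping the initial ref
 *	percpu_ref_kill(&foo->ref);
 *	percpu_ref_put(&foo->ref);	// may invoke foo_release()
 */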

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu(). If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
	 * percpu_ref_reinit() before use. Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};
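
/*
 * For example (a sketch with hypothetical names, not from any particular
 * caller), a ref that must stay in slow atomic mode until setup finishes
 * could be initialized and later switched to percpu operation:
 *
 *	err = percpu_ref_init(&foo->ref, foo_release,
 *			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	...
 *	percpu_ref_switch_to_percpu(&foo->ref);	// enable the fast path
 */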

struct percpu_ref_data {
	atomic_long_t		count;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	bool			allow_reinit:1;
	struct rcu_head		rcu;
	struct percpu_ref	*ref;
};

struct percpu_ref {
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;

	/*
	 * 'percpu_ref' is often embedded into user structures, and only
	 * 'percpu_count_ptr' is required in the fast path, so the other
	 * fields are moved into 'percpu_ref_data' to reduce the memory
	 * footprint of the fast path.
	 */
	struct percpu_ref_data	*data;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
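
/*
 * Sketch of percpu_ref_kill_and_confirm() usage (hypothetical names;
 * foo->confirm_done is a struct completion). After the confirm_kill
 * callback has run, percpu_ref_tryget_live() is guaranteed to fail:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	percpu_ref_put(&foo->ref);	// drop the initial ref
 */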

/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function doesn't return the pointer and leave it to the caller to test
 * for NULL because doing so forces the compiler to generate two
 * conditional branches as it can't assume that @ref->percpu_count is not
 * NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer. If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The dependency ordering from the READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
	 * implies ATOMIC anyway. Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->data->count);

	rcu_read_unlock();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount by @nr unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
					  unsigned long nr)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_add(*percpu_count, nr);
		ret = true;
	} else {
		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
	}

	rcu_read_unlock();

	return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	return percpu_ref_tryget_many(ref, 1);
}

/**
 * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
 * caller is responsible for taking RCU.
 * @ref: percpu_ref to try-get
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (likely(__ref_is_percpu(ref, &percpu_count))) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->data->count);
	}
	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail. For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used. After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	bool ret = false;

	rcu_read_lock();
	ret = percpu_ref_tryget_live_rcu(ref);
	rcu_read_unlock();
	return ret;
}
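
/*
 * Sketch of the RCU-protected lookup pattern percpu_ref_tryget_live() is
 * meant for (hypothetical names, modeled on the lookup_ioctx() example
 * in the header comment):
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;	// being torn down, treat as not found
 *	rcu_read_unlock();
 *
 *	if (foo) {
 *		... use foo ...
 *		percpu_ref_put(&foo->ref);
 *	}
 */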

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
		ref->data->release(ref);

	rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}
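
/*
 * Note that ->release() is invoked from the final percpu_ref_put(),
 * inside the rcu_read_lock() section in percpu_ref_put_many() above, so
 * it must not sleep. A common pattern (a sketch with hypothetical names)
 * is to punt any sleeping work:
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		schedule_work(&foo->free_work);	// free in process context
 *	}
 */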

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

#endif