// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. In this mode
 * we don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
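
/*
 * Editorial sketch (hypothetical helper, not part of the original file): the
 * modular-arithmetic argument above in miniature. Each unsigned counter wraps
 * independently, yet the wrapped sum still equals gets minus puts.
 */
static __maybe_unused unsigned long percpu_ref_sum_example(void)
{
        unsigned long cpu0 = 0, cpu1 = 0;

        cpu0 += 5;      /* five gets served on cpu 0 */
        cpu1 -= 3;      /* three puts served on cpu 1: wraps below zero */

        /* cpu1 alone is ULONG_MAX - 2, but the wrapped sum is 5 - 3 = 2 */
        return cpu0 + cpu1;
}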

#define PERCPU_COUNT_BIAS       (1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
        return (unsigned long __percpu *)
                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
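
/*
 * Editorial aside (not from the original file): ->percpu_count_ptr is a
 * tagged pointer. percpu_ref_init() allocates the percpu counters with at
 * least 1 << __PERCPU_REF_FLAG_BITS byte alignment, so the low bits of the
 * address are always zero and can carry the __PERCPU_REF_ATOMIC and
 * __PERCPU_REF_DEAD flags. Tagging is an OR, untagging is a mask:
 *
 *        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;        // tag
 *        ptr = percpu_count_ptr(ref);                         // untag
 */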

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                    unsigned int flags, gfp_t gfp)
{
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));
        unsigned long start_count = 0;
        struct percpu_ref_data *data;

        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;

        data = kzalloc(sizeof(*ref->data), gfp);
        if (!data) {
                free_percpu((void __percpu *)ref->percpu_count_ptr);
                ref->percpu_count_ptr = 0;
                return -ENOMEM;
        }

        data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
        data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

        if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
                data->allow_reinit = true;
        } else {
                start_count += PERCPU_COUNT_BIAS;
        }

        if (flags & PERCPU_REF_INIT_DEAD)
                ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        else
                start_count++;

        atomic_long_set(&data->count, start_count);

        data->release = release;
        data->confirm_switch = NULL;
        data->ref = ref;
        ref->data = data;
        return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

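/*
 * Editorial sketch (hypothetical struct and names, not part of the original
 * file): a typical percpu_ref_init() user embeds the ref in its object and
 * frees that object from @release.
 */
struct foo {
        struct percpu_ref ref;
        void *buf;
};

static void foo_release(struct percpu_ref *ref)
{
        struct foo *foo = container_of(ref, struct foo, ref);

        /* must not sleep: may run from RCU callback context */
        kfree(foo->buf);
        kfree(foo);
}

static __maybe_unused struct foo *foo_alloc(void)
{
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return NULL;
        if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
                kfree(foo);
                return NULL;
        }
        return foo;     /* holds the initial ref until percpu_ref_kill() */
}
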
static void __percpu_ref_exit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

        if (percpu_count) {
                /* non-NULL confirm_switch indicates switching in progress */
                WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
                free_percpu(percpu_count);
                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        }
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
        struct percpu_ref_data *data = ref->data;
        unsigned long flags;

        __percpu_ref_exit(ref);

        if (!data)
                return;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
        ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
                __PERCPU_REF_FLAG_BITS;
        ref->data = NULL;
        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

        kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

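/*
 * Editorial sketch (hypothetical, reusing struct foo from the sketch above):
 * the init-failure path mentioned in the comment above. percpu_ref_init()
 * succeeded but a later construction step failed, so the ref is exited
 * before the half-built object is freed.
 */
static __maybe_unused struct foo *foo_alloc_with_buf(void)
{
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return NULL;
        if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
                goto err_free;
        foo->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!foo->buf)
                goto err_exit;
        return foo;

err_exit:
        percpu_ref_exit(&foo->ref);
err_free:
        kfree(foo);
        return NULL;
}
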
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
        struct percpu_ref_data *data = container_of(rcu,
                        struct percpu_ref_data, rcu);
        struct percpu_ref *ref = data->ref;

        data->confirm_switch(ref);
        data->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);

        if (!data->allow_reinit)
                __percpu_ref_exit(ref);

        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
        struct percpu_ref_data *data = container_of(rcu,
                        struct percpu_ref_data, rcu);
        struct percpu_ref *ref = data->ref;
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        static atomic_t underflows;
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(percpu_count, cpu);

        pr_debug("global %lu percpu %lu\n",
                 atomic_long_read(&data->count), count);

        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
         * to &ref->count; since gets could be happening on one cpu while puts
         * happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - but the
         * sum of all the counts will be consistent and correct.
         *
         * Subtracting the bias value then has to happen _after_ adding count to
         * &ref->count; we need the bias value to prevent &ref->count from
         * reaching 0 before we add the percpu counts. But doing it at the same
         * time is equivalent and saves us atomic operations:
         */
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

        if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
                      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
                      data->release, atomic_long_read(&data->count)) &&
            atomic_inc_return(&underflows) < 4) {
                pr_err("%s(): percpu_ref underflow", __func__);
                mem_dump_obj(data);
        }

        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                          percpu_ref_func_t *confirm_switch)
{
        if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
                if (confirm_switch)
                        confirm_switch(ref);
                return;
        }

        /* switching from percpu to atomic */
        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

        /*
         * Non-NULL ->confirm_switch is used to indicate that switching is
         * in progress. Use noop one if unspecified.
         */
        ref->data->confirm_switch = confirm_switch ?:
                percpu_ref_noop_confirm_switch;

        percpu_ref_get(ref);    /* put after confirmation */
        call_rcu_hurry(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        int cpu;

        BUG_ON(!percpu_count);

        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;

        if (WARN_ON_ONCE(!ref->data->allow_reinit))
                return;

        atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

        /*
         * Restore per-cpu operation. smp_store_release() is paired
         * with READ_ONCE() in __ref_is_percpu() and guarantees that the
         * zeroing is visible to all percpu accesses which can see the
         * following __PERCPU_REF_ATOMIC clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;

        smp_store_release(&ref->percpu_count_ptr,
                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
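
/*
 * Editorial aside (a simplified sketch of the reader side, adapted from
 * __ref_is_percpu() in include/linux/percpu-refcount.h; not text from this
 * file): the smp_store_release() above pairs with a READ_ONCE() roughly
 * like this:
 *
 *        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
 *        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
 *                return false;                // use &ref->data->count
 *        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
 *        return true;                         // use the percpu counters
 *
 * A reader that observes __PERCPU_REF_ATOMIC cleared is therefore also
 * guaranteed to observe the percpu slots zeroed just before it.
 */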

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                                     percpu_ref_func_t *confirm_switch)
{
        struct percpu_ref_data *data = ref->data;

        lockdep_assert_held(&percpu_ref_switch_lock);

        /*
         * If the previous ATOMIC switching hasn't finished yet, wait for
         * its completion. If the caller ensures that ATOMIC switching
         * isn't in progress, this function can be called from any context.
         */
        wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
                            percpu_ref_switch_lock);

        if (data->force_atomic || percpu_ref_is_dying(ref))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->data->force_atomic = true;
        __percpu_ref_switch_mode(ref, confirm_switch);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
        percpu_ref_switch_to_atomic(ref, NULL);
        wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);

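/*
 * Editorial sketch (hypothetical, not part of the original file): forcing an
 * exact count. After percpu_ref_switch_to_atomic_sync() returns, every
 * get/put hits the shared atomic counter, so e.g. percpu_ref_is_zero()
 * (defined later in this file) reflects an exact count.
 */
static __maybe_unused void foo_quiesce(struct foo *foo)
{
        percpu_ref_switch_to_atomic_sync(&foo->ref);
        /* ... inspect state while counting is exact ... */
        percpu_ref_switch_to_percpu(&foo->ref);   /* restore fast path */
}
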
/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->data->force_atomic = false;
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ONCE(percpu_ref_is_dying(ref),
                  "%s called more than once on %ps!", __func__,
                  ref->data->release);

        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_mode(ref, confirm_kill);
        percpu_ref_put(ref);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);

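/*
 * Editorial sketch (hypothetical, not part of the original file): a common
 * shutdown pattern. Kill the ref to drop the initial reference and make
 * further percpu_ref_tryget_live() calls fail, then sleep until the release
 * callback reports that the last reference is gone. @drain_wq is assumed to
 * be set up with init_waitqueue_head() at object creation (not shown).
 */
struct foo_waiter {
        struct percpu_ref ref;
        wait_queue_head_t drain_wq;
};

static void foo_waiter_release(struct percpu_ref *ref)
{
        struct foo_waiter *w = container_of(ref, struct foo_waiter, ref);

        wake_up_all(&w->drain_wq);
}

static __maybe_unused void foo_waiter_shutdown(struct foo_waiter *w)
{
        percpu_ref_kill(&w->ref);       /* no new tryget_live() users */
        wait_event(w->drain_wq, percpu_ref_is_zero(&w->ref));
        percpu_ref_exit(&w->ref);
}
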
/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        unsigned long count, flags;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;

        /* protect us from being destroyed */
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
        if (ref->data)
                count = atomic_long_read(&ref->data->count);
        else
                count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

        return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
        WARN_ON_ONCE(!percpu_ref_is_zero(ref));

        percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ON_ONCE(!percpu_ref_is_dying(ref));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
        percpu_ref_get(ref);
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
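
/*
 * Editorial sketch (hypothetical, loosely modeled on request-queue freezing;
 * not part of the original file): pairing percpu_ref_kill() with
 * percpu_ref_resurrect() for a freeze/thaw cycle on an object that was
 * drained but not percpu_ref_exit()'d. Resurrection re-takes the initial
 * reference that the kill dropped.
 */
static __maybe_unused void foo_waiter_thaw(struct foo_waiter *w)
{
        /* @w->ref is dead and drained but was not exited */
        percpu_ref_resurrect(&w->ref);  /* tryget_live() works again */
}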