percpu_ref: release percpu memory early without PERCPU_REF_ALLOW_REINIT
[linux-block.git] / lib / percpu-refcount.c
#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. To begin
 * with, we don't try to detect the ref hitting 0 - which means that get/put
 * can just increment or decrement the local counter. Note that the counter on
 * a particular cpu can (and will) wrap - this is fine; when we go to shut
 * down, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non percpu mode before the initial ref is dropped
 * everything works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

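/*
 * Illustrative sketch (not part of this file): why wraparound of an
 * individual percpu counter is harmless. The values below are arbitrary;
 * what matters is that the modular sum always equals gets minus puts.
 */
static void __maybe_unused percpu_sum_example(void)
{
	unsigned long c0 = ULONG_MAX - 1;	/* "cpu0" counter, mid-life */
	unsigned long c1 = 2;			/* "cpu1" counter, mid-life */

	c0 += 3;	/* three gets on cpu0: overflows to 1 */
	c1 -= 3;	/* three puts on cpu1: underflows to ULONG_MAX */

	/* 1 + ULONG_MAX wraps to 0 == total gets - total puts */
	WARN_ON(c0 + c1 != 0);
}
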
static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

/*
 * The low bits of ->percpu_count_ptr carry the __PERCPU_REF_ATOMIC and
 * __PERCPU_REF_DEAD mode flags; mask them off to recover the actual
 * percpu counter pointer.
 */
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1). See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		ref->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	ref->confirm_switch = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
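
/*
 * Illustrative sketch (not part of this file): the typical way a percpu_ref
 * is embedded in an object. "struct foo", foo_release() and foo_create()
 * are hypothetical names; assumes <linux/slab.h> for kzalloc()/kfree().
 */
struct foo {
	struct percpu_ref ref;
	/* ... object payload ... */
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	/* must not sleep: may be invoked from RCU callback context */
	kfree(foo);
}

static __maybe_unused struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	/* @flags == 0: start live, in percpu mode, with a refcount of 1 */
	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}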

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure
 * path where percpu_ref_init() succeeded but other parts of the
 * initialization of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
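
/*
 * Illustrative sketch (not part of this file): percpu_ref_exit() in the
 * release path, freeing the ref's own resources before the object. Builds
 * on the hypothetical struct foo above.
 */
static void __maybe_unused foo_release_and_exit(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	/* the ref hit 0 and is no longer in active use */
	percpu_ref_exit(ref);
	kfree(foo);
}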

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	/*
	 * A ref that can't be reinitialized will never switch back to
	 * percpu mode, so its percpu memory can be released early.
	 */
	if (!ref->allow_reinit)
		percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
			    percpu_ref_switch_lock);

	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
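
/*
 * Illustrative sketch (not part of this file): forcing atomic mode with a
 * confirmation callback before tearing down a percpu fast path.
 * foo_confirm() and foo_quiesce() are hypothetical names.
 */
static void foo_confirm(struct percpu_ref *ref)
{
	/* runs once no CPU can still be using percpu mode; must not block */
	pr_debug("switched to atomic");
}

static void __maybe_unused foo_quiesce(struct foo *foo)
{
	percpu_ref_switch_to_atomic(&foo->ref, foo_confirm);
}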

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
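
/*
 * Illustrative sketch (not part of this file): a sleepable caller that
 * must not proceed until the switch has completed on all CPUs.
 * foo_freeze() is a hypothetical name.
 */
static void __maybe_unused foo_freeze(struct foo *foo)
{
	percpu_ref_switch_to_atomic_sync(&foo->ref);	/* may sleep */
	/* from here on, every get/put hits the atomic counter */
}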

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
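
/*
 * Illustrative sketch (not part of this file): reversing the sticky atomic
 * state once the slow path is finished. foo_thaw() is a hypothetical name.
 */
static void __maybe_unused foo_thaw(struct foo *foo)
{
	percpu_ref_switch_to_percpu(&foo->ref);
}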

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs, at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
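
/*
 * Illustrative sketch (not part of this file): shutdown with kill
 * confirmation. Assumes a "struct completion killed" member added to the
 * hypothetical struct foo; foo_confirm_kill() and foo_shutdown() are
 * hypothetical names.
 */
static void foo_confirm_kill(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	/* all CPUs see @ref as dead; tryget_live() fails from here on */
	complete(&foo->killed);
}

static void __maybe_unused foo_shutdown(struct foo *foo)
{
	init_completion(&foo->killed);
	/* drops the initial ref; foo_release() runs after the final put */
	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
	wait_for_completion(&foo->killed);
}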

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(), ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
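
/*
 * Illustrative sketch (not part of this file): a freeze/unfreeze-style
 * kill/reinit cycle. foo_unfreeze() is hypothetical and requires that the
 * ref was killed and has already drained to zero.
 */
static void __maybe_unused foo_unfreeze(struct foo *foo)
{
	/* @ref is zero and dead here; come back up with a refcount of 1 */
	percpu_ref_reinit(&foo->ref);
}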

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
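
/*
 * Illustrative sketch (not part of this file): aborting a shutdown before
 * the ref has drained. foo_cancel_shutdown() is a hypothetical name; the
 * caller must guarantee foo_release() cannot run concurrently.
 */
static void __maybe_unused foo_cancel_shutdown(struct foo *foo)
{
	/* re-takes the initial ref dropped by percpu_ref_kill() */
	percpu_ref_resurrect(&foo->ref);
}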