proportions: add @gfp to init functions
[linux-2.6-block.git] / lib / percpu-refcount.c

#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * A percpu refcount starts out as just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped, everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill(). Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */
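
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * embedding object takes the initial ref in percpu_ref_init(), hot paths use
 * get/put, and shutdown drops the initial ref with percpu_ref_kill():
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)	// must not sleep
 *	{
 *		kfree(container_of(ref, struct foo, ref));
 *	}
 *
 *	if (percpu_ref_init(&foo->ref, foo_release))
 *		return -ENOMEM;
 *	percpu_ref_get(&foo->ref);	// cheap percpu increment
 *	percpu_ref_put(&foo->ref);	// cheap percpu decrement
 *	percpu_ref_kill(&foo->ref);	// drop the initial ref at shutdown
 */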

#define PCPU_COUNT_BIAS (1U << 31)

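/*
 * Illustration (added, not from the original source): after percpu_ref_init(),
 * &ref->count holds 1 + PCPU_COUNT_BIAS = 0x80000001. Gets and puts only touch
 * the percpu counters, so the atomic_t stays far above zero until
 * percpu_ref_kill_rcu() folds the percpu sum in and subtracts the bias.
 */
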
static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}

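/*
 * Commentary (added): ref->pcpu_count_ptr doubles as a tagged pointer.
 * Percpu allocations are aligned well beyond one byte, so the PCPU_REF_DEAD
 * flag can hide in a low bit of the address; masking it off recovers the
 * real percpu pointer whether or not the ref has been killed yet.
 */
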
/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
	if (!ref->pcpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
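
/*
 * Failure-path sketch (hypothetical caller): if a later step of the embedding
 * object's setup fails after percpu_ref_init() succeeded, the percpu
 * allocation must be released with percpu_ref_exit() (see below):
 *
 *	err = percpu_ref_init(&foo->ref, foo_release);
 *	if (err)
 *		goto err_free_foo;
 *	err = foo_setup_rest(foo);	// hypothetical follow-up init
 *	if (err)
 *		goto err_exit_ref;
 *	return 0;
 *
 * err_exit_ref:
 *	percpu_ref_exit(&foo->ref);
 * err_free_foo:
 *	kfree(foo);
 *	return err;
 */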

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(). @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	BUG_ON(!pcpu_count);
	WARN_ON(!percpu_ref_is_zero(ref));

	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following PCPU_REF_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
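
/*
 * Cycle sketch (hypothetical caller): a ref may be killed and later
 * re-initialized, e.g. to freeze and thaw an object, as long as the caller
 * waits for it to actually reach zero in between:
 *
 *	percpu_ref_kill(&foo->ref);
 *	wait_event(foo->wq, percpu_ref_is_zero(&foo->ref));	// assumed waitqueue
 *	percpu_ref_reinit(&foo->ref);
 */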

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);

	if (pcpu_count) {
		free_percpu(pcpu_count);
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
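
/*
 * Release-path sketch (hypothetical caller): as noted above, the release
 * callback is a natural place to exit the ref before freeing the embedding
 * object:
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 */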

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	unsigned count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
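
	/*
	 * Worked example (illustrative numbers only): with one get on cpu0
	 * and the matching put on cpu1, the counters read 1 and (unsigned)-1;
	 * summing them wraps back to 0, so atomic_add(0 - PCPU_COUNT_BIAS)
	 * leaves &ref->count holding exactly the refs that are still live.
	 */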

	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
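
/*
 * Confirmation sketch (hypothetical caller): a user that must know when
 * percpu_ref_tryget() can no longer succeed can pass a callback which, since
 * it may not block, typically just signals a completion that the caller then
 * waits on from sleepable context:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->kill_done);	// assumed struct completion
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->kill_done);
 */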