Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_PERCPU_COUNTER_H |
3 | #define _LINUX_PERCPU_COUNTER_H | |
4 | /* | |
5 | * A simple "approximate counter" for use in ext2 and ext3 superblocks. | |
6 | * | |
7 | * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. | |
8 | */ | |
9 | ||
1da177e4 LT |
10 | #include <linux/spinlock.h> |
11 | #include <linux/smp.h> | |
c67ad917 | 12 | #include <linux/list.h> |
1da177e4 LT |
13 | #include <linux/threads.h> |
14 | #include <linux/percpu.h> | |
0216bfcf | 15 | #include <linux/types.h> |
1da177e4 | 16 | |
5d0ce359 JS |
17 | /* percpu_counter batch for local add or sub */ |
18 | #define PERCPU_COUNTER_LOCAL_BATCH INT_MAX | |
19 | ||
1da177e4 LT |
20 | #ifdef CONFIG_SMP |
21 | ||
/*
 * SMP flavour: per-CPU s32 deltas plus a central s64 count.  The deltas
 * are folded into 'count' by the lib/percpu_counter.c side (not visible
 * here), which is presumably what 'lock' serialises — confirm there.
 */
struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;			/* central, approximate value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;		/* per-CPU deltas */
};
30 | ||
/* Default batch handed to percpu_counter_add_batch() by the wrappers below. */
extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

/*
 * Initialise an array of nr_counters counters in one call; a single static
 * lockdep class key per call site covers all of them.
 */
#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})
44 | ||
/* Single-counter convenience wrapper around percpu_counter_init_many(). */
#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);

/* Tear down a single counter. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}
54 | ||
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
				  s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

/* Three-way compare against rhs, using the default batch. */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
1da177e4 | 68 | |
/* Add amount, using the default global batch threshold. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
73 | ||
/*
 * Add amount only if the result stays within limit (see the !SMP version
 * below for the exact semantics); returns whether the add was applied.
 */
static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	return __percpu_counter_limited_add(fbc, limit, amount,
					    percpu_counter_batch);
}
80 | ||
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-cpu counter and not in fbc->count until
 * the local count overflows PERCPU_COUNTER_LOCAL_BATCH.  This makes counter
 * writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts.  So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
96 | ||
bf1d89c8 PZ |
97 | static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) |
98 | { | |
02d21168 | 99 | s64 ret = __percpu_counter_sum(fbc); |
bf1d89c8 PZ |
100 | return ret < 0 ? 0 : ret; |
101 | } | |
102 | ||
/* Accurate sum across all CPUs; more expensive than percpu_counter_read(). */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
107 | ||
/*
 * Fast, approximate read of the central count; per-CPU deltas are not
 * included.  NOTE(review): plain read, unlike the READ_ONCE() in
 * percpu_counter_read_positive() below — confirm whether that is intended.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
112 | ||
113 | /* | |
114 | * It is possible for the percpu_counter_read() to return a small negative | |
115 | * number for some counter which should never be negative. | |
0216bfcf | 116 | * |
1da177e4 | 117 | */ |
0216bfcf | 118 | static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) |
1da177e4 | 119 | { |
7e234520 QC |
120 | /* Prevent reloads of fbc->count */ |
121 | s64 ret = READ_ONCE(fbc->count); | |
1da177e4 | 122 | |
0216bfcf | 123 | if (ret >= 0) |
1da177e4 | 124 | return ret; |
c84598bb | 125 | return 0; |
1da177e4 LT |
126 | } |
127 | ||
/* True once the per-CPU storage has been allocated. */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}
132 | ||
7fa4cf92 | 133 | #else /* !CONFIG_SMP */ |
1da177e4 LT |
134 | |
/* UP flavour: no per-CPU state, just the plain value. */
struct percpu_counter {
	s64 count;
};
138 | ||
c439d5e8 MG |
139 | static inline int percpu_counter_init_many(struct percpu_counter *fbc, |
140 | s64 amount, gfp_t gfp, | |
141 | u32 nr_counters) | |
142 | { | |
143 | u32 i; | |
144 | ||
145 | for (i = 0; i < nr_counters; i++) | |
146 | fbc[i].count = amount; | |
147 | ||
148 | return 0; | |
149 | } | |
150 | ||
/* UP: initialisation cannot fail; gfp is accepted for API parity only. */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}
156 | ||
/* UP: nothing was allocated, so there is nothing to free. */
static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}
161 | ||
/* UP: no-op, for API parity with the SMP version. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
165 | ||
3a587f47 PZ |
166 | static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) |
167 | { | |
168 | fbc->count = amount; | |
169 | } | |
170 | ||
27f5e0f6 TC |
171 | static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) |
172 | { | |
173 | if (fbc->count > rhs) | |
174 | return 1; | |
175 | else if (fbc->count < rhs) | |
176 | return -1; | |
177 | else | |
178 | return 0; | |
179 | } | |
180 | ||
/* UP: the batch-based error bound does not apply; batch is ignored. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
186 | ||
/*
 * UP: update with local interrupts disabled so the read-modify-write of
 * the s64 cannot be interleaved with an interrupt-context update on the
 * same CPU.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}
196 | ||
/*
 * UP: apply amount only when the result does not pass limit:
 *  - for a positive amount, the new count must be <= limit;
 *  - for a negative amount, the new count must be >= limit;
 *  - amount == 0 trivially succeeds without touching the counter.
 * Returns true when the add was applied (or was a no-op).
 * Runs with interrupts off for the same reason as percpu_counter_add().
 */
static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{
	unsigned long flags;
	bool good = false;
	s64 count;

	if (amount == 0)
		return true;

	local_irq_save(flags);
	count = fbc->count + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		fbc->count = count;
		good = true;
	}
	local_irq_restore(flags);
	return good;
}
217 | ||
/* non-SMP: percpu_counter_add_local() is the same as percpu_counter_add() */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}
224 | ||
/* UP: batching is meaningless with no per-CPU state; plain add. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
230 | ||
/* UP: the stored value is the exact count. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
235 | ||
/*
 * percpu_counter is intended to track positive numbers.  In the UP case
 * the number should never be negative, so no clamping is done here
 * (unlike the SMP version).
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
244 | ||
/* UP: no per-CPU deltas to fold in, so the "sum" is just the read. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
249 | ||
/* UP: the plain read is already exact. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
254 | ||
/* UP: there is no allocation step, so the counter is always usable. */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
259 | ||
/* UP: nothing to flush; the count is always up to date. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
1da177e4 LT |
263 | #endif /* CONFIG_SMP */ |
264 | ||
/* Increment by one (default batch). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
269 | ||
/* Decrement by one (default batch). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
274 | ||
/* Subtract amount: a negated add, with the default batch. */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
279 | ||
/* Subtract amount via the local (large-batch) path; see the comment above
 * percpu_counter_add_local() for when to use the *_local variants. */
static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}
285 | ||
1da177e4 | 286 | #endif /* _LINUX_PERCPU_COUNTER_H */ |