/* include/linux/percpu_counter.h */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_PERCPU_COUNTER_H
3#define _LINUX_PERCPU_COUNTER_H
4/*
5 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 *
7 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
8 */
9
1da177e4
LT
10#include <linux/spinlock.h>
11#include <linux/smp.h>
c67ad917 12#include <linux/list.h>
1da177e4
LT
13#include <linux/threads.h>
14#include <linux/percpu.h>
0216bfcf 15#include <linux/types.h>
1da177e4 16
5d0ce359
JS
/*
 * percpu_counter batch for local add or sub: INT_MAX keeps updates in the
 * local per-CPU counter (see percpu_counter_add_local() below), so writers
 * rarely touch the shared count.
 */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

1da177e4
LT
20#ifdef CONFIG_SMP
21
struct percpu_counter {
	raw_spinlock_t lock;	/* protects count when deltas are folded in */
	s64 count;		/* shared, approximate total */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas, NULL until init */
};
30
/* Default batch threshold before a per-CPU delta is folded into ->count. */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

/* Give each counter its own static lockdep class key. */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
80188b0d
DC
50
51static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
52{
53 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
54}
1da177e4 55
20e89767 56static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
252e0ba6 57{
104b4e51 58 percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
252e0ba6
PZ
59}
60
5d0ce359
JS
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and not in fbc->count until
 * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
 * writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
76
bf1d89c8
PZ
77static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
78{
02d21168 79 s64 ret = __percpu_counter_sum(fbc);
bf1d89c8
PZ
80 return ret < 0 ? 0 : ret;
81}
82
/* Accurate (but more expensive) sum: folds in every per-CPU delta. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
87
/*
 * Cheap, approximate read: returns ->count without folding in the per-CPU
 * deltas, so the result may lag the true value.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
92
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative, when unfolded
 * per-CPU deltas have not yet reached ->count. This variant clamps such
 * transient negatives to zero.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
107
/* True once percpu_counter_init() has allocated the per-CPU storage. */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}
112
7fa4cf92 113#else /* !CONFIG_SMP */
1da177e4
LT
114
/* UP: a single s64 suffices; no per-CPU storage and no lock needed. */
struct percpu_counter {
	s64 count;
};
118
908c7f19
TH
/*
 * UP init cannot fail: nothing is allocated, so @gfp is unused and the
 * return value is always 0.
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
125
/* Nothing to free on UP. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
129
3a587f47
PZ
/* Overwrite the counter with @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
134
27f5e0f6
TC
135static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
136{
137 if (fbc->count > rhs)
138 return 1;
139 else if (fbc->count < rhs)
140 return -1;
141 else
142 return 0;
143}
144
80188b0d
DC
/* @batch is meaningless on UP; defer to the plain comparison. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
150
/*
 * Add @amount. Interrupts are disabled around the update so an interrupt
 * handler on this CPU can never observe (or interleave with) a partially
 * updated 64-bit count.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}
160
5d0ce359
JS
/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}
167
/* @batch is ignored on UP; every update goes straight to ->count. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
173
/* Exact on UP: there are no per-CPU deltas to fold in. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
178
c84598bb
SL
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative, so no clamping is needed here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
187
/* On UP a plain read is already the exact sum. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
192
bf1d89c8
PZ
/* On UP a plain read is already the exact sum. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
197
/* No allocation happens on UP, so a counter is always "initialized". */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
202
0a4954a8
FT
/* No per-CPU state to flush on UP. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
1da177e4
LT
206#endif /* CONFIG_SMP */
207
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
212
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
217
3cb4f9fa
PZ
/*
 * Subtract @amount from the counter.
 * NOTE(review): -amount overflows for amount == S64_MIN; presumably
 * callers never pass that — confirm if this matters for a new caller.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
222
5d0ce359
JS
/* Subtract @amount, batching in the local per-CPU counter (see above). */
static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}
228
1da177e4 229#endif /* _LINUX_PERCPU_COUNTER_H */