Merge tag 'sound-5.16-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
[linux-block.git] / include / linux / percpu_counter.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_PERCPU_COUNTER_H
3#define _LINUX_PERCPU_COUNTER_H
4/*
5 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 *
7 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
8 */
9
1da177e4
LT
10#include <linux/spinlock.h>
11#include <linux/smp.h>
c67ad917 12#include <linux/list.h>
1da177e4
LT
13#include <linux/threads.h>
14#include <linux/percpu.h>
0216bfcf 15#include <linux/types.h>
908c7f19 16#include <linux/gfp.h>
1da177e4
LT
17
18#ifdef CONFIG_SMP
19
struct percpu_counter {
	raw_spinlock_t lock;		/* serializes updates to 'count' -- presumably taken by lib/percpu_counter.c; confirm there */
	s64 count;			/* approximate global value; per-cpu deltas folded in periodically */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;		/* per-cpu delta array; NULL until init succeeds */
};
28
/* Default batch size: per-cpu deltas are folded into ->count in units of this. */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

/*
 * Give each call site its own static lock_class_key so lockdep can
 * distinguish the locks of different counters.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

49static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
50{
51 return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
52}
1da177e4 53
20e89767 54static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
252e0ba6 55{
104b4e51 56 percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
252e0ba6
PZ
57}
58
bf1d89c8
PZ
59static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
60{
02d21168 61 s64 ret = __percpu_counter_sum(fbc);
bf1d89c8
PZ
62 return ret < 0 ? 0 : ret;
63}
64
65static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
66{
02d21168 67 return __percpu_counter_sum(fbc);
bf1d89c8
PZ
68}
69
/*
 * Cheap, lockless read of the global approximation.  Does not fold in
 * the per-cpu deltas, so the result may be stale.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
74
75/*
76 * It is possible for the percpu_counter_read() to return a small negative
77 * number for some counter which should never be negative.
0216bfcf 78 *
1da177e4 79 */
0216bfcf 80static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
1da177e4 81{
7e234520
QC
82 /* Prevent reloads of fbc->count */
83 s64 ret = READ_ONCE(fbc->count);
1da177e4 84
0216bfcf 85 if (ret >= 0)
1da177e4 86 return ret;
c84598bb 87 return 0;
1da177e4
LT
88}
89
85dcbba3 90static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
7f93cff9
TT
91{
92 return (fbc->counters != NULL);
93}
94
7fa4cf92 95#else /* !CONFIG_SMP */
1da177e4
LT
96
/* UP variant: a single plain 64-bit counter, no per-cpu state. */
struct percpu_counter {
	s64 count;
};
100
908c7f19
TH
/* UP: no allocation needed, so this cannot fail; @gfp is ignored. */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
107
/* UP: nothing was allocated, so nothing to release. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
111
3a587f47
PZ
/* UP: setting the counter is a plain store. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
116
27f5e0f6
TC
117static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
118{
119 if (fbc->count > rhs)
120 return 1;
121 else if (fbc->count < rhs)
122 return -1;
123 else
124 return 0;
125}
126
80188b0d
DC
/* UP: the count is always exact, so @batch is irrelevant. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
132
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	/* Disable preemption so the read-modify-write is not interleaved. */
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
140
/* UP: batching has no meaning; apply the delta directly. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
146
/* UP: the stored value is always exact. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
151
c84598bb
SL
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative, so no clamping is needed here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
160
/* UP: reading is already exact, so the "sum" is just the read. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
165
bf1d89c8
PZ
/* UP: no per-cpu deltas to fold; the plain read is the exact sum. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
170
/* UP: init cannot fail, so a counter is always considered initialized. */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
175
0a4954a8
FT
/* UP: nothing to synchronize; the count is always coherent. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
1da177e4
LT
179#endif /* CONFIG_SMP */
180
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
185
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
190
/* Subtract @amount by adding its negation. */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
195
1da177e4 196#endif /* _LINUX_PERCPU_COUNTER_H */