Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
fe896d18 JK |
2 | #ifndef _LINUX_PAGE_REF_H |
3 | #define _LINUX_PAGE_REF_H | |
4 | ||
5 | #include <linux/atomic.h> | |
6 | #include <linux/mm_types.h> | |
7 | #include <linux/page-flags.h> | |
95813b8f JK |
8 | #include <linux/tracepoint-defs.h> |
9 | ||
c65fc227 SRV |
10 | DECLARE_TRACEPOINT(page_ref_set); |
11 | DECLARE_TRACEPOINT(page_ref_mod); | |
12 | DECLARE_TRACEPOINT(page_ref_mod_and_test); | |
13 | DECLARE_TRACEPOINT(page_ref_mod_and_return); | |
14 | DECLARE_TRACEPOINT(page_ref_mod_unless); | |
15 | DECLARE_TRACEPOINT(page_ref_freeze); | |
16 | DECLARE_TRACEPOINT(page_ref_unfreeze); | |
95813b8f JK |
17 | |
#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

/* Out-of-line tracepoint emitters, defined in mm/debug_page_ref.c. */
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

/* Tracing disabled: the guard is constant-false so calls compile away. */
#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif
fe896d18 | 64 | |
5f7dadf3 | 65 | static inline int page_ref_count(const struct page *page) |
fe896d18 | 66 | { |
0139aa7b | 67 | return atomic_read(&page->_refcount); |
fe896d18 JK |
68 | } |
69 | ||
c24016ac MWO |
70 | /** |
71 | * folio_ref_count - The reference count on this folio. | |
72 | * @folio: The folio. | |
73 | * | |
74 | * The refcount is usually incremented by calls to folio_get() and | |
75 | * decremented by calls to folio_put(). Some typical users of the | |
76 | * folio refcount: | |
77 | * | |
78 | * - Each reference from a page table | |
79 | * - The page cache | |
80 | * - Filesystem private data | |
81 | * - The LRU list | |
82 | * - Pipes | |
83 | * - Direct IO which references this page in the process address space | |
84 | * | |
85 | * Return: The number of references to this folio. | |
86 | */ | |
87 | static inline int folio_ref_count(const struct folio *folio) | |
88 | { | |
89 | return page_ref_count(&folio->page); | |
90 | } | |
91 | ||
/* Reference count of the folio containing @page (tail pages report the head's). */
static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}

97 | static inline void set_page_count(struct page *page, int v) | |
98 | { | |
0139aa7b | 99 | atomic_set(&page->_refcount, v); |
c65fc227 | 100 | if (page_ref_tracepoint_active(page_ref_set)) |
95813b8f | 101 | __page_ref_set(page, v); |
fe896d18 JK |
102 | } |
103 | ||
c24016ac MWO |
104 | static inline void folio_set_count(struct folio *folio, int v) |
105 | { | |
106 | set_page_count(&folio->page, v); | |
107 | } | |
108 | ||
/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

118 | static inline void page_ref_add(struct page *page, int nr) | |
119 | { | |
0139aa7b | 120 | atomic_add(nr, &page->_refcount); |
c65fc227 | 121 | if (page_ref_tracepoint_active(page_ref_mod)) |
95813b8f | 122 | __page_ref_mod(page, nr); |
fe896d18 JK |
123 | } |
124 | ||
c24016ac MWO |
125 | static inline void folio_ref_add(struct folio *folio, int nr) |
126 | { | |
127 | page_ref_add(&folio->page, nr); | |
128 | } | |
129 | ||
fe896d18 JK |
130 | static inline void page_ref_sub(struct page *page, int nr) |
131 | { | |
0139aa7b | 132 | atomic_sub(nr, &page->_refcount); |
c65fc227 | 133 | if (page_ref_tracepoint_active(page_ref_mod)) |
95813b8f | 134 | __page_ref_mod(page, -nr); |
fe896d18 JK |
135 | } |
136 | ||
c24016ac MWO |
137 | static inline void folio_ref_sub(struct folio *folio, int nr) |
138 | { | |
139 | page_ref_sub(&folio->page, nr); | |
140 | } | |
141 | ||
498aefbc | 142 | static inline int folio_ref_sub_return(struct folio *folio, int nr) |
566d774a | 143 | { |
498aefbc | 144 | int ret = atomic_sub_return(nr, &folio->_refcount); |
566d774a | 145 | |
c65fc227 | 146 | if (page_ref_tracepoint_active(page_ref_mod_and_return)) |
498aefbc | 147 | __page_ref_mod_and_return(&folio->page, -nr, ret); |
566d774a JH |
148 | return ret; |
149 | } | |
150 | ||
fe896d18 JK |
151 | static inline void page_ref_inc(struct page *page) |
152 | { | |
0139aa7b | 153 | atomic_inc(&page->_refcount); |
c65fc227 | 154 | if (page_ref_tracepoint_active(page_ref_mod)) |
95813b8f | 155 | __page_ref_mod(page, 1); |
fe896d18 JK |
156 | } |
157 | ||
c24016ac MWO |
158 | static inline void folio_ref_inc(struct folio *folio) |
159 | { | |
160 | page_ref_inc(&folio->page); | |
161 | } | |
162 | ||
fe896d18 JK |
163 | static inline void page_ref_dec(struct page *page) |
164 | { | |
0139aa7b | 165 | atomic_dec(&page->_refcount); |
c65fc227 | 166 | if (page_ref_tracepoint_active(page_ref_mod)) |
95813b8f | 167 | __page_ref_mod(page, -1); |
fe896d18 JK |
168 | } |
169 | ||
c24016ac MWO |
170 | static inline void folio_ref_dec(struct folio *folio) |
171 | { | |
172 | page_ref_dec(&folio->page); | |
173 | } | |
174 | ||
fe896d18 JK |
175 | static inline int page_ref_sub_and_test(struct page *page, int nr) |
176 | { | |
0139aa7b | 177 | int ret = atomic_sub_and_test(nr, &page->_refcount); |
95813b8f | 178 | |
c65fc227 | 179 | if (page_ref_tracepoint_active(page_ref_mod_and_test)) |
95813b8f JK |
180 | __page_ref_mod_and_test(page, -nr, ret); |
181 | return ret; | |
fe896d18 JK |
182 | } |
183 | ||
c24016ac MWO |
184 | static inline int folio_ref_sub_and_test(struct folio *folio, int nr) |
185 | { | |
186 | return page_ref_sub_and_test(&folio->page, nr); | |
187 | } | |
188 | ||
df9b2b4a DH |
189 | static inline int page_ref_inc_return(struct page *page) |
190 | { | |
191 | int ret = atomic_inc_return(&page->_refcount); | |
192 | ||
c65fc227 | 193 | if (page_ref_tracepoint_active(page_ref_mod_and_return)) |
df9b2b4a DH |
194 | __page_ref_mod_and_return(page, 1, ret); |
195 | return ret; | |
196 | } | |
197 | ||
c24016ac MWO |
198 | static inline int folio_ref_inc_return(struct folio *folio) |
199 | { | |
200 | return page_ref_inc_return(&folio->page); | |
201 | } | |
202 | ||
fe896d18 JK |
203 | static inline int page_ref_dec_and_test(struct page *page) |
204 | { | |
0139aa7b | 205 | int ret = atomic_dec_and_test(&page->_refcount); |
95813b8f | 206 | |
c65fc227 | 207 | if (page_ref_tracepoint_active(page_ref_mod_and_test)) |
95813b8f JK |
208 | __page_ref_mod_and_test(page, -1, ret); |
209 | return ret; | |
fe896d18 JK |
210 | } |
211 | ||
c24016ac MWO |
212 | static inline int folio_ref_dec_and_test(struct folio *folio) |
213 | { | |
214 | return page_ref_dec_and_test(&folio->page); | |
215 | } | |
216 | ||
fe896d18 JK |
217 | static inline int page_ref_dec_return(struct page *page) |
218 | { | |
0139aa7b | 219 | int ret = atomic_dec_return(&page->_refcount); |
95813b8f | 220 | |
c65fc227 | 221 | if (page_ref_tracepoint_active(page_ref_mod_and_return)) |
95813b8f JK |
222 | __page_ref_mod_and_return(page, -1, ret); |
223 | return ret; | |
fe896d18 JK |
224 | } |
225 | ||
c24016ac MWO |
226 | static inline int folio_ref_dec_return(struct folio *folio) |
227 | { | |
228 | return page_ref_dec_return(&folio->page); | |
229 | } | |
230 | ||
c2530328 | 231 | static inline bool page_ref_add_unless(struct page *page, int nr, int u) |
fe896d18 | 232 | { |
bd225530 YZ |
233 | bool ret = false; |
234 | ||
235 | rcu_read_lock(); | |
236 | /* avoid writing to the vmemmap area being remapped */ | |
237 | if (!page_is_fake_head(page) && page_ref_count(page) != u) | |
238 | ret = atomic_add_unless(&page->_refcount, nr, u); | |
239 | rcu_read_unlock(); | |
95813b8f | 240 | |
c65fc227 | 241 | if (page_ref_tracepoint_active(page_ref_mod_unless)) |
95813b8f JK |
242 | __page_ref_mod_unless(page, nr, ret); |
243 | return ret; | |
fe896d18 JK |
244 | } |
245 | ||
c24016ac MWO |
246 | static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u) |
247 | { | |
248 | return page_ref_add_unless(&folio->page, nr, u); | |
249 | } | |
250 | ||
020853b6 MWO |
251 | /** |
252 | * folio_try_get - Attempt to increase the refcount on a folio. | |
253 | * @folio: The folio. | |
254 | * | |
255 | * If you do not already have a reference to a folio, you can attempt to | |
256 | * get one using this function. It may fail if, for example, the folio | |
257 | * has been freed since you found a pointer to it, or it is frozen for | |
258 | * the purposes of splitting or migration. | |
259 | * | |
260 | * Return: True if the reference count was successfully incremented. | |
261 | */ | |
262 | static inline bool folio_try_get(struct folio *folio) | |
263 | { | |
264 | return folio_ref_add_unless(folio, 1, 0); | |
265 | } | |
266 | ||
fa2690af | 267 | static inline bool folio_ref_try_add(struct folio *folio, int count) |
020853b6 | 268 | { |
fa2690af | 269 | return folio_ref_add_unless(folio, count, 0); |
020853b6 MWO |
270 | } |
271 | ||
fe896d18 JK |
272 | static inline int page_ref_freeze(struct page *page, int count) |
273 | { | |
0139aa7b | 274 | int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); |
95813b8f | 275 | |
c65fc227 | 276 | if (page_ref_tracepoint_active(page_ref_freeze)) |
95813b8f JK |
277 | __page_ref_freeze(page, count, ret); |
278 | return ret; | |
fe896d18 JK |
279 | } |
280 | ||
c24016ac MWO |
281 | static inline int folio_ref_freeze(struct folio *folio, int count) |
282 | { | |
283 | return page_ref_freeze(&folio->page, count); | |
284 | } | |
285 | ||
fe896d18 JK |
286 | static inline void page_ref_unfreeze(struct page *page, int count) |
287 | { | |
288 | VM_BUG_ON_PAGE(page_count(page) != 0, page); | |
289 | VM_BUG_ON(count == 0); | |
290 | ||
03f5d58f | 291 | atomic_set_release(&page->_refcount, count); |
c65fc227 | 292 | if (page_ref_tracepoint_active(page_ref_unfreeze)) |
95813b8f | 293 | __page_ref_unfreeze(page, count); |
fe896d18 JK |
294 | } |
295 | ||
c24016ac MWO |
296 | static inline void folio_ref_unfreeze(struct folio *folio, int count) |
297 | { | |
298 | page_ref_unfreeze(&folio->page, count); | |
299 | } | |
fe896d18 | 300 | #endif |