/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

/* Out-of-line tracepoint emitters; definitions live outside this header. */
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

/*
 * !CONFIG_DEBUG_PAGE_REF: the "tracepoint active" test is the constant
 * false, so the empty stubs below are never reached and the compiler can
 * drop the tracing calls from every page_ref_* helper entirely.
 */
#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

65 | static inline int page_ref_count(struct page *page) | |
66 | { | |
0139aa7b | 67 | return atomic_read(&page->_refcount); |
fe896d18 JK |
68 | } |
69 | ||
70 | static inline int page_count(struct page *page) | |
71 | { | |
0139aa7b | 72 | return atomic_read(&compound_head(page)->_refcount); |
fe896d18 JK |
73 | } |
74 | ||
/* Atomically set @page->_refcount to @v, emitting the page_ref_set event. */
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	/* static-key guard: compiles away unless CONFIG_DEBUG_PAGE_REF */
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	/* Also fires the page_ref_set tracepoint via set_page_count(). */
	set_page_count(page, 1);
}

/* Add @nr references to @page (this page, not its compound head). */
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

/* Drop @nr references from @page; traced as a modification of -@nr. */
static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

/* Drop @nr references from @page and return the resulting count. */
static inline int page_ref_sub_return(struct page *page, int nr)
{
	int ret = atomic_sub_return(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -nr, ret);
	return ret;
}

/* Take one reference on @page. */
static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

/* Drop one reference on @page without observing the resulting count. */
static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

/*
 * Drop @nr references from @page; returns nonzero iff the count reached
 * zero (the atomic_sub_and_test() result), i.e. the caller held the last
 * references.
 */
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

/* Take one reference on @page and return the resulting count. */
static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

/*
 * Drop one reference on @page; returns nonzero iff the count reached zero
 * (the atomic_dec_and_test() result).
 */
static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

/* Drop one reference on @page and return the resulting count. */
static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

/*
 * Add @nr references to @page unless its count currently equals @u;
 * returns the atomic_add_unless() result (nonzero when the add happened).
 * Typically used with @u == 0 to avoid resurrecting a freed page.
 */
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

/*
 * Attempt to freeze @page's refcount: atomically swap it from @count to 0.
 * Returns 1 on success (the count was exactly @count), 0 otherwise.  The
 * likely() reflects that callers expect the freeze to succeed.
 */
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

/*
 * Undo page_ref_freeze(): restore a frozen (zero) refcount to @count.
 * The page must currently be frozen and @count must be nonzero.
 */
static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	/* release ordering: publish prior stores before the count goes live */
	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif /* _LINUX_PAGE_REF_H */