Commit | Line | Data |
---|---|---|
fe896d18 JK |
1 | #ifndef _LINUX_PAGE_REF_H |
2 | #define _LINUX_PAGE_REF_H | |
3 | ||
4 | #include <linux/atomic.h> | |
5 | #include <linux/mm_types.h> | |
6 | #include <linux/page-flags.h> | |
95813b8f JK |
7 | #include <linux/tracepoint-defs.h> |
8 | ||
9 | extern struct tracepoint __tracepoint_page_ref_set; | |
10 | extern struct tracepoint __tracepoint_page_ref_mod; | |
11 | extern struct tracepoint __tracepoint_page_ref_mod_and_test; | |
12 | extern struct tracepoint __tracepoint_page_ref_mod_and_return; | |
13 | extern struct tracepoint __tracepoint_page_ref_mod_unless; | |
14 | extern struct tracepoint __tracepoint_page_ref_freeze; | |
15 | extern struct tracepoint __tracepoint_page_ref_unfreeze; | |
16 | ||
#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

/*
 * Out-of-line tracepoint emitters; extern here, defined elsewhere in the
 * kernel when CONFIG_DEBUG_PAGE_REF is enabled.
 */
extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

/*
 * CONFIG_DEBUG_PAGE_REF disabled: the guard is compile-time false and the
 * hooks below are empty inline stubs, so every
 *	if (page_ref_tracepoint_active(...)) __page_ref_...(...)
 * sequence in this header is optimized away entirely.
 */
#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

64 | static inline int page_ref_count(struct page *page) | |
65 | { | |
0139aa7b | 66 | return atomic_read(&page->_refcount); |
fe896d18 JK |
67 | } |
68 | ||
69 | static inline int page_count(struct page *page) | |
70 | { | |
0139aa7b | 71 | return atomic_read(&compound_head(page)->_refcount); |
fe896d18 JK |
72 | } |
73 | ||
/*
 * Unconditionally set the page's reference count to @v and fire the
 * page_ref_set tracepoint when debugging is enabled.  Callers must own
 * the page or otherwise guarantee no concurrent refcount users.
 */
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug); the single reference is the
 * one the allocator expects to consume.
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

/* Take @nr additional references on @page (tracing the delta as +nr). */
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

/*
 * Drop @nr references from @page (tracing the delta as -nr).  Does not
 * report whether the count reached zero; use page_ref_sub_and_test() for
 * that.
 */
static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

/* Take one additional reference on @page. */
static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

/*
 * Drop one reference from @page without observing the result; use
 * page_ref_dec_and_test() when the caller must know if it was the last.
 */
static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

118 | static inline int page_ref_sub_and_test(struct page *page, int nr) | |
119 | { | |
0139aa7b | 120 | int ret = atomic_sub_and_test(nr, &page->_refcount); |
95813b8f JK |
121 | |
122 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) | |
123 | __page_ref_mod_and_test(page, -nr, ret); | |
124 | return ret; | |
fe896d18 JK |
125 | } |
126 | ||
127 | static inline int page_ref_dec_and_test(struct page *page) | |
128 | { | |
0139aa7b | 129 | int ret = atomic_dec_and_test(&page->_refcount); |
95813b8f JK |
130 | |
131 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) | |
132 | __page_ref_mod_and_test(page, -1, ret); | |
133 | return ret; | |
fe896d18 JK |
134 | } |
135 | ||
136 | static inline int page_ref_dec_return(struct page *page) | |
137 | { | |
0139aa7b | 138 | int ret = atomic_dec_return(&page->_refcount); |
95813b8f JK |
139 | |
140 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) | |
141 | __page_ref_mod_and_return(page, -1, ret); | |
142 | return ret; | |
fe896d18 JK |
143 | } |
144 | ||
145 | static inline int page_ref_add_unless(struct page *page, int nr, int u) | |
146 | { | |
0139aa7b | 147 | int ret = atomic_add_unless(&page->_refcount, nr, u); |
95813b8f JK |
148 | |
149 | if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) | |
150 | __page_ref_mod_unless(page, nr, ret); | |
151 | return ret; | |
fe896d18 JK |
152 | } |
153 | ||
154 | static inline int page_ref_freeze(struct page *page, int count) | |
155 | { | |
0139aa7b | 156 | int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); |
95813b8f JK |
157 | |
158 | if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) | |
159 | __page_ref_freeze(page, count, ret); | |
160 | return ret; | |
fe896d18 JK |
161 | } |
162 | ||
163 | static inline void page_ref_unfreeze(struct page *page, int count) | |
164 | { | |
165 | VM_BUG_ON_PAGE(page_count(page) != 0, page); | |
166 | VM_BUG_ON(count == 0); | |
167 | ||
0139aa7b | 168 | atomic_set(&page->_refcount, count); |
95813b8f JK |
169 | if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) |
170 | __page_ref_unfreeze(page, count); | |
fe896d18 JK |
171 | } |
172 | ||
173 | #endif |