Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
eefa864b JK |
2 | #ifndef __LINUX_PAGE_EXT_H |
3 | #define __LINUX_PAGE_EXT_H | |
4 | ||
48c96a36 | 5 | #include <linux/types.h> |
9039b909 | 6 | #include <linux/mmzone.h> |
48c96a36 JK |
7 | #include <linux/stacktrace.h> |
8 | ||
eefa864b | 9 | struct pglist_data; |
6189eb82 | 10 | |
67311a36 | 11 | #ifdef CONFIG_PAGE_EXTENSION |
/**
 * struct page_ext_operations - per page_ext client operations
 * @offset: Offset to the client's data within page_ext. Offset is returned to
 *          the client by page_ext_init.
 * @size: The size of the client data within page_ext.
 * @need: Function that returns true if client requires page_ext.
 * @init: (optional) Called to initialize client once page_exts are allocated.
 * @need_shared_flags: True when client is using shared page_ext->flags
 *                     field.
 *
 * Each Page Extension client must define page_ext_operations in
 * page_ext_ops array.
 */
struct page_ext_operations {
	size_t offset;
	size_t size;
	bool (*need)(void);
	void (*init)(void);
	bool need_shared_flags;
};

/*
 * The page_ext_flags users must set need_shared_flags to true.
 */
enum page_ext_flags {
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
	/*
	 * NOTE(review): these two bits appear to exist here only for
	 * 32-bit builds with page-idle tracking; on other configs they
	 * presumably live elsewhere — confirm against page idle code.
	 */
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};

/*
 * Page Extension can be considered as an extended mem_map.
 * A page_ext page is associated with every page descriptor. The
 * page_ext helps us add more information about the page.
 * All page_ext are allocated at boot or memory hotplug event,
 * then the page_ext for pfn always exists.
 */
struct page_ext {
	unsigned long flags;	/* shared bits, see enum page_ext_flags */
};

extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);

/* Report whether early page_ext initialization was requested. */
static inline bool early_page_ext_enabled(void)
{
	return early_page_ext;
}

#ifdef CONFIG_SPARSEMEM
/* With SPARSEMEM, flatmem-specific setup is not needed. */
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}

/*
 * Return true when the page_ext for @next_pfn can be reached with plain
 * pointer arithmetic from the previous entry (see page_ext_iter_next()).
 */
static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	/*
	 * page_ext is allocated per memory section. Once we cross a
	 * memory section, we have to fetch the new pointer.
	 */
	return next_pfn % PAGES_PER_SECTION;
}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}

/* No section boundaries to cross: the fast path is always possible. */
static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	return true;
}
#endif

6e65aa55 | 95 | extern struct page_ext *page_ext_get(const struct page *page); |
b1d5488a | 96 | extern void page_ext_put(struct page_ext *page_ext); |
9039b909 | 97 | extern struct page_ext *page_ext_lookup(unsigned long pfn); |
eefa864b | 98 | |
c0a5d93a KS |
99 | static inline void *page_ext_data(struct page_ext *page_ext, |
100 | struct page_ext_operations *ops) | |
101 | { | |
102 | return (void *)(page_ext) + ops->offset; | |
103 | } | |
104 | ||
5556cfe8 VB |
105 | static inline struct page_ext *page_ext_next(struct page_ext *curr) |
106 | { | |
107 | void *next = curr; | |
108 | next += page_ext_size; | |
109 | return next; | |
110 | } | |
111 | ||
struct page_ext_iter {
	unsigned long index;		/* pages advanced past start_pfn */
	unsigned long start_pfn;	/* pfn the iteration began at */
	struct page_ext *page_ext;	/* current entry, NULL when lookup failed */
};

/**
 * page_ext_iter_begin() - Prepare for iterating through page extensions.
 * @iter: page extension iterator.
 * @pfn: PFN of the page we're interested in.
 *
 * Must be called with RCU read lock taken.
 *
 * Return: NULL if no page_ext exists for this page.
 */
static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
						   unsigned long pfn)
{
	iter->index = 0;
	iter->start_pfn = pfn;
	iter->page_ext = page_ext_lookup(pfn);

	return iter->page_ext;
}

/**
 * page_ext_iter_next() - Get next page extension
 * @iter: page extension iterator.
 *
 * Must be called with RCU read lock taken.
 *
 * Return: NULL if no next page_ext exists.
 */
static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
{
	unsigned long pfn;

	if (WARN_ON_ONCE(!iter->page_ext))
		return NULL;

	iter->index++;
	pfn = iter->start_pfn + iter->index;

	/*
	 * Use cheap pointer arithmetic while the next entry is adjacent;
	 * otherwise fall back to a full lookup for the new pfn.
	 */
	if (page_ext_iter_next_fast_possible(pfn))
		iter->page_ext = page_ext_next(iter->page_ext);
	else
		iter->page_ext = page_ext_lookup(pfn);

	return iter->page_ext;
}

/**
 * page_ext_iter_get() - Get current page extension
 * @iter: page extension iterator.
 *
 * Does not advance the iterator.
 *
 * Return: NULL if no page_ext exists for this iterator.
 */
static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
{
	return iter->page_ext;
}

/**
 * for_each_page_ext(): iterate through page_ext objects.
 * @__page: the page we're interested in
 * @__pgcount: how many pages to iterate through
 * @__page_ext: struct page_ext pointer where the current page_ext
 *              object is returned
 * @__iter: struct page_ext_iter object (defined in the stack)
 *
 * IMPORTANT: must be called with RCU read lock taken.
 */
#define for_each_page_ext(__page, __pgcount, __page_ext, __iter)	\
	for (__page_ext = page_ext_iter_begin(&(__iter),		\
					      page_to_pfn(__page));	\
	     __page_ext && (__iter).index < (__pgcount);		\
	     __page_ext = page_ext_iter_next(&(__iter)))

#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;

/* All stubs below: page_ext support is compiled out. */
static inline bool early_page_ext_enabled(void)
{
	return false;
}

static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

static inline void page_ext_init(void)
{
}

static inline void page_ext_init_flatmem_late(void)
{
}

static inline void page_ext_init_flatmem(void)
{
}

/* No page_ext exists; callers must tolerate a NULL return. */
static inline struct page_ext *page_ext_get(const struct page *page)
{
	return NULL;
}

static inline void page_ext_put(struct page_ext *page_ext)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */