/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT(e)       ((sizeof(e.val) * 8) - \
                        (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e)      ((1UL << SWP_TYPE_SHIFT(e)) - 1)

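/*
 * Worked example (illustrative, assuming a 64-bit unsigned long with
 * MAX_SWAPFILES_SHIFT == 5 and RADIX_TREE_EXCEPTIONAL_SHIFT == 2):
 *
 *      SWP_TYPE_SHIFT(e)  == 64 - (5 + 2) == 57
 *      SWP_OFFSET_MASK(e) == (1UL << 57) - 1
 *
 * so the type lands in the top seven bits and the offset in the low 57 bits.
 */
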
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT(ret)) |
                        (offset & SWP_OFFSET_MASK(ret));
        return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
        return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK(entry);
}

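/*
 * Round-trip sketch (illustrative, assuming the 64-bit layout above):
 *
 *      swp_entry_t e = swp_entry(2, 0x1234);
 *
 * swp_type(e) then returns 2 and swp_offset(e) returns 0x1234.  Offsets
 * wider than SWP_OFFSET_MASK() are silently truncated by swp_entry().
 */
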
#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte);
}
#endif

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
        swp_entry_t arch_entry;

        if (pte_swp_soft_dirty(pte))
                pte = pte_swp_clear_soft_dirty(pte);
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pte(arch_entry);
}

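/*
 * Typical usage sketch (illustrative only): a page-table walker that finds
 * a swap pte decodes it before acting on it, e.g.
 *
 *      if (is_swap_pte(pte)) {
 *              swp_entry_t entry = pte_to_swp_entry(pte);
 *
 *              if (non_swap_entry(entry))
 *                      handle a migration/device/hwpoison entry;
 *              else
 *                      handle a real swap entry;
 *      }
 */
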
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
        swp_entry_t entry;

        entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
        return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
        unsigned long value;

        value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
        return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

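/*
 * Example (illustrative): shmem keeps swapped-out pages in its radix tree
 * as exceptional entries.  With RADIX_TREE_EXCEPTIONAL_SHIFT == 2 and
 * RADIX_TREE_EXCEPTIONAL_ENTRY == 2, an entry whose val is 0x10 is stored
 * as the pointer (void *)0x42, and radix_to_swp_entry() recovers 0x10 by
 * shifting the tag bits back out.
 */
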
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
                         page_to_pfn(page));
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        int type = swp_type(entry);
        return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return pfn_to_page(swp_offset(entry));
}

vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
                       unsigned long addr,
                       swp_entry_t entry,
                       unsigned int flags,
                       pmd_t *pmdp);
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
{
        return swp_entry(0, 0);
}

static inline void make_device_private_entry_read(swp_entry_t *entry)
{
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline bool is_write_device_private_entry(swp_entry_t entry)
{
        return false;
}

static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *device_private_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
                       unsigned long addr,
                       swp_entry_t entry,
                       unsigned int flags,
                       pmd_t *pmdp)
{
        return VM_FAULT_SIGBUS;
}
#endif /* CONFIG_DEVICE_PRIVATE */

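/*
 * Usage sketch (illustrative only): pages that live in device-private
 * memory are mapped with these entries instead of present ptes, e.g.
 *
 *      entry = make_device_private_entry(page, write);
 *      pte = swp_entry_to_pte(entry);
 *
 * A later CPU access faults, reaches device_private_entry_fault(), and the
 * owning driver is asked to make the page accessible again (typically by
 * migrating it back to system memory).
 */
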
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
        BUG_ON(!PageLocked(compound_head(page)));

        return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
                        page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
                        swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return swp_offset(entry);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        struct page *p = pfn_to_page(swp_offset(entry));
        /*
         * Any use of migration entries may only occur while the
         * corresponding page is locked
         */
        BUG_ON(!PageLocked(compound_head(p)));
        return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
        return 0;
}

static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
{
        return 0;
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        return NULL;
}

static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
        return 0;
}

#endif

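/*
 * Usage sketch (illustrative only): while a page is being migrated its ptes
 * are replaced with migration entries, and anyone faulting on them waits
 * for the migration to finish, e.g.
 *
 *      entry = pte_to_swp_entry(pte);
 *      if (is_migration_entry(entry))
 *              migration_entry_wait(mm, pmd, address);
 *
 * The page stays locked for as long as its migration entries exist, which
 * is why migration_entry_to_page() can assert PageLocked() on the result.
 */
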
struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        swp_entry_t arch_entry;

        if (pmd_swp_soft_dirty(pmd))
                pmd = pmd_swp_clear_soft_dirty(pmd);
        arch_entry = __pmd_to_swp_entry(pmd);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                struct page *page)
{
        BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                struct page *new)
{
        BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
        return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
        return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
        return 0;
}
#endif

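/*
 * Usage sketch (illustrative only): pmd-level walkers test for THP
 * migration entries much as pte walkers do, e.g.
 *
 *      if (is_pmd_migration_entry(*pmd))
 *              pmd_migration_entry_wait(mm, pmd);
 *
 * The !CONFIG_ARCH_ENABLE_THP_MIGRATION stubs for set_pmd_migration_entry()
 * and remove_migration_pmd() are BUILD_BUG() because those call sites must
 * be compiled out whenever THP migration is not supported.
 */
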
#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        BUG_ON(!PageLocked(page));
        return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
        return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
        atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_dec(void)
{
        atomic_long_dec(&num_poisoned_pages);
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
        return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
        return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}
#endif

#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
static inline int non_swap_entry(swp_entry_t entry)
{
        return swp_type(entry) >= MAX_SWAPFILES;
}
#else
static inline int non_swap_entry(swp_entry_t entry)
{
        return 0;
}
#endif

#endif /* _LINUX_SWAPOPS_H */