/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

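/*
 * Illustrative sketch (not part of the original header): on a 64-bit host,
 * BITS_PER_XA_VALUE is 63 and MAX_SWAPFILES_SHIFT is 5, so SWP_TYPE_SHIFT
 * works out to 58 and the encode/decode helpers below round-trip:
 *
 *	swp_entry_t ent = swp_entry(2, 0x1234);
 *	// ent.val == (2UL << 58) | 0x1234
 *	// swp_type(ent) == 2, swp_offset(ent) == 0x1234
 */
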
/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
 * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)

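/*
 * For example (illustrative, config-dependent): with MAX_PHYSMEM_BITS == 46
 * and PAGE_SHIFT == 12, as on x86-64 with 4-level page tables, SWP_PFN_BITS
 * is 34 and SWP_PFN_MASK covers the low 34 bits of the swp offset.
 */
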
/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type |     swp_offset     |
 *   |----------+--------+-+-+-------|
 *   |          | resv   |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch-specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)

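/*
 * Illustrative sketch (assumption, not from this header): the swp offset of
 * a migration entry carrying A/D bits is composed as
 *
 *	offset = pfn | SWP_MIG_YOUNG | SWP_MIG_DIRTY;
 *
 * where pfn fits in SWP_PFN_BITS, so swp_offset_pfn() can mask it back out.
 */
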
static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags, keeping only the swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format.
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format.
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry. Please refer to is_pfn_swap_entry() for the definition
 * of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

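/*
 * Illustrative caller sketch (assumption, not from this header): a page
 * table walker typically combines the helpers above like so:
 *
 *	pte_t pte = ptep_get(ptep);
 *	if (is_swap_pte(pte)) {
 *		swp_entry_t entry = pte_to_swp_entry(pte);
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *	}
 */
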
static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}

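/*
 * Illustrative note (assumption based on the XArray API): xa_mk_value()
 * stores the value shifted up by one bit to tag it as a value entry, which
 * is the extra bit the layout comment at the top of this file reserves for
 * shmem/tmpfs above swp_type.
 */
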
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);

	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
}

#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations. The result is
 * arch-specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else /* CONFIG_SWAP */
	return false;
#endif /* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging the page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of a clean page after migration */
	return false;
}

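/*
 * Illustrative caller sketch (assumption, not from this header): when
 * unmapping a page for migration, the pte's accessed/dirty state can be
 * preserved in the entry:
 *
 *	swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));
 *	if (pte_young(pteval))
 *		entry = make_migration_entry_young(entry);
 *	if (pte_dirty(pteval))
 *		entry = make_migration_entry_dirty(entry);
 */
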
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif

typedef unsigned long pte_marker;

#define  PTE_MARKER_UFFD_WP			BIT(0)
/*
 * "Poisoned" here is meant in the very general sense of "future accesses are
 * invalid", instead of referring very specifically to hardware memory errors.
 * This marker is meant to represent any of various different causes of this.
 *
 * Note that, when encountered by the faulting logic, PTEs with this marker
 * will result in VM_FAULT_HWPOISON and thus regardless trigger the hardware
 * memory error logic.
 */
#define  PTE_MARKER_POISONED			BIT(1)
/*
 * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
 * sent. This means guard markers behave in effect as if the region were
 * mapped PROT_NONE, rather than as if it were a memory hole or equivalent.
 */
#define  PTE_MARKER_GUARD			BIT(2)
#define  PTE_MARKER_MASK			(BIT(3) - 1)

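/*
 * For example (illustrative, based on the helpers below): a userfaultfd
 * write-protect marker pte is make_pte_marker(PTE_MARKER_UFFD_WP), i.e. a
 * swap pte whose entry is swp_entry(SWP_PTE_MARKER, PTE_MARKER_UFFD_WP).
 */
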
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

static inline swp_entry_t make_poisoned_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_POISONED);
}

static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_POISONED);
}

static inline swp_entry_t make_guard_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_GUARD);
}

static inline int is_guard_swp_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_GUARD);
}

/*
 * This is a special version of pte_none() that also covers the case when
 * the pte is a pte marker. It exists because in many cases a pte marker
 * should be seen as a none pte; it's just that we have stored some
 * information onto the none pte so it becomes not-none any more.
 *
 * It should be used when the pte is file-backed, ram-based and backing
 * userspace pages, like shmem. It is not needed upon pgtables that do not
 * support pte markers at all. For example, it's not needed on anonymous
 * memory, kernel-only memory (including during boot), or non-ram-based
 * generic filesystems. It's fine to be used even there, but the extra
 * pte marker check will be pure overhead.
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}

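/*
 * Illustrative caller sketch (assumption, not from this header): fault
 * paths on marker-capable mappings treat marker ptes as empty slots when
 * deciding whether a page can be installed:
 *
 *	if (pte_none_mostly(ptep_get(ptep))) {
 *		// no page mapped here; a marker may still need handling
 *	}
 */
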
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked.
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
{
	struct folio *folio = pfn_folio(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding folio is locked.
	 */
	BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));

	return folio;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn
 * stored in the swap offset. They are used to represent unaddressable device
 * memory, to restrict access to a page undergoing migration, or to represent
 * a pfn which has been hwpoisoned and unmapped.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
				   struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
				 struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
					  struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
					struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

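/*
 * Illustrative caller sketch (assumption, not from this header): THP-aware
 * page table walkers use the pmd helpers much like the pte ones above:
 *
 *	if (is_pmd_migration_entry(*pmd))
 *		pmd_migration_entry_wait(mm, pmd);
 */
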
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

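/*
 * Note (assumption based on how swap.h carves up the type space): the
 * special entries above (device private/exclusive, migration, hwpoison and
 * pte markers) all use type values at or above MAX_SWAPFILES, so
 * non_swap_entry() is true exactly for them.
 */
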
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */