/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};
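
/*
 * Note (illustrative, not from the original header):
 * TRANSPARENT_HUGEPAGE_FLAG and TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG mirror
 * the "always" and "madvise" settings of
 * /sys/kernel/mm/transparent_hugepage/enabled; the DEFRAG_* flags mirror
 * the "defrag" file in the same directory.
 */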

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
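
/*
 * Worked example (illustrative): with 4K pages on x86-64, PMD_ORDER == 9,
 * so THP_ORDERS_ALL_ANON == (BIT(10) - 1) & ~0x3 == 0x3fc, i.e. orders
 * 2 through 9 inclusive.
 */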

/*
 * Mask of all large folio orders supported for file THP.
 */
#define THP_ORDERS_ALL_FILE	(BIT(PMD_ORDER) | BIT(PUD_ORDER))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)

#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline bool hugepage_flags_enabled(void)
{
	/*
	 * We cover both the anon and the file-backed case here; we must return
	 * true if globally enabled, even when all anon sizes are set to never.
	 * So we don't need to look at huge_anon_orders_inherit.
	 */
	return hugepage_global_enabled() ||
	       huge_anon_orders_always ||
	       huge_anon_orders_madvise;
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
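
/*
 * Worked example (illustrative): for orders == BIT(9) | BIT(3),
 * highest_order(orders) returns 9; next_order(&orders, 9) clears bit 9
 * and returns 3; next_order(&orders, 3) empties the set and returns -1.
 */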

/*
 * Do the below checks:
 *   - For file vma, check if the linear page offset of vma is
 *     order-aligned within the file.  The hugepage is
 *     guaranteed to be order-aligned within the file, but we must
 *     check that the order-aligned addresses in the VMA map to
 *     order-aligned offsets within the file, else the hugepage will
 *     not be mappable.
 *   - For all vmas, check if the haddr is in an aligned hugepage
 *     area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
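
/*
 * Worked example (illustrative): with 4K pages, an anonymous VMA spanning
 * [0x200000, 0x500000) is suitable for order-9 (2M) at addr 0x250000
 * (haddr == 0x200000, haddr + 2M <= vm_end), but not at addr 0x450000
 * (haddr == 0x400000, haddr + 2M > vm_end).
 */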

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @smaps: whether answer will be used for smaps file
 * @in_pf: whether answer will be used by page fault handler
 * @enforce_sysfs: whether sysfs config should be taken into account
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags, bool smaps,
				       bool in_pf, bool enforce_sysfs,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if (enforce_sysfs && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
					  enforce_sysfs, orders);
}
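
/*
 * Usage sketch (illustrative; mm/memory.c's alloc_anon_folio() follows this
 * pattern): a fault path can pick the largest order that is both allowed
 * and suitable:
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true,
 *					  true, THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 *	order = highest_order(orders);	(-1 when no order qualifies)
 */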

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
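
/*
 * Note (illustrative): split_huge_pmd() only splits the page-table mapping
 * back into PTEs; the underlying folio stays large. Use split_huge_page()
 * or the split_folio*() helpers below to split the memory itself.
 */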

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
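
/*
 * Usage sketch (illustrative, not from the original header):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		...the pmd is a stable huge/swap/devmap entry here...
 *		spin_unlock(ptl);
 *	}
 *
 * A NULL return means the pmd is none or maps a normal page table.
 */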

static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
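
/*
 * Note (illustrative): the huge zero page is a single global PMD-sized page
 * of zeroes; mm_get_huge_zero_page() takes a per-mm reference, and a
 * shrinker can free the page once no mm holds a reference.
 */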

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags, bool smaps,
					bool in_pf, bool enforce_sysfs,
					unsigned long orders)
{
	return 0;
}

static inline void folio_prep_large_rmappable(struct folio *folio) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
#define split_folio(f) split_folio_to_order(f, 0)

/*
 * Archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
 * limitations in the implementation (like arm64 MTE) can override this to
 * false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
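
/*
 * Illustrative sketch (not part of this header): an architecture overrides
 * the fallback by defining the helper first; e.g. arm64 keys it off MTE
 * support in its pgtable.h:
 *
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 *	#define arch_thp_swp_supported arch_thp_swp_supported
 */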

#endif /* _LINUX_HUGE_MM_H */