/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))

/*
 * Mask of all large folio orders supported for file THP.
 */
#define THP_ORDERS_ALL_FILE	(BIT(PMD_ORDER) | BIT(PUD_ORDER))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
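
/*
 * Worked example (illustrative only, not part of the original header):
 * with 4KiB pages and PMD_ORDER == 9, PUD_ORDER == 18 (x86-64), the
 * masks above evaluate to:
 *
 *	THP_ORDERS_ALL_ANON == 0x003fc	(orders 2..9)
 *	THP_ORDERS_ALL_FILE == 0x40200	(orders 9 and 18 only)
 *
 * i.e. anonymous THP may use any order from 2 up to the PMD order, while
 * file-backed THP is restricted to exactly PMD- and PUD-sized folios.
 */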

#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
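
/*
 * Worked example (illustrative only): with 4KiB pages (PAGE_SHIFT == 12)
 * and x86-64's PMD_SHIFT == 21, PUD_SHIFT == 30, these evaluate to:
 *
 *	HPAGE_PMD_ORDER == 9	HPAGE_PMD_NR == 512	HPAGE_PMD_SIZE == 2MiB
 *	HPAGE_PUD_ORDER == 18	HPAGE_PUD_NR == 262144	HPAGE_PUD_SIZE == 1GiB
 */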

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline bool hugepage_flags_enabled(void)
{
	/*
	 * We cover both the anon and the file-backed case here; we must return
	 * true if globally enabled, even when all anon sizes are set to never.
	 * So we don't need to look at huge_anon_orders_inherit.
	 */
	return hugepage_global_enabled() ||
	       huge_anon_orders_always ||
	       huge_anon_orders_madvise;
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
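
/*
 * Sketch of the iteration idiom these two helpers support (illustrative
 * only; try_this_order() is a hypothetical placeholder): walk a bitfield
 * of orders from highest to lowest, clearing each order as it is tried:
 *
 *	int order = highest_order(orders);
 *
 *	while (orders) {
 *		if (try_this_order(order))
 *			break;
 *		order = next_order(&orders, order);
 *	}
 *
 * thp_vma_suitable_orders() below uses exactly this pattern.
 */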

/*
 * Perform the following checks:
 * - For file vmas, check that the linear page offset of the vma is
 *   order-aligned within the file.  The hugepage is guaranteed to be
 *   order-aligned within the file, but we must check that the
 *   order-aligned addresses in the VMA map to order-aligned offsets
 *   within the file, else the hugepage will not be mappable.
 * - For all vmas, check that the hugepage-aligned range around addr
 *   lies entirely within the vma.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: Which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}
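
/*
 * Sketch of typical use in the anonymous fault path (illustrative only;
 * mirrors the pattern used by the fault handler, not a definitive API
 * contract): first ask which orders policy allows, then filter the result
 * down to orders whose alignment fits the faulting address:
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *					  THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 */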

enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	__MTHP_STAT_COUNT
};

struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

#ifdef CONFIG_SYSFS
DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_inc(mthp_stats.stats[order][item]);
}
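
/*
 * Typical call site (illustrative sketch): bump the per-order counter
 * after allocating a large anonymous folio in the fault path:
 *
 *	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * Orders outside (0, PMD_ORDER] are silently ignored, so callers need not
 * range-check first.
 */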
#else
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
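
/*
 * Typical locking pattern (illustrative sketch): the lock is taken only
 * when the PMD is actually huge (or a huge swap/devmap entry), so a NULL
 * return means "fall back to the PTE-level path":
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */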
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
#define split_folio(f) split_folio_to_order(f, 0)
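
/*
 * Illustrative sketch (caller conventions abbreviated): callers are
 * expected to hold a reference on the folio and have it locked; a zero
 * return means the split succeeded and the folio is now order-0:
 *
 *	if (!split_folio(folio))
 *		... proceed on the individual base pages ...
 */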

#endif /* _LINUX_HUGE_MM_H */