/*
 * include/linux/huge_mm.h, as of Linux 6.17-rc6 (linux-2.6-block.git)
 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
			      bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
			      bool write);
vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
				bool write);
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
				bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply to
 * them. The same goes for PFNMAPs, where there is neither a struct page nor
 * a page cache.
 */
#define THP_ORDERS_ALL_SPECIAL \
	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL \
	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
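
/*
 * Worked example (illustrative only, assuming 4K pages and
 * PMD_ORDER == 9, as on x86-64): THP_ORDERS_ALL_ANON evaluates to
 *
 *	(BIT(10) - 1) & ~(BIT(0) | BIT(1)) == 0x3ff & ~0x3 == 0x3fc
 *
 * i.e. orders 2 through 9 (16K through 2M folios) are the candidate
 * anonymous THP sizes.
 */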

#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
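
/*
 * Sketch of a typical caller (illustrative): a page-fault path asking
 * whether a PMD-sized THP may back the faulting vma, honouring the
 * sysfs configuration:
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		... take the huge-PMD fault path ...
 */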

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER	(HPAGE_PMD_SHIFT - PAGE_SHIFT)
#define HPAGE_PMD_NR	(1 << HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER	(HPAGE_PUD_SHIFT - PAGE_SHIFT)
#define HPAGE_PUD_NR	(1 << HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)

enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_ZSWPOUT,
	MTHP_STAT_SWPIN,
	MTHP_STAT_SWPIN_FALLBACK,
	MTHP_STAT_SWPIN_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	MTHP_STAT_SHMEM_ALLOC,
	MTHP_STAT_SHMEM_FALLBACK,
	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
	MTHP_STAT_SPLIT,
	MTHP_STAT_SPLIT_FAILED,
	MTHP_STAT_SPLIT_DEFERRED,
	MTHP_STAT_NR_ANON,
	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
	__MTHP_STAT_COUNT
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_add(mthp_stats.stats[order][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	mod_mthp_stat(order, item, 1);
}

#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif
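
/*
 * Illustrative sketch: a fault path that successfully allocated an
 * order-4 anonymous folio would account it as
 *
 *	count_mthp_stat(4, MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * which, with 4K pages, surfaces in the per-size stats directory
 * /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/.
 */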

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1 << TRANSPARENT_HUGEPAGE_FLAG) |
			 (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1 << TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
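
/*
 * Sketch: walking a bitfield of orders from highest to lowest, e.g.
 * for orders == BIT(9) | BIT(4) | BIT(2):
 *
 *	int order = highest_order(orders);	// 9
 *	while (orders) {
 *		... try this order ...
 *		order = next_order(&orders, order);	// 4, then 2
 *	}
 */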

/*
 * Perform the following checks:
 * - For a file VMA, check whether the linear page offset of the VMA is
 *   order-aligned within the file. The hugepage is guaranteed to be
 *   order-aligned within the file, but we must check that the
 *   order-aligned addresses in the VMA map to order-aligned offsets
 *   within the file, else the hugepage will not be mappable.
 * - For all VMAs, check whether haddr lies within an aligned hugepage
 *   area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
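
/*
 * Worked example (illustrative): for a file-backed VMA with
 * vm_start == 0x200000 and vm_pgoff == 1 (4K pages), an order-9 check
 * computes (vm_start >> PAGE_SHIFT) - vm_pgoff == 512 - 1 == 511,
 * which is not 512-aligned, so thp_vma_suitable_order() returns false:
 * 2M-aligned addresses in the VMA would not map to 2M-aligned file
 * offsets.
 */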

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 vm_flags_t vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: Which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       vm_flags_t vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1 << TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

static inline bool vma_thp_disabled(struct vm_area_struct *vma,
				    vm_flags_t vm_flags)
{
	/*
	 * Explicitly disabled through madvise or prctl; some
	 * architectures may also disable THP for specific mappings,
	 * for example, s390 KVM.
	 */
	return (vm_flags & VM_NOHUGEPAGE) ||
	       test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
}

static inline bool thp_disabled_by_hw(void)
{
	/* If the hardware/firmware marked hugepage support disabled. */
	return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
}
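
/*
 * Sketch: eligibility tests typically combine both helpers before
 * considering any THP order, e.g.
 *
 *	if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags))
 *		return 0;
 */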

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
bool uniform_split_supported(struct folio *folio, unsigned int new_order,
		bool warns);
bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
		bool warns);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
		struct list_head *list);
/*
 * try_folio_split - try to split a @folio at @page using a non-uniform split.
 * @folio: folio to be split
 * @page: split to order-0 at the given page
 * @list: store the after-split folios
 *
 * Try to split a @folio at @page using a non-uniform split to order-0; if
 * a non-uniform split is not supported, fall back to a uniform split.
 *
 * Return: 0 if the split succeeded, otherwise a negative error code.
 */
static inline int try_folio_split(struct folio *folio, struct page *page,
		struct list_head *list)
{
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	if (!non_uniform_split_supported(folio, 0, false))
		return split_huge_page_to_list_to_order(&folio->page, list,
				ret);
	return folio_split(folio, ret, page, list);
}
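
/*
 * Hypothetical usage sketch (the caller is assumed to hold a reference
 * and the folio lock, as for the other split helpers):
 *
 *	folio_lock(folio);
 *	err = try_folio_split(folio, page, NULL);
 *	folio_unlock(folio);
 */
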
static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	/*
	 * split_huge_page() locks the page before splitting and
	 * expects the same page that has been split to be locked when
	 * returned. split_folio(page_folio(page)) cannot be used here
	 * because it converts the page to folio and passes the head
	 * page to be split.
	 */
	return split_huge_page_to_list_to_order(page, NULL, ret);
}
void deferred_split_folio(struct folio *folio, bool partially_mapped);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false);			\
	} while (0)
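
/*
 * Illustrative usage: callers that must continue at PTE granularity
 * split any huge PMD first and only then map the page table, e.g.
 *
 *	split_huge_pmd(vma, pmd, address);
 *	pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
 */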

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
			    bool freeze);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		      unsigned long address);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pud_t *pudp, unsigned long addr, pgprot_t newprot,
		unsigned long cp_flags) { return 0; }
#endif

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud))				\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
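
/*
 * Typical locking pattern (sketch): take the PMD lock only when the
 * entry is huge (or a huge swap/migration entry); a NULL return tells
 * the caller to fall back to PTE-level handling:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */
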
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
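
/*
 * Sketch: paths that expect a real folio behind a huge PMD typically
 * filter the shared huge zero page out first, e.g.
 *
 *	if (is_huge_zero_pmd(*pmd))
 *		return;
 *
 * since the huge zero folio is shared globally and is not handled like
 * an ordinary THP folio.
 */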

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					vm_flags_t vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}

static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
	return 0;
}

static inline int try_folio_split(struct folio *folio, struct page *page,
		struct list_head *list)
{
	return 0;
}

static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   vm_flags_t *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end, bool *lock_dropped)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}

static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
}

static inline int change_huge_pud(struct mmu_gather *tlb,
				  struct vm_area_struct *vma, pud_t *pudp,
				  unsigned long addr, pgprot_t newprot,
				  unsigned long cp_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#endif /* _LINUX_HUGE_MM_H */