/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
				bool write);
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
				bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply to
 * them. The same applies to PFNMAPs, where there is neither a struct page
 * nor a page cache.
 */
#define THP_ORDERS_ALL_SPECIAL		\
	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT	\
	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL	\
	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)

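/*
 * Worked example (illustrative, not part of the kernel sources): with 4K
 * pages (PAGE_SHIFT == 12) and a typical x86-64 configuration, assuming
 * PMD_ORDER == 9, PUD_ORDER == 18 and MAX_PAGECACHE_ORDER == PMD_ORDER,
 * the masks evaluate to:
 *
 *	THP_ORDERS_ALL_ANON		== 0x3fc	(orders 2..9)
 *	THP_ORDERS_ALL_SPECIAL		== BIT(9) | BIT(18)
 *	THP_ORDERS_ALL_FILE_DEFAULT	== 0x3fe	(orders 1..9)
 */
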
#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)

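/*
 * Worked example (illustrative): on x86-64 with 4K pages, PMD_SHIFT == 21
 * and PUD_SHIFT == 30, so HPAGE_PMD_SIZE == 2M (order 9, 512 pages) and
 * HPAGE_PUD_SIZE == 1G (order 18).
 */
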
enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_ZSWPOUT,
	MTHP_STAT_SWPIN,
	MTHP_STAT_SWPIN_FALLBACK,
	MTHP_STAT_SWPIN_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	MTHP_STAT_SHMEM_ALLOC,
	MTHP_STAT_SHMEM_FALLBACK,
	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
	MTHP_STAT_SPLIT,
	MTHP_STAT_SPLIT_FAILED,
	MTHP_STAT_SPLIT_DEFERRED,
	MTHP_STAT_NR_ANON,
	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
	__MTHP_STAT_COUNT
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_add(mthp_stats.stats[order][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	mod_mthp_stat(order, item, 1);
}

#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif

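/*
 * Illustrative usage sketch (hypothetical helper, compiled out): a fault
 * path that allocated an anonymous folio of @order would account the event
 * with count_mthp_stat(); a failed attempt would bump the fallback counter.
 */
#if 0
static void example_account_anon_fault(int order, bool allocated)
{
	if (allocated)
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_ALLOC);
	else
		count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK);
}
#endif
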
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}

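/*
 * Example (illustrative): for orders == BIT(9) | BIT(4) | BIT(2),
 * highest_order() returns 9; successive next_order() calls clear the
 * current bit and return 4, then 2, then -1 once the mask is empty.
 */
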
/*
 * Do the following checks:
 * - For file vma, check if the linear page offset of vma is
 *   order-aligned within the file. The hugepage is
 *   guaranteed to be order-aligned within the file, but we must
 *   check that the order-aligned addresses in the VMA map to
 *   order-aligned offsets within the file, else the hugepage will
 *   not be mappable.
 * - For all vmas, check if the haddr is in an aligned hugepage
 *   area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}

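/*
 * Worked example (illustrative): with 4K pages and order == 9, a file VMA
 * whose vm_start sits on a 2M virtual boundary passes the pgoff check only
 * if vm_pgoff is also a multiple of 512, so that 2M-aligned addresses map
 * to 2M-aligned file offsets.
 */
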
/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma: the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}

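/*
 * Illustrative usage sketch (hypothetical helper, compiled out), combining
 * the policy check with the alignment check to pick the largest usable
 * order for an anonymous fault at @addr:
 */
#if 0
static int example_pick_anon_order(struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned long orders;

	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
					  THP_ORDERS_ALL_ANON);
	orders = thp_vma_suitable_orders(vma, addr, orders);
	return orders ? highest_order(orders) : 0;
}
#endif
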
struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

static inline bool vma_thp_disabled(struct vm_area_struct *vma,
		unsigned long vm_flags)
{
	/*
	 * THP can be explicitly disabled through madvise or prctl, and some
	 * architectures disable it for certain mappings (for example,
	 * s390 KVM).
	 */
	return (vm_flags & VM_NOHUGEPAGE) ||
	       test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
}

static inline bool thp_disabled_by_hw(void)
{
	/* If the hardware/firmware marked hugepage support disabled. */
	return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
bool uniform_split_supported(struct folio *folio, unsigned int new_order,
		bool warns);
bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
		bool warns);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
		struct list_head *list);
/*
 * try_folio_split - try to split a @folio at @page using a non-uniform split.
 * @folio: folio to be split
 * @page: split to order-0 at the given page
 * @list: store the after-split folios
 *
 * Try to split a @folio at @page using a non-uniform split to order-0; if
 * a non-uniform split is not supported, fall back to a uniform split.
 *
 * Return: 0 if the split succeeded, otherwise an error code.
 */
static inline int try_folio_split(struct folio *folio, struct page *page,
		struct list_head *list)
{
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	if (!non_uniform_split_supported(folio, 0, false))
		return split_huge_page_to_list_to_order(&folio->page, list,
				ret);
	return folio_split(folio, ret, page, list);
}
static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	/*
	 * split_huge_page() locks the page before splitting and
	 * expects the same page that has been split to be locked when
	 * returned. split_folio(page_folio(page)) cannot be used here
	 * because it converts the page to folio and passes the head
	 * page to be split.
	 */
	return split_huge_page_to_list_to_order(page, NULL, ret);
}
void deferred_split_folio(struct folio *folio, bool partially_mapped);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false);			\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pud_t *pudp, unsigned long addr, pgprot_t newprot,
		unsigned long cp_flags) { return 0; }
#endif

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
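
/*
 * Typical locking pattern (illustrative, compiled out): take the lock only
 * when the pmd is huge (or a huge swap entry); the entry stays stable until
 * the lock is dropped.
 */
#if 0
static void example_walk_huge_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (ptl) {
		/* *pmd is a stable huge (or swap) entry here */
		spin_unlock(ptl);
	}
}
#endif
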
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

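/*
 * Lifetime sketch (illustrative): mm_get_huge_zero_folio() takes a per-mm
 * reference on the shared huge zero folio, allocating it on first use;
 * mm_put_huge_zero_folio() drops that reference, typically when the mm is
 * torn down.
 */
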
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}

static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
	return 0;
}

static inline int try_folio_split(struct folio *folio, struct page *page,
		struct list_head *list)
{
	return 0;
}

static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}

static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
}

static inline int change_huge_pud(struct mmu_gather *tlb,
				  struct vm_area_struct *vma, pud_t *pudp,
				  unsigned long addr, pgprot_t newprot,
				  unsigned long cp_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#endif /* _LINUX_HUGE_MM_H */