/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 * The protection always comes from vmf->vma->vm_page_prot; use
 * vmf_insert_pfn_pmd_prot() to supply a different pgprot.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
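
/*
 * Example (illustrative sketch, not part of this header): a driver's
 * huge_fault handler could use the wrapper above roughly like this.
 * my_huge_fault() and my_lookup_pfn() are hypothetical names:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf,
 *					enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn = my_lookup_pfn(vmf);		// hypothetical helper
 *
 *		if (pe_size == PE_SIZE_PMD)
 *			return vmf_insert_pfn_pmd(vmf, pfn,
 *					vmf->flags & FAULT_FLAG_WRITE);
 *		return VM_FAULT_FALLBACK;
 *	}
 */
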
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 * As above, the protection comes from vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
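
/*
 * For example, with 4KiB base pages and PMD_SHIFT == 21 (as on x86-64),
 * HPAGE_PMD_ORDER is 21 - 12 = 9 and HPAGE_PMD_NR is 1 << 9 = 512:
 * one PMD-sized hugepage covers 512 * 4KiB = 2MiB.
 */
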
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
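
/*
 * Under the same 4KiB-page x86-64 assumptions, HPAGE_PUD_SHIFT is 30,
 * so a PUD-sized hugepage covers 1GiB.
 */
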
extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()				\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
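
/*
 * For instance, hugepage_flags_enabled() is true after either "always"
 * or "madvise" has been written to
 * /sys/kernel/mm/transparent_hugepage/enabled, while
 * hugepage_flags_always() is true only for "always".
 */
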
/*
 * Do the below checks:
 *   - For file vma, check if the linear page offset of vma is
 *     HPAGE_PMD_NR aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
 *     area.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;
	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
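
/*
 * Example (assuming 2MiB HPAGE_PMD_SIZE): for an anonymous vma covering
 * [0x200000, 0x600000), a fault at addr 0x4f0000 rounds down to
 * haddr == 0x400000; since 0x400000 >= vm_start and
 * 0x400000 + 0x200000 <= vm_end, the address is suitable for a huge pmd.
 */
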
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;
	inode = vma->vm_file->f_inode;
	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma,
			unsigned long vm_flags,
			bool smaps, bool in_pf);

#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
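
/*
 * transparent_hugepage_use_zero_page() mirrors
 * /sys/kernel/mm/transparent_hugepage/use_zero_page: when set, read
 * faults on anonymous mappings can map the shared huge zero page
 * instead of allocating a new hugepage.
 */
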
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
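
/*
 * Example (illustrative): a caller holding a reference to a locked
 * huge page can attempt a split and fall back when it fails:
 *
 *	if (!can_split_folio(folio, NULL) ||
 *	    split_huge_page(&folio->page))
 *		// still huge; keep handling the page as one unit
 */
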
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
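
/*
 * Example (illustrative): code that needs to operate on individual
 * ptes can first downgrade any huge pmd in place:
 *
 *	split_huge_pmd(vma, pmd, address);
 *
 * This is a no-op unless *pmd is a huge, swap or devmap pmd.
 */
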
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
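
/*
 * Example (illustrative): the usual calling pattern, with mmap_lock
 * already held as required above:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		// *pmd is a stable huge, swap or devmap pmd until unlock
 *		...
 *		spin_unlock(ptl);
 *	}
 */
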
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
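
/*
 * Example (illustrative): users of the huge zero page take a per-mm
 * reference and drop it when the mm is done with it:
 *
 *	struct page *zero_page = mm_get_huge_zero_page(mm);
 *	if (zero_page)
 *		// map zero_page read-only ...
 *	// later, typically from the mm teardown path:
 *	mm_put_huge_zero_page(mm);
 */
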
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * See organization of tail pages of compound page in
	 * "struct page" definition.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags,
				      bool smaps, bool in_pf)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end, long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}
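
/*
 * Example (illustrative): with the folio locked and a reference held,
 *
 *	err = split_folio_to_list(folio, NULL);
 *
 * attempts to split it to base pages; with a NULL list the resulting
 * tail pages are returned to the LRU.
 */
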
/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support
 * THP_SWP due to implementation limitations (e.g. arm64 MTE) can
 * override this to false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif

#endif /* _LINUX_HUGE_MM_H */