/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
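
/*
 * Usage sketch (illustrative only, not a declaration in this header): a
 * driver's huge_fault handler for a PMD-sized fault might wrap a
 * device-computed pfn like this; "my_huge_fault" and "my_dev_pfn" are
 * hypothetical names:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf)
 *	{
 *		pfn_t pfn = my_dev_pfn(vmf->vma, vmf->address);
 *
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */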

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()					\
	(transparent_hugepage_flags &					\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |				\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()						\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))

/*
 * Do the below checks:
 *   - For file vma, check if the linear page offset of vma is
 *     HPAGE_PMD_NR aligned within the file. The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
 *     area.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
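
/*
 * Worked example (hypothetical numbers): with 2MB huge pages and 4K base
 * pages (HPAGE_PMD_NR == 512), a file mapping with vm_start == 0x200000
 * and vm_pgoff == 0 passes the pgoff check, because
 * (0x200000 >> PAGE_SHIFT) - 0 == 512 is HPAGE_PMD_NR-aligned. The same
 * mapping with vm_pgoff == 1 fails it: the PMD-aligned address 0x200000
 * would then map to file page offset 1, which is not HPAGE_PMD_NR-aligned,
 * so no hugepage in the page cache could be PMD-mapped there.
 */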

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
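
/*
 * Usage sketch (illustrative only): code that must continue at PTE
 * granularity first demotes any huge entry covering the address, then
 * walks the resulting page table; "vma", "pmd" and "addr" are assumed
 * to come from an ordinary page-table walk:
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	// If *pmd was huge it now points to a PTE page table, so
 *	// pte_offset_map_lock() and friends work as usual.
 */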

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
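
/*
 * Usage sketch (illustrative only): the returned pointer doubles as the
 * "is this a huge pmd?" answer, and the page table lock is still held
 * on a non-NULL return, so the caller must drop it:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		// *pmd is a stable huge (or swap/devmap) entry here
 *		spin_unlock(ptl);
 *	}
 */
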
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
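
/*
 * Usage sketch (illustrative only): the huge zero page is refcounted
 * per mm, so every successful get must eventually be balanced by a put
 * (the put side typically runs at mm teardown):
 *
 *	zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *	if (!zero_page)
 *		return VM_FAULT_FALLBACK;	// allocation failed
 *	...
 *	mm_put_huge_zero_page(mm);		// later, on mm teardown
 */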

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * See organization of tail pages of compound page in
	 * "struct page" definition.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags, bool smaps,
				      bool in_pf, bool enforce_sysfs)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

static inline int split_folio(struct folio *folio)
{
	return split_folio_to_list(folio, NULL);
}
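
/*
 * Usage sketch (illustrative only): splitting requires the folio to be
 * locked and can fail (e.g. when there are extra pins), so callers must
 * handle a non-zero return by continuing to treat the folio as large:
 *
 *	folio_lock(folio);
 *	if (split_folio(folio) == 0) {
 *		// split succeeded; "folio" is now order-0 and the
 *		// (former head) page remains locked
 *	}
 *	folio_unlock(folio);
 */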

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support
 * THP_SWP due to implementation limitations (e.g. arm64 MTE) can
 * override this to return false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
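
/*
 * Sketch of an override (based on the arm64 MTE case mentioned above;
 * details are an assumption, not defined by this header): an
 * architecture's <asm/pgtable.h> defines the macro so the #ifndef above
 * is skipped, and supplies its own predicate:
 *
 *	#define arch_thp_swp_supported arch_thp_swp_supported
 *	static inline bool arch_thp_swp_supported(void)
 *	{
 *		return !system_supports_mte();
 *	}
 */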

#endif /* _LINUX_HUGE_MM_H */