/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd-sized pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd-sized pfn using the vma's default page protection
 * (vmf->vma->vm_page_prot). See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud-sized pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud-sized pfn using the vma's default page protection
 * (vmf->vma->vm_page_prot). See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
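
/*
 * Example (an illustrative sketch, not part of this header's API): a
 * driver's huge_fault handler for a PFN-backed mapping could use the
 * wrapper above roughly as follows; my_dev_pgoff_to_pfn() is a
 * hypothetical helper standing in for the driver's own pgoff-to-pfn
 * translation.
 *
 *	static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf,
 *					    enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn = my_dev_pgoff_to_pfn(vmf->vma, vmf->pgoff);
 *
 *		if (pe_size == PE_SIZE_PMD)
 *			return vmf_insert_pfn_pmd(vmf, pfn,
 *					vmf->flags & FAULT_FLAG_WRITE);
 *		return VM_FAULT_FALLBACK;
 *	}
 */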

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};
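
/*
 * These flag bits back the THP sysfs interface under
 * /sys/kernel/mm/transparent_hugepage/ ("enabled", "defrag" and
 * "use_zero_page") and are tested through transparent_hugepage_flags
 * declared below.
 */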

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT - PAGE_SHIFT)
#define HPAGE_PMD_NR (1 << HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
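
/*
 * A worked example for the IS_ALIGNED() check above: the virtual
 * address and the file offset must be congruent modulo HPAGE_PMD_NR
 * pages. With 4K pages and 2M PMDs (HPAGE_PMD_NR == 512), a vma whose
 * vm_start is 2M-aligned but whose vm_pgoff is 256 can never be
 * PMD-mapped, since the huge page covering pgoff 0-511 would have to
 * start 1M below vm_start.
 */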

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	/*
	 * Explicitly disabled through madvise (VM_NOHUGEPAGE) or
	 * prctl(PR_SET_THP_DISABLE) (MMF_DISABLE_THP).
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	return true;
}

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_active() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if the hardware/firmware has marked hugepage
	 * support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (!transhuge_vma_enabled(vma, vma->vm_flags))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
	    (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_active(struct vm_area_struct *vma);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1 << TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
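
/*
 * A typical split_huge_page() caller pattern (an illustrative sketch):
 * the page must be locked, and a nonzero return means the page could
 * not be split and is still huge.
 *
 *	lock_page(page);
 *	ret = split_huge_page(page);
 *	unlock_page(page);
 *	if (ret)
 *		...fall back to handling the huge page as one unit...
 */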

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
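
/*
 * Note that split_huge_pmd() is safe to call unconditionally: the
 * checks in the macro make it a no-op unless *pmd is a huge pmd, a
 * devmap pmd or a swap entry (e.g. a migration entry).
 */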

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
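
/*
 * A typical caller pattern for pmd_trans_huge_lock() (an illustrative
 * sketch): a NULL return means *pmd is not huge and the caller should
 * fall back to the pte level; a non-NULL return is the page table
 * lock, already taken, which must be released with spin_unlock().
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		...operate on the huge pmd...
 *		spin_unlock(ptl);
 *		return 0;
 *	}
 *	...fall through to pte-level handling...
 */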

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	/* Check pmd_present() first so pmd_pfn() only sees a valid pmd. */
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
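
/*
 * Usage note (a sketch of the intended pairing): mm_get_huge_zero_page()
 * takes at most one reference per mm on the global huge zero page,
 * allocating it on first use, and returns NULL on failure;
 * mm_put_huge_zero_page() drops that reference when the mm goes away,
 * not once per get.
 */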

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or memcg deferred split list is kept in the second
	 * tail page: the equivalent space in the first tail page is
	 * occupied by compound-page metadata (see struct page).
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
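
/*
 * The statement-expression form above makes any use of HPAGE_* in a
 * !CONFIG_TRANSPARENT_HUGEPAGE build fail at compile time via
 * BUILD_BUG(), rather than silently evaluating to a bogus constant.
 */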

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

#endif /* _LINUX_HUGE_MM_H */