include/linux/huge_mm.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
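
/*
 * Example (illustrative sketch, not part of this header): a DAX-style
 * driver's ->huge_fault() handler could service a PMD-sized fault with
 * vmf_insert_pfn_pmd(). mydrv_phys() is a hypothetical helper returning
 * the backing physical address for the faulting range.
 *
 *	static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf,
 *					   enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		pfn = phys_to_pfn_t(mydrv_phys(vmf), PFN_DEV | PFN_MAP);
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */
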
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
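
/*
 * For scale (assuming x86-64 with 4KiB base pages): PMD_SHIFT is 21 and
 * PUD_SHIFT is 30, so HPAGE_PMD_SIZE is 2MiB (HPAGE_PMD_NR == 512 base
 * pages) and HPAGE_PUD_SIZE is 1GiB. Other architectures differ.
 */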

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if the hardware/firmware has marked hugepage
	 * support as disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
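
/*
 * Worked example (assuming 2MiB THPs, i.e. HPAGE_PMD_NR == 512 with 4KiB
 * pages): a file-backed vma starting at virtual 0x400000 with
 * vm_pgoff == 0 passes the alignment check, since
 * (0x400000 >> PAGE_SHIFT) - 0 == 1024 is a multiple of 512. The same
 * vma with vm_pgoff == 1 fails it, because no PMD-aligned virtual
 * address would then map a huge-page-aligned file offset.
 */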

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
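
/*
 * Example (illustrative sketch, modelled on callers that must operate at
 * PTE granularity, such as the mprotect/mremap paths): split first, then
 * walk the individual page table entries.
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	if (pmd_trans_unstable(pmd))
 *		return 0;	// lost a race with another split/zap
 *	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 *	// ... operate on individual ptes ...
 *	pte_unmap_unlock(pte, ptl);
 */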

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

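/*
 * A pmd that is neither none nor present holds a non-present entry,
 * such as a swap or migration entry.
 */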
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
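
/*
 * Example (illustrative sketch; examine_huge_pmd() is a hypothetical
 * caller-supplied helper): a page-table walker typically tries the huge
 * lock first and falls back to a pte-level walk.
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		examine_huge_pmd(*pmd);	// *pmd cannot be split under ptl
 *		spin_unlock(ptl);
 *		return;
 *	}
 *	// not huge (or lost a race): walk the ptes instead
 */
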
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
	return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global (or memcg) deferred list lives in the second tail
	 * page; the corresponding space in the first tail page is
	 * occupied by compound_head and the other compound metadata.
	 */
	return &page[2].deferred_list;
}
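
/*
 * Example (illustrative sketch; follows the unqueue pattern used by
 * free_transhuge_page() in mm/huge_memory.c, where ds_queue is the
 * page's struct deferred_split):
 *
 *	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 *	if (!list_empty(page_deferred_list(page))) {
 *		ds_queue->split_queue_len--;
 *		list_del(page_deferred_list(page));
 *	}
 *	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 */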

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return page;
}

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}
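
/*
 * For example (assuming 4KiB base pages): thp_size() of a PMD-mapped THP
 * head page is PAGE_SIZE << 9 == 2MiB, while any order-0 page yields
 * PAGE_SIZE.
 */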

#endif /* _LINUX_HUGE_MM_H */