Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
ba76149f AA |
2 | #ifndef _LINUX_KHUGEPAGED_H |
3 | #define _LINUX_KHUGEPAGED_H | |
4 | ||
f7ccbae4 | 5 | #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ |
cd89fb06 | 6 | #include <linux/shmem_fs.h> |
f7ccbae4 | 7 | |
ba76149f AA |
8 | |
9 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
b46e756f KS |
10 | extern struct attribute_group khugepaged_attr_group; |
11 | ||
12 | extern int khugepaged_init(void); | |
13 | extern void khugepaged_destroy(void); | |
14 | extern int start_stop_khugepaged(void); | |
ba76149f AA |
15 | extern int __khugepaged_enter(struct mm_struct *mm); |
16 | extern void __khugepaged_exit(struct mm_struct *mm); | |
6d50e60c DR |
17 | extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
18 | unsigned long vm_flags); | |
4aab2be0 | 19 | extern void khugepaged_min_free_kbytes_update(void); |
27e1f827 SL |
#ifdef CONFIG_SHMEM
/* Collapse a PTE-mapped huge page at @addr back into a PMD mapping. */
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
/*
 * Without shmem there is no file-backed THP collapse, so there are no
 * PTE-mapped THPs to re-collapse: no-op stub.
 */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif
ba76149f AA |
28 | |
/*
 * Sysfs-controlled THP policy predicates, reading the global
 * transparent_hugepage_flags bitmask:
 *
 *  khugepaged_enabled()  - THP is enabled either globally ("always") or
 *                          for madvise(MADV_HUGEPAGE) regions.
 *  khugepaged_always()   - THP is enabled system-wide ("always").
 *  khugepaged_req_madv() - THP only for regions that requested it via
 *                          madvise(MADV_HUGEPAGE).
 *  khugepaged_defrag()   - khugepaged may perform direct compaction /
 *                          reclaim when allocating huge pages.
 */
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()				\
	(transparent_hugepage_flags &			\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
42 | ||
43 | static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) | |
44 | { | |
45 | if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags)) | |
46 | return __khugepaged_enter(mm); | |
47 | return 0; | |
48 | } | |
49 | ||
50 | static inline void khugepaged_exit(struct mm_struct *mm) | |
51 | { | |
52 | if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) | |
53 | __khugepaged_exit(mm); | |
54 | } | |
55 | ||
6d50e60c DR |
56 | static inline int khugepaged_enter(struct vm_area_struct *vma, |
57 | unsigned long vm_flags) | |
ba76149f AA |
58 | { |
59 | if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) | |
a664b2d8 | 60 | if ((khugepaged_always() || |
cd89fb06 | 61 | (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) || |
6d50e60c | 62 | (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && |
18600332 MH |
63 | !(vm_flags & VM_NOHUGEPAGE) && |
64 | !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) | |
ba76149f AA |
65 | if (__khugepaged_enter(vma->vm_mm)) |
66 | return -ENOMEM; | |
67 | return 0; | |
68 | } | |
69 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | |
/* THP disabled: fork needs no khugepaged registration; always succeeds. */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
/* THP disabled: nothing to unregister on mm teardown. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
6d50e60c DR |
/* THP disabled: no VMA is ever eligible; report success without action. */
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
6d50e60c DR |
/* THP disabled: VMA merges never require khugepaged registration. */
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
27e1f827 SL |
/* THP disabled: there are no PTE-mapped THPs to collapse. */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
4aab2be0 VB |
91 | |
/* THP disabled: khugepaged does not adjust min_free_kbytes. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}
ba76149f AA |
95 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
96 | ||
97 | #endif /* _LINUX_KHUGEPAGED_H */ |