/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
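
/*
 * For reference, a minimal userspace sketch of how a region is opted into
 * KSM (illustrative only, not part of this header): the caller marks an
 * anonymous mapping MADV_MERGEABLE via <sys/mman.h>, and ksmd, once enabled
 * through /sys/kernel/mm/ksm/run, merges identical pages found inside it.
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 */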

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
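/*
 * ksm_madvise() is the back end for madvise(MADV_MERGEABLE) and
 * madvise(MADV_UNMERGEABLE): it adjusts *vm_flags for the vma, and the
 * first mergeable vma in an mm registers that mm with ksmd through
 * __ksm_enter().  __ksm_exit() drops the registration again when the mm
 * is torn down.
 */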
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

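/*
 * A forked child inherits the parent's KSM registration: if the parent mm
 * was marked MMF_VM_MERGEABLE, __ksm_enter() is called to register the new
 * mm as well.
 */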
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

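/*
 * Called when an mm is torn down: if this mm had been registered with KSM,
 * __ksm_exit() unhooks it so ksmd stops scanning it.
 */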
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
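/*
 * A sketch of the intended call pattern at swap-in (hypothetical caller
 * code; the exact do_swap_page() logic differs in detail):
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *
 * A page that used to be KSM comes back as a freshly allocated private
 * copy for this vma; any other page is returned unchanged.
 */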
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

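/*
 * rmap_walk_ksm() extends the reverse-map walk to KSM pages, visiting each
 * vma of every anon_vma that the shared stable page is mapped into, while
 * folio_migrate_ksm() carries the KSM stable-node linkage over from the old
 * folio to its migration target.
 */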
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#else /* !CONFIG_KSM */

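/*
 * Without CONFIG_KSM the hooks collapse into no-ops, so core mm code can
 * call them unconditionally: forking and exiting do nothing extra, and
 * (under CONFIG_MMU) ksm_might_need_to_copy() simply hands back the page
 * it was given.
 */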
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */