/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
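/*
 * ksm_madvise() implements the MADV_MERGEABLE and MADV_UNMERGEABLE hints
 * for madvise(2). A minimal userspace sketch (illustrative only, not part
 * of this header):
 *
 *	if (madvise(addr, length, MADV_MERGEABLE))
 *		perror("madvise(MADV_MERGEABLE)");
 *
 * ksmd then scans the opted-in area and merges identical pages into a
 * single write-protected KSM page, with writes breaking COW as usual.
 */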
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
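/*
 * ksm_enable_merge_any() and ksm_disable_merge_any() back the per-process
 * prctl(2) interface. A sketch of the userspace side (illustrative only,
 * assuming a kernel with PR_SET_MEMORY_MERGE support):
 *
 *	prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);	/\* opt the whole mm in *\/
 *	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);	/\* opt back out *\/
 */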

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

/* Called when an mm is duplicated at fork(): propagate the parent's KSM state. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}

/* Called on mm teardown: drop the mm from ksmd if it was registered. */
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
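/*
 * A sketch of the expected caller pattern in the swap-in path (assumed
 * shape for illustration; see do_swap_page() in mm/memory.c for the real
 * code and error labels):
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out_page;
 *	}
 *
 * The result is either the original page (safe to map here) or a freshly
 * allocated copy when cross-anon_vma reuse would be unsafe.
 */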

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif

#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *);
#endif /* CONFIG_PROC_FS */
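/*
 * For reference, per Documentation/admin-guide/mm/ksm.rst the per-process
 * profit reported above is approximately:
 *
 *	ksm_merging_pages * PAGE_SIZE - ksm_rmap_items * sizeof(struct ksm_rmap_item)
 */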

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */