/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
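/*
 * Backs the madvise(2) entry point for KSM: userspace opts a range into
 * merging (or back out of it) with, e.g.:
 *
 *	madvise(addr, length, MADV_MERGEABLE);
 *
 * and madvise() then calls down here with MADV_MERGEABLE or
 * MADV_UNMERGEABLE as the advice value.
 */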
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

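/*
 * Process-wide merging: ksm_enable_merge_any() and ksm_disable_merge_any()
 * back prctl(PR_SET_MEMORY_MERGE, 1/0, 0, 0, 0), which marks all current
 * (and, via ksm_add_vma(), future) compatible VMAs in the mm as mergeable.
 */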
void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

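/*
 * Low-level registration of an mm with the ksmd scanner; the wrappers
 * below (and mm/ksm.c itself) call these once the relevant MMF_* flag
 * checks have been done.
 */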
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;

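/*
 * Account a zeropage that KSM just mapped in place of a page full of
 * zeroes: both the global and the per-mm counters are bumped.
 */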
static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

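/*
 * Undo the accounting above when a PTE that is_ksm_zero_pte() identifies
 * as a KSM-placed zeropage mapping is torn down (e.g. on unmap).
 */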
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

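/*
 * A child of an mm that is registered with ksmd inherits its mergeable
 * areas via copy-on-write, so it must be registered as well.
 */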
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);

	return 0;
}

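/*
 * The process-wide MMF_VM_MERGE_ANY mode is preserved across execve(); if
 * the fresh mm carries the flag, it too must be registered with ksmd.
 */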
static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
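/*
 * Callers substitute the returned folio for the one they passed in; the
 * result may be the original folio, a freshly allocated copy, or an
 * error/NULL (sketch only, see mm/ksm.c for the exact conventions):
 *
 *	folio = ksm_might_need_to_copy(folio, vma, addr);
 */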

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early);
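/*
 * Per-process profit estimate: roughly the bytes saved by merging for
 * this mm minus KSM's metadata overhead for it; the exact formula lives
 * in mm/ksm.c and is documented in Documentation/admin-guide/mm/ksm.rst.
 */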
long ksm_process_profit(struct mm_struct *mm);

#else /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(struct folio *folio, struct page *page,
				     struct list_head *to_kill, int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */